diff --git a/changelogs/changelog.yaml b/changelogs/changelog.yaml index ab1365966f..1652aff9e5 100644 --- a/changelogs/changelog.yaml +++ b/changelogs/changelog.yaml @@ -1754,3 +1754,31 @@ releases: - Changes in sda_host_port_onboarding_workflow_manager module - Changes in template_workflow_manager module - Added attribute native_vlan_id and allowed_vlan_ranges in sda_host_port_onboarding_workflow_manager module + + 6.42.0: + release_date: "2025-11-17" + changes: + release_summary: Enhancements in workflow manager modules + minor_changes: + - New module 'backup_and_restore_workflow_manager' for backup and restore workflow management + - New module 'fabric_devices_info_workflow_manager' for gathering fabric device information + - New module 'network_devices_info_workflow_manager' for gathering network device information + - New module 'reports_workflow_manager' for managing reports + - New module 'wired_campus_automation_workflow_manager' for managing wired campus automation + - Enhancements in 'lan_automation_workflow_manager' to support port channel + - Enhancements in 'pnp_workflow_manager' to support authorization + - Changes in accesspoint_workflow_manager module + - Changes in application_policy_workflow_manager module + - Changes in ise_radius_integration_workflow_manager module + - Changes in network_profile_wireless_workflow_manager module + - Changes in network_settings_workflow_manager module + - Changes in provision_workflow_manager module + - Changes in sda_fabric_sites_zones_workflow_manager module + - Changes in swim_workflow_manager module + - Changes in template_workflow_manager module + - Changes in wireless_design_workflow_manager module + - Added 'feature_template_designs' attribute in network_profile_wireless_workflow_manager module + - Added 'authorize' attribute in network_profile_wireless_workflow_manager module + - Added 'ap_authorization_list_name', 'authorize_mesh_and_non_mesh_aps', 'feature_template' attributes in provision_workflow_manager module + - Added 'image_name', sync_cco, image_distribution_timeout, device_tag, image_activation_timeout, compatible_devices in swim_workflow_manager module + - Added 'profile_names' in template_workflow_manager module diff --git a/galaxy.yml b/galaxy.yml index 390ac44aec..ee680c2e44 100644 --- a/galaxy.yml +++ b/galaxy.yml @@ -1,7 +1,7 @@ --- namespace: cisco name: dnac -version: 6.40.0 +version: 6.42.0 readme: README.md authors: - Rafael Campos diff --git a/playbooks/backup_and_restore_workflow_manager.yml b/playbooks/backup_and_restore_workflow_manager.yml new file mode 100644 index 0000000000..bdd28d24b6 --- /dev/null +++ b/playbooks/backup_and_restore_workflow_manager.yml @@ -0,0 +1,94 @@ +--- +- name: Backup and restore operations + hosts: localhost + connection: local + gather_facts: false + + vars_files: + - "credentials.yml" + - "backup_secrets.yml" + + vars: + dnac_login: &dnac_login + dnac_host: "{{ dnac_host }}" + dnac_username: "{{ dnac_username }}" + dnac_password: "{{ dnac_password }}" + dnac_verify: "{{ dnac_verify }}" + dnac_port: "{{ dnac_port }}" + dnac_version: "{{ dnac_version }}" + dnac_debug: "{{ dnac_debug }}" + dnac_log: true + dnac_log_level: "DEBUG" + config_verify: true + dnac_api_task_timeout: 4000 + dnac_task_poll_interval: 1 + + tasks: + - name: Configure NFS server for secure backup storage connectivity + cisco.dnac.backup_and_restore_workflow_manager: + <<: *dnac_login + state: merged + config: + - nfs_configuration: + - server_ip: "{{ nfs_configuration.server_ip }}" + source_path: "{{ 
nfs_configuration.source_path }}" + nfs_port: 2049 + nfs_version: nfs4 + nfs_portmapper_port: 111 + tags: nfs_configuration + + - name: Configure backup target with encryption and data retention policies + cisco.dnac.backup_and_restore_workflow_manager: + <<: *dnac_login + state: merged + config: + - backup_storage_configuration: + - server_type: NFS + nfs_details: + server_ip: "{{ backup_configuration.server_ip }}" + source_path: "{{ backup_configuration.source_path }}" + nfs_port: 2049 + nfs_version: nfs4 + nfs_portmapper_port: 111 + data_retention_period: 51 + encryption_passphrase: "{{ backup_configuration.encryption_passphrase }}" + tags: backup_configuration + + - name: Create backup with name and scope specifications + cisco.dnac.backup_and_restore_workflow_manager: + <<: *dnac_login + state: merged + config: + - backup: + - name: BACKUP29_09 + scope: CISCO_DNA_DATA_WITHOUT_ASSURANCE + tags: backup_creation + + - name: Restore backup + cisco.dnac.backup_and_restore_workflow_manager: + <<: *dnac_login + state: merged + config: + - restore_operations: + - name: "BACKUP29_09" + encryption_passphrase: "{{ restore_operations.encryption_passphrase }}" + tags: restore_backup + + - name: Delete NFS configuration from backup infrastructure + cisco.dnac.backup_and_restore_workflow_manager: + <<: *dnac_login + state: deleted + config: + - nfs_configuration: + - server_ip: "{{ nfs_configuration.server_ip }}" + source_path: "{{ nfs_configuration.source_path }}" + tags: delete_nfs_configuration + + - name: Delete backups for backup lifecycle management + cisco.dnac.backup_and_restore_workflow_manager: + <<: *dnac_login + state: deleted + config: + - backup: + - name: BACKUP29_09 + tags: delete_backup diff --git a/playbooks/fabric_devices_info_workflow_manager.yml b/playbooks/fabric_devices_info_workflow_manager.yml new file mode 100644 index 0000000000..f52a74f56e --- /dev/null +++ b/playbooks/fabric_devices_info_workflow_manager.yml @@ -0,0 +1,45 @@ +--- +- name: Get Fabric devices info from Cisco Catalyst Center + hosts: localhost + connection: local + vars_files: + - "credentials.yml" + tasks: + - name: Get Fabric devices info from Cisco Catalyst Center + cisco.dnac.fabric_devices_info_workflow_manager: + dnac_host: "{{ dnac_host }}" + dnac_username: "{{ dnac_username }}" + dnac_password: "{{ dnac_password }}" + dnac_verify: "{{ dnac_verify }}" + dnac_port: "{{ dnac_port }}" + dnac_version: "{{ dnac_version }}" + dnac_debug: "{{ dnac_debug }}" + dnac_log: true + dnac_log_level: DEBUG + config_verify: true + dnac_api_task_timeout: 1000 + dnac_task_poll_interval: 1 + state: gathered + config: + - fabric_devices: + - fabric_site_hierarchy: "Global/rishipat_area/Fabric-area-1" # Mandatory parameter + fabric_device_role: "EDGE_NODE" + device_identifier: + - ip_address: ["192.168.200.69"] + - serial_number: ["FOC2443L1VQ"] + - hostname: ["Fabric-9300-2-2.rcdnlabcead.com"] + timeout: 30 + retries: 3 + interval: 10 + requested_info: + - fabric_info + - handoff_info + - onboarding_info + - connected_devices_info + - device_health_info + - device_issues_info + output_file_info: + file_path: /Users/priyadharshini/Downloads/fabric_device_info + file_format: yaml + file_mode: w + timestamp: false diff --git a/playbooks/network_devices_info_workflow_manager.yml b/playbooks/network_devices_info_workflow_manager.yml new file mode 100644 index 0000000000..d6462b61fc --- /dev/null +++ b/playbooks/network_devices_info_workflow_manager.yml @@ -0,0 +1,58 @@ +--- +- name: Get network devices info on Cisco Catalyst 
Center + hosts: localhost + connection: local + vars_files: + - "credentials.yml" + tasks: + - name: Get Network Devices Info on Cisco Catalyst Center + cisco.dnac.network_devices_info_workflow_manager: + dnac_host: "{{ dnac_host }}" + dnac_username: "{{ dnac_username }}" + dnac_password: "{{ dnac_password }}" + dnac_verify: "{{ dnac_verify }}" + dnac_port: "{{ dnac_port }}" + dnac_version: "{{ dnac_version }}" + dnac_debug: "{{ dnac_debug }}" + dnac_log: true + dnac_log_level: DEBUG + config_verify: false + dnac_api_task_timeout: 1000 + dnac_task_poll_interval: 1 + state: gathered + config: + - network_devices: + - site_hierarchy: Global/USA/SAN JOSE + device_type: "Cisco Catalyst 9300 Switch" + device_role: "ACCESS" + device_family: "Switches and Hubs" + software_version: "17.12.1" + os_type: "IOS-XE" + device_identifier: + - ip_address: ["204.1.2.1"] + - serial_number: ["FCW2137L0SB"] + - hostname: ["SJ-BN-9300.cisco.local"] + - mac_address: ["90:88:55:90:26:00"] + timeout: 60 + retries: 3 + interval: 10 + requested_info: + - device_info + - interface_info + - interface_vlan_info + - line_card_info + - supervisor_card_info + - poe_info + - module_count_info + - connected_device_info + - device_interfaces_by_range_info + - device_config_info + - device_summary_info + - device_polling_interval_info + - device_stack_info + - device_link_mismatch_info + output_file_info: + file_path: /Users/priyadharshini/Downloads/info + file_format: json + file_mode: w + timestamp: true diff --git a/playbooks/network_profile_wireless_workflow_manager.yml b/playbooks/network_profile_wireless_workflow_manager.yml index 93a1985980..58df448151 100644 --- a/playbooks/network_profile_wireless_workflow_manager.yml +++ b/playbooks/network_profile_wireless_workflow_manager.yml @@ -47,6 +47,14 @@ vlan_id: 22 day_n_templates: - "WLC_Standard_Config" + feature_template_designs: + - design_type: CLEANAIR_CONFIGURATION + feature_templates: + - Campus Wireless Profile Clean Air 5GHz + - Campus Wireless Profile Clean Air 6GHz + - design_type: ADVANCED_SSID_CONFIGURATION + feature_templates: + - Campus Wireless Profile design - profile_name: "Enterprise_Wireless_Profile" site_names: @@ -75,4 +83,12 @@ vlan_id: 35 day_n_templates: - WLC_Advanced_Config + feature_template_designs: + - design_type: ADVANCED_SSID_CONFIGURATION + feature_templates: + - Enterprise Wireless Advanced SSID Design + applicability_ssids: + - HQ_WiFi + - Branch_Secure + register: output_list diff --git a/playbooks/pnp_workflow_manager.yml b/playbooks/pnp_workflow_manager.yml index 4fd9f4e181..3ed1c51c10 100644 --- a/playbooks/pnp_workflow_manager.yml +++ b/playbooks/pnp_workflow_manager.yml @@ -32,6 +32,7 @@ state: Unclaimed pid: c9300-24P is_sudi_required: false + authorize: true - serial_number: QTC2320E0H9 state: Unclaimed @@ -55,6 +56,7 @@ state: Unclaimed pid: c9300-24P is_sudi_required: true + authorize: true - name: Claim a pre-added switch, apply a template, and perform an image upgrade for a specific site cisco.dnac.pnp_workflow_manager: diff --git a/playbooks/provision_workflow_manager.yml b/playbooks/provision_workflow_manager.yml index 20fab74c18..3bb1c97612 100644 --- a/playbooks/provision_workflow_manager.yml +++ b/playbooks/provision_workflow_manager.yml @@ -22,5 +22,15 @@ dnac_task_poll_interval: 1 state: merged config: - - site_name_hierarchy: Global/Chennai/LTTS/FLOOR1 + - site_name_hierarchy: Global/USA/SAN JOSE/BLD23 management_ip_address: 1.1.1.1 + primary_managed_ap_locations: + - Global/USA/SAN JOSE/BLD23/FLOOR1_LEVEL2 + 
ap_authorization_list_name: "AP-Auth-List" + authorize_mesh_and_non_mesh_aps: false + feature_template: + - design_name: test + additional_identifiers: + wlan_profile_name: ARUBA_SSID_profile + site_name_hierarchy: Global/USA/SAN JOSE/BLD23 + excluded_attributes: [certificate_settings] diff --git a/playbooks/reports_workflow_manager.yml b/playbooks/reports_workflow_manager.yml new file mode 100644 index 0000000000..e50b626868 --- /dev/null +++ b/playbooks/reports_workflow_manager.yml @@ -0,0 +1,45 @@ +--- +- name: Configure reports on Cisco Catalyst Center + hosts: dnac_servers + vars_files: + - credentials.yml + gather_facts: false + connection: local + tasks: + - name: Create/Schedule a report configuration. + cisco.dnac.reports_workflow_manager: + dnac_host: "{{ dnac_host }}" + dnac_port: "{{ dnac_port }}" + dnac_username: "{{ dnac_username }}" + dnac_password: "{{ dnac_password }}" + dnac_verify: "{{ dnac_verify }}" + dnac_version: "{{ dnac_version }}" + dnac_debug: "{{ dnac_debug }}" + dnac_log_level: DEBUG + dnac_log: true + state: merged + config_verify: true + config: + - generate_report: + - name: "compliance_report" + view_group_name: "Compliance" + deliveries: + - delivery_type: "DOWNLOAD" + file_path: "/Users/mekandar/Desktop" + schedule: + schedule_type: "SCHEDULE_NOW" + time_zone: "Asia/Calcutta" + view: + view_name: "Network Device Compliance" + field_groups: + - name: "inventoryAllData" + display_name: "All Data" + format: + format_type: "CSV" + filters: + - name: "Location" + display_name: "Location" + filter_type: "MULTI_SELECT_TREE" + value: + - value: "Global/India" + tags: [] diff --git a/playbooks/template_workflow_manager.yml b/playbooks/template_workflow_manager.yml index 473e1000ee..a6be0ce0ce 100644 --- a/playbooks/template_workflow_manager.yml +++ b/playbooks/template_workflow_manager.yml @@ -37,6 +37,8 @@ version_description: "{{ item.description }}" language: "{{ item.language }}" software_type: "{{ item.type }}" + profile_names: + - "{{ item.profiles }}" device_types: - product_family: "{{ item.family }}" export: diff --git a/playbooks/wired_campus_automation_workflow_manager.yml b/playbooks/wired_campus_automation_workflow_manager.yml new file mode 100644 index 0000000000..8ce393e635 --- /dev/null +++ b/playbooks/wired_campus_automation_workflow_manager.yml @@ -0,0 +1,998 @@ +--- +# =================================================================================================== +# WIRED CAMPUS AUTOMATION WORKFLOW MANAGER - EXAMPLE PLAYBOOK +# =================================================================================================== +# +# This playbook demonstrates comprehensive usage examples for the Wired Campus Automation +# Workflow Manager module. It includes positive scenarios for both merged and deleted states +# with inline configurations for easy reference. 
+# +# FEATURES COVERED: +# - VLANs (create, update, delete) +# - CDP (configure, reset) +# - LLDP (configure, reset) +# - STP (configure, reset/delete instances) +# - VTP (configure, reset) +# - DHCP Snooping (configure, reset) +# - IGMP Snooping (configure, reset) +# - MLD Snooping (configure, reset) +# - Authentication (configure, reset) +# - Logical Ports (configure port channels) +# - Port Configuration (interface-level settings) +# - Comprehensive multi-feature configurations +# +# =================================================================================================== + +- name: Cisco DNA Center Wired Campus Automation Examples + hosts: dnac_servers + gather_facts: false + vars_files: + - "credentials.yml" + vars: + # DNA Center connection parameters + dnac_host: "{{ ansible_host }}" + dnac_username: "{{ username }}" + dnac_password: "{{ password }}" + dnac_verify: false + dnac_port: 443 + dnac_version: "3.1.3.0" + dnac_debug: false + + # Common login anchor for reuse + dnac_login: &dnac_login + dnac_host: "{{ dnac_host }}" + dnac_username: "{{ dnac_username }}" + dnac_password: "{{ dnac_password }}" + dnac_verify: "{{ dnac_verify }}" + dnac_port: "{{ dnac_port }}" + dnac_version: "{{ dnac_version }}" + dnac_debug: "{{ dnac_debug }}" + dnac_log: true + dnac_log_level: INFO + + tasks: + # ============================================================================= + # VLAN CONFIGURATION EXAMPLES + # ============================================================================= + + - name: "EXAMPLE: Create multiple VLANs with comprehensive settings" + cisco.dnac.wired_campus_automation_workflow_manager: + <<: *dnac_login + state: merged + config: + - ip_address: "204.1.2.3" + device_collection_status_check: false + layer2_configuration: + vlans: + - vlan_id: 100 + vlan_name: "Production_Network" + vlan_admin_status: true + - vlan_id: 200 + vlan_name: "Development_Network" + vlan_admin_status: true + - vlan_id: 300 + vlan_name: "Guest_Network" + vlan_admin_status: false + register: result_vlans_create + tags: [vlans, create] + + - name: "EXAMPLE: Update existing VLAN settings" + cisco.dnac.wired_campus_automation_workflow_manager: + <<: *dnac_login + state: merged + config: + - ip_address: "204.1.2.3" + device_collection_status_check: false + layer2_configuration: + vlans: + - vlan_id: 300 + vlan_name: "Guest_Network_Updated" + vlan_admin_status: true + register: result_vlans_update + tags: [vlans, update] + + - name: "EXAMPLE: Delete specific VLANs" + cisco.dnac.wired_campus_automation_workflow_manager: + <<: *dnac_login + state: deleted + config: + - ip_address: "204.1.2.3" + device_collection_status_check: false + layer2_configuration: + vlans: + - vlan_id: 300 + register: result_vlans_delete + tags: [vlans, delete] + + - name: "EXAMPLE: Delete specific VLANs with full configuration" + cisco.dnac.wired_campus_automation_workflow_manager: + <<: *dnac_login + state: deleted + config: + - ip_address: "204.1.2.3" + device_collection_status_check: false + layer2_configuration: + vlans: + - vlan_id: 100 + vlan_name: "Production_Network" + vlan_admin_status: true + - vlan_id: 200 + vlan_name: "Development_Network" + vlan_admin_status: true + - vlan_id: 300 + vlan_name: "Guest_Network" + vlan_admin_status: false + register: result_vlans_delete_full_config + tags: [vlans, delete] + + # ============================================================================= + # CDP CONFIGURATION EXAMPLES + # ============================================================================= + + - 
name: "EXAMPLE: Configure CDP with all parameters" + cisco.dnac.wired_campus_automation_workflow_manager: + <<: *dnac_login + state: merged + config: + - ip_address: "204.1.2.3" + device_collection_status_check: false + layer2_configuration: + cdp: + cdp_admin_status: true + cdp_hold_time: 180 + cdp_timer: 60 + cdp_advertise_v2: true + cdp_log_duplex_mismatch: true + register: result_cdp_configure + tags: [cdp, configure] + + - name: "EXAMPLE: Update CDP timer settings only" + cisco.dnac.wired_campus_automation_workflow_manager: + <<: *dnac_login + state: merged + config: + - ip_address: "204.1.2.3" + device_collection_status_check: false + layer2_configuration: + cdp: + cdp_hold_time: 240 + cdp_timer: 90 + register: result_cdp_update + tags: [cdp, update] + + - name: "EXAMPLE: Reset CDP to default settings" + cisco.dnac.wired_campus_automation_workflow_manager: + <<: *dnac_login + state: deleted + config: + - ip_address: "204.1.2.3" + device_collection_status_check: false + layer2_configuration: + cdp: {} + register: result_cdp_reset + tags: [cdp, reset] + + - name: "EXAMPLE: Reset CDP to default settings while providing full configuration" + cisco.dnac.wired_campus_automation_workflow_manager: + <<: *dnac_login + state: deleted + config: + - ip_address: "204.1.2.3" + device_collection_status_check: false + layer2_configuration: + cdp: + cdp_admin_status: true + cdp_hold_time: 180 + cdp_timer: 60 + cdp_advertise_v2: true + cdp_log_duplex_mismatch: true + register: result_cdp_reset_full_config + tags: [cdp, reset] + + # ============================================================================= + # LLDP CONFIGURATION EXAMPLES + # ============================================================================= + + - name: "EXAMPLE: Configure LLDP with comprehensive settings" + cisco.dnac.wired_campus_automation_workflow_manager: + <<: *dnac_login + state: merged + config: + - ip_address: "204.1.2.3" + device_collection_status_check: false + layer2_configuration: + lldp: + lldp_admin_status: true + lldp_hold_time: 300 + lldp_timer: 30 + lldp_reinitialization_delay: 5 + register: result_lldp_configure + tags: [lldp, configure] + + - name: "EXAMPLE: Configure LLDP with boundary values" + cisco.dnac.wired_campus_automation_workflow_manager: + <<: *dnac_login + state: merged + config: + - ip_address: "204.1.2.3" + device_collection_status_check: false + layer2_configuration: + lldp: + lldp_admin_status: true + lldp_hold_time: 0 # No aging + lldp_timer: 5 # Minimum timer + lldp_reinitialization_delay: 2 # Minimum delay + register: result_lldp_boundary + tags: [lldp, boundary] + + - name: "EXAMPLE: Reset LLDP to default settings" + cisco.dnac.wired_campus_automation_workflow_manager: + <<: *dnac_login + state: deleted + config: + - ip_address: "204.1.2.3" + device_collection_status_check: false + layer2_configuration: + lldp: {} + register: result_lldp_reset + tags: [lldp, reset] + + - name: "EXAMPLE: Reset LLDP to default settings while providing full configuration" + cisco.dnac.wired_campus_automation_workflow_manager: + <<: *dnac_login + state: deleted + config: + - ip_address: "204.1.2.3" + device_collection_status_check: false + layer2_configuration: + lldp: + lldp_admin_status: true + lldp_hold_time: 300 + lldp_timer: 30 + lldp_reinitialization_delay: 5 + register: result_lldp_reset_full_config + tags: [lldp, reset] + + # ============================================================================= + # STP CONFIGURATION EXAMPLES + # 
============================================================================= + + - name: "EXAMPLE: Configure STP with global settings and instances" + cisco.dnac.wired_campus_automation_workflow_manager: + <<: *dnac_login + state: merged + config: + - ip_address: "204.1.2.3" + device_collection_status_check: false + layer2_configuration: + stp: + stp_mode: "MST" + stp_portfast_mode: "ENABLE" + stp_bpdu_guard: true + stp_bpdu_filter: false + stp_backbonefast: true + stp_extended_system_id: true + stp_logging: true + stp_loopguard: false + stp_transmit_hold_count: 6 + stp_uplinkfast: false + stp_uplinkfast_max_update_rate: 200 + stp_etherchannel_guard: true + stp_instances: + - stp_instance_vlan_id: 100 + stp_instance_priority: 32768 + enable_stp: true + stp_instance_max_age_timer: 20 + stp_instance_hello_interval_timer: 2 + stp_instance_forward_delay_timer: 15 + - stp_instance_vlan_id: 200 + stp_instance_priority: 16384 + enable_stp: true + register: result_stp_configure + tags: [stp, configure] + + - name: "EXAMPLE: Update STP mode and add new instance" + cisco.dnac.wired_campus_automation_workflow_manager: + <<: *dnac_login + state: merged + config: + - ip_address: "204.1.2.3" + device_collection_status_check: false + layer2_configuration: + stp: + stp_mode: "RSTP" + stp_instances: + - stp_instance_vlan_id: 400 + stp_instance_priority: 8192 + enable_stp: true + register: result_stp_update + tags: [stp, update] + + - name: "EXAMPLE: Delete specific STP instances" + cisco.dnac.wired_campus_automation_workflow_manager: + <<: *dnac_login + state: deleted + config: + - ip_address: "204.1.2.3" + device_collection_status_check: false + layer2_configuration: + stp: + stp_instances: + - stp_instance_vlan_id: 400 + register: result_stp_delete_instances + tags: [stp, delete] + + - name: "EXAMPLE: Reset STP to default settings" + cisco.dnac.wired_campus_automation_workflow_manager: + <<: *dnac_login + state: deleted + config: + - ip_address: "204.1.2.3" + device_collection_status_check: false + layer2_configuration: + stp: {} + register: result_stp_reset + tags: [stp, reset] + + - name: "EXAMPLE: Delete specific STP instances while providing full configuration" + cisco.dnac.wired_campus_automation_workflow_manager: + <<: *dnac_login + state: deleted + config: + - ip_address: "204.1.2.3" + device_collection_status_check: false + layer2_configuration: + stp: + stp_mode: "MST" + stp_portfast_mode: "ENABLE" + stp_bpdu_guard: true + stp_bpdu_filter: false + stp_backbonefast: true + stp_extended_system_id: true + stp_logging: true + stp_loopguard: false + stp_transmit_hold_count: 6 + stp_uplinkfast: false + stp_uplinkfast_max_update_rate: 200 + stp_etherchannel_guard: true + stp_instances: + - stp_instance_vlan_id: 100 + stp_instance_priority: 32768 + enable_stp: true + stp_instance_max_age_timer: 20 + stp_instance_hello_interval_timer: 2 + stp_instance_forward_delay_timer: 15 + - stp_instance_vlan_id: 200 + stp_instance_priority: 16384 + enable_stp: true + register: result_stp_delete_instances_full_config + tags: [stp, delete] + + # ============================================================================= + # VTP CONFIGURATION EXAMPLES + # ============================================================================= + + - name: "EXAMPLE: Configure VTP with comprehensive settings" + cisco.dnac.wired_campus_automation_workflow_manager: + <<: *dnac_login + state: merged + config: + - ip_address: "204.1.2.3" + device_collection_status_check: false + layer2_configuration: + vtp: + vtp_mode: "TRANSPARENT" 
+ vtp_version: "VERSION_2" + vtp_domain_name: "CORPORATE_DOMAIN" + vtp_pruning: true + vtp_configuration_file_name: "flash:vtp_config.dat" + vtp_source_interface: "Loopback0" + register: result_vtp_configure + tags: [vtp, configure] + + - name: "EXAMPLE: Update VTP to client mode" + cisco.dnac.wired_campus_automation_workflow_manager: + <<: *dnac_login + state: merged + config: + - ip_address: "204.1.2.3" + device_collection_status_check: false + layer2_configuration: + vtp: + vtp_mode: "CLIENT" + vtp_pruning: false + register: result_vtp_update + tags: [vtp, update] + + - name: "EXAMPLE: Reset VTP to default settings" + cisco.dnac.wired_campus_automation_workflow_manager: + <<: *dnac_login + state: deleted + config: + - ip_address: "204.1.2.3" + device_collection_status_check: false + layer2_configuration: + vtp: {} + register: result_vtp_reset + tags: [vtp, reset] + + - name: "EXAMPLE: Reset VTP to default settings while providing full configuration" + cisco.dnac.wired_campus_automation_workflow_manager: + <<: *dnac_login + state: deleted + config: + - ip_address: "204.1.2.3" + device_collection_status_check: false + layer2_configuration: + vtp: + vtp_mode: "TRANSPARENT" + vtp_version: "VERSION_2" + vtp_domain_name: "CORPORATE_DOMAIN" + vtp_pruning: true + vtp_configuration_file_name: "flash:vtp_config.dat" + vtp_source_interface: "Loopback0" + register: result_vtp_reset_full_config + tags: [vtp, reset] + + # ============================================================================= + # DHCP SNOOPING CONFIGURATION EXAMPLES + # ============================================================================= + + - name: "EXAMPLE: Configure DHCP Snooping with comprehensive settings" + cisco.dnac.wired_campus_automation_workflow_manager: + <<: *dnac_login + state: merged + config: + - ip_address: "204.1.2.3" + device_collection_status_check: false + layer2_configuration: + dhcp_snooping: + dhcp_admin_status: true + dhcp_snooping_vlans: [100, 200, 300] + dhcp_snooping_glean: true + dhcp_snooping_database_agent_url: "tftp://192.168.1.100/dhcp_binding.db" + dhcp_snooping_database_timeout: 600 + dhcp_snooping_database_write_delay: 300 + dhcp_snooping_proxy_bridge_vlans: [100, 200] + register: result_dhcp_snooping_configure + tags: [dhcp_snooping, configure] + + - name: "EXAMPLE: Update DHCP Snooping VLAN list" + cisco.dnac.wired_campus_automation_workflow_manager: + <<: *dnac_login + state: merged + config: + - ip_address: "204.1.2.3" + device_collection_status_check: false + layer2_configuration: + dhcp_snooping: + dhcp_snooping_vlans: [100, 200, 400, 500] + register: result_dhcp_snooping_update + tags: [dhcp_snooping, update] + + - name: "EXAMPLE: Reset DHCP Snooping to default settings" + cisco.dnac.wired_campus_automation_workflow_manager: + <<: *dnac_login + state: deleted + config: + - ip_address: "204.1.2.3" + device_collection_status_check: false + layer2_configuration: + dhcp_snooping: {} + register: result_dhcp_snooping_reset + tags: [dhcp_snooping, reset] + + - name: "EXAMPLE: Reset DHCP Snooping to default settings while providing full configuration" + cisco.dnac.wired_campus_automation_workflow_manager: + <<: *dnac_login + state: deleted + config: + - ip_address: "204.1.2.3" + device_collection_status_check: false + layer2_configuration: + dhcp_snooping: + dhcp_admin_status: true + dhcp_snooping_vlans: [100, 200, 300] + dhcp_snooping_glean: true + dhcp_snooping_database_agent_url: "tftp://192.168.1.100/dhcp_binding.db" + dhcp_snooping_database_timeout: 600 + 
dhcp_snooping_database_write_delay: 300 + dhcp_snooping_proxy_bridge_vlans: [100, 200] + register: result_dhcp_snooping_reset_full_config + tags: [dhcp_snooping, reset] + + # ============================================================================= + # IGMP SNOOPING CONFIGURATION EXAMPLES + # ============================================================================= + + - name: "EXAMPLE: Configure IGMP Snooping with global and VLAN-specific settings" + cisco.dnac.wired_campus_automation_workflow_manager: + <<: *dnac_login + state: merged + config: + - ip_address: "204.1.2.3" + device_collection_status_check: false + layer2_configuration: + igmp_snooping: + enable_igmp_snooping: true + igmp_snooping_querier: false + igmp_snooping_querier_address: "192.168.1.10" + igmp_snooping_querier_version: "VERSION_2" + igmp_snooping_querier_query_interval: 125 + igmp_snooping_vlans: + - igmp_snooping_vlan_id: 100 + enable_igmp_snooping: true + igmp_snooping_querier: false + igmp_snooping_querier_address: "192.168.1.11" + igmp_snooping_querier_version: "VERSION_2" + igmp_snooping_querier_query_interval: 125 + igmp_snooping_mrouter_port_list: ["GigabitEthernet1/0/1", "GigabitEthernet1/0/2"] + - igmp_snooping_vlan_id: 200 + enable_igmp_snooping: true + igmp_snooping_querier: true + igmp_snooping_querier_version: "VERSION_3" + igmp_snooping_querier_query_interval: 90 + register: result_igmp_snooping_configure + tags: [igmp_snooping, configure] + + - name: "EXAMPLE: Update IGMP Snooping VLAN settings" + cisco.dnac.wired_campus_automation_workflow_manager: + <<: *dnac_login + state: merged + config: + - ip_address: "204.1.2.3" + device_collection_status_check: false + layer2_configuration: + igmp_snooping: + igmp_snooping_vlans: + - igmp_snooping_vlan_id: 300 + enable_igmp_snooping: true + igmp_snooping_querier: false + igmp_snooping_querier_query_interval: 180 + register: result_igmp_snooping_update + tags: [igmp_snooping, update] + + # ============================================================================= + # MLD SNOOPING CONFIGURATION EXAMPLES + # ============================================================================= + + - name: "EXAMPLE: Configure MLD Snooping with comprehensive IPv6 settings" + cisco.dnac.wired_campus_automation_workflow_manager: + <<: *dnac_login + state: merged + config: + - ip_address: "204.1.2.3" + device_collection_status_check: false + layer2_configuration: + mld_snooping: + enable_mld_snooping: true + mld_snooping_querier: false + mld_snooping_querier_address: "fe80::1" + mld_snooping_querier_version: "VERSION_2" + mld_snooping_listener: true + mld_snooping_querier_query_interval: 125 + mld_snooping_vlans: + - mld_snooping_vlan_id: 100 + enable_mld_snooping: true + mld_snooping_enable_immediate_leave: false + mld_snooping_querier: false + mld_snooping_querier_address: "fe80::10" + mld_snooping_querier_version: "VERSION_2" + mld_snooping_querier_query_interval: 125 + mld_snooping_mrouter_port_list: ["GigabitEthernet1/0/3", "GigabitEthernet1/0/4"] + - mld_snooping_vlan_id: 200 + enable_mld_snooping: true + mld_snooping_enable_immediate_leave: true + mld_snooping_querier: true + mld_snooping_querier_version: "VERSION_1" + register: result_mld_snooping_configure + tags: [mld_snooping, configure] + + # ============================================================================= + # AUTHENTICATION CONFIGURATION EXAMPLES + # ============================================================================= + + - name: "EXAMPLE: Enable 802.1X Authentication with new 
style configuration" + cisco.dnac.wired_campus_automation_workflow_manager: + <<: *dnac_login + state: merged + config: + - ip_address: "204.1.2.3" + device_collection_status_check: false + layer2_configuration: + authentication: + enable_dot1x_authentication: true + authentication_config_mode: "NEW_STYLE" + register: result_authentication_configure + tags: [authentication, configure] + + - name: "EXAMPLE: Switch to legacy authentication mode" + cisco.dnac.wired_campus_automation_workflow_manager: + <<: *dnac_login + state: merged + config: + - ip_address: "204.1.2.3" + device_collection_status_check: false + layer2_configuration: + authentication: + authentication_config_mode: "LEGACY" + register: result_authentication_update + tags: [authentication, update] + + - name: "EXAMPLE: Reset Authentication to default settings" + cisco.dnac.wired_campus_automation_workflow_manager: + <<: *dnac_login + state: deleted + config: + - ip_address: "204.1.2.3" + device_collection_status_check: false + layer2_configuration: + authentication: {} + register: result_authentication_reset + tags: [authentication, reset] + + - name: "EXAMPLE: Reset Authentication to default settings while providing full configuration" + cisco.dnac.wired_campus_automation_workflow_manager: + <<: *dnac_login + state: deleted + config: + - ip_address: "204.1.2.3" + device_collection_status_check: false + layer2_configuration: + authentication: + enable_dot1x_authentication: true + authentication_config_mode: "NEW_STYLE" + register: result_authentication_reset_full_config + tags: [authentication, reset] + + # ============================================================================= + # LOGICAL PORTS (PORT CHANNELS) CONFIGURATION EXAMPLES + # ============================================================================= + + - name: "EXAMPLE: Configure multiple port channels with different protocols" + cisco.dnac.wired_campus_automation_workflow_manager: + <<: *dnac_login + state: merged + config: + - ip_address: "204.1.2.3" + device_collection_status_check: false + layer2_configuration: + logical_ports: + port_channel_auto: false + port_channel_lacp_system_priority: 4096 + port_channel_load_balancing_method: "SRC_DST_MIXED_IP_PORT" + port_channels: + - port_channel_protocol: "LACP" + port_channel_name: "Port-channel1" + port_channel_min_links: 2 + port_channel_members: + - port_channel_interface_name: "GigabitEthernet1/0/10" + port_channel_mode: "ACTIVE" + port_channel_port_priority: 128 + port_channel_rate: 30 + - port_channel_interface_name: "GigabitEthernet1/0/11" + port_channel_mode: "ACTIVE" + port_channel_port_priority: 128 + port_channel_rate: 30 + - port_channel_protocol: "PAGP" + port_channel_name: "Port-channel2" + port_channel_min_links: 1 + port_channel_members: + - port_channel_interface_name: "GigabitEthernet1/0/12" + port_channel_mode: "DESIRABLE" + port_channel_port_priority: 128 + port_channel_learn_method: "AGGREGATION_PORT" + register: result_logical_ports_configure + tags: [logical_ports, configure] + + - name: "EXAMPLE: Update port channel configuration and add new member" + cisco.dnac.wired_campus_automation_workflow_manager: + <<: *dnac_login + state: merged + config: + - ip_address: "204.1.2.3" + device_collection_status_check: false + layer2_configuration: + logical_ports: + port_channel_load_balancing_method: "SRC_DST_MAC" + port_channels: + - port_channel_protocol: "LACP" + port_channel_name: "Port-channel1" + port_channel_members: + - port_channel_interface_name: "GigabitEthernet1/0/13" + 
port_channel_mode: "ACTIVE" + port_channel_port_priority: 256 + port_channel_rate: 1 + register: result_logical_ports_update + tags: [logical_ports, update] + + - name: "EXAMPLE: Configure static port channel (no protocol)" + cisco.dnac.wired_campus_automation_workflow_manager: + <<: *dnac_login + state: merged + config: + - ip_address: "204.1.2.3" + device_collection_status_check: false + layer2_configuration: + logical_ports: + port_channel_auto: false + port_channels: + - port_channel_protocol: "NONE" + port_channel_name: "Port-channel3" + port_channel_min_links: 2 + port_channel_members: + - port_channel_interface_name: "GigabitEthernet1/0/14" + port_channel_mode: "ON" + - port_channel_interface_name: "GigabitEthernet1/0/15" + port_channel_mode: "ON" + register: result_logical_ports_static + tags: [logical_ports, static] + + # ============================================================================= + # PORT CONFIGURATION EXAMPLES + # ============================================================================= + + - name: "EXAMPLE: Configure access port with comprehensive settings" + cisco.dnac.wired_campus_automation_workflow_manager: + <<: *dnac_login + state: merged + config: + - ip_address: "204.1.2.3" + device_collection_status_check: false + layer2_configuration: + port_configuration: + - interface_name: "GigabitEthernet1/0/5" + switchport_interface_config: + switchport_description: "Access Port - Production Network" + switchport_mode: "ACCESS" + access_vlan: 100 + admin_status: true + voice_vlan: 200 + vlan_trunking_interface_config: + enable_dtp_negotiation: "AUTO" + protected: false + dot1x_interface_config: + dot1x_interface_authentication_order: ["DOT1X", "MAB"] + dot1x_interface_authentication_mode: "OPEN" + dot1x_interface_pae_type: "AUTHENTICATOR" + dot1x_interface_control_direction: "BOTH" + dot1x_interface_host_mode: "MULTI_AUTHENTICATION" + dot1x_interface_port_control: "AUTO" + dot1x_interface_inactivity_timer: 300 + dot1x_interface_max_reauth_requests: 3 + dot1x_interface_reauth_timer: 3600 + mab_interface_config: + mab_interface_enable: true + stp_interface_config: + stp_interface_enable_portfast: true + stp_interface_enable_bpdu_guard: true + stp_interface_enable_bpdu_filter: false + stp_interface_enable_root_guard: false + stp_interface_enable_loop_guard: false + stp_interface_port_priority: 128 + stp_interface_cost: 19 + dhcp_snooping_interface_config: + dhcp_snooping_interface_rate_limit: 100 + dhcp_snooping_interface_trust: true + cdp_interface_config: + cdp_interface_admin_status: true + cdp_interface_logging: true + lldp_interface_config: + lldp_interface_transmit: true + lldp_interface_receive: true + vtp_interface_config: + vtp_interface_admin_status: true + register: result_port_config_access + tags: [port_configuration, access] + + - name: "EXAMPLE: Configure trunk port with VLAN settings" + cisco.dnac.wired_campus_automation_workflow_manager: + <<: *dnac_login + state: merged + config: + - ip_address: "204.1.2.3" + device_collection_status_check: false + layer2_configuration: + port_configuration: + - interface_name: "GigabitEthernet1/0/6" + switchport_interface_config: + switchport_description: "Trunk Port - Inter-Switch Link" + switchport_mode: "TRUNK" + allowed_vlans: [100, 200, 300, 400] + native_vlan_id: 100 + admin_status: true + vlan_trunking_interface_config: + enable_dtp_negotiation: "ON" + protected: true + pruning_vlan_ids: [300, 400] + stp_interface_config: + stp_interface_enable_portfast: false + stp_interface_enable_bpdu_guard: false + 
stp_interface_enable_bpdu_filter: false + stp_interface_enable_root_guard: true + stp_interface_enable_loop_guard: true + stp_interface_port_priority: 64 + stp_interface_cost: 100 + register: result_port_config_trunk + tags: [port_configuration, trunk] + + - name: "EXAMPLE: Update interface description and VLAN assignment" + cisco.dnac.wired_campus_automation_workflow_manager: + <<: *dnac_login + state: merged + config: + - ip_address: "204.1.2.3" + device_collection_status_check: false + layer2_configuration: + port_configuration: + - interface_name: "GigabitEthernet1/0/5" + switchport_interface_config: + switchport_description: "Updated Access Port - Security Network" + access_vlan: 300 + register: result_port_config_update + tags: [port_configuration, update] + + # ============================================================================= + # COMPREHENSIVE MULTI-FEATURE EXAMPLES + # ============================================================================= + + - name: "EXAMPLE: Comprehensive network configuration with all features" + cisco.dnac.wired_campus_automation_workflow_manager: + <<: *dnac_login + state: merged + config: + - ip_address: "204.1.2.3" + device_collection_status_check: false + layer2_configuration: + # VLANs for the complete network + vlans: + - vlan_id: 10 + vlan_name: "Management" + vlan_admin_status: true + - vlan_id: 20 + vlan_name: "Production" + vlan_admin_status: true + - vlan_id: 30 + vlan_name: "Development" + vlan_admin_status: true + - vlan_id: 40 + vlan_name: "Guest" + vlan_admin_status: true + + # Discovery protocols + cdp: + cdp_admin_status: true + cdp_hold_time: 180 + cdp_timer: 60 + cdp_advertise_v2: true + cdp_log_duplex_mismatch: true + + lldp: + lldp_admin_status: true + lldp_hold_time: 240 + lldp_timer: 30 + lldp_reinitialization_delay: 3 + + # Spanning Tree Protocol + stp: + stp_mode: "RSTP" + stp_portfast_mode: "ENABLE" + stp_bpdu_guard: true + stp_bpdu_filter: false + stp_backbonefast: true + stp_extended_system_id: true + stp_logging: true + stp_instances: + - stp_instance_vlan_id: 10 + stp_instance_priority: 32768 + enable_stp: true + - stp_instance_vlan_id: 20 + stp_instance_priority: 16384 + enable_stp: true + + # VLAN Trunking Protocol + vtp: + vtp_mode: "SERVER" + vtp_version: "VERSION_2" + vtp_domain_name: "ENTERPRISE_DOMAIN" + vtp_pruning: true + + # Security features + dhcp_snooping: + dhcp_admin_status: true + dhcp_snooping_vlans: [20, 30, 40] + dhcp_snooping_glean: true + + igmp_snooping: + enable_igmp_snooping: true + igmp_snooping_querier: false + igmp_snooping_querier_version: "VERSION_2" + igmp_snooping_vlans: + - igmp_snooping_vlan_id: 20 + enable_igmp_snooping: true + igmp_snooping_querier: false + + # Authentication + authentication: + enable_dot1x_authentication: true + authentication_config_mode: "NEW_STYLE" + + # Port channels for redundancy + logical_ports: + port_channel_auto: false + port_channel_lacp_system_priority: 8192 + port_channel_load_balancing_method: "SRC_DST_IP" + port_channels: + - port_channel_protocol: "LACP" + port_channel_name: "Port-channel10" + port_channel_min_links: 2 + port_channel_members: + - port_channel_interface_name: "GigabitEthernet1/0/16" + port_channel_mode: "ACTIVE" + port_channel_port_priority: 128 + port_channel_rate: 30 + - port_channel_interface_name: "GigabitEthernet1/0/17" + port_channel_mode: "ACTIVE" + port_channel_port_priority: 128 + port_channel_rate: 30 + + # Interface configurations + port_configuration: + - interface_name: "GigabitEthernet1/0/1" + 
switchport_interface_config: + switchport_description: "Management Port" + switchport_mode: "ACCESS" + access_vlan: 10 + admin_status: true + stp_interface_config: + stp_interface_enable_portfast: true + stp_interface_enable_bpdu_guard: true + dhcp_snooping_interface_config: + dhcp_snooping_interface_trust: true + - interface_name: "GigabitEthernet1/0/2" + switchport_interface_config: + switchport_description: "Production User Port" + switchport_mode: "ACCESS" + access_vlan: 20 + admin_status: true + dot1x_interface_config: + dot1x_interface_authentication_order: ["DOT1X", "MAB"] + dot1x_interface_port_control: "AUTO" + stp_interface_config: + stp_interface_enable_portfast: true + register: result_comprehensive_config + tags: [comprehensive, all_features] + + - name: "EXAMPLE: Comprehensive cleanup - Reset all features to defaults" + cisco.dnac.wired_campus_automation_workflow_manager: + <<: *dnac_login + state: deleted + config: + - ip_address: "204.1.2.3" + device_collection_status_check: false + layer2_configuration: + # Delete test VLANs + vlans: + - vlan_id: 10 + - vlan_id: 20 + - vlan_id: 30 + - vlan_id: 40 + - vlan_id: 100 + - vlan_id: 200 + - vlan_id: 300 + - vlan_id: 400 + - vlan_id: 500 + + # Reset all protocols to defaults + cdp: {} + lldp: {} + vtp: {} + dhcp_snooping: {} + igmp_snooping: {} + mld_snooping: {} + authentication: {} + + # Reset STP and delete instances + stp: {} + register: result_comprehensive_cleanup + tags: [cleanup, reset_all] diff --git a/plugins/module_utils/dnac.py b/plugins/module_utils/dnac.py index 74c1b87f34..7ea2431abe 100644 --- a/plugins/module_utils/dnac.py +++ b/plugins/module_utils/dnac.py @@ -58,6 +58,7 @@ def __init__(self, module): self.dnac = DNACSDK(params=dnac_params) self.dnac_apply = {'exec': self.dnac._exec} self.get_diff_state_apply = {'merged': self.get_diff_merged, + 'queried': self.get_diff_queried, 'deleted': self.get_diff_deleted, 'replaced': self.get_diff_replaced, 'overridden': self.get_diff_overridden, @@ -117,7 +118,7 @@ def __init__(self, module): masked_config = self.get_safe_log_config(masked_config) self.log('Cisco Catalyst Center parameters: {0}'.format(masked_config), "DEBUG") - self.supported_states = ["merged", "deleted", "replaced", "overridden", "gathered", "rendered", "parsed"] + self.supported_states = ["merged", "queried", "deleted", "replaced", "overridden", "gathered", "rendered", "parsed"] self.result = {"changed": False, "diff": [], "response": [], "warnings": []} def compare_dnac_versions(self, version1, version2): @@ -192,6 +193,11 @@ def get_diff_merged(self): self.merged = True return self + def get_diff_queried(self): + # Implement logic to query the resource configuration + self.queried = True + return self + def get_diff_deleted(self): # Implement logic to delete the resource self.deleted = True @@ -902,7 +908,10 @@ def get_site_id(self, site_name): # Check if the response is empty if response is None: - self.msg = "No site details retrieved for site name: {0}".format(site_name) + self.msg = ( + f"The site '{site_name}' does not exist in the Catalyst Center. " + "Please create the site using the 'cisco.dnac.site_workflow_manager' module." + ) self.fail_and_exit(self.msg) self.log("Site details retrieved for site '{0}'': {1}".format(site_name, str(response)), "DEBUG") @@ -1892,6 +1901,115 @@ def check_tasks_response_status(self, response, api_name): return self + def remove_nulls(self, obj): + """ + Recursively remove keys or elements with None values from dictionaries and lists. 
+ + This function traverses the given object and removes: + - Any dictionary key-value pairs where the value is None. + - Any list elements that are None. + Nested dictionaries and lists are processed recursively. + + Parameters: + obj (dict | list | any): The object to clean. Can be a dictionary, + a list, or any other type. Non-dict/list values are returned as-is. + + Returns: + dict | list | any: A new object of the same type as `obj`, but with + all None values removed. If `obj` is a dict or list, the result + will also be a dict or list with cleaned contents. For other types, + the object is returned unchanged. + + Description: + - Recursively processes nested data structures to remove null values + - Preserves original data structure while cleaning null entries + - Handles complex nested combinations of dictionaries and lists + - Logs processing workflow for traceability + """ + self.log( + "Starting null value removal for data structure of type={0}".format(type(obj).__name__), + "DEBUG" + ) + + if isinstance(obj, dict): + self.log("Processing dictionary with {0} keys for null removal".format(len(obj)), "DEBUG") + cleaned_dict = {} + removed_keys = [] + + for k, v in obj.items(): + if v is not None: + cleaned_dict[k] = self.remove_nulls(v) + else: + removed_keys.append(k) + + if removed_keys: + self.log( + "Removed {0} null keys from dictionary: {1}".format( + len(removed_keys), removed_keys + ), + "DEBUG" + ) + + self.log( + "Completed dictionary null removal: {0} keys retained from {1} original keys".format( + len(cleaned_dict), len(obj) + ), + "DEBUG" + ) + return cleaned_dict + + if isinstance(obj, list): + self.log("Processing list with {0} elements for null removal".format(len(obj)), "DEBUG") + cleaned_list = [] + removed_count = 0 + + for v in obj: + if v is not None: + cleaned_list.append(self.remove_nulls(v)) + else: + removed_count += 1 + + if removed_count > 0: + self.log( + "Removed {0} null elements from list".format(removed_count), + "DEBUG" + ) + + self.log( + "Completed list null removal: {0} elements retained from {1} original elements".format( + len(cleaned_list), len(obj) + ), + "DEBUG" + ) + return cleaned_list + + self.log( + "Processing non-container type {0} - returning as-is".format(type(obj).__name__), + "DEBUG" + ) + return obj + + def snake_to_camel(self, snake_str): + """Convert snake_case string to camelCase.""" + parts = snake_str.split('_') + return parts[0] + ''.join(word.capitalize() for word in parts[1:]) + + def convert_keys_to_camel_case(self, data): + """ + Recursively convert all dict keys from snake_case to camelCase. + Handles dicts, lists, and nested structures. + """ + if isinstance(data, dict): + new_dict = {} + for k, v in data.items(): + new_key = self.snake_to_camel(k) + new_dict[new_key] = self.convert_keys_to_camel_case(v) + return new_dict + elif isinstance(data, list): + return [self.convert_keys_to_camel_case(item) for item in data] + else: + return data + def set_operation_result(self, operation_status, is_changed, status_message, log_level, additional_info=None): """ Update the result of the operation with the provided status, message, and log level. 
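
The dnac.py hunk above adds three small helpers (remove_nulls, snake_to_camel, convert_keys_to_camel_case) for cleaning and re-keying nested payloads before they are sent to the API. The following standalone sketch is not part of the patch; it simply mirrors the behaviour described in the new docstrings on a hypothetical payload, with the DnacBase class plumbing and debug logging omitted:

    # Sketch of the new dnac.py helpers, stripped of DnacBase logging (assumption: behaviour as documented above).
    def snake_to_camel(snake_str):
        # "stp_instances" -> "stpInstances"
        parts = snake_str.split('_')
        return parts[0] + ''.join(word.capitalize() for word in parts[1:])

    def remove_nulls(obj):
        # Drop None values from dicts and lists, recursing into nested structures.
        if isinstance(obj, dict):
            return {k: remove_nulls(v) for k, v in obj.items() if v is not None}
        if isinstance(obj, list):
            return [remove_nulls(v) for v in obj if v is not None]
        return obj

    def convert_keys_to_camel_case(data):
        # Rename every dict key from snake_case to camelCase, recursively.
        if isinstance(data, dict):
            return {snake_to_camel(k): convert_keys_to_camel_case(v) for k, v in data.items()}
        if isinstance(data, list):
            return [convert_keys_to_camel_case(item) for item in data]
        return data

    # Hypothetical playbook-style input:
    payload = {"vlan_id": 100, "vlan_name": None, "stp_instances": [{"enable_stp": True, "priority": None}]}
    print(convert_keys_to_camel_case(remove_nulls(payload)))
    # {'vlanId': 100, 'stpInstances': [{'enableStp': True}]}
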
diff --git a/plugins/module_utils/network_profiles.py b/plugins/module_utils/network_profiles.py index a6dc58e8ba..6211021fae 100644 --- a/plugins/module_utils/network_profiles.py +++ b/plugins/module_utils/network_profiles.py @@ -799,3 +799,76 @@ def find_duplicate_value(self, config_list, key_name): seen.add(value) return list(duplicates) + + def deduplicate_list_of_dict(self, list_of_dicts): + """ + Removes duplicate dictionaries from a list. + + Args: + list_of_dicts (list): A list of dictionaries to deduplicate. + + Returns: + list: A list of unique dictionaries (duplicates removed). + + Description: + Iterates through a list of dictionaries and removes duplicates based on their content. + Uses a content-based comparison approach where dictionaries with identical key-value pairs + are considered duplicates. The first occurrence of each unique dictionary is preserved. + Empty lists and non-list inputs are handled gracefully. + """ + self.log("Initiating deduplication process for dictionary list", "DEBUG") + + # Input validation + if not isinstance(list_of_dicts, list): + self.log("Input is not a list, returning empty list. Input type: {0}".format( + type(list_of_dicts).__name__), "WARNING") + return [] + + if not list_of_dicts: + self.log("Empty list provided for deduplication - returning empty list", "DEBUG") + return [] + + original_count = len(list_of_dicts) + self.log("Starting deduplication for list with {0} dictionaries: {1}".format( + original_count, self.pprint(list_of_dicts)), "DEBUG") + + seen = set() + unique_dicts = [] + duplicates_found = 0 + + for index, d in enumerate(list_of_dicts): + # Validate that each item is a dictionary + if not isinstance(d, dict): + self.log("Skipping non-dictionary item at index {0}: {1} (type: {2})".format( + index, d, type(d).__name__), "WARNING") + continue + + # Convert dictionary to a tuple of sorted items (hashable representation) + try: + identifier = tuple(sorted(d.items())) + except TypeError as e: + self.log("Cannot create hashable identifier for dictionary at index {0}: {1}. Error: {2}".format( + index, d, str(e)), "WARNING") + # For unhashable values, fall back to string representation + identifier = str(sorted(d.items())) + + if identifier not in seen: + seen.add(identifier) + unique_dicts.append(d) + self.log("Added unique dictionary at index {0} to result list".format(index), "DEBUG") + else: + duplicates_found += 1 + self.log("Found duplicate dictionary at index {0} - skipping".format(index), "DEBUG") + + final_count = len(unique_dicts) + + if duplicates_found > 0: + self.log("Deduplication completed: removed {0} duplicate(s) from {1} total items. 
Final count: {2}".format( + duplicates_found, original_count, final_count), "INFO") + else: + self.log("Deduplication completed: no duplicates found in {0} items".format( + original_count), "DEBUG") + + self.log("Deduplicated list result: {0}".format(self.pprint(unique_dicts)), "DEBUG") + + return unique_dicts diff --git a/plugins/module_utils/validation.py b/plugins/module_utils/validation.py index de9d4daf6b..0314304b7d 100644 --- a/plugins/module_utils/validation.py +++ b/plugins/module_utils/validation.py @@ -268,6 +268,7 @@ def validate_dict(item, param_spec, param_name, invalid_params, module=None): "bool": validate_bool, "list": validate_list, "dict": validate_dict, + "raw": lambda item, *_: item, } validator = switch.get(data_type) @@ -345,6 +346,7 @@ def validate_list_of_dicts(param_list, spec, module=None): "bool": validate_bool, "list": validate_list, "dict": validate_dict, + "raw": lambda item, *_: item, } validator = switch.get(data_type) diff --git a/plugins/modules/accesspoint_workflow_manager.py b/plugins/modules/accesspoint_workflow_manager.py index 90f61b0856..8917b07974 100644 --- a/plugins/modules/accesspoint_workflow_manager.py +++ b/plugins/modules/accesspoint_workflow_manager.py @@ -30,8 +30,10 @@ - Abhishek Maheshwari (@abmahesh) options: config_verify: - description: Set to true to verify the Cisco Catalyst Center configuration after - applying the playbook config. + description: > + Indicates whether configuration verification is enabled. + This flag is always set to false. As a result, some field changes + may not exhibit idempotent behavior due to Access Point reboots. type: bool default: false state: @@ -119,8 +121,10 @@ type: str required: false led_status: - description: State of the AP's LED. Accepts "Enabled" or "Disabled". For example, - "Enabled". + description: > + Specifies led status for the access point accepts "Enabled" or "Disabled". + When a led brightness level is provided, the led status defaults to "Enabled". + For example, "Enabled". type: str required: false led_brightness_level: @@ -255,8 +259,9 @@ required: false antenna_gain: description: | - Specifies the antenna gain value in decibels (dB) for the 2.4GHz radio interface, valid values range - from 0 to 40. For example, 10. + Specifies the antenna gain value in decibels (dB) for the 2.4GHz radio interface, + and antenna gain must be greater than cable loss. + Valid values range from 0 to 20. For example, 10. type: int required: false radio_role_assignment: @@ -271,7 +276,7 @@ required: false cable_loss: description: | - Cable loss in dB for the 2.4GHz radio interface. Valid values are from 0 to 40. + Cable loss in dB for the 2.4GHz radio interface. Valid values are from 0 to 20. This value must be less than the antenna gain. For example, 2. type: int required: false @@ -281,23 +286,33 @@ type: str required: false channel_assignment_mode: - description: Mode of channel assignment for the 2.4GHz radio interface. - Accepts "Global" or "Custom". For example, "Custom". + description: > + Specifies the channel assignment for the 2.4GHz radio interface. + If the channel assignment is set to "Global", the channel_number is not required. + When a channel_number is provided, the channel assignment defaults to "Custom". + Accepts "Global" or "Custom". For example: "Global". type: str required: false channel_number: - description: Custom channel number configured for the 2.4GHz radio interface. - For example, 6. + description: > + Defines the custom channel number for the 2.4GHz radio interface. 
+ When a channel number is provided, the channel assignment defaults to "Custom". + Valid values range from 1 to 14. For example: 3. type: int required: false power_assignment_mode: - description: Mode of power assignment for the 2.4GHz radio interface. - Accepts "Global" or "Custom". For example, "Custom". + description: > + Specifies the power assignment mode for the 2.4GHz radio interface. + If the power assignment mode is set to "Global", the power level is not required. + When a power level is provided, the power assignment mode automatically defaults to "Custom". + Accepts "Global" or "Custom". For example: "Global". type: str required: false power_level: - description: Custom power level configured for the 2.4GHz radio interface. - For example, 3. + description: > + Defines the custom power level for the 2.4GHz radio interface. + When a power level is provided, the power assignment mode automatically defaults to "Custom". + Valid values range from 1 to 8. For example: 3. type: int required: false 5ghz_radio: @@ -317,8 +332,9 @@ required: false antenna_gain: description: | - Antenna gain value in decibels (dB) for the 5GHz radio interface, valid values range - from 0 to 40. For example, 5. + Antenna gain value in decibels (dB) for the 5GHz radio interface, + and antenna gain must be greater than cable loss. + Valid values range from 0 to 20. For example, 5. type: int required: false radio_role_assignment: @@ -333,7 +349,7 @@ required: false cable_loss: description: | - Cable loss in dB for the 5GHz radio interface. Valid values are from 0 to 40. + Cable loss in dB for the 5GHz radio interface. Valid values are from 0 to 20. This value must be less than the antenna gain. For example, 3. type: int required: false @@ -343,29 +359,41 @@ type: str required: false channel_assignment_mode: - description: Mode of channel assignment for the 5GHz radio interface. - Accepts "Global" or "Custom". For example, "Custom". + description: > + Specifies the channel assignment for the 5GHz radio interface. + If the channel assignment is set to "Global", the channel_number is not required. + When a channel_number is provided, the channel assignment defaults to "Custom". + Accepts "Global" or "Custom". For example: "Global". type: str required: false channel_number: - description: Custom channel number configured for the 5GHz radio interface. - For example, 36. + description: > + Defines the custom channel number for the 5GHz radio interface. + When a channel number is provided, the channel assignment defaults to "Custom". + Valid values range from 36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, 112, 116, 120, + 124, 128, 132, 136, 140, 144, 149, 153, 157, 161, 165, 169, 173. + For example: 36. type: int required: false channel_width: description: | - Width of the channel configured for the XOR radio interface. Accepts values + Width of the channel configured for the 5GHz radio interface. Accepts values "20 MHz", "40 MHz", "80 MHz" or "160 MHz". For example, 20 MHz. type: str required: false power_assignment_mode: - description: Mode of power assignment for the 5GHz radio interface. Accepts - "Global" or "Custom". For example, "Custom". + description: > + Specifies the power assignment mode for the 5 GHz radio interface. + If the power assignment mode is set to "Global", the power level is not required. + When a power level is provided, the power assignment mode automatically defaults to "Custom". + Accepts "Global" or "Custom". For example: "Global". 
type: str required: false power_level: - description: Custom power level configured for the 5GHz radio interface. - For example, 3. + description: > + Defines the custom power level for the 5 GHz radio interface. + When a power level is provided, the power assignment mode automatically defaults to "Custom". + Valid values range from 1 to 8. For example: 3. type: int required: false 6ghz_radio: @@ -385,8 +413,9 @@ required: false antenna_gain: description: | - Antenna gain value in decibels (dB) for the 6GHz radio interface, valid values range - from 0 to 40. For example, 30. + Antenna gain value in decibels (dB) for the 6GHz radio interface, + and antenna gain must be greater than cable loss. Valid values range + from 0 to 40. For example, 10. type: int required: false radio_role_assignment: @@ -411,29 +440,43 @@ type: str required: false channel_assignment_mode: - description: Mode of channel assignment for the 6GHz radio interface. - Accepts "Global" or "Custom". For example, "Custom". + description: > + Specifies the channel assignment for the 6GHz radio interface. + If the channel assignment is set to "Global", the channel_number is not required. + When a channel_number is provided, the channel assignment defaults to "Custom". + Accepts "Global" or "Custom". For example: "Global". type: str required: false channel_number: - description: Custom channel number configured for the 6GHz radio interface. - For example, 6. + description: > + Defines the custom channel number for the 6GHz radio interface. + When a channel number is provided, the channel assignment defaults to "Custom". + Valid values range from 1, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45, 49, 53, + 57, 61, 65, 69, 73, 77, 81, 85, 89, 93, 97, 101, 105, 109, 113, 117, 121, + 125, 129, 133, 137, 141, 145, 149, 153, 157, 161, 165, 169, 173, 177, 181, + 185, 189, 193, 197, 201, 205, 209, 213, 217, 221, 225, 229, 233. + For example: 53. type: int required: false channel_width: description: | - Width of the channel configured for the XOR radio interface. Accepts values + Width of the channel configured for the 6GHz radio interface. Accepts values "20 MHz", "40 MHz", "80 MHz", "160 MHz" or "320 MHz". For example, 20 MHz. type: str required: false power_assignment_mode: - description: Mode of power assignment for the 6GHz radio interface. Accepts - "Global" or "Custom". For example, "Custom". + description: > + Specifies the power assignment mode for the 6GHz radio interface. + If the power assignment mode is set to "Global", the power level is not required. + When a power level is provided, the power assignment mode automatically defaults to "Custom". + Accepts "Global" or "Custom". For example: "Global". type: str required: false power_level: - description: Custom power level configured for the 6GHz radio interface. - For example, 3. + description: > + Defines the custom power level for the 6GHz radio interface. + When a power level is provided, the power assignment mode automatically defaults to "Custom". + Valid values range from 1 to 8. For example: 3. type: int required: false xor_radio: @@ -453,7 +496,8 @@ required: false antenna_gain: description: | - Antenna gain value in decibels (dB) for the XOR radio interface, valid values range + Antenna gain value in decibels (dB) for the XOR radio interface, + and antenna gain must be greater than cable loss. Valid values range from 0 to 40. For example, 14. 
type: int required: false @@ -489,7 +533,9 @@ required: false channel_assignment_mode: description: | - Mode of channel assignment for the XOR radio interface. Accepts "Global" or "Custom". + If the channel assignment mode is set to "Global", the channel number is not required. + When a channel number is provided, the channel assignment mode defaults to "Custom". + Accepts "Global" or "Custom". For example: "Global". - For "Custom" mode and a radio band of "2.4 GHz", valid values are from 1 to 14. - For "Custom" mode and a radio band of "5 GHz", valid values are 36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, @@ -507,6 +553,18 @@ required: false channel_number: description: Custom channel number configured for the XOR radio interface. + - For "Custom" mode and a radio band of "2.4 GHz", valid values are from 1 to 14. + - For "Custom" mode and a radio band of "5 GHz", valid values are + 36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, + 112, 116, 120, 124, 128, 132, 136, 140, 144, + 149, 153, 157, 161, 165, 169, 173. + - For "Custom" mode and a radio band of "6 GHz", valid values are + 1, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45, 49, + 53, 57, 61, 65, 69, 73, 77, 81, 85, 89, 93, 97, + 101, 105, 109, 113, 117, 121, 125, 129, 133, 137, + 141, 145, 149, 153, 157, 161, 165, 169, 173, 177, + 181, 185, 189, 193, 197, 201, 205, 209, 213, 217, + 221, 225, 229, 233. For example, 6. type: int required: false @@ -517,14 +575,18 @@ type: str required: false power_assignment_mode: - description: | - Mode of power assignment for the XOR radio interface. Accepts "Global" or "Custom." - In "Custom" mode, valid values range from 1 to 8. + description: > + Specifies the power assignment mode for the XOR radio interface. + If the power assignment mode is set to "Global", the power level is not required. + When a power level is provided, the power assignment mode automatically defaults to "Custom". + Accepts "Global" or "Custom". For example: "Global". type: str required: false power_level: - description: Custom power level configured for the XOR radio interface. - For example, 3. + description: > + Defines the custom power level for the XOR radio interface. + When a power level is provided, the power assignment mode automatically defaults to "Custom". + Valid values range from 1 to 8. For example: 3. type: int required: false tri_radio: @@ -544,7 +606,8 @@ required: false antenna_gain: description: | - Antenna gain value in decibels (dB) for the TRI radio interface, valid values range + Antenna gain value in decibels (dB) for the TRI radio interface, + and antenna gain must be greater than cable loss. Valid values range from 0 to 40. For example, 16. type: int required: false @@ -571,14 +634,19 @@ required: false channel_assignment_mode: description: | - Mode of channel assignment for the TRI radio interface. Accepts "Global" or "Custom". + Specifies the channel assignment mode for the TRI radio interface. + If the channel assignment mode is set to "Global", the channel number is not required. + When a channel number is provided, the channel assignment mode automatically defaults to "Custom". + Accepts "Global" or "Custom". For example: "Global". For Custom, it accepts values like 36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144, 149, 153, 157, 161, 165, 169, 173. (eg. Custom) type: str required: false channel_number: description: Custom channel number configured for the TRI radio interface. - For example, 6. 
+ For "Custom" mode, valid values are 36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, 112, 116, 120, 124, 128, + 132, 136, 140, 144, 149, 153, 157, 161, 165, 169, 173. + For example, 36. type: int required: false channel_width: @@ -588,14 +656,18 @@ type: str required: false power_assignment_mode: - description: | - Mode of power assignment for the TRI radio interface. Accepts "Global" or "Custom". - In Custom, it accepts values 1 to 8. + description: > + Specifies the power assignment mode for the TRI radio interface. + If the power assignment mode is set to "Global", the power level is not required. + When a power level is provided, the power assignment mode automatically defaults to "Custom". + Accepts "Global" or "Custom". For example: "Global". type: str required: false power_level: - description: Custom power level configured for the TRI radio interface. - For example, 3. + description: > + Defines the custom power level for the TRI radio interface. + When a power level is provided, the power assignment mode automatically defaults to "Custom". + Valid values range from 1 to 8. For example: 3. type: int required: false dual_radio_mode: @@ -735,7 +807,9 @@ type: str required: false led_status: - description: State of the AP's LED. Accepts "Enabled" or "Disabled". + description: > + Specifies the LED status of the access point. Accepts "Enabled" or "Disabled". + When an LED brightness level is provided, the LED status defaults to "Enabled". For example, "Enabled". type: str required: false @@ -857,8 +931,9 @@ required: false antenna_gain: description: | - Specifies the antenna gain value in decibels (dB) for the 2.4GHz radio interface, valid values range - from 0 to 40. For example, 10. + Specifies the antenna gain value in decibels (dB) for the 2.4GHz radio interface, + and antenna gain must be greater than cable loss. Valid values range + from 0 to 20. For example, 10. type: int required: false radio_role_assignment: @@ -873,7 +948,7 @@ required: false cable_loss: description: | - Cable loss in dB for the 2.4GHz radio interface. Valid values are from 0 to 40. + Cable loss in dB for the 2.4GHz radio interface. Valid values are from 0 to 20. This value must be less than the antenna gain. For example, 2. type: int required: false @@ -883,23 +958,33 @@ type: str required: false channel_assignment_mode: - description: Mode of channel assignment for the 2.4GHz radio interface. - Accepts "Global" or "Custom". For example, "Custom". + description: > + Specifies the channel assignment for the 2.4GHz radio interface. + If the channel assignment is set to "Global", the channel_number is not required. + When a channel_number is provided, the channel assignment defaults to "Custom". + Accepts "Global" or "Custom". For example: "Global". type: str required: false channel_number: - description: Custom channel number configured for the 2.4GHz radio - interface. For example, 6. + description: > + Defines the custom channel number for the 2.4GHz radio interface. + When a channel number is provided, the channel assignment defaults to "Custom". + Valid values range from 1 to 14. For example: 3. type: int required: false power_assignment_mode: - description: Mode of power assignment for the 2.4GHz radio interface. - Accepts "Global" or "Custom". For example, "Custom". + description: > + Specifies the power assignment mode for the 2.4GHz radio interface. + If the power assignment mode is set to "Global", the power level is not required. 
+ When a power level is provided, the power assignment mode automatically defaults to "Custom". + Accepts "Global" or "Custom". For example: "Global". type: str required: false power_level: - description: Custom power level configured for the 2.4GHz radio - interface. For example, 3. + description: > + Defines the custom power level for the 2.4GHz radio interface. + When a power level is provided, the power assignment mode automatically defaults to "Custom". + Valid values range from 1 to 8. For example: 3. type: int required: false 5ghz_radio: @@ -919,8 +1004,9 @@ required: false antenna_gain: description: | - Antenna gain value in decibels (dB) for the 5GHz radio interface, valid values range - from 0 to 40. For example, 5. + Antenna gain value in decibels (dB) for the 5GHz radio interface, + and antenna gain must be greater than cable loss. Valid values range + from 0 to 20. For example, 5. type: int required: false radio_role_assignment: @@ -936,7 +1022,7 @@ required: false cable_loss: description: | - Cable loss in dB for the 5GHz radio interface. Valid values are from 0 to 40. + Cable loss in dB for the 5GHz radio interface. Valid values are from 0 to 20. This value must be less than the antenna gain. For example, 3. type: int required: false @@ -946,23 +1032,35 @@ type: str required: false channel_assignment_mode: - description: Mode of channel assignment for the 5GHz radio interface. - Accepts "Global" or "Custom". For example, "Custom". + description: > + Specifies the channel assignment for the 5GHz radio interface. + If the channel assignment is set to "Global", the channel_number is not required. + When a channel_number is provided, the channel assignment defaults to "Custom". + Accepts "Global" or "Custom". For example: "Global". type: str required: false channel_number: - description: Custom channel number configured for the 5GHz radio - interface. For example, 36. + description: > + Defines the custom channel number for the 5GHz radio interface. + When a channel number is provided, the channel assignment defaults to "Custom". + Valid values range from 36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, 112, 116, 120, + 124, 128, 132, 136, 140, 144, 149, 153, 157, 161, 165, 169, 173. + For example: 36. type: int required: false power_assignment_mode: - description: Mode of power assignment for the 5GHz radio interface. - Accepts "Global" or "Custom". For example, "Custom". + description: > + Specifies the power assignment mode for the 5GHz radio interface. + If the power assignment mode is set to "Global", the power level is not required. + When a power level is provided, the power assignment mode automatically defaults to "Custom". + Accepts "Global" or "Custom". For example: "Global". type: str required: false power_level: - description: Custom power level configured for the 5GHz radio - interface. For example, 3. + description: > + Defines the custom power level for the 5GHz radio interface. + When a power level is provided, the power assignment mode automatically defaults to "Custom". + Valid values range from 1 to 8. For example: 3. type: int required: false 6ghz_radio: @@ -982,8 +1080,9 @@ required: false antenna_gain: description: | - Antenna gain value in decibels (dB) for the 6GHz radio interface, valid values range - from 0 to 40. For example, 30. + Antenna gain value in decibels (dB) for the 6GHz radio interface, + and antenna gain must be greater than cable loss. Valid values range + from 0 to 40. For example, 10. 
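For the 6 GHz radio the same pattern applies, except that the channel must be one of the discrete values listed above and, according to the validation changes later in this diff, antenna_gain and cable_loss are reported as unsupported for this band. A hedged fragment with placeholder values:

```yaml
# Hedged sketch of a 6 GHz radio block; antenna_gain and cable_loss are left out
# because the updated playbook validation rejects them for the 6 GHz band.
        6ghz_radio:
          admin_status: "Enabled"
          channel_number: 53          # must be one of the listed 6 GHz channels
          channel_width: "40 MHz"     # "20 MHz" through "320 MHz" are accepted
          power_level: 3              # 1-8; treated as power_assignment_mode "Custom"
```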
type: int required: false radio_role_assignment: @@ -1008,23 +1107,37 @@ type: str required: false channel_assignment_mode: - description: Mode of channel assignment for the 6GHz radio interface. - Accepts "Global" or "Custom". For example, "Custom". + description: > + Specifies the channel assignment for the 6GHz radio interface. + If the channel assignment is set to "Global", the channel_number is not required. + When a channel_number is provided, the channel assignment defaults to "Custom". + Accepts "Global" or "Custom". For example: "Global". type: str required: false channel_number: - description: Custom channel number configured for the 6GHz radio - interface. For example, 6. + description: > + Defines the custom channel number for the 6GHz radio interface. + When a channel number is provided, the channel assignment defaults to "Custom". + Valid values range from 1, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45, 49, 53, + 57, 61, 65, 69, 73, 77, 81, 85, 89, 93, 97, 101, 105, 109, 113, 117, 121, + 125, 129, 133, 137, 141, 145, 149, 153, 157, 161, 165, 169, 173, 177, 181, + 185, 189, 193, 197, 201, 205, 209, 213, 217, 221, 225, 229, 233. + For example: 53. type: int required: false power_assignment_mode: - description: Mode of power assignment for the 6GHz radio interface. - Accepts "Global" or "Custom". For example, "Custom". + description: > + Specifies the power assignment mode for the 6GHz radio interface. + If the power assignment mode is set to "Global", the power level is not required. + When a power level is provided, the power assignment mode automatically defaults to "Custom". + Accepts "Global" or "Custom". For example: "Global". type: str required: false power_level: - description: Custom power level configured for the 6GHz radio - interface. For example, 3. + description: > + Defines the custom power level for the 6GHz radio interface. + When a power level is provided, the power assignment mode automatically defaults to "Custom". + Valid values range from 1 to 8. For example: 3. type: int required: false xor_radio: @@ -1044,7 +1157,8 @@ required: false antenna_gain: description: | - Antenna gain value in decibels (dB) for the XOR radio interface, valid values range + Antenna gain value in decibels (dB) for the XOR radio interface, + and antenna gain must be greater than cable loss. Valid values range from 0 to 40. For example, 14. type: int required: false @@ -1080,7 +1194,9 @@ required: false channel_assignment_mode: description: | - Mode of channel assignment for the XOR radio interface. Accepts "Global" or "Custom". + If the channel assignment mode is set to "Global", the channel number is not required. + When a channel number is provided, the channel assignment mode defaults to "Custom". + Accepts "Global" or "Custom". For example: "Global". - For "Custom" mode and a radio band of "2.4 GHz", valid values are from 1 to 14. - For "Custom" mode and a radio band of "5 GHz", valid values are 36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, @@ -1097,8 +1213,20 @@ type: str required: false channel_number: - description: Custom channel number configured for the XOR radio - interface. For example, 6. + description: Custom channel number configured for the XOR radio interface. + - For "Custom" mode and a radio band of "2.4 GHz", valid values are from 1 to 14. + - For "Custom" mode and a radio band of "5 GHz", valid values are + 36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, + 112, 116, 120, 124, 128, 132, 136, 140, 144, + 149, 153, 157, 161, 165, 169, 173. 
+ - For "Custom" mode and a radio band of "6 GHz", valid values are + 1, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45, 49, + 53, 57, 61, 65, 69, 73, 77, 81, 85, 89, 93, 97, + 101, 105, 109, 113, 117, 121, 125, 129, 133, 137, + 141, 145, 149, 153, 157, 161, 165, 169, 173, 177, + 181, 185, 189, 193, 197, 201, 205, 209, 213, 217, + 221, 225, 229, 233. + For example, 6. type: int required: false channel_width: @@ -1108,14 +1236,18 @@ type: str required: false power_assignment_mode: - description: | - Mode of power assignment for the XOR radio interface. Accepts "Global" or "Custom." - In "Custom" mode, valid values range from 1 to 8. + description: > + Specifies the power assignment mode for the XOR radio interface. + If the power assignment mode is set to "Global", the power level is not required. + When a power level is provided, the power assignment mode automatically defaults to "Custom". + Accepts "Global" or "Custom". For example: "Global". type: str required: false power_level: - description: Custom power level configured for the XOR radio interface. - For example, 3. + description: > + Defines the custom power level for the XOR radio interface. + When a power level is provided, the power assignment mode automatically defaults to "Custom". + Valid values range from 1 to 8. For example: 3. type: int required: false tri_radio: @@ -1135,7 +1267,8 @@ required: false antenna_gain: description: | - Antenna gain value in decibels (dB) for the TRI radio interface, valid values range + Antenna gain value in decibels (dB) for the TRI radio interface, + and antenna gain must be greater than cable loss. Valid values range from 0 to 40. For example, 16. type: int required: false @@ -1162,14 +1295,19 @@ required: false channel_assignment_mode: description: | - Mode of channel assignment for the TRI radio interface. Accepts "Global" or "Custom". + Specifies the channel assignment mode for the TRI radio interface. + If the channel assignment mode is set to "Global", the channel number is not required. + When a channel number is provided, the channel assignment mode automatically defaults to "Custom". + Accepts "Global" or "Custom". For example: "Global". For Custom, it accepts values like 36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144, 149, 153, 157, 161, 165, 169, 173. (eg. Custom) type: str required: false channel_number: - description: Custom channel number configured for the TRI radio - interface. For example, 6. + description: Custom channel number configured for the TRI radio interface. + For Custom, it accepts values like 36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, 112, 116, 120, 124, 128, + 132, 136, 140, 144, 149, 153, 157, 161, 165, 169, 173. (eg. Custom) + For example, 36. type: int required: false channel_width: @@ -1179,14 +1317,18 @@ type: str required: false power_assignment_mode: - description: | - Mode of power assignment for the TRI radio interface. Accepts "Global" or "Custom". - In Custom, it accepts values 1 to 8. + description: > + Specifies the power assignment mode for the TRI radio interface. + If the power assignment mode is set to "Global", the power level is not required. + When a power level is provided, the power assignment mode automatically defaults to "Custom". + Accepts "Global" or "Custom". For example: "Global". type: str required: false power_level: - description: Custom power level configured for the TRI radio interface. - For example, 3. + description: > + Defines the custom power level for the TRI radio interface. 
+ When a power level is provided, the power assignment mode automatically defaults to "Custom". + Valid values range from 1 to 8. For example: 3. type: int required: false dual_radio_mode: @@ -1826,9 +1968,10 @@ def __init__(self, module): self.radio_interface = ["6ghz_radio", "xor_radio", "tri_radio"] self.allowed_series = { "6ghz_radio": ["9136I", "9162I", "9163E", "9164I", "IW9167IH", "9178I", "9176I", - "9176D1"], - "xor_radio": ["280", "380", "480", "9120", "9166", "IW9167EH", "IW9165E", "IW9165DH"], - "tri_radio": ["9124AXE", "9130AXI", "9130AXE", "9178I"] + "9176D1", "9172I", "9172H", "9179F"], + "xor_radio": ["280", "380", "480", "9120", "9166", "IW9167EH", "IW9165E", "IW9165DH", + "9176", "CW9174I", "CW9174E"], + "tri_radio": ["9124AXE", "9130AXI", "9130AXE", "9178I", "9179F"] } self.allowed_channel_no = { "2.4ghz_radio": list(range(1, 15)), @@ -2006,6 +2149,7 @@ def get_have(self, input_config): have["ip_address"] = self.payload["access_point_details"]["management_ip_address"] have["device_id"] = self.payload["access_point_details"]["id"] have["wlc_provision_status"] = self.payload.get("wlc_provision_status") + have["ap_provision_status"] = self.payload.get("ap_provision_status") have["associated_wlc_ip"] = self.payload["access_point_details"]["associated_wlc_ip"] have["hostname"] = self.payload["access_point_details"]["hostname"] have["ap_type"] = self.payload["access_point_details"]["family"] @@ -2058,7 +2202,13 @@ def get_diff_merged(self, ap_config): if site: if site_required_changes: - if self.have.get("wlc_provision_status") == "success": + if self.have.get("wlc_provision_status") != "success": + self.msg = "Wireless Controller {0} not provisioned at the site {1}.".format( + self.have.get("associated_wlc_ip"), self.have.get("site_name_hierarchy")) + self.log(self.msg, "INFO") + self.result["changed"] = False + responses["accesspoints_updates"].update({"provision_message": self.msg}) + else: provision_status, provision_details = self.provision_device() if provision_status == "SUCCESS": self.result["changed"] = True @@ -2067,6 +2217,12 @@ def get_diff_merged(self, ap_config): responses["accesspoints_updates"].update({ "provision_message": self.msg }) + else: + self.msg = "Unable to provision the AP {0} for the site {1}.".format( + self.have.get("hostname"), self.have.get("site_name_hierarchy")) + self.log(self.msg, "INFO") + self.result["changed"] = False + responses["accesspoints_updates"].update({"provision_message": self.msg}) else: self.msg = "AP {0} already provisioned at site {1}.".format( self.have["hostname"], self.have.get("site_name_hierarchy")) @@ -2607,13 +2763,25 @@ def validate_radio_parameters(self, radio_config, radio_series, errormsg): validate_str(antenna_name, param_spec, "antenna_name", errormsg) antenna_gain = radio_config.get("antenna_gain") - if antenna_gain and antenna_gain not in range(0, 41): - errormsg.append("antenna_gain: Invalid '{0}' in playbook, allowed range of min: 0 and max: 40" - .format(antenna_gain)) + if antenna_gain: + if antenna_gain and radio_series in ["6ghz_radio"]: + errormsg.append("antenna_gain: '{0}' Not supported for 6 GHz Radio in playbook." 
+ .format(antenna_gain)) + elif antenna_gain not in range(0, 21) and radio_series in ["2.4ghz_radio", "5ghz_radio"]: + errormsg.append("antenna_gain: Invalid '{0}' in playbook, allowed range of min: 0 and max: 20" + .format(antenna_gain)) + elif antenna_gain not in range(0, 41) and radio_series in ["xor_radio", "tri_radio"]: + errormsg.append("antenna_gain: Invalid '{0}' in playbook, allowed range of min: 0 and max: 40" + .format(antenna_gain)) cable_loss = radio_config.get("cable_loss") if cable_loss: - if not 0 <= cable_loss <= 40: + if cable_loss and radio_series in ["6ghz_radio"]: + errormsg.append("cable_loss: '{0}' Not supported for 6 GHz Radio in playbook." + .format(cable_loss)) + elif radio_series in ["2.4ghz_radio", "5ghz_radio"] and not 0 <= cable_loss <= 20: + errormsg.append("cable_loss: Invalid '{0}' in playbook. Must be between 0 and 20.".format(cable_loss)) + elif radio_series in ["xor_radio", "tri_radio"] and not 0 <= cable_loss <= 40: errormsg.append("cable_loss: Invalid '{0}' in playbook. Must be between 0 and 40.".format(cable_loss)) elif antenna_gain and cable_loss >= antenna_gain: errormsg.append("cable_loss: Invalid '{0}' in playbook. Must be less than antenna_gain: {1}." @@ -2648,7 +2816,8 @@ def validate_radio_parameters(self, radio_config, radio_series, errormsg): else: current_radio_role = self.check_current_radio_role_assignment( radio_series, self.have["current_ap_config"].get("radio_dtos" , []), radio_band) - if self.want.get(radio_series).get("radio_role_assignment") != "Client-Serving" and radio_series != "5ghz_radio": + if (self.want.get(radio_series).get("radio_role_assignment") != "Client-Serving" and + radio_series not in ("5ghz_radio", "6ghz_radio")): errormsg.append( "channel_number: This configuration is only supported with Client-Serving Radio Role Assignment {0} " .format(current_radio_role) @@ -2685,7 +2854,8 @@ def validate_radio_parameters(self, radio_config, radio_series, errormsg): else: current_radio_role = self.check_current_radio_role_assignment( radio_series, self.have["current_ap_config"].get("radio_dtos", []), radio_band) - if self.want.get(radio_series).get("radio_role_assignment") != "Client-Serving" and radio_series != "5ghz_radio": + if (self.want.get(radio_series).get("radio_role_assignment") != "Client-Serving" and + radio_series not in ("5ghz_radio", "6ghz_radio")): errormsg.append( "power_level: This configuration is only supported with Client-Serving Radio Role Assignment {0} " .format(current_radio_role) @@ -2742,25 +2912,15 @@ def check_current_radio_role_assignment(self, radio_type, radio_dtos, radio_band """ role_assignment = None for each_dto in radio_dtos: - slot_id = each_dto["slot_id"] + if_type = each_dto["if_type_value"] role_assignment = each_dto.get("radio_role_assignment") - if (radio_type == "2.4ghz_radio" and slot_id == 0) or \ - (radio_type == "5ghz_radio" and slot_id == 1) or \ - (radio_type == "6ghz_radio" and slot_id == 2): + if (radio_type == "2.4ghz_radio" and if_type == "2.4 GHz") or \ + (radio_type == "5ghz_radio" and if_type == "5 GHz") or \ + (radio_type == "6ghz_radio" and if_type == "6 GHz") or \ + (radio_type == "xor_radio" and if_type == "Dual Radio") or \ + (radio_type == "tri_radio" and if_type == "Tri Radio"): break - if radio_type == "xor_radio": - if (radio_band == "2.4 GHz" and slot_id == 0) or \ - (radio_band == "5 GHz" and slot_id == 2) or \ - (radio_band == "6 GHz" and slot_id == 2): - break - - if radio_type == "tri_radio": - if (radio_band == "2.4 GHz" and slot_id == 0) or \ - (radio_band 
== "5 GHz" and slot_id == 1) or \ - (radio_band == "5 GHz" and slot_id == 2): - break - self.log("Completed checking radio role assignments. Role assignment: {0}, radio type: {1}, radio band: {2}" .format(role_assignment, radio_type, radio_band), "INFO") return role_assignment @@ -2927,6 +3087,19 @@ def get_current_config(self, input_config): self.log("Access point exists: {0}, Current configuration: {1}" .format(accesspoint_exists, current_configuration), "INFO") + current_eth_configuration = {} + if accesspoint_exists: + self.payload["access_point_details"] = current_configuration + ap_ethernet_mac_address = current_configuration["ap_ethernet_mac_address"] + ap_config_exists, current_eth_configuration = self.get_accesspoint_config( + ap_ethernet_mac_address) + self.log("Access point configuration exists: {0}, Current configuration: {1}" + .format(ap_config_exists, str(current_eth_configuration)), "INFO") + + if ap_config_exists: + self.payload["access_point_config"] = current_eth_configuration + self.log("Updated payload with access point configuration: {0}".format(str(self.payload)), "INFO") + if input_config.get("site"): site_exists, current_site = self.site_exists(input_config) self.log("Site exists: {0}, Current site: {1}".format(site_exists, current_site), "INFO") @@ -2939,26 +3112,21 @@ def get_current_config(self, input_config): current_configuration["mac_address"], site_exists, current_site, current_configuration) }) - provision_status, wlc_details = self.verify_ap_provision( + provision_status, wlc_details = self.verify_wlc_provision( current_configuration["associated_wlc_ip"]) self.payload["wlc_provision_status"] = provision_status self.log("WLC provision status: {0}".format(provision_status), "INFO") - if accesspoint_exists: - self.payload["access_point_details"] = current_configuration - ap_ethernet_mac_address = current_configuration["ap_ethernet_mac_address"] - ap_config_exists, current_configuration = self.get_accesspoint_config( - ap_ethernet_mac_address) - self.log("Access point configuration exists: {0}, Current configuration: {1}" - .format(ap_config_exists, str(current_configuration)), "INFO") - - if ap_config_exists: - self.payload["access_point_config"] = current_configuration - self.log("Updated payload with access point configuration: {0}".format(str(self.payload)), "INFO") + if self.compare_dnac_versions(self.get_ccc_version(), "3.1.3.0") >= 0: + if current_eth_configuration.get("provisioning_status"): + self.payload["ap_provision_status"] = "Provisioned" + else: + self.payload["ap_provision_status"] = None + self.log("AP provision status: {0}".format(self.payload["ap_provision_status"]), "INFO") self.log("Completed retrieving current configuration. Access point exists: {0}, Current configuration: {1}" - .format(accesspoint_exists, current_configuration), "INFO") - return (accesspoint_exists, current_configuration) + .format(accesspoint_exists, current_eth_configuration), "INFO") + return (accesspoint_exists, current_eth_configuration) def get_accesspoint_config(self, ap_ethernet_mac_address): """ @@ -3122,9 +3290,9 @@ def get_site_device(self, site_id, ap_mac_address, site_exist=None, current_site Error: {1}".format(site_id, str(e)), "ERROR") return False - def verify_ap_provision(self, wlc_ip_address): + def verify_wlc_provision(self, wlc_ip_address): """ - Verifies if the AP (device) is provisioned. + Verifies if the WLC (device) is provisioned. Parameters: self (object): An instance of a class used for interacting with Cisco Catalyst Center. 
@@ -3389,22 +3557,22 @@ def compare_radio_config(self, current_radio, want_radio): self.log("Current radio configuration: {}".format(current_radio), "INFO") self.log("Desired radio configuration: {}".format(want_radio), "INFO") available_key = { - "_0": ("admin_status", "antenna_gain", "antenna_name", "radio_role_assignment", + "_1": ("admin_status", "antenna_gain", "antenna_name", "radio_role_assignment", "power_assignment_mode", "power_level", "channel_assignment_mode", "channel_number", "cable_loss", "antenna_cable_name", "radio_type", "radio_band"), - "_1": ("admin_status", "antenna_gain", "antenna_name", "radio_role_assignment", + "_2": ("admin_status", "antenna_gain", "antenna_name", "radio_role_assignment", "power_assignment_mode", "power_level", "channel_assignment_mode", "channel_number", "cable_loss", "antenna_cable_name", "channel_width", "radio_type", "radio_band", "dual_radio_mode"), - "_2": ("admin_status", "radio_role_assignment", "radio_type", + "_6": ("admin_status", "radio_role_assignment", "radio_type", "power_assignment_mode", "power_level", "channel_assignment_mode", "channel_number", "channel_width", "dual_radio_mode", "radio_band"), "_3": ("admin_status", "antenna_gain", "antenna_name", "radio_role_assignment", "power_assignment_mode", "power_level", "channel_assignment_mode", "channel_number", "cable_loss", "antenna_cable_name", "radio_band", "channel_width", "radio_type"), - "_4": ("admin_status", "antenna_gain", "antenna_name", "radio_role_assignment", + "_5": ("admin_status", "antenna_gain", "antenna_name", "radio_role_assignment", "power_assignment_mode", "power_level", "channel_assignment_mode", "channel_number", "cable_loss", "antenna_cable_name", "dual_radio_mode", "channel_width", "radio_type") @@ -3414,15 +3582,17 @@ def compare_radio_config(self, current_radio, want_radio): unmatch_count = 0 self.keymap["power_level"] = "powerlevel" dtos_keys = list(want_radio.keys()) - slot_id_key = "_" + str(current_radio["slot_id"]) - self.log("Comparing keys for slot ID: {}".format(current_radio["slot_id"]), "INFO") + if_type_key = "_" + str(current_radio["if_type"]) + self.log("Comparing keys for if_type: {}".format(current_radio["if_type"]), "INFO") for dto_key in dtos_keys: - if dto_key in available_key[slot_id_key]: + if dto_key in available_key[if_type_key]: if dto_key == "antenna_name": temp_dtos[dto_key] = want_radio[dto_key] unmatch_count = unmatch_count + 1 self.log("Antenna name unmatched: {0}".format(want_radio[dto_key]), "INFO") + elif dto_key == "admin_status" and want_radio.get(dto_key) == "Enabled": + temp_dtos[self.keymap[dto_key]] = want_radio.get(dto_key) elif dto_key == "cable_loss": cable_loss = int(want_radio[dto_key]) antenna_gain = int(want_radio.get("antenna_gain", 0)) @@ -3505,19 +3675,19 @@ def config_diff(self, current_ap_config): current_radio_dtos = current_ap_config.get("radio_dtos") radio_data = {} for each_radio in current_radio_dtos: - if each_key == "2.4ghz_radio" and each_radio["slot_id"] == 0: + if each_key == "2.4ghz_radio" and each_radio["if_type_value"] == "2.4 GHz": radio_data = self.compare_radio_config(each_radio, self.want[each_key]) - elif each_key == "5ghz_radio" and each_radio["slot_id"] == 1: + elif each_key == "5ghz_radio" and each_radio["if_type_value"] == "5 GHz": radio_data = self.compare_radio_config(each_radio, self.want[each_key]) - elif each_key == "6ghz_radio" and each_radio["slot_id"] == 2: + elif each_key == "6ghz_radio" and each_radio["if_type_value"] == "6 GHz": radio_data = 
self.compare_radio_config(each_radio, self.want[each_key]) - elif each_key == "xor_radio" and each_radio["slot_id"] == 0: + elif each_key == "xor_radio" and each_radio["if_type_value"] == "Dual Radio": radio_data = self.compare_radio_config(each_radio, self.want[each_key]) - elif each_key == "tri_radio" and each_radio.get("dual_radio_mode") is not None: + elif each_key == "tri_radio" and each_radio["if_type_value"] == "Tri Radio": radio_data = self.compare_radio_config(each_radio, self.want[each_key]) if radio_data.get("unmatch") != 0: @@ -3526,15 +3696,15 @@ def config_diff(self, current_ap_config): "clean_air_si_6ghz"): current_radio_dtos = current_ap_config.get("radio_dtos") for each_dtos in current_radio_dtos: - if each_key == "clean_air_si_2.4ghz" and each_dtos["slot_id"] == 0 \ + if each_key == "clean_air_si_2.4ghz" and each_dtos["if_type_value"] == "2.4 GHz" \ and each_dtos["clean_air_si"] != self.want.get(each_key): update_config["cleanAirSI24"] = self.want[each_key] break - elif each_key == "clean_air_si_5ghz" and each_dtos["slot_id"] == 1 \ + elif each_key == "clean_air_si_5ghz" and each_dtos["if_type_value"] == "5 GHz" \ and each_dtos["clean_air_si"] != self.want.get(each_key): update_config["cleanAirSI5"] = self.want[each_key] break - elif each_key == "clean_air_si_6ghz" and each_dtos["slot_id"] == 2 \ + elif each_key == "clean_air_si_6ghz" and each_dtos["if_type_value"] == "6 GHz" \ and each_dtos["clean_air_si"] != self.want.get(each_key): update_config["cleanAirSI6"] = self.want[each_key] break @@ -3551,17 +3721,17 @@ def config_diff(self, current_ap_config): for ctrl_name in ["primary_controller_name", "secondary_controller_name", "tertiary_controller_name"]: if ctrl_name == "primary_controller_name" and self.want.get(ctrl_name): if self.want.get(ctrl_name) == "Inherit from site / Clear": - update_config[self.keymap[ctrl_name]] = self.want.get(ctrl_name) + update_config["primaryControllerName"] = self.want.get(ctrl_name) update_config[self.keymap["primary_ip_address"]] = {} update_config[self.keymap["primary_ip_address"]]["address"] = "0.0.0.0" - update_config[self.keymap["secondary_controller_name"]] = self.want.get(ctrl_name) + update_config["secondaryControllerName"] = self.want.get(ctrl_name) update_config[self.keymap["secondary_ip_address"]] = {} update_config[self.keymap["secondary_ip_address"]]["address"] = "0.0.0.0" - update_config[self.keymap["tertiary_controller_name"]] = self.want.get(ctrl_name) + update_config["tertiaryControllerName"] = self.want.get(ctrl_name) update_config[self.keymap["tertiary_ip_address"]] = {} update_config[self.keymap["tertiary_ip_address"]]["address"] = "0.0.0.0" else: - update_config[self.keymap[ctrl_name]] = self.want[ctrl_name] + update_config["primaryControllerName"] = self.want[ctrl_name] update_config[self.keymap["primary_ip_address"]] = {} if self.want.get("primary_ip_address", {}).get("address"): update_config[self.keymap["primary_ip_address"]]["address"] = \ @@ -3570,14 +3740,14 @@ def config_diff(self, current_ap_config): update_config[self.keymap["primary_ip_address"]]["address"] = "0.0.0.0" elif ctrl_name == "secondary_controller_name" and self.want.get(ctrl_name): if self.want.get(ctrl_name) == "Inherit from site / Clear": - update_config[self.keymap[ctrl_name]] = self.want.get(ctrl_name) + update_config["secondaryControllerName"] = self.want.get(ctrl_name) update_config[self.keymap["secondary_ip_address"]] = {} update_config[self.keymap["secondary_ip_address"]]["address"] = "0.0.0.0" - 
update_config[self.keymap["tertiary_controller_name"]] = self.want.get(ctrl_name) + update_config["tertiaryControllerName"] = self.want.get(ctrl_name) update_config[self.keymap["tertiary_ip_address"]] = {} update_config[self.keymap["tertiary_ip_address"]]["address"] = "0.0.0.0" else: - update_config[self.keymap[ctrl_name]] = self.want[ctrl_name] + update_config["secondaryControllerName"] = self.want[ctrl_name] update_config[self.keymap["secondary_ip_address"]] = {} if self.want.get("secondary_ip_address", {}).get("address"): update_config[self.keymap["secondary_ip_address"]]["address"] = \ @@ -3586,11 +3756,11 @@ def config_diff(self, current_ap_config): update_config[self.keymap["secondary_ip_address"]]["address"] = "0.0.0.0" elif ctrl_name == "tertiary_controller_name" and self.want.get(ctrl_name): if self.want.get(ctrl_name) == "Inherit from site / Clear": - update_config[self.keymap[ctrl_name]] = self.want.get(ctrl_name) + update_config["tertiaryControllerName"] = self.want.get(ctrl_name) update_config[self.keymap["tertiary_ip_address"]] = {} update_config[self.keymap["tertiary_ip_address"]]["address"] = "0.0.0.0" else: - update_config[self.keymap[ctrl_name]] = self.want[ctrl_name] + update_config["tertiaryControllerName"] = self.want[ctrl_name] update_config[self.keymap["tertiary_ip_address"]] = {} if self.want.get("tertiary_ip_address", {}).get("address"): update_config[self.keymap["tertiary_ip_address"]]["address"] = \ @@ -3598,6 +3768,10 @@ def config_diff(self, current_ap_config): else: update_config[self.keymap["tertiary_ip_address"]]["address"] = "0.0.0.0" + # remove the controller name if the key is in snake case. + if update_config.get(ctrl_name) is not None: + del update_config[ctrl_name] + if update_config: update_config["macAddress"] = current_ap_config["eth_mac"] @@ -3746,7 +3920,7 @@ def update_ap_configuration(self, ap_config): radio_dtos["configureChannel"] = True if each_radio.get(self.keymap["channel_number"]) is not None: - radio_dtos[self.keymap["channel_number"]] = \ + radio_dtos["channelNumber"] = \ each_radio.get(self.keymap["channel_number"]) radio_dtos["configureChannel"] = True radio_dtos[self.keymap["channel_assignment_mode"]] = 2 @@ -3772,7 +3946,7 @@ def update_ap_configuration(self, ap_config): radio_dtos["configurePower"] = True self.log(self.pprint(each_radio), "INFO") if each_radio.get(self.keymap["power_level"]) is not None: - radio_dtos[self.keymap["power_level"]] = \ + radio_dtos["powerlevel"] = \ each_radio.get(self.keymap["power_level"]) radio_dtos[self.keymap["power_assignment_mode"]] = 2 radio_dtos["configurePower"] = True @@ -3787,8 +3961,12 @@ def update_ap_configuration(self, ap_config): radio_dtos["configureAntennaPatternName"] = True if each_radio.get(self.keymap["radio_band"]) is not None: - radio_dtos[self.keymap["radio_band"]] = "RADIO24" \ - if each_radio[self.keymap["radio_band"]] == "2.4 GHz" else "RADIO5" + if each_radio[self.keymap["radio_band"]] == "2.4 GHz": + radio_dtos[self.keymap["radio_band"]] = "RADIO24" + elif each_radio[self.keymap["radio_band"]] == "5 GHz": + radio_dtos[self.keymap["radio_band"]] = "RADIO5" + else: + radio_dtos[self.keymap["radio_band"]] = "RADIO6" if each_radio.get(self.keymap["radio_role_assignment"]) is not None: if each_radio.get(self.keymap["radio_role_assignment"]) == "Auto": diff --git a/plugins/modules/application_policy_workflow_manager.py b/plugins/modules/application_policy_workflow_manager.py index 798931d698..7e8914977b 100644 --- a/plugins/modules/application_policy_workflow_manager.py +++ 
b/plugins/modules/application_policy_workflow_manager.py @@ -889,7 +889,7 @@ RETURN = r""" # Case 1: Successful creation of application queuing profile -creation _of_application_queuing_profile_response_task_execution: +creation_of_application_queuing_profile_response_task_execution: description: A dictionary with details for successful task execution. returned: always type: dict @@ -1059,7 +1059,7 @@ "status": "success" } # Case 19: update not required for application policy -update_not_required_ for_application_policy_response_task_execution: +update_not_required_for_application_policy_response_task_execution: description: With task id get details for successful task execution returned: always type: dict diff --git a/plugins/modules/backup_and_restore_workflow_manager.py b/plugins/modules/backup_and_restore_workflow_manager.py new file mode 100644 index 0000000000..f3d3d460be --- /dev/null +++ b/plugins/modules/backup_and_restore_workflow_manager.py @@ -0,0 +1,3104 @@ +# !/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2025, Cisco Systems +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type +__author__ = ("Priyadharshini B", "Karthick S N", "Madhan Sankaranarayanan") + +DOCUMENTATION = r""" +--- +module: backup_and_restore_workflow_manager + +short_description: > + Resource module for comprehensive backup and restore workflow management with NFS server configuration in Cisco Catalyst Center. + +description: + - Automates comprehensive backup and restore workflow management in Cisco + Catalyst Center including NFS server configuration, backup target setup, + create backup, and restoration operations. + - Enables NFS server configuration for secure backup storage with + customizable port settings, protocol versions, and source path management. + - Supports backup configuration with encryption, retention policies, and + server type specification for enterprise data protection. + - Facilitates backup restoration with encryption passphrase validation for + secure data recovery operations. + - Supports deletion operations for NFS configurations and backup + to maintain clean backup infrastructure. + - Integrates with Cisco Catalyst Center's backup framework for centralized + network infrastructure data protection and disaster recovery. + +version_added: "6.31.0" +extends_documentation_fragment: + - cisco.dnac.workflow_manager_params + +author: + - Priyadharshini B (@pbalaku2) + - Karthick S N (@kasn) + - Madhan Sankaranarayanan (@madhansansel) + +options: + dnac_api_task_timeout: + description: + - Time in seconds to wait for API tasks to complete before timing out. + - For backup operations (creation/deletion), default timeout of 1200 seconds is typically sufficient. + - For restore operations, use a significantly higher value (minimum 3600 seconds or above) + as restore processes can take substantially longer depending on backup size and system load. + - If timeout is reached, the operation may still be running on Catalyst Center backend. + type: int + default: 1200 + config_verify: + description: + - Set to True to verify the Cisco Catalyst Center after applying changes. + type: bool + default: true + state: + description: + - Specifies the desired operational state for backup and restore + configuration management. + - Use C(merged) to create new backup configurations or update existing + NFS settings, backups, and restoration parameters. 
+ - Use C(deleted) to remove NFS configurations, backups, or + cleanup backup infrastructure components based on configuration + provided. + - Supports selective deletion for backup lifecycle management and + infrastructure cleanup operations. + type: str + choices: ["merged", "deleted"] + default: merged + config: + description: + - List of comprehensive backup and restore configuration specifications + including NFS server setup, backup target configuration, creating backup + parameters, and restoration details. + - Each configuration supports NFS server management, backup policy + definition, backup creation, and restore operation parameters for + enterprise backup infrastructure automation. + type: list + elements: dict + required: true + suboptions: + nfs_configuration: + description: + - Configuration details for NFS backup server setup and management. + - Defines NFS server connection parameters including IP address, + source paths, port configurations, and protocol version settings + for secure backup storage infrastructure. + type: list + elements: dict + suboptions: + server_ip: + description: + - IP address of the NFS server for backup storage connectivity. + - Must be a valid IPv4 address accessible from Cisco Catalyst + Center for backup operations. + type: str + required: true + source_path: + description: + - Directory path on the NFS server designated for storing backup + files and data. + - Path must exist on the NFS server and have appropriate + permissions for backup operations. + type: str + required: true + nfs_port: + description: + - Port number used for NFS service communication and data + transfer operations. + - Must be accessible and not blocked by firewalls between + Catalyst Center and NFS server. + type: int + default: 2049 + nfs_version: + description: + - NFS protocol version for backup storage communication. + - Determines compatibility and security features available for + backup operations. + type: str + default: nfs4 + choices: ["nfs3", "nfs4"] + nfs_portmapper_port: + description: + - Port number for the NFS portmapper service on target server. + - Used for dynamic port allocation and service discovery. + type: int + default: 111 + backup_storage_configuration: + description: + - Configuration for backup storage infrastructure and data management policies. + - Sets up NFS storage targets, encryption, and data retention settings. + - This configures WHERE and HOW backup data will be stored. + - Does not create or execute backup, only prepares storage infrastructure. + type: list + elements: dict + suboptions: + server_type: + description: + - Type of backup storage server for data preservation. + - Only NFS storage type is supported in Catalyst Center version 3.1.3.0. + - PHYSICAL_DISK type is not supported in Catalyst Center version 3.1.3.0. + type: str + required: true + choices: ["NFS", "PHYSICAL_DISK"] + nfs_details: + description: + - Connection details for NFS backup targets including server + information and storage path specifications. + - Used to retrieve mount path for backup storage operations. + type: dict + suboptions: + server_ip: + description: IP address of the NFS server for backup operations. + type: str + required: true + source_path: + description: Directory path on the NFS server for backup storage. + type: str + required: true + nfs_port: + description: Port number used to access NFS services. + type: int + nfs_version: + description: NFS protocol version for backup communication. 
+ type: str + default: nfs4 + choices: ["nfs3", "nfs4"] + nfs_portmapper_port: + description: Port number for the NFS portmapper service. + type: int + default: 111 + data_retention_period: + description: + - Number of days to retain backup before cleanup. + - Range must be between 3 and 60 days. + type: int + required: true + encryption_passphrase: + description: + - Passphrase for encrypting backup data during storage operations. + - Strongly recommended for secure data protection and compliance. + type: str + backup: + description: + - Configuration for creating and executing backup jobs. + - Creates backup jobs with specified name and data scope. + - This CREATES and EXECUTES backup immediately (not scheduling). + - Requires backup storage configuration to be set up first. + type: list + elements: dict + suboptions: + name: + description: + - Name parameter for backup identification and management operations. + - Serves dual purpose based on operation context and additional parameters. + - For backup creation (state=merged), when generate_new_backup=false or not specified, creates backup with this exact name. + - Follows standard Ansible idempotency - if backup exists with same name, no new backup is created. + - When generate_new_backup=true, uses this name as prefix and appends timestamp. + - Timestamp format is "YYYYMMDD_HHMMSS" using Indian Standard Time (IST) (e.g., 20241230_143052). + - Example with generate_new_backup=true and name="DAILY_BACKUP" creates "DAILY_BACKUP_20241230_143052". + - For backup deletion (state=deleted), when used alone, deletes backup with this exact name. + - When used with backup_retention_days, treats this as prefix to filter backups by name and retention period. + - Example with name="DAILY_BACKUP" and backup_retention_days=7 deletes all backups starting with + "DAILY_BACKUP" created more than 7 days ago. + - Backup name must begin with an alphabet and can contain letters, digits, and the following special characters @, _, -, space, and #. + type: str + generate_new_backup: + description: + - Controls backup naming strategy and creation behavior. + - When true, always creates new backup using name as prefix with timestamp suffix. + - When false or not specified, uses exact name and follows idempotent behavior. + - Only applicable when state=merged for backup creation operations. + - Ignored during deletion operations (state=deleted). + - Timestamp format is "YYYYMMDD_HHMMSS" using Indian Standard Time (IST). + - Useful for automated backup schedules where unique names are required. + type: bool + default: false + scope: + description: + - Defines backup scope including assurance data specifications. + - Determines what data types are included in backup operations. + type: str + choices: ["CISCO_DNA_DATA_WITH_ASSURANCE", "CISCO_DNA_DATA_WITHOUT_ASSURANCE"] + delete_all_backup: + description: + - Set to C(true) to delete all existing backups from Cisco Catalyst Center. + - Only valid when C(state=deleted) is specified. + - When enabled, removes all backup regardless of name or creation date. + - Use with extreme caution as this operation is irreversible and will permanently remove all backup data. + - Takes precedence over individual backup name deletion when both are specified. + - Useful for complete backup infrastructure cleanup or maintenance operations. + type: bool + default: false + backup_retention_days: + description: + - Duration-based backup retention policy for automated cleanup operations. 
+ - Retains backups created within the specified number of days and deletes older backups. + - When used with name parameter, applies retention policy only to backups matching the name prefix. + - When used alone, applies retention policy to all backups in the system. + - Must be a positive integer representing number of days (e.g., 7 for one week, 30 for one month). + - Only valid when state=deleted is specified. + - Example backup_retention_days=7 with name="DAILY_BACKUP" retains backups with "DAILY_BACKUP" prefix created in last 7 days, deletes older ones. + - Example backup_retention_days=30 without name parameter retains all backups created in last 30 days, + deletes all older backups regardless of name. + - Ignored when delete_all_backup=true is specified. + type: int + restore_operations: + description: + - Parameters for restoring data from previously created backups + including authentication and validation requirements. + type: list + elements: dict + suboptions: + name: + description: Name of the backup to restore from available backup list. + type: str + required: true + encryption_passphrase: + description: + - Passphrase for decrypting backup data during restore operations. + - Must match the passphrase used during backup creation. + type: str + +requirements: +- dnacentersdk >= 2.9.3 +- python >= 3.9.19 + +notes: +- Backup and restore functionality is available in Cisco Catalyst Center + version 3.1.3.0 and later for comprehensive data protection workflow +- NFS server configuration must be completed and healthy before backup + target configuration to ensure proper mount path availability +- Backup and restore functionality requires encryption passphrases for secure + data protection. Never hardcode these values in playbooks. +- Use Ansible Vault to encrypt sensitive backup configuration parameters + including encryption passphrases and NFS server credentials. +- Store backup encryption passphrases in separate encrypted variable files + (e.g., backup_secrets.yml) and decrypt during playbook execution. +- Consider using environment variables for backup credentials in CI/CD + pipelines to avoid exposing sensitive data in version control. +- The same encryption passphrase used during backup creation must be + provided during restore operations for successful data recovery. +- Encryption passphrases are automatically masked in logs when using + no_log parameter specifications in the module documentation. +- Encryption passphrases used during backup creation must be identical + to those provided during restore operations for successful data recovery +- Data retention periods are enforced automatically with cleanup occurring + after the specified retention period expires (3-60 days) +- Backup and restore operations are asynchronous with task monitoring + to track completion status and provide operational feedback +- NFS configurations require proper network connectivity and permissions + between Catalyst Center and the target NFS server infrastructure +- Only NFS storage type is supported for backup targets in version 3.1.3.0 + with additional storage types planned for future releases. 
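To illustrate the naming behavior described under the backup option, a run with generate_new_backup enabled uses the given name only as a prefix and appends an IST timestamp. The sketch below uses a placeholder prefix and an assumed dnac_login connection anchor rather than values from this change set.

```yaml
# Hedged sketch: "DAILY_BACKUP" becomes e.g. "DAILY_BACKUP_20241230_143052"
# (YYYYMMDD_HHMMSS, Indian Standard Time) on every run instead of being idempotent.
- name: Create a uniquely named backup on each run
  cisco.dnac.backup_and_restore_workflow_manager:
    <<: *dnac_login                  # assumed connection anchor
    state: merged
    config:
      - backup:
          - name: DAILY_BACKUP       # used as a prefix when generate_new_backup is true
            generate_new_backup: true
            scope: CISCO_DNA_DATA_WITH_ASSURANCE
```

Restore tasks, by contrast, should follow the dnac_api_task_timeout guidance above and allow at least 3600 seconds.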
+ +- SDK Methods used are + - backup.Backup.get_backup_and_restore_execution + - backup.Backup.get_backup_by_id + - backup.Backup.get_backup_and_restore_executions + - backup.Backup.get_backup_configuration + - backup.Backup.create_n_f_s_configuration + - backup.Backup.get_all_backup + - backup.Backup.delete_n_f_s_configuration + - backup.Backup.create_backup + - backup.Backup.delete_backup + - backup.Backup.get_backup_storages + - backup.Backup.get_all_n_f_s_configurations + - backup.Backup.create_backup_configuration + - restore.Restore.restore_backup + +- Paths used are + - GET/dna/system/api/v1/backupRestoreExecutions/${id} + - GET/dna/system/api/v1/backups/${id} + - GET/dna/system/api/v1/backupRestoreExecutions + - GET/dna/system/api/v1/backupConfiguration + - POST/dna/system/api/v1/backupNfsConfigurations + - GET/dna/system/api/v1/backups + - DELETE/dna/system/api/v1/backupNfsConfigurations/${id} + - POST/dna/system/api/v1/backups + - DELETE/dna/system/api/v1/backups/${id} + - GET/dna/system/api/v1/backupStorages + - GET/dna/system/api/v1/backupNfsConfigurations + - POST/dna/system/api/v1/backupConfiguration + - POST/dna/system/api/v1/backups/${id}/restore +""" + +EXAMPLES = r""" + +# Example 1: Configure NFS server for backup storage infrastructure +- name: Configure NFS backup server for enterprise data protection + hosts: localhost + vars_files: + - "credentials.yml" + - "backup_secrets.yml" + connection: local + gather_facts: false + tasks: + - name: Configure NFS server for secure backup storage connectivity + cisco.dnac.backup_and_restore_workflow_manager: + dnac_host: "{{ dnac_host }}" + dnac_username: "{{ dnac_username }}" + dnac_password: "{{ dnac_password }}" + dnac_verify: "{{ dnac_verify }}" + dnac_port: "{{ dnac_port }}" + dnac_version: "{{ dnac_version }}" + dnac_debug: "{{ dnac_debug }}" + dnac_log: true + dnac_log_level: DEBUG + config_verify: true + dnac_api_task_timeout: 1000 + dnac_task_poll_interval: 1 + state: merged + config: + - nfs_configuration: + - server_ip: "{{ nfs_configuration.server_ip }}" + source_path: "{{ nfs_configuration.source_path }}" + nfs_port: 2049 + nfs_version: nfs4 + nfs_portmapper_port: 111 + +# Example 2: Configure backup target with encryption and retention policies +- name: Configure backup target for automated data protection workflow + hosts: localhost + vars_files: + - "credentials.yml" + - "backup_secrets.yml" + connection: local + gather_facts: false + tasks: + - name: Configure backup target with encryption and data retention policies + cisco.dnac.backup_and_restore_workflow_manager: + dnac_host: "{{ dnac_host }}" + dnac_username: "{{ dnac_username }}" + dnac_password: "{{ dnac_password }}" + dnac_verify: "{{ dnac_verify }}" + dnac_port: "{{ dnac_port }}" + dnac_version: "{{ dnac_version }}" + dnac_debug: "{{ dnac_debug }}" + dnac_log: true + dnac_log_level: DEBUG + config_verify: true + dnac_api_task_timeout: 1000 + dnac_task_poll_interval: 1 + state: merged + config: + - backup_storage_configuration: + - server_type: NFS + nfs_details: + server_ip: "{{ nfs_configuration.server_ip }}" + source_path: "{{ nfs_configuration.source_path }}" + nfs_port: 2049 + nfs_version: nfs4 + nfs_portmapper_port: 111 + data_retention_period: 51 + encryption_passphrase: "{{ backup_storage_configuration.encryption_passphrase }}" + +# Example 3: Create backup for systematic data preservation +- name: Create backup for automated network infrastructure backup + hosts: localhost + vars_files: + - "credentials.yml" + connection: local + gather_facts: 
false + tasks: + - name: Create backup with name and scope specifications + cisco.dnac.backup_and_restore_workflow_manager: + dnac_host: "{{ dnac_host }}" + dnac_username: "{{ dnac_username }}" + dnac_password: "{{ dnac_password }}" + dnac_verify: "{{ dnac_verify }}" + dnac_port: "{{ dnac_port }}" + dnac_version: "{{ dnac_version }}" + dnac_debug: "{{ dnac_debug }}" + dnac_log: true + dnac_log_level: DEBUG + config_verify: true + dnac_api_task_timeout: 1000 + dnac_task_poll_interval: 1 + state: merged + config: + - backup: + - name: BACKUP24_07 + scope: CISCO_DNA_DATA_WITHOUT_ASSURANCE + +# Example 4: Restore backup for disaster recovery operations +- name: Restore backup for disaster recovery and data restoration + hosts: localhost + vars_files: + - "credentials.yml" + - "backup_secrets.yml" + connection: local + gather_facts: false + tasks: + - name: Restore backup + cisco.dnac.backup_and_restore_workflow_manager: + dnac_host: "{{ dnac_host }}" + dnac_username: "{{ dnac_username }}" + dnac_password: "{{ dnac_password }}" + dnac_verify: "{{ dnac_verify }}" + dnac_port: "{{ dnac_port }}" + dnac_version: "{{ dnac_version }}" + dnac_debug: "{{ dnac_debug }}" + dnac_log: true + dnac_log_level: DEBUG + config_verify: true + dnac_api_task_timeout: 1000 + dnac_task_poll_interval: 1 + state: merged + config: + - restore_operations: + - name: "BACKUP17_09" + encryption_passphrase: "{{ restore_operations.encryption_passphrase }}" + +# Example 5: Delete NFS configuration for infrastructure cleanup +- name: Remove NFS configuration from backup infrastructure + hosts: localhost + vars_files: + - "credentials.yml" + connection: local + gather_facts: false + tasks: + - name: Delete NFS configuration from backup infrastructure + cisco.dnac.backup_and_restore_workflow_manager: + dnac_host: "{{ dnac_host }}" + dnac_username: "{{ dnac_username }}" + dnac_password: "{{ dnac_password }}" + dnac_verify: "{{ dnac_verify }}" + dnac_port: "{{ dnac_port }}" + dnac_version: "{{ dnac_version }}" + dnac_debug: "{{ dnac_debug }}" + dnac_log: true + dnac_log_level: DEBUG + config_verify: true + dnac_api_task_timeout: 1000 + dnac_task_poll_interval: 1 + state: deleted + config: + - nfs_configuration: + - server_ip: "{{ nfs_configuration.server_ip }}" + source_path: "{{ nfs_configuration.source_path }}" + +# Example 6: Delete backup for lifecycle management +- name: Remove backup from automated backup operations + hosts: localhost + vars_files: + - "credentials.yml" + connection: local + gather_facts: false + tasks: + - name: Delete backup for backup lifecycle management + cisco.dnac.backup_and_restore_workflow_manager: + dnac_host: "{{ dnac_host }}" + dnac_username: "{{ dnac_username }}" + dnac_password: "{{ dnac_password }}" + dnac_verify: "{{ dnac_verify }}" + dnac_port: "{{ dnac_port }}" + dnac_version: "{{ dnac_version }}" + dnac_debug: "{{ dnac_debug }}" + dnac_log: true + dnac_log_level: DEBUG + config_verify: true + dnac_api_task_timeout: 1000 + dnac_task_poll_interval: 1 + state: deleted + config: + - backup: + - name: BACKUP24_07 + +# Example 7: Delete backups using retention policy with name prefix filtering +- name: Remove old backups using retention-based cleanup with name filtering + hosts: localhost + vars_files: + - "credentials.yml" + connection: local + gather_facts: false + tasks: + - name: Delete backups with prefix 'BACKUP03_10' older than 7 days using retention policy + cisco.dnac.backup_and_restore_workflow_manager: + dnac_host: "{{ dnac_host }}" + dnac_username: "{{ dnac_username }}" + 
dnac_password: "{{ dnac_password }}" + dnac_verify: "{{ dnac_verify }}" + dnac_port: "{{ dnac_port }}" + dnac_version: "{{ dnac_version }}" + dnac_debug: "{{ dnac_debug }}" + dnac_log: true + dnac_log_level: DEBUG + config_verify: true + dnac_api_task_timeout: 1000 + dnac_task_poll_interval: 1 + state: deleted + config: + - backup: + - name: BACKUP03_10 + backup_retention_days: 7 + +# Example 8: Delete backups with prefix 'BACKUP03_10' older than 7 days using retention policy +- name: Remove all old backups using retention policy + hosts: localhost + vars_files: + - "credentials.yml" + connection: local + gather_facts: false + tasks: + - name: Delete all backups older than 7 days retention policy + cisco.dnac.backup_and_restore_workflow_manager: + dnac_host: "{{ dnac_host }}" + dnac_username: "{{ dnac_username }}" + dnac_password: "{{ dnac_password }}" + dnac_verify: "{{ dnac_verify }}" + dnac_port: "{{ dnac_port }}" + dnac_version: "{{ dnac_version }}" + dnac_debug: "{{ dnac_debug }}" + dnac_log: true + dnac_log_level: DEBUG + config_verify: true + dnac_api_task_timeout: 1000 + dnac_task_poll_interval: 1 + state: deleted + config: + - backup: + - backup_retention_days: 7 + +# Example 9: Delete all backups for complete infrastructure cleanup +- name: Remove all backups from Cisco Catalyst Center + hosts: localhost + vars_files: + - "credentials.yml" + connection: local + gather_facts: false + tasks: + - name: Delete all existing backups for infrastructure cleanup + cisco.dnac.backup_and_restore_workflow_manager: + dnac_host: "{{ dnac_host }}" + dnac_username: "{{ dnac_username }}" + dnac_password: "{{ dnac_password }}" + dnac_verify: "{{ dnac_verify }}" + dnac_port: "{{ dnac_port }}" + dnac_version: "{{ dnac_version }}" + dnac_debug: "{{ dnac_debug }}" + dnac_log: true + dnac_log_level: DEBUG + config_verify: true + dnac_api_task_timeout: 1000 + dnac_task_poll_interval: 1 + state: deleted + config: + - backup: + - delete_all_backup: true + +# Example 10: Comprehensive backup workflow for enterprise deployment +- name: Complete backup and restore workflow for enterprise infrastructure + hosts: localhost + vars_files: + - "credentials.yml" + connection: local + gather_facts: false + tasks: + - name: Configure comprehensive backup infrastructure with NFS and scheduling + cisco.dnac.backup_and_restore_workflow_manager: + dnac_host: "{{ dnac_host }}" + dnac_username: "{{ dnac_username }}" + dnac_password: "{{ dnac_password }}" + dnac_verify: "{{ dnac_verify }}" + dnac_port: "{{ dnac_port }}" + dnac_version: "{{ dnac_version }}" + dnac_debug: "{{ dnac_debug }}" + dnac_log: true + dnac_log_level: DEBUG + config_verify: true + dnac_api_task_timeout: 1000 + dnac_task_poll_interval: 1 + state: merged + config: + - nfs_configuration: + - server_ip: "{{ nfs_configuration.server_ip }}" + source_path: "{{ nfs_configuration.source_path }}" + nfs_port: 2049 + nfs_version: nfs4 + nfs_portmapper_port: 111 + backup_configuration: + - server_type: NFS + nfs_details: + server_ip: "{{ backup_storage_configuration.server_ip }}" + source_path: "{{ backup_storage_configuration.source_path }}" + nfs_port: 2049 + nfs_version: nfs4 + nfs_portmapper_port: 111 + data_retention_period: 30 + encryption_passphrase: Enterprise@Backup2024 + backup: + - name: ENTERPRISE_DAILY_BACKUP + scope: CISCO_DNA_DATA_WITH_ASSURANCE + +# Example 11: Multiple NFS server configuration for redundant backup storage +- name: Configure multiple NFS servers for backup redundancy + hosts: localhost + vars_files: + - "credentials.yml" + 
connection: local + gather_facts: false + tasks: + - name: Configure primary and secondary NFS servers for backup redundancy + cisco.dnac.backup_and_restore_workflow_manager: + dnac_host: "{{ dnac_host }}" + dnac_username: "{{ dnac_username }}" + dnac_password: "{{ dnac_password }}" + dnac_verify: "{{ dnac_verify }}" + dnac_port: "{{ dnac_port }}" + dnac_version: "{{ dnac_version }}" + dnac_debug: "{{ dnac_debug }}" + dnac_log: true + dnac_log_level: DEBUG + config_verify: true + dnac_api_task_timeout: 1000 + dnac_task_poll_interval: 1 + state: merged + config: + - nfs_configuration: + - server_ip: "{{ nfs_configuration.server_ip }}" + source_path: "{{ nfs_configuration.source_path }}" + nfs_port: 2049 + nfs_version: nfs4 + nfs_portmapper_port: 111 + - server_ip: "{{ nfs_configuration.server_ip }}" + source_path: "{{ nfs_configuration.source_path }}" + nfs_port: 2049 + nfs_version: nfs4 + nfs_portmapper_port: 111 + +# Example 12: Create backup with timestamp prefix for automated backup workflows +- name: Create automated backup with timestamp for unique identification + hosts: localhost + vars_files: + - "credentials.yml" + connection: local + gather_facts: false + tasks: + - name: Generate timestamped backup for automated data protection workflows + cisco.dnac.backup_and_restore_workflow_manager: + dnac_host: "{{ dnac_host }}" + dnac_username: "{{ dnac_username }}" + dnac_password: "{{ dnac_password }}" + dnac_verify: "{{ dnac_verify }}" + dnac_port: "{{ dnac_port }}" + dnac_version: "{{ dnac_version }}" + dnac_debug: "{{ dnac_debug }}" + dnac_log: true + dnac_log_level: DEBUG + config_verify: true + dnac_api_task_timeout: 1000 + dnac_task_poll_interval: 1 + state: merged + config: + - backup: + - name: "DAILY_AUTO_BACKUP" + scope: "CISCO_DNA_DATA_WITHOUT_ASSURANCE" + generate_new_backup: true +""" + +RETURN = r""" + +# Case 1: Successful NFS server configuration for backup storage +response_nfs_configuration_created: + description: + - Confirms successful creation of NFS server configuration for backup + storage infrastructure in Cisco Catalyst Center. + - Provides details about the configured NFS server path and connectivity + status for backup operations. + returned: when NFS configuration is successfully created + type: dict + sample: + changed: true + msg: "NFS Configuration(s) '/home/nfsshare/backups/enterprise' created + successfully in Cisco Catalyst Center." + response: "NFS Configuration(s) '/home/nfsshare/backups/enterprise' + created successfully in Cisco Catalyst Center." + +# Case 2: Successful backup target configuration with encryption +response_backup_storage_configuration_created: + description: + - Confirms successful creation or update of backup target configuration + including storage type, retention policies, and encryption settings. + - Validates backup infrastructure readiness for automated data protection + workflows in enterprise environments. + returned: when backup configuration is successfully created or updated + type: dict + sample: + changed: true + msg: "Backup Configuration(s) '/home/nfsshare/backups/enterprise' + created successfully in Cisco Catalyst Center." + response: "Backup Configuration(s) '/home/nfsshare/backups/enterprise' + created successfully in Cisco Catalyst Center." + +# Case 3: Successful backups creation for automated operations +response_backup_created: + description: + - Confirms successful creation of backups for systematic data + preservation with scope-based inclusion specifications. 
+ - Provides verification of backup operations for network + infrastructure data protection and disaster recovery preparedness. + returned: when backups is successfully created + type: dict + sample: + changed: true + msg: "Backup(s) 'ENTERPRISE_BACKUP_2024' created + successfully in Cisco Catalyst Center." + response: "Backup(s) 'ENTERPRISE_BACKUP_2024' created + successfully in Cisco Catalyst Center." + +# Case 4: Successful backup restoration for disaster recovery +response_backup_restored: + description: + - Confirms successful restoration of network infrastructure data from + encrypted backup for disaster recovery operations. + - Validates data recovery completion with encryption passphrase + authentication for secure backup restoration workflows. + returned: when backup restoration is successfully completed + type: dict + sample: + changed: true + msg: "Backup(s) 'enterprise_backup_20240315' restored successfully + in Cisco Catalyst Center." + response: "Backup(s) 'enterprise_backup_20240315' restored successfully + in Cisco Catalyst Center." + +# Case 5: Successful NFS configuration removal for infrastructure cleanup +response_nfs_configuration_deleted: + description: + - Confirms successful deletion of NFS server configuration from backup + infrastructure for decommissioning or reconfiguration purposes. + - Validates cleanup of backup storage connectivity for infrastructure + lifecycle management and resource optimization. + returned: when NFS configuration is successfully deleted + type: dict + sample: + changed: true + msg: "NFS Configuration(s) '/home/nfsshare/backups/legacy' deleted + successfully from Cisco Catalyst Center." + response: "NFS Configuration(s) '/home/nfsshare/backups/legacy' deleted + successfully from Cisco Catalyst Center." + +# Case 6: Successful backups removal for lifecycle management +response_backup_deleted: + description: + - Confirms successful deletion of backups from automated backup + operations for schedule lifecycle management. + - Provides verification of backups cleanup for operational + efficiency and resource management in backup infrastructure. + returned: when backups is successfully deleted + type: dict + sample: + changed: true + msg: "Backup(s) 'LEGACY_BACKUP_2023' deleted successfully + from Cisco Catalyst Center." + response: "Backup(s) 'LEGACY_BACKUP_2023' deleted successfully + from Cisco Catalyst Center." + +# Case 7: Configuration already exists - no changes required +response_no_changes_required: + description: + - Indicates that the requested backup and restore configuration already + exists in the desired state, requiring no modifications. + - Confirms idempotent operation completion with existing configuration + validation for backup infrastructure consistency. + returned: when configuration already exists in desired state + type: dict + sample: + changed: false + msg: "NFS Configuration(s) '/home/nfsshare/backups/existing' already + exist in Cisco Catalyst Center." + response: "NFS Configuration(s) '/home/nfsshare/backups/existing' + already exist in Cisco Catalyst Center." + +# Case 8: Operation failure with detailed error information +response_operation_failed: + description: + - Provides detailed error information when backup and restore operations + fail due to validation, connectivity, or configuration issues. + - Includes specific failure reasons for troubleshooting backup + infrastructure problems and operational recovery guidance. 
+ returned: when operations fail due to errors or validation issues + type: dict + sample: + changed: false + failed: true + msg: "Mount path not retrievable as NFS node is unhealthy for server IP '172.27.17.90', + source path '/home/nfsshare/backups/TB19'." + response: "Mount path not retrievable as NFS node is unhealthy for server IP '172.27.17.90', + source path '/home/nfsshare/backups/TB19'." + +""" + +from ansible_collections.cisco.dnac.plugins.module_utils.dnac import ( + DnacBase, +) +from ansible.module_utils.basic import AnsibleModule +from datetime import datetime, timezone, timedelta + +import time +import json +import re + +from ansible_collections.cisco.dnac.plugins.module_utils.validation import ( + validate_list_of_dicts,) + + +class BackupRestore(DnacBase): + def __init__(self, module): + super().__init__(module) + self.supported_states = ["merged", "deleted"] + self.total_response = [] + self.max_timeout = self.params.get('dnac_api_task_timeout') + self.created_nfs_config = [] + self.already_exists_nfs_config = [] + self.deleted_nfs_config = [] + self.already_deleted_nfs_config = [] + self.created_backup_config = [] + self.already_exists_backup_config = [] + self.updated_backup_config = [] + self.backup = [] + self.backup_failed = [] + self.deleted_backup = [] + self.delete_backup_failed = [] + self.already_backup_exists = [] + self.restored_backup = [] + + def validate_input(self): + """ + Validate the playbook configuration for backup and restore workflow. + + This method verifies the structure, types, and content of the 'config' attribute to ensure that it aligns + with the expected schema for backup, restore, NFS, and scheduling configurations. It performs multiple + checks to prevent malformed or incomplete input from proceeding further in the workflow. + + Args: + self: The instance containing the 'config' attribute to be validated. + + Returns: + The current instance with updated attributes: + - self.msg: A descriptive message indicating the validation outcome. + - self.status: The validation result ('success' or 'failed'). + - self.validated_config: The validated configuration if validation passes. + + Validations Performed: + - Ensures 'config' is present and is a list. + - Each item in 'config' must be a dictionary. + - Uses a predefined specification ('config_spec') to validate structure and data types of fields including: + - 'nfs_configuration': Validates server IP, path, port, version (nfs3/nfs4), portmapper. + - 'backup_storage_configuration': Validates server type, NFS details, retention period (3–60), passphrase. + - 'backup': Validates backup name format and scope values. + - 'restore_operations': Validates presence of backup name and encryption passphrase. + - Validates allowed values, default values, and optional/mandatory constraints using 'validate_list_of_dicts'. + - Logs both the input and the result of validation for traceability. 
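+
+        Example:
+            A minimal config item that passes this validation (illustrative values only;
+            keys and defaults follow the 'config_spec' defined in this method):
+
+            config = [{"nfs_configuration": [{"server_ip": "192.0.2.10", "source_path": "/srv/catalyst_backups"}]}]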
+ """ + self.log("Validating backup and restore configuration...", "INFO") + + if not self.config: + self.msg = "Backup and restore configuration is not available in playbook for validation" + self.set_operation_result("failed", False, self.msg, "ERROR") + return self + + if not isinstance(self.config, list): + self.msg = "Backup configuration must be a list structure, found type: {0}".format(type(self.config)) + self.set_operation_result("failed", False, self.msg, "ERROR") + return self + + config_data = self.config + + for config_index, config_item in enumerate(self.config): + if not isinstance(config_item, dict): + self.msg = "Configuration item {0} must be dictionary structure, found type: {1}".format( + config_index + 1, type(config_item).__name__) + self.set_operation_result("failed", False, self.msg, "ERROR") + return self + + self.validated_config = self.config + + config_spec = { + "nfs_configuration": { + "type": "list", + "elements": "dict", + "server_ip": { + "type": "str", + "required": True + }, + "source_path": { + "type": "str", + "required": True + }, + "nfs_port": { + "type": "int", + "default": 2049, + "range_min": 1, + "range_max": 65535 + }, + "nfs_version": { + "type": "str", + "allowed_values": ["nfs3", "nfs4"], + "default": "nfs4" + }, + "nfs_portmapper_port": { + "type": "int", + "default": 111, + "range_min": 1, + "range_max": 65535 + } + }, + "backup_storage_configuration": { + "type": "list", + "elements": "dict", + "server_type": { + "type": "str", + "required": True, + "allowed_values": ["NFS", "PHYSICAL_DISK"] + }, + "nfs_details": { + "type": "dict", + "elements": "dict", + "server_ip": { + "type": "str", + "required": True + }, + "source_path": { + "type": "str", + "required": True + }, + "nfs_port": { + "type": "int", + "default": 2049, + "range_min": 1, + "range_max": 65535 + }, + "nfs_version": { + "type": "str", + "allowed_values": ["nfs3", "nfs4"], + "default": "nfs4" + }, + "nfs_portmapper_port": { + "type": "int", + "default": 111, + "range_min": 1, + "range_max": 65535 + } + }, + "data_retention_period": {"type": "int", "range_min": 3, "range_max": 60}, + "encryption_passphrase": {"type": "str"}, + }, + "backup": { + "type": "list", + "elements": "dict", + "name": { + "type": "str", + }, + "scope": { + "type": "str", + "allowed_values": [ + "CISCO_DNA_DATA_WITH_ASSURANCE", + "CISCO_DNA_DATA_WITHOUT_ASSURANCE" + ] + }, + "generate_new_backup": { + "type": "bool", + "default": False + }, + "delete_all_backup": { + "type": "bool", + "default": False + }, + "backup_retention_days": { + "type": "int", + } + }, + "restore_operations": { + "type": "list", + "elements": "dict", + "name": { + "type": "str", + "required": True + }, + "encryption_passphrase": { + "type": "str", + "required": True + } + } + } + + try: + valid_config, invalid_params = validate_list_of_dicts(self.config, config_spec) + + if invalid_params: + self.msg = "Configuration validation failed with invalid parameters: {0}".format( + invalid_params) + self.set_operation_result("failed", False, self.msg, "ERROR") + return self + + self.validated_config = valid_config + + self.log("Backup and restore configuration validation completed successfully", "INFO") + self.log("Validated {0} configuration sections for workflow processing".format( + len(valid_config)), "DEBUG") + + return self + + except Exception as validation_exception: + self.msg = "Configuration validation encountered error: {0}".format( + str(validation_exception)) + self.set_operation_result("failed", False, self.msg, 
"ERROR").check_return_status() + + def get_want(self, config): + """ + Extract the desired state ('want') from the backup and restore playbook block. + + Args: + self (object): An instance of a class interacting with Cisco Catalyst Center. + config (dict): A dictionary containing the playbook configuration, expected to include + one or more of the following keys: + - 'backup_storage_configuration' + - 'nfs_configuration' + - 'backup' + - 'restore_operations' + + Returns: + self: The current instance of the class with the 'want' attribute populated + based on the validated backup and restore configuration from the playbook. + + Description: + This method processes the user-provided configuration to extract only the relevant + sections required for backup and restore operations. Specifically, it performs the following steps: + + - Validates that at least one of the expected keys is present in the config. + - Extracts values from 'backup_storage_configuration', 'nfs_configuration', 'backup', + and 'restore_operations', if present. + - Logs the final desired state for visibility. + """ + self.log("Extracting desired backup and restore workflow state from playbook configuration", "DEBUG") + self.log("Processing configuration sections for comprehensive workflow validation", "DEBUG") + + want = {} + backup_config = config.get("backup_storage_configuration") + nfs_config = config.get("nfs_configuration") + backup = config.get("backup") + restore_operations = config.get("restore_operations") + + config_sections = [] + if backup_config: + config_sections.append("backup_storage_configuration") + if nfs_config: + config_sections.append("nfs_configuration") + if backup: + config_sections.append("backup") + if restore_operations: + config_sections.append("restore_operations") + + self.log("Available configuration sections: {0}".format(", ".join(config_sections) if config_sections else "none"), "DEBUG") + + if not any([backup_config, nfs_config, backup, restore_operations]): + self.msg = ( + "Backup and restore workflow requires at least one configuration section: " + "'backup_storage_configuration', 'nfs_configuration', 'backup', or 'restore_operations'" + ) + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + + want = { + "backup_storage_configuration": backup_config, + "backup": backup, + "restore_operations": restore_operations, + "nfs_configuration": nfs_config + } + + self.want = want + self.log("Backup and restore workflow desired state extraction completed successfully", "DEBUG") + self.log("Extracted {0} configuration sections for workflow processing".format(len(config_sections)), "DEBUG") + return self + + def get_nfs_configuration_details(self): + """ + Retrieves all NFS server configurations for backup storage infrastructure validation. + + This method fetches comprehensive NFS configuration data from Cisco Catalyst Center + to support backup workflow operations including server connectivity validation, + mount path verification, and configuration matching for backup target setup. + + Args: + self (object): An instance of a class interacting with Cisco Catalyst Center. + + Returns: + list: Complete list of current NFS configurations from Catalyst Center + containing server specifications, mount paths, and health status. + Returns empty list if no configurations exist or on API failure. 
+        Description:
+            This method evaluates the desired NFS configuration from the playbook-provided input and attempts to
+            locate a matching NFS configuration from Catalyst Center.
+
+            It performs the following operations:
+            - Parses the 'server_ip' and 'source_path' from either:
+                - The first item in 'nfs_configuration', or
+                - The nested 'nfs_details' under 'backup_storage_configuration'.
+            - Calls the Catalyst Center API ('get_all_n_f_s_configurations') to retrieve existing NFS configurations.
+            - Validates the API response structure and logs it for traceability.
+            - Iterates through existing NFS configs to find a match based on both 'server' and 'sourcePath' fields.
+        """
+        self.log("Retrieving NFS server configurations for backup infrastructure validation", "DEBUG")
+        self.log("Executing API call to fetch all existing NFS configurations from Catalyst Center", "DEBUG")
+
+        current_nfs_configs = []
+
+        try:
+            response = self.dnac._exec(
+                family="backup",
+                function="get_all_n_f_s_configurations",
+            )
+            self.log(
+                "Received API response from 'get_all_n_f_s_configurations': {0}".format(str(response)),
+                "DEBUG",
+            )
+
+            if not response or "response" not in response:
+                self.msg = "Invalid NFS configuration API response structure - missing required 'response' field"
+                self.log(self.msg, "ERROR")
+                self.log("Received response data: {0}".format(response), "ERROR")
+                self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status()
+
+            current_nfs_configs = response.get("response", [])
+
+        except Exception as e:
+            self.msg = "An error occurred while retrieving all NFS configuration details: {0}".format(e)
+            self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status()
+
+        self.log("Retrieved {0} NFS configurations for backup infrastructure evaluation".format(
+            len(current_nfs_configs)), "DEBUG")
+
+        return current_nfs_configs
+
+    def get_backup_configuration(self):
+        """
+        Retrieves and validates backup target configuration for enterprise data protection.
+
+        This method fetches the current backup configuration from Cisco Catalyst Center
+        and performs validation against desired backup settings including server type,
+        NFS connectivity details, and storage path specifications for backup
+        infrastructure verification and configuration management.
+
+        Args:
+            self (object): An instance of a class interacting with Cisco Catalyst Center.
+
+        Returns:
+            tuple: Contains backup configuration status and data:
+                - backup_configuration_exists (bool): Whether a backup configuration
+                  exists in Catalyst Center
+                - current_backup_configuration (dict): Current backup settings
+                  retrieved from the system
+                - matched_config (dict): Matched configuration if server and path
+                  align with the desired state
+
+        Description:
+            This method checks the desired backup configuration provided in the playbook and attempts to match it
+            with the backup configuration retrieved from Catalyst Center.
+
+            This includes:
+            - Executing the Catalyst Center API 'get_backup_configuration' to retrieve current backup settings.
+            - If the expected type is NFS, comparing the retrieved 'server' and 'sourcePath' values against the expected input.
+            - Logging and returning the matched configuration if all values align.
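+
+        Example:
+            For a desired NFS target of server_ip '192.0.2.10' and source_path '/srv/catalyst_backups'
+            (illustrative values), a matching system configuration yields roughly:
+
+            (True, {"type": "NFS", "server": "192.0.2.10", "sourcePath": "/srv/catalyst_backups"}, {...})
+
+            where the third element is the same dictionary returned as the matched configuration.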
+        """
+        self.log("Retrieving backup configuration details...", "DEBUG")
+
+        backup_configuration_exists = False
+        current_backup_configuration = {}
+        matched_config = {}
+
+        backup_config_list = self.want.get("backup_storage_configuration", [])
+        expected_server_type = expected_server_ip = expected_source_path = None
+
+        if backup_config_list and isinstance(backup_config_list, list):
+            backup_config = backup_config_list[0]
+            expected_server_type = backup_config.get("server_type")
+            nfs_details = backup_config.get("nfs_details", {})
+            expected_server_ip = nfs_details.get("server_ip")
+            expected_source_path = nfs_details.get("source_path")
+
+        self.log("Retrieving backup target configuration for enterprise data protection validation", "DEBUG")
+        self.log("Expected backup configuration - server_type: {0}, server_ip: {1}, source_path: {2}".format(
+            expected_server_type, expected_server_ip, expected_source_path), "DEBUG")
+
+        try:
+            response = self.dnac._exec(
+                family="backup",
+                function="get_backup_configuration",
+            )
+            self.log(
+                "Received API response from 'get_backup_configuration': {0}".format(str(response)),
+                "DEBUG",
+            )
+
+            if not response or "response" not in response:
+                self.log(
+                    "Invalid or empty response for backup configurations: {0}".format(response),
+                    "ERROR",
+                )
+                return backup_configuration_exists, current_backup_configuration, matched_config
+
+            current_backup_configuration = response.get("response", {})
+            backup_configuration_exists = bool(current_backup_configuration)
+
+            self.log("Backup configuration exists in system: {0}".format(backup_configuration_exists), "DEBUG")
+
+            if expected_server_type and expected_server_type.upper() == "NFS":
+                self.log("Validating NFS backup configuration against desired state", "DEBUG")
+
+                current_server = current_backup_configuration.get("server")
+                current_path = current_backup_configuration.get("sourcePath")
+
+                self.log("Current NFS configuration - server: {0}, sourcePath: {1}".format(
+                    current_server, current_path), "DEBUG")
+
+                if current_server == expected_server_ip and current_path == expected_source_path:
+                    matched_config = current_backup_configuration
+                    self.log("Backup configuration successfully matched with desired NFS settings", "DEBUG")
+                else:
+                    self.log("Backup configuration does not match desired NFS settings", "DEBUG")
+
+        except Exception as e:
+            self.msg = "An error occurred while retrieving the backup configuration details: {0}".format(e)
+            self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status()
+
+        return backup_configuration_exists, current_backup_configuration, matched_config
+
+    def get_backup(self):
+        """
+        Retrieves and validates backups for enterprise data protection management.
+
+        This method fetches existing backups from Cisco Catalyst Center
+        and performs validation against the desired backup configuration,
+        including name matching, for backup infrastructure planning and
+        lifecycle management operations.
+
+        Args:
+            self (object): An instance of a class interacting with Cisco Catalyst Center.
+
+        Returns:
+            tuple: Contains backup status and data:
+                - backup_exists (bool): Whether any backups exist in the system
+                - current_backups (list): Complete list of backups
+                  retrieved from Catalyst Center
+                - matched_config (dict): Matched backup configuration
+                  by name if found
+
+        Description:
+            This method processes the desired backup configuration from the playbook input
+            and attempts to identify a matching backup from Catalyst Center.
+ + Specifically, it performs the following operations: + - Extracts the 'name' field from the first entry in the 'backup' section of the 'want' state. + - Invokes the 'get_all_backup' API to retrieve the list of all backup from Catalyst Center. + - Validates the structure of the API response. + - Iterates through the list of backup to find an entry with a matching name. + - Logs and returns the matched backup configuration, if found. + """ + self.log("Retrieving backup details...", "DEBUG") + + backup_exists = False + current_backups = [] + matched_config = {} + + backup_list = self.want.get("backup", []) + expected_backup_name = None + + if backup_list and isinstance(backup_list, list): + backup = backup_list[0] + expected_backup_name = backup.get("name") + + self.log("Retrieving backup for enterprise data protection validation", "DEBUG") + self.log("Expected backup name: {0}".format(expected_backup_name), "DEBUG") + + try: + response = self.dnac._exec( + family="backup", + function="get_all_backup", + ) + self.log( + "Received API response from 'get_all_backup': {0}".format(str(response)), + "DEBUG", + ) + + if not response or "response" not in response: + self.log( + "Invalid or empty response for backup: {0}".format(response), + "ERROR", + ) + return backup_exists, current_backups, matched_config + + current_backups = response.get("response", []) + backup_exists = bool(current_backups) + + self.log("Retrieved {0} backup for validation".format(len(current_backups)), "DEBUG") + self.log("backup exist in system: {0}".format(backup_exists), "DEBUG") + + if expected_backup_name: + self.log("Searching for backup with name: {0}".format(expected_backup_name), "DEBUG") + + for backup in current_backups: + current_backup_name = backup.get("name") + if current_backup_name == expected_backup_name: + matched_config = backup + self.log("Successfully matched backup configuration by name", "DEBUG") + break + + if not matched_config: + self.log("No backup found with name: {0}".format(expected_backup_name), "DEBUG") + else: + self.log("No backup name specified for matching", "DEBUG") + + except Exception as e: + self.log( + "An error occurred while retrieving backup: {0}".format(e), + "ERROR" + ) + + return backup_exists, current_backups, matched_config + + def get_have(self): + """ + Retrieves current backup infrastructure state for enterprise workflow validation. + + This method fetches comprehensive current state information from Cisco Catalyst + Center including NFS server configurations, backup target settings, and backup + details for comparison against desired state in backup and restore + workflow management operations. + + Args: + self (object): An instance of a class interacting with Cisco Catalyst Center. + + Returns: + self: The current instance with the 'have' attribute populated with actual system state details + including NFS configuration, backup configuration, and backup. + + Description: + This method evaluates the desired configuration ('want') and gathers corresponding current state + ('have') from Cisco Catalyst Center. + + Specifically, it performs the following actions: + - If 'nfs_configuration' is provided in the desired state: + - Calls 'get_nfs_configuration_details()' to fetch and match the NFS config. + - Extracts and stores the matched configuration and its existence flag. + - If 'backup_storage_configuration' is present: + - Calls 'get_backup_configuration()' to retrieve the existing backup config. + - Stores the matched configuration and existence flag. 
+ - If 'backup' is provided: + - Calls 'get_backup()' to retrieve and match backup by name. + - Stores the matched backup and its existence flag. + - If 'restore_operations' is provided: + - Logs that restore processing is initiated, though no current state is retrieved for it. + """ + self.log("Retrieving current backup infrastructure state for enterprise workflow validation", "DEBUG") + self.log("Processing desired configuration sections for current state comparison", "DEBUG") + have = {} + self.log("Fetching NFS server configurations for backup storage validation", "DEBUG") + + current_nfs_configs = self.get_nfs_configuration_details() + have["current_nfs_configurations"] = current_nfs_configs + self.log("Retrieved {0} NFS configurations for backup infrastructure evaluation".format( + len(current_nfs_configs)), "DEBUG") + + backup_configuration_details = self.want.get("backup_storage_configuration", []) + + if backup_configuration_details: + self.log("Retrieving current backup target configuration for validation", "DEBUG") + backup_configuration_exists, current_backup_config, matched_backup = self.get_backup_configuration() + have["backup_configuration_exists"] = bool(current_backup_config) + have["current_backup_configuration"] = current_backup_config if current_backup_config else {} + + self.log("Backup configuration exists in system: {0}".format( + have["backup_configuration_exists"]), "DEBUG") + self.log("Current backup configuration details retrieved for comparison", "DEBUG") + + backup_details = self.want.get("backup", []) + if backup_details: + self.log("Retrieving current backup information for validation", "DEBUG") + backup_exists, current_backups, matched_backup = self.get_backup() + matched_exists = isinstance(matched_backup, dict) and matched_backup.get("name") + have["backup_exists"] = bool(matched_exists) + have["current_backup"] = matched_backup if matched_exists else {} + have["all_backups"] = current_backups if current_backups else [] + + self.log("Backup exists in system: {0}".format(have["backup_exists"]), "DEBUG") + self.log("Current backup details retrieved for comparison", "DEBUG") + + restore_operations = self.want.get("restore_operations", []) + if restore_operations: + self.log("Processing restore operation context for backup workflow validation", "DEBUG") + self.log("Processing restore details...", "DEBUG") + + self.have = have + + self.log("Current backup infrastructure state retrieval completed successfully", "DEBUG") + self.log("Retrieved state includes {0} configuration sections for validation".format( + len([k for k in have.keys() if have[k]])), "DEBUG") + return self + + def get_diff_merged(self, config): + """ + Processes backup workflow configuration for merged state operations. + + This method orchestrates comprehensive backup and restore workflow processing + by analyzing desired configuration sections and triggering appropriate diff + computations for NFS server setup, backup target configuration, backup + scheduling, and restore operations in enterprise data protection workflows. + + Args: + self (object): An instance of the class used for interacting with Cisco Catalyst Center. + config (dict): The configuration dictionary containing the desired state for: + - NFS configuration + - Backup configuration + - backup create + - Restore details + + Returns: + self: The current instance of the class, with updated diff state for each applicable configuration section. 
+ + Description: + This method processes the configuration details provided in the playbook for Catalyst Center backup and restore workflows. + It checks for the presence of specific configuration options—such as NFS configuration, backup configuration, backup create, + and restore details—and triggers corresponding diff methods for each section: + + - 'get_diff_nfs_configuration()': Validates and computes the difference between current and desired NFS settings. + - 'get_diff_backup_configuration()': Handles comparison for backup configuration profiles. + - 'get_diff_backup()': Evaluates the defined backup settings. + - 'get_diff_restore_backup()': Verifies restore parameters and validates their applicability. + + These methods compare the desired state (from 'self.want') with the current state (from 'self.have') and determine + what changes (if any) need to be made. The result is used later in execution to decide whether a configuration + update or restore action is required. + + The method also logs the progress of each configuration section for traceability and debugging purposes. + """ + self.log("Processing backup workflow configuration for merged state operations", "DEBUG") + self.log("Configuration sections for processing: {0}".format( + ", ".join([k for k in config.keys() if config.get(k)])), "DEBUG") + + self.config = config + + if config.get("nfs_configuration"): + self.log("Processing NFS server configuration for backup storage validation", "DEBUG") + self.get_diff_nfs_configuration() + + if config.get("backup_storage_configuration"): + self.log("Processing backup target configuration for data protection workflow", "DEBUG") + self.get_diff_backup_configuration() + + if config.get("backup"): + self.log("Processing backup details...", "INFO") + self.get_diff_backup() + + if config.get("restore_operations"): + self.log("Processing restore operation configuration for disaster recovery workflow", "DEBUG") + self.get_diff_restore_backup() + + self.log("Backup workflow configuration processing completed for merged state", "DEBUG") + return self + + def get_diff_deleted(self, config): + """ + Processes backup infrastructure deletion requests for cleanup operations. + + This method orchestrates comprehensive backup and NFS component removal + by analyzing configuration sections marked for deletion and triggering + appropriate cleanup workflows for NFS server configurations and backup + in enterprise backup infrastructure lifecycle management. + + Args: + self (object): An instance of the class used for interacting with Cisco Catalyst Center. + config (dict): The configuration dictionary containing the details for NFS configuration and backup + that are marked for deletion. + + Returns: + self: The current instance of the class, with updated 'result' and 'have' attributes reflecting deletion operations. + + Description: + This method analyzes the playbook configuration to determine which backup and NFS components should be removed + from the Catalyst Center. It checks for keys like 'nfs_configuration' and 'backup' and invokes the + appropriate deletion workflows: + + - 'delete_nfs_configuration()': Initiates deletion of the specified NFS configuration. + - 'delete_backup()': Triggers removal of backup if they exist. + + Each operation is logged for traceability and debugging. The outcomes from these deletion tasks are used to + update internal tracking attributes like 'result', which determines if a change occurred ('changed: True') + during execution. 
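+
+        Example:
+            A 'deleted' state config block dispatched by this method (illustrative values only):
+
+            {
+                "nfs_configuration": [{"server_ip": "192.0.2.10", "source_path": "/srv/catalyst_backups"}],
+                "backup": [{"name": "BACKUP24_07"}]
+            }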
+ """ + self.log("Processing backup infrastructure deletion requests for cleanup operations", "DEBUG") + self.log("Configuration sections for deletion: {0}".format( + ", ".join([k for k in config.keys() if config.get(k)])), "DEBUG") + + self.config = config + + if config.get("nfs_configuration"): + self.log("Processing NFS server configuration deletion for infrastructure cleanup", "DEBUG") + self.delete_nfs_configuration() + + if config.get("backup"): + self.log("Processing backup details for deletion...", "INFO") + self.delete_backup() + + self.log("Backup infrastructure deletion processing completed successfully", "DEBUG") + return self + + def get_diff_nfs_configuration(self): + """ + Validates and manages NFS server configuration for backup storage infrastructure. + + This method processes desired NFS server configurations against current state + to determine necessary actions for backup storage infrastructure setup. It + ensures only missing NFS configurations are created while avoiding duplicates + and validates connectivity specifications for enterprise data protection workflows. + + Args: + self (object): An instance of the class used for interacting with Cisco Catalyst Center. + + Returns: + self: The current instance with updated 'result' and 'have' attributes based on the NFS configuration status. + + Description: + This method checks the desired NFS configuration provided in the playbook and compares it with the existing + (current) state ('self.have'). For each NFS configuration entry: + + - It ensures that both 'server_ip' and 'source_path' are provided. + - If no matching configuration exists in the current state, it initiates the creation of the new NFS configuration. + - If the configuration already exists, it logs an informational message and sets the operation result accordingly. + + The method ensures only missing NFS configurations are created and avoids duplicates. If input validation fails, + an appropriate error message is set and the operation is marked as failed. 
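+
+        Example:
+            Matching is keyed on the existing entry's 'spec' fields. A desired entry such as
+            {"server_ip": "192.0.2.10", "source_path": "/srv/catalyst_backups"} (illustrative values)
+            is treated as already present only when an existing configuration reports
+            spec.server == "192.0.2.10" and spec.sourcePath == "/srv/catalyst_backups";
+            otherwise creation is initiated via 'create_nfs_configuration'.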
+ """ + self.log("Processing NFS configuration details for diff...", "INFO") + + current_nfs_configs = self.have.get("current_nfs_configurations", []) + expected_nfs_configs = self.want.get("nfs_configuration", []) + + self.log("Processing NFS server configurations for backup storage infrastructure validation", "DEBUG") + self.log("Expected NFS configurations: {0}, Current NFS configurations: {1}".format( + len(expected_nfs_configs), len(current_nfs_configs)), "DEBUG") + + for config_index, nfs_config_details in enumerate(expected_nfs_configs): + server_ip = nfs_config_details.get("server_ip") + source_path = nfs_config_details.get("source_path") + self.log("Processing NFS configuration {0}: server_ip={1}, source_path={2}".format( + config_index + 1, server_ip, source_path), "DEBUG") + + if not server_ip or not source_path: + self.msg = ("NFS configuration validation failed: Both 'server_ip' and 'source_path' " + "must be specified for backup storage infrastructure setup") + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + continue + + self.log("Searching for existing NFS configuration matching server: {0}, path: {1}".format( + server_ip, source_path), "DEBUG") + + nfs_configuration_found = False + for existing_config in current_nfs_configs: + spec = existing_config.get("spec", {}) + existing_server = spec.get("server") + existing_path = spec.get("sourcePath") + + self.log("Comparing with existing NFS: server={0}, path={1}".format( + existing_server, existing_path), "DEBUG") + + if existing_server == server_ip and existing_path == source_path: + nfs_configuration_found = True + self.log("Found matching NFS configuration for backup storage infrastructure", "DEBUG") + break + + if not nfs_configuration_found: + self.log("NFS configuration not found - initiating creation for backup storage infrastructure", "DEBUG") + self.log("Creating NFS server configuration for server '{0}' with source path '{1}'".format( + server_ip, source_path), "INFO") + self.create_nfs_configuration(nfs_config_details) + + else: + self.msg = ("NFS server configuration already exists for server_ip '{0}' " + "and source_path '{1}' in backup storage infrastructure").format( + server_ip, source_path) + self.already_exists_nfs_config.append(source_path) + self.set_operation_result("success", False, self.msg, "INFO") + + self.log("NFS server configuration processing completed for backup storage infrastructure", "DEBUG") + return self + + def get_diff_backup_configuration(self): + """ + Validates and manages the creation or update of backup configuration in Cisco Catalyst Center. + + This method processes desired backup target configurations against current state + to determine necessary actions for backup infrastructure setup. It ensures proper + NFS connectivity validation, mount path retrieval, and configuration updates for + enterprise backup and restore workflows. + + Args: + self (object): An instance of the class responsible for backup and restore workflows. + + Returns: + self: The current instance with updated result based on the success or failure of the backup configuration logic. + + Description: + This method performs a diff operation to reconcile the desired backup configuration state ('self.want') + with the current system state ('self.have'). + + For each backup configuration provided: + - It validates that required 'server_ip' and 'source_path' under 'nfs_details' are present. 
+ - If a backup configuration does not already exist, it initiates the creation using 'create_backup_configuration'. + - If it exists, it checks for the health of associated NFS nodes. + - If nodes are unhealthy, the operation is halted with an appropriate failure message. + - If healthy, it retrieves the mount path from the matched configuration. + - It then compares the current backup settings (server type, retention period, mount path) with the desired ones. + - If all match, the method exits without making changes. + - If any differ, a payload is constructed and the configuration is updated via the Catalyst Center API. + + The method sets the operation result and logs all relevant details for debugging and auditability. + """ + self.log("Processing backup configuration details...", "INFO") + + expected_backup_configs = self.want.get("backup_storage_configuration", []) + backup_configuration = self.have + + self.log("Processing backup target configurations for enterprise data protection validation", "DEBUG") + self.log("Expected backup configurations: {0}".format(len(expected_backup_configs)), "DEBUG") + + for config_index, backup_config_details in enumerate(expected_backup_configs): + nfs_details = backup_config_details.get("nfs_details", {}) + server_ip = nfs_details.get("server_ip") + source_path = nfs_details.get("source_path") + + self.log("Processing backup configuration {0}: server_ip={1}, source_path={2}".format( + config_index + 1, server_ip, source_path), "DEBUG") + + backup_configuration_exists = backup_configuration.get("backup_configuration_exists") + self.log("Backup configuration exists: {0}".format(backup_configuration_exists), "DEBUG") + if backup_configuration.get("backup_configuration_exists") is False: + self.log( + "Backup configuration does not exist. 
Initiating creation process.", + "INFO", + ) + self.create_backup_configuration(backup_config_details) + continue + + self.log("Required Backup configuration details: {0}".format(backup_config_details), "DEBUG") + self.log("Existing Backup configuration details: {0}".format(backup_configuration.get('current_backup_configuration')), "DEBUG") + self.log("Existing NFS details: {0}".format(nfs_details), "DEBUG") + + current_nfs_config = self.get_nfs_configuration_details() + self.log("Current NFS configurations: {0}".format(current_nfs_config), "DEBUG") + + nfs_exists = False + matched_config = None + mount_path = None + + if current_nfs_config: + for current_nfs_config_item in current_nfs_config: + current_nfs_server = current_nfs_config_item.get('spec', {}).get('server') + current_nfs_source_path = current_nfs_config_item.get('spec', {}).get('sourcePath') + + self.log("Comparing NFS config: server={0}, path={1}".format( + current_nfs_server, current_nfs_source_path), "DEBUG") + + if (server_ip == current_nfs_server and source_path == current_nfs_source_path): + nfs_exists = True + matched_config = current_nfs_config_item + self.log("Found matching NFS configuration for backup target", "DEBUG") + break + + self.log("NFS exists: {0}".format(nfs_exists), "DEBUG") + + if not nfs_exists: + self.log("NFS mount path not found for {0}:{1}, attempting to create/verify NFS configuration.".format(server_ip, source_path), "INFO") + self.create_nfs_configuration(nfs_details) + + time.sleep(30) + + refreshed_config = self.get_nfs_configuration_details() + for item in refreshed_config: + if ( + item.get("spec", {}).get("server") == server_ip + and item.get("spec", {}).get("sourcePath") == source_path + ): + matched_config = item + break + + unhealthy_nodes = matched_config.get("status", {}).get("unhealthyNodes") if matched_config else None + + self.log("NFS node health status - unhealthy nodes: {0}".format(unhealthy_nodes), "DEBUG") + + if unhealthy_nodes: + timeout_seconds = 120 + retry_interval = 10 + + self.log("Detected unhealthy NFS node(s). Retrying health check for up to {0} seconds.".format(timeout_seconds), "INFO") + + start_time = time.time() + while unhealthy_nodes and (time.time() - start_time) < timeout_seconds: + self.log("Waiting {0} seconds before next health check...".format(retry_interval), "DEBUG") + time.sleep(retry_interval) + refreshed_config = self.get_nfs_configuration_details() + matched_config = next( + ( + item + for item in refreshed_config + if item.get("spec", {}).get("server") == server_ip + and item.get("spec", {}).get("sourcePath") == source_path + ), + None, + ) + unhealthy_nodes = matched_config.get("status", {}).get("unhealthyNodes") if matched_config else None + self.log("NFS node retry check - unhealthy nodes: {0}".format(unhealthy_nodes), "DEBUG") + + if unhealthy_nodes: + spec = matched_config.get("spec", {}) + server_ip = spec.get("server") + source_path = spec.get("sourcePath") + + self.msg = ( + "Mount path not retrievable as NFS node is unhealthy for server IP '{0}', source path '{1}'." 
+ .format(server_ip, source_path) + ) + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + + else: + self.log( + "NFS node health validation completed successfully - server '{0}' with source path '{1}' " + "recovered from unhealthy state to healthy status".format(server_ip, source_path), + "INFO", + ) + mount_path = matched_config.get("status", {}).get("destinationPath") if matched_config else None + + else: + self.log("NFS node is healthy - retrieving mount path for backup configuration", "DEBUG") + mount_path = matched_config.get("status", {}).get("destinationPath") if matched_config else None + self.log("Retrieved mount path: {0}".format(mount_path), "DEBUG") + + current_backup = backup_configuration.get('current_backup_configuration', {}) + config_server_type = backup_config_details.get('server_type') + current_type = current_backup.get('type') + + final_server_type = current_backup.get('type') if config_server_type == current_type else config_server_type + + config_retention = backup_config_details.get('data_retention_period') + current_retention = current_backup.get('dataRetention') + final_data_retention = ( + config_retention if config_retention is not None else current_retention + ) + current_mount_path = current_backup.get('mountPath') + + final_mount_path = current_mount_path if mount_path == current_mount_path else mount_path + + self.log("Comparing backup parameters - server_type: {0}=={1}, retention: {2}=={3}, mount_path: {4}=={5}".format( + config_server_type, current_type, config_retention, current_retention, mount_path, current_mount_path), "DEBUG") + + if ( + config_server_type == current_type and + config_retention == current_retention and + mount_path == current_mount_path + ): + self.msg = ( + "Backup configuration already exists with desired settings for source path '{0}'".format(source_path) + ) + self.already_exists_backup_config.append(source_path) + self.set_operation_result("success", False, self.msg, "INFO") + return self + + payload = { + 'mountPath': final_mount_path, + 'type': final_server_type, + 'dataRetention': final_data_retention + } + + if 'encryption_passphrase' in backup_config_details and backup_config_details['encryption_passphrase']: + payload['encryptionPassphrase'] = backup_config_details['encryption_passphrase'] + + self.log("Final payload for backup configuration: {0}".format(json.dumps(payload, indent=4)), "DEBUG") + + try: + response = self.dnac._exec( + family="backup", + function="create_backup_configuration", + op_modifies=True, + params={"payload": payload} + ) + self.log("Received API response from 'create_backup_configuration': {0}".format(response), "DEBUG") + self.updated_backup_config.append(source_path) + + if response is None: + self.msg = "Backup configuration updated successfully" + self.set_operation_result("success", True, self.msg, "INFO") + return self + + except Exception as e: + self.msg = "An error occurred while updating backup configuration: {0}".format(e) + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + + self.log("Backup target configuration processing completed for enterprise data protection", "DEBUG") + return self + + def get_diff_backup(self): + """ + Validates and manages the creation of a backup in Cisco Catalyst Center. + + Args: + self (object): An instance of the class responsible for backup and restore workflows. + + Returns: + self: The current instance with updated result based on the success or failure of the backup logic. 
+ + Description: + This method checks the desired backup configuration ('self.want') against the existing + backup configuration ('self.have') to determine whether a new backup needs to be created. + + For each backup provided: + - It ensures that both the 'name' and 'scope' fields are specified. + - If these mandatory fields are missing, the operation fails with an appropriate error message. + - If the backup does not exist ('backup_exists' is False), it initiates the creation + of the backup. + - If the backup already exists, no changes are made, and an informational success message is logged. + """ + self.log("Processing backup details...", "INFO") + + backup = self.have + backup_detail = self.want.get("backup", []) + for backup_details in backup_detail: + name = backup_details.get("name") + scope = backup_details.get("scope") + generate_new_backup = backup_details.get("generate_new_backup", False) + self.log(generate_new_backup) + + if not name or not scope: + self.msg = ( + "Mandatory fields 'name', 'scope' must be specified for backup." + ) + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + + self.log( + "Checking backup for name: {0}, scope: {1}".format( + name, scope + ), + "DEBUG", + ) + + if generate_new_backup: + self.log( + "generate_new_backup enabled. Creating new backup with timestamp for prefix '{0}'.".format(name), + "INFO", + ) + self.create_backup(backup_details) + continue + + if not backup.get("backup_exists"): + self.log( + "Backup does not exist. Initiating creation process for name='{0}'.".format(name), + "INFO", + ) + self.create_backup(backup_details) + else: + self.msg = "Backup '{0}' already exists.".format(name) + self.already_backup_exists.append(name) + self.set_operation_result("success", False, self.msg, "INFO") + return self + + def get_diff_restore_backup(self): + """ + Validates and manages backup restoration operations for disaster recovery workflows. + + This method processes desired backup restoration requests against available backup + to determine restoration feasibility and initiate recovery operations. It ensures + proper validation of restore parameters including backup existence, encryption + credentials, and restoration prerequisites for enterprise disaster recovery. + + Args: + self (object): An instance of the class handling backup and restore workflows. + + Returns: + self: The current instance with updated result after attempting restore operation(s). + + Description: + This method processes the restore configuration provided in 'self.want["restore_operations"]'. + + For each restore entry: + - It checks for the presence of mandatory fields 'name' and 'encryption_passphrase'. + - If either field is missing, the operation fails with an appropriate error message. + - If both fields are present, it logs the action and initiates the restore operation. 
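+
+        Example:
+            A restore entry accepted by this method (illustrative values; the passphrase must match
+            the one used when the backup was created and should normally come from an encrypted
+            variable file rather than being hardcoded):
+
+            {"name": "BACKUP24_07", "encryption_passphrase": "<vault-protected passphrase>"}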
+ """ + self.log("Processing restore details...", "INFO") + + expected_restore_details = self.want.get("restore_operations", []) + + self.log("Processing backup restoration requests for disaster recovery workflows", "DEBUG") + self.log("Expected restore operations: {0}".format(len(expected_restore_details)), "DEBUG") + + if not expected_restore_details: + self.log("No restore operations specified - skipping restoration processing", "DEBUG") + return self + + # Process each restore request for validation and execution + for restore_index, restore_detail in enumerate(expected_restore_details): + backup_name = restore_detail.get("name") + encryption_passphrase = restore_detail.get("encryption_passphrase") + + self.log("Processing restore operation {0}: backup_name={1}".format( + restore_index + 1, backup_name), "DEBUG") + + if not backup_name or not encryption_passphrase: + self.msg = ("Restore operation validation failed: Both 'name' and 'encryption_passphrase' " + "must be specified for backup restoration and disaster recovery") + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + + self.log("Initiating restore for backup name: {0}".format(backup_name), "INFO") + self.restore_backup() + + self.log("Backup restoration processing completed for disaster recovery workflows", "DEBUG") + return self + + def create_backup_configuration(self, backup_config_details): + """ + Validates and creates a backup configuration in Cisco Catalyst Center. + + Args: + backup_config_details (dict): Dictionary containing the backup configuration parameters. + Mandatory fields: + - server_type (str): Type of server (e.g., NFS). + - nfs_details (dict): Dictionary with 'server_ip' and 'source_path'. + - data_retention_period (int): Number of days to retain backup data (between 3 and 60). + - encryption_passphrase (str): Passphrase for encrypting backup data. + + Returns: + self: The current class instance with updated operation result. + + Description: + - Validates presence of all mandatory fields. + - Validates NFS details ('server_ip' and 'source_path'). + - Retrieves or creates NFS configuration to obtain a valid mount path. + - Ensures 'data_retention_period' is within allowed limits (3–60 days). + - Constructs and sends the backup configuration payload using Catalyst Center APIs. + - Logs API responses and updates the operation result accordingly. + """ + self.log( + "Starting backup configuration creation for server_type={0}, retention={1}".format( + backup_config_details.get("server_type"), + backup_config_details.get("data_retention_period") + ), + "INFO" + ) + + mandatory_fields = ["server_type", "nfs_details", "data_retention_period", "encryption_passphrase"] + for field in mandatory_fields: + if field not in backup_config_details: + self.msg = "Mandatory field '{0}' is missing in backup configuration.".format(field) + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + + nfs_details = backup_config_details.get("nfs_details", {}) + server_ip = nfs_details.get("server_ip") + source_path = nfs_details.get("source_path") + + self.log( + "Extracted NFS details for backup configuration: server_ip={0}, source_path={1}".format( + server_ip, source_path + ), + "DEBUG" + ) + + if not server_ip or not source_path: + self.msg = "Both 'server_ip' and 'source_path' must be specified in NFS details." 
+ self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + + current_nfs_configs = self.get_nfs_configuration_details() + matched_config = None + for config in current_nfs_configs: + spec = config.get("spec", {}) + if spec.get("server") == server_ip and spec.get("sourcePath") == source_path: + matched_config = config + self.log( + "Found existing NFS configuration for server_ip={0}, source_path={1}. Using existing mount path." + .format(server_ip, source_path), + "INFO" + ) + break + + mount_path = None + if matched_config: + mount_path = matched_config.get("status", {}).get("destinationPath") + + if not mount_path: + self.log( + "NFS mount path not found for server_ip={0}, source_path={1}. Attempting to create/verify NFS configuration." + .format(server_ip, source_path), + "INFO" + ) + try: + self.create_nfs_configuration(nfs_details) + + current_nfs_configs_after_create = self.get_nfs_configuration_details() + matched_config_after_create = None + for config in current_nfs_configs_after_create: + spec = config.get("spec", {}) + if spec.get("server") == server_ip and spec.get("sourcePath") == source_path: + matched_config_after_create = config + self.log( + "Found newly created NFS configuration for server_ip={0}, source_path={1}." + .format(server_ip, source_path), + "INFO" + ) + break + if matched_config_after_create: + mount_path = matched_config_after_create.get("status", {}).get("destinationPath") + self.log("Successfully created/verified NFS configuration. Retrieved destinationPath: {0}".format(mount_path), "INFO") + else: + self.msg = "Failed to find newly created NFS configuration for {0}:{1}.".format(server_ip, source_path) + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + + except Exception as e: + self.msg = "Failed to create NFS configuration: {0}".format(e) + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + + if not mount_path: + self.msg = "Failed to retrieve NFS destination path even after creation/verification." 
+ self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + + data_retention_period = backup_config_details.get("data_retention_period") + if not (3 <= data_retention_period <= 60): + self.msg = ( + "Data retention period must be between 3 and 60 days, found: {0}".format(data_retention_period) + ) + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + + payload = { + "type": backup_config_details["server_type"].upper(), + "encryptionPassphrase": backup_config_details["encryption_passphrase"], + "mountPath": mount_path, + "dataRetention": data_retention_period, + } + + optional_fields = [("encryption_passphrase", "encryptionPassphrase")] + self.log("Adding optional fields to backup configuration payload", "DEBUG") + for field, key in optional_fields: + value = backup_config_details.get(field) + if value is not None: + payload[key] = value + self.log("Added optional field: {0} with value: {1}".format(key, payload[key]), "DEBUG") + + self.log("Generated payload for create backup configuration: {0}".format(json.dumps(payload, indent=4)), "DEBUG") + + try: + response = self.dnac._exec( + family="backup", + function="create_backup_configuration", + op_modifies=True, + params={"payload": payload} + ) + self.log("Received API response from 'create_backup_configuration': {0}".format(response), "DEBUG") + self.created_backup_config.append(source_path) + + if response is None: + self.msg = "Backup configuration created successfully for {0}".format(server_ip) + self.set_operation_result("success", True, self.msg, "INFO") + return self + + except Exception as e: + self.msg = "An error occurred while creating backup configuration: {0}".format(e) + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + + self.log( + "Completed backup configuration creation for server_ip={0}, source_path={1}".format( + server_ip, source_path + ), + "INFO" + ) + return self + + def create_nfs_configuration(self, nfs_config_details): + """ + Validates and creates an NFS configuration in Cisco Catalyst Center. + + Args: + nfs_config_details (dict): Dictionary containing details of the NFS configuration. + Mandatory fields: + - server_ip (str): IP address of the NFS server. + - source_path (str): Source path on the NFS server. + Optional fields: + - nfs_port (int): Port number used for NFS communication. + - nfs_version (str): Version of NFS protocol (e.g., "v3", "v4"). + - nfs_portmapper_port (int): Port number for the portmapper service. + + Returns: + self: The current class instance with updated operation result. + + Description: + - Validates presence of mandatory fields ('server_ip', 'source_path'). + - Constructs a payload with optional fields if provided. + - Sends the configuration to Catalyst Center using the 'create_n_f_s_configuration' API. + - Logs API responses and updates the operation result accordingly. 
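+
+            Example of the input-to-payload mapping performed by this method
+            (illustrative values only):
+                Input:   {"server_ip": "10.0.0.5", "source_path": "/exports/backup", "nfs_port": 2049}
+                Payload: {"server": "10.0.0.5", "sourcePath": "/exports/backup", "nfsPort": 2049}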
+ """ + self.log( + "Starting NFS configuration creation for server_ip={0}, source_path={1}".format( + nfs_config_details.get("server_ip"), nfs_config_details.get("source_path") + ), + "INFO" + ) + + mandatory_fields = ["server_ip", "source_path"] + + for field in mandatory_fields: + if field not in nfs_config_details: + self.msg = "Mandatory field '{0}' is missing in NFS configuration.".format(field) + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + + payload = { + "server": nfs_config_details["server_ip"], + "sourcePath": nfs_config_details["source_path"], + } + + optional_fields = [ + ("nfs_port", "nfsPort"), + ("nfs_version", "nfsVersion"), + ("nfs_portmapper_port", "portMapperPort"), + ] + + self.log("Adding optional fields to NFS payload", "DEBUG") + for field, key in optional_fields: + value = nfs_config_details.get(field) + if value is not None: + payload[key] = ( + int(value) + if field in ("nfs_port", "nfs_portmapper_port") + else value + ) + self.log( + "Added optional field: {0} with value: {1}".format(key, payload[key]), + "DEBUG" + ) + self.log("Generated payload for create NFS configuration:{0}".format(json.dumps(payload, indent=4)), "DEBUG") + + try: + response = self.dnac._exec( + family="backup", + function="create_n_f_s_configuration", + op_modifies=True, + params={"payload": payload} + ) + self.log("Received API response from 'create_n_f_s_configuration': {0}".format(response), "DEBUG") + self.created_nfs_config.append(nfs_config_details["source_path"]) + + if response is None: + self.msg = ( + "NFS configuration created successfully for server {0} " + "with source_path {1}".format( + nfs_config_details["server_ip"], + nfs_config_details["source_path"] + ) + ) + self.set_operation_result("success", True, self.msg, "INFO") + return self + + except Exception as e: + self.msg = "An error occurred while creating NFS configuration: {0}".format(e) + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + + self.log( + "Exiting NFS configuration creation for server_ip={0}, source_path={1}".format( + nfs_config_details.get("server_ip"), nfs_config_details.get("source_path") + ), + "INFO" + ) + + return self + + def create_backup(self, backup_details): + """ + Validates and creates a backup in Cisco Catalyst Center. + + Args: + backup_details (dict): Dictionary containing backup details. + Mandatory fields: + - name (str): Name of the backup. Must start with an alphabet + and can include alphanumeric characters and special characters + (@, #, _, -, space). + - scope (str): Scope of the backup (e.g., "SYSTEM"). + + Returns: + self: The current class instance with updated operation result. + + Workflow: + - Validates mandatory fields ('name', 'scope'). + - Ensures 'name' follows the naming convention. + - Constructs and sends a payload to the Catalyst Center API using 'create_backup'. + - Extracts the task ID from the API response. + - Polls task status using 'get_backup_status_by_task_id'. + - Based on task status, sets the operation result to success, failure, or warning. + """ + self.log("Creating backup: {0}".format(backup_details), "INFO") + + name_pattern = r"^[A-Za-z][A-Za-z0-9@#_\-]*$" + + name = backup_details.get("name") + self.log("Validating backup name: {0}".format(name), "DEBUG") + if not re.match(name_pattern, name): + self.msg = ( + "Backup name must begin with an alphabet and can contain letters, digits, " + "and the following special characters: @, #, _, -, and space." 
+ ) + self.set_operation_result( + "failed", False, self.msg, "ERROR" + ).check_return_status() + + scope = backup_details.get("scope") + generate_new_backup = backup_details.get("generate_new_backup", False) + final_name = name + + if generate_new_backup: + ist = timezone(timedelta(hours=5, minutes=30)) + timestamp = datetime.now(ist).strftime("%Y%m%d_%H%M%S") + final_name = "{0}_{1}".format(name, timestamp) + self.log("generate_new_backup enabled: Final backup name (IST) = {0}".format(final_name), "DEBUG") + + if not name or not scope: + self.msg = "Mandatory fields 'name' and 'scope' must be specified for backup." + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + + payload = { + "name": final_name, + "scope": scope, + } + + self.log("Generated payload for create backup: {0}".format(json.dumps(payload, indent=4)), "DEBUG") + + try: + response = self.dnac._exec( + family="backup", + function="create_backup", + op_modifies=True, + params={"payload": payload} + ) + self.log("Received API response from 'create_backup': {0}".format(response), "DEBUG") + + task_id = self.get_backup_task_id_from_response(response, "create_backup") + status = self.get_backup_status_by_task_id(task_id) + + if status not in ["FAILED", "CANCELLED", "IN_PROGRESS"]: + self.msg = "Backup '{0}' created successfully.".format(name) + self.set_operation_result("success", True, self.msg, "INFO") + self.backup.append(final_name) + + if status == "FAILED": + self.msg = "Creation of backup '{0}' failed".format(name) + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + self.backup_failed.append(name) + + if status == "CANCELLED": + self.msg = "Creation of backup '{0}' was cancelled.".format(name) + self.set_operation_result("failed", False, self.msg, "WARNING").check_return_status() + + except Exception as e: + self.msg = "An error occurred while creating backup: {0}".format(e) + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + + self.log( + "Exiting backup creation for name='{0}', scope='{1}'".format(name, scope), + "INFO" + ) + return self + + def restore_backup(self): + """ + Validates restore details and initiates backup restoration in Cisco Catalyst Center. + + Returns: + self: The current instance with updated operation result status. + + Description: + This method performs the following steps: + - Extracts restore details ('name' and 'encryption_passphrase') from 'self.want'. + - Validates input fields and ensures a backup with the specified name exists. + - Retrieves the configured backup encryption passphrase. + - Validates the input passphrase against the configured one. + - Constructs the payload and calls the 'restore_backup' API to start the restore operation. + - Monitors the task status to confirm success, failure, or cancellation. + - Sets the operation result based on the final status of the restore task. + """ + self.log("Processing restoration for existing backup...", "INFO") + restore_operations = self.want.get("restore_operations", []) + + for restore in restore_operations: + name = restore.get("name") + encryption_passphrase = restore.get("encryption_passphrase") + + self.log("Validating restore details: name={0}, encryption_passphrase={1}".format(name, encryption_passphrase), "DEBUG") + + if not name or not encryption_passphrase: + self.msg = "Both 'name' and 'encryption_passphrase' must be specified for restore." 
+ self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + + backup_exists, current_backups, backup = self.get_backup() + + matched_backup = None + + for backup in current_backups: + self.log("Comparing backup name: '{0}' with expected: '{1}'".format(backup.get("name"), name), "DEBUG") + if backup.get("name") == name: + self.log( + "Found matching backup for restoration: name={0}".format(name), + "INFO" + ) + matched_backup = backup + matched_backup_id = backup.get("id") + break + + self.log("Matched backup: {0}".format(matched_backup), "DEBUG") + + if not matched_backup: + self.msg = "No backup found with the name '{0}'.".format(name) + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + + payload = { + "encryptionPassphrase": encryption_passphrase + } + + self.log("Payload for restore operation: {0}".format(json.dumps(payload, indent=4)), "DEBUG") + self.log("Initiating restore operation for backup '{0}'".format(name), "INFO") + + try: + response = self.dnac._exec( + family="restore", + function="restore_backup", + op_modifies=True, + params={"id": matched_backup_id, "payload": payload} + ) + self.log("Received API response from 'restore_backup': {0}".format(response), "DEBUG") + self.restored_backup.append(name) + + task_id = self.get_backup_task_id_from_response(response, "restore_backup") + status = self.get_backup_status_by_task_id(task_id) + + if status not in ["FAILED", "CANCELLED", "IN_PROGRESS"]: + self.msg = "Restore operation for '{0}' completed successfully.".format(name) + self.set_operation_result("success", True, self.msg, "INFO") + return self + + if status == "FAILED": + self.msg = "Restore operation for '{0}' failed.".format(name) + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + + if status == "CANCELLED": + self.msg = "Restore operation for '{0}' was cancelled.".format(name) + self.set_operation_result("failed", False, self.msg, "WARNING").check_return_status() + + except Exception as e: + self.msg = "An error occurred while restoring backup: {0}".format(e) + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + + self.log("Exiting backup restoration workflow.", "INFO") + return self + + def get_backup_task_id_from_response(self, response, api_name): + """ + Extracts the task ID from the given API response dictionary. + + This method is used to retrieve the task ID associated with a backup-related operation + (e.g., create or restore), which is later used to track the status of the task. + + Args: + response (dict): The response returned from the Catalyst Center API call. + api_name (str): The name of the API function for logging context. + + Returns: + str or None: The extracted task ID if available, otherwise, None. + """ + self.log("Extracting task ID from response of '{0}'.".format(api_name), "DEBUG") + + if not response or not isinstance(response, dict): + self.log("Invalid or empty response received from '{0}'.".format(api_name), "ERROR") + return None + + task_info = response.get("response", {}) + task_id = task_info.get("taskId") + + if not task_id: + self.log("Returning None as task ID for '{0}'.".format(api_name), "DEBUG") + return None + + self.log("Extracted Task ID '{0}' from '{1}' response.".format(task_id, api_name), "DEBUG") + return task_id + + def get_backup_status_by_task_id(self, task_id): + """ + Polls the backup and restore execution status using the provided task ID. 
+ + This method repeatedly queries the Cisco Catalyst Center API to retrieve the current execution + status of a backup or restore operation. It continues polling until a terminal state is reached + ('SUCCESS', 'FAILED', or 'CANCELLED'), or until the configured timeout period is exceeded. + + Args: + task_id (str): The task ID associated with a backup or restore operation. + + Returns: + str: The final status of the task. Possible values are: + - 'SUCCESS': The operation completed successfully. + - 'FAILED': The operation failed. + - 'CANCELLED': The operation was cancelled. + - 'UNKNOWN': No valid status could be determined (e.g., invalid task ID). + """ + self.log("Checking backup status for task ID: {0}".format(task_id), "INFO") + + if not task_id: + self.log("No task ID provided to get_backup_status_by_task_id.", "ERROR") + return "UNKNOWN" + + start_time = time.time() + + while True: + elapsed_time = time.time() - start_time + if elapsed_time >= self.max_timeout: + self.msg = "Max timeout of {0} sec reached while waiting for backup task ID '{1}'.".format(self.max_timeout, task_id) + self.log(self.msg, "WARNING") + self.status = "failed" + return "FAILED" + + try: + response = self.dnac._exec( + family="backup", + function="get_backup_and_restore_execution", + params={"id": task_id} + ) + + self.log("Received API response from 'get_backup_and_restore_execution': {0}".format(response), "DEBUG") + + if isinstance(response, list): + response = response[0] if response else {} + + execution_data = response.get("response", {}) + status = execution_data.get("status", "UNKNOWN").upper() + + self.log("Backup execution status for task ID '{0}': '{1}'.".format(task_id, status), "DEBUG") + + if status in ["SUCCESS", "FAILED", "CANCELLED"]: + self.status = status.lower() + self.log("Returning backup status '{0}' for task ID '{1}'.".format(status, task_id), "INFO") + return status + else: + self.log("Backup task ID '{0}' is still in progress. Status: '{1}'. Retrying...".format(task_id, status), "DEBUG") + time.sleep(5) + + except Exception as e: + self.msg = "Error while retrieving status for task ID '{0}': {1}".format(task_id, str(e)) + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + + self.log("Backup status polling for task ID '{0}' completed.".format(task_id), "DEBUG") + + def delete_nfs_configuration(self): + """ + Deletes an existing NFS configuration from Cisco Catalyst Center. + + Returns: + self: Returns the instance with updated operation result. + + Description: + - Validates that both 'server_ip' and 'source_path' are provided. + - Checks if the NFS configuration exists in the current state. + - If the configuration exists, it calls the API to delete it. + - If the configuration does not exist, it logs a message and exits successfully. 
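+
+            Example of an 'nfs_configuration' entry targeted for deletion
+            (illustrative values only):
+                nfs_configuration:
+                  - server_ip: "10.0.0.5"
+                    source_path: "/exports/backup"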
+ """ + self.log( + "Starting NFS configuration deletion workflow for {0} configurations".format( + len(self.want.get("nfs_configuration", [])) + ), + "INFO" + ) + + desired_nfs_configs = self.want.get("nfs_configuration", []) + current_nfs_configs = self.have.get("current_nfs_configurations", []) + + for config_index, nfs_config_details in enumerate(desired_nfs_configs): + server_ip = nfs_config_details.get("server_ip") + source_path = nfs_config_details.get("source_path") + + self.log( + "Processing NFS deletion {0}: server_ip={1}, source_path={2}".format( + config_index + 1, server_ip, source_path + ), + "DEBUG" + ) + + if not server_ip or not source_path: + self.msg = "Both 'server_ip' and 'source_path' must be specified to delete an NFS configuration." + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + continue + + self.log("Attempting to delete NFS configuration for server: {0}, path: {1}".format(server_ip, source_path), "DEBUG") + + nfs_to_delete = None + for existing_config in current_nfs_configs: + spec = existing_config.get("spec", {}) + if spec.get("server") == server_ip and spec.get("sourcePath") == source_path: + nfs_to_delete = existing_config + self.log( + "Found existing NFS configuration for deletion: server_ip={0}, source_path={1}".format( + server_ip, source_path + ), + "INFO" + ) + break + + if not nfs_to_delete: + self.msg = ( + "NFS configuration with server_ip '{0}' and source_path '{1}' " + "does not exist in the Cisco Catalyst Center or has already been deleted." + ).format(server_ip, source_path) + self.set_operation_result("success", False, self.msg, "INFO") + self.deleted_nfs_config.append(source_path) + continue + + nfs_config_id = nfs_to_delete.get("id") + if not nfs_config_id: + self.msg = "Unable to retrieve ID for NFS configuration '{0}:{1}'.".format(server_ip, source_path) + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + continue + + try: + self.log( + "Initiating deletion of NFS configuration via Catalyst Center API for server_ip={0}, source_path={1}".format( + server_ip, source_path + ), + "INFO" + ) + response = self.dnac._exec( + family="backup", + function="delete_n_f_s_configuration", + op_modifies=True, + params={"id": nfs_config_id}, + ) + + self.log( + "Received API response from 'delete_n_f_s_configuration' for {0}:{1}: {2}".format( + server_ip, source_path, response + ), + "DEBUG", + ) + self.deleted_nfs_config.append(source_path) + + if response: + self.msg = "NFS configuration deleted successfully for {0}:{1}".format(server_ip, source_path) + self.set_operation_result("success", True, self.msg, "INFO") + return self + + self.msg = "Failed to delete NFS configuration for {0}:{1}. API response: {2}".format(server_ip, source_path, response) + self.set_operation_result("failed", False, self.msg, "ERROR") + + except Exception as e: + self.msg = "Error occurred while deleting NFS configuration {0}:{1}: {2}".format(server_ip, source_path, e) + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + + self.log("Completed NFS configuration deletion workflow", "INFO") + return self + + def delete_backup(self): + """ + Deletes an existing backup from Cisco Catalyst Center. + + Returns: + self: Returns the instance with updated operation result. + + Description: + - Validates that the 'name' of the backup is provided in the desired state. + - Checks if the backup exists in the current state. 
+ - If the backup exists, retrieves its ID and calls the API to delete it. + - Monitors the deletion task until completion and updates the result accordingly. + - If the backup does not exist or is already deleted, logs an informational message and exits successfully. + - Handles failures and unexpected task status with appropriate error messages. + """ + self.log("Starting backup deletion workflow", "INFO") + + backup_details = self.want.get("backup", []) + self.log("backup details: {0}".format(backup_details), "INFO") + + if not backup_details: + self.log("No backup details provided for deletion", "DEBUG") + return self + + backup = self.have + self.log("Current backup: {0}".format(backup), "DEBUG") + + delete_all = backup_details[0].get("delete_all_backup", False) + name = backup_details[0].get("name") + backup_retention_days = backup_details[0].get("backup_retention_days") + + backups_to_delete = [] + + if delete_all: + backups_to_delete = backup.get("all_backups", []) + if not backups_to_delete: + self.msg = "No backup available in Cisco Catalyst Center to delete." + self.set_operation_result("success", False, self.msg, "INFO") + return self + self.log("Deleting ALL backup from Catalyst Center", "INFO") + + elif backup_retention_days: + self.log("Initiating backup retention-based cleanup", "INFO") + + retention_days = int(backup_retention_days) + if retention_days <= 0: + self.msg = "Invalid value for backup_retention_days: must be a positive integer." + self.set_operation_result("failed", True, self.msg, "ERROR") + return self + + ist = timezone(timedelta(hours=5, minutes=30)) + current_time = datetime.now(ist) + cutoff_date_time = current_time - timedelta(days=retention_days) + + self.log("Current IST time: {0}".format(cutoff_date_time), "DEBUG") + + self.log( + "Applying retention policy: retain backups created after '{0}', delete older ones.".format( + cutoff_date_time.strftime("%Y-%m-%d %H:%M:%S") + ), + "INFO", + ) + + all_backups = backup.get("all_backups", []) + + if name: + self.log( + "Filtering backups by name prefix '{0}' and retention period of {1} days".format( + name, retention_days + ), + "INFO", + ) + else: + self.log( + "Applying retention policy to all backups (no name prefix provided) with retention of {0} days".format( + retention_days + ), + "INFO", + ) + + for backup in all_backups: + backup_name = backup.get("name") + created_date_time = backup.get("createdDate") # e.g., "2025-09-11T04:19:58Z" + + if created_date_time: + created_date_time = datetime.strptime(created_date_time, "%Y-%m-%dT%H:%M:%SZ") + created_date_time = created_date_time.replace(tzinfo=timezone.utc) + created_date_time = created_date_time.astimezone(ist) + + self.log( + "Backup '{0}' created (IST): {1}".format( + backup_name, created_date_time.strftime("%Y-%m-%d %H:%M:%S") + ), + "DEBUG", + ) + + if (not name or backup_name.startswith(name)) and created_date_time < cutoff_date_time: + backups_to_delete.append(backup) + + self.log( + "Backups identified for deletion (older than {0} days): {1}".format( + retention_days, backups_to_delete + ), + "DEBUG", + ) + self.log("Total backups to delete based on retention policy: {0}".format(backups_to_delete), "INFO") + + if not backups_to_delete: + target = "with prefix '{0}' ".format(name) if name else "" + self.msg = "No backups found {0}older than retention period ({1} days).".format( + target, retention_days + ) + self.set_operation_result("success", False, self.msg, "INFO") + return self + + elif name: + if not backup.get("backup_exists"): + self.msg 
= "Backup with name '{0}' does not exist in the Cisco Catalyst Center or has already been deleted.".format(name) + self.set_operation_result("success", False, self.msg, "INFO") + return self + + current_backup = backup.get("current_backup", {}) + if not current_backup or not current_backup.get("id"): + self.msg = "Unable to retrieve backup ID for '{0}'.".format(name) + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + + backups_to_delete = [current_backup] + self.log("Deleting specific backup '{0}'".format(name), "INFO") + + else: + self.msg = "Either set 'delete_all_backup: true' or provide a 'name' for deletion." + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + + for backup in backups_to_delete: + self.log("Processing deletion for backup: {0}".format(backup), "DEBUG") + backup_name = backup.get("name") + backup_id = backup.get("id") + + try: + self.log( + "Initiating deletion of backup '{0}' via Catalyst Center API".format(backup_name), + "INFO" + ) + response = self.dnac._exec( + family="backup", + function="delete_backup", + op_modifies=True, + params={"id": backup_id}, + ) + self.log("Received API response from 'delete_backup': {0}".format(response), "DEBUG") + + task_id = self.get_backup_task_id_from_response(response, "delete_backup") + status = self.get_backup_status_by_task_id(task_id) + + if status == "SUCCESS": + self.msg = "Backup '{0}' deleted successfully.".format(backup_name) + self.set_operation_result("success", True, self.msg, "INFO") + self.deleted_backup.append(backup_name) + time.sleep(30) + + elif status == "FAILED": + self.msg = "Deletion of backup '{0}' failed.".format(backup_name) + self.set_operation_result("failed", False, self.msg, "ERROR") + self.delete_backup_failed.append(backup_name) + + else: + self.msg = "Unexpected deletion status '{0}' for backup '{1}'.".format(status, backup_name) + self.set_operation_result("failed", False, self.msg, "WARNING") + self.delete_backup_failed.append(backup_name) + + except Exception as e: + self.msg = "An error occurred while deleting backup: {0}".format(e) + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + + self.log("Exiting backup deletion workflow", "INFO") + return self + + def verify_diff_merged(self): + """ + Verifies the successful creation of NFS configuration, backup, and backup configuration + in Cisco Catalyst Center by comparing the desired state with the current state. + + Returns: + self: Returns the instance after performing verification and logging results. + + Description: + - For each provided configuration type (NFS, backup, backup configuration), fetches the current state. + - Compares the current state (have) against the desired state (want). + - Logs verification success if the configuration is found in the current state. + - Logs a warning or info message if the configuration is not found, indicating a possible failure in execution. 
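+            - Verification relies on the 'nfs_configuration_exists', 'backup_exists', and
+              'backup_configuration_exists' flags populated by 'get_have()'.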
+        """
+        self.log("Starting verification of merged configuration changes in Catalyst Center", "INFO")
+
+        if self.want.get("nfs_configuration"):
+            self.log("Verifying NFS configuration creation/update results", "DEBUG")
+            self.get_have()
+            self.log("Current State (have): {0}".format(str(self.have)), "INFO")
+            self.log("Desired State (want): {0}".format(str(self.want)), "INFO")
+
+            nfs_configuration_exists = self.have.get("nfs_configuration_exists")
+            desired_config = self.want.get("nfs_configuration", [])[0]
+            server_ip = desired_config.get("server_ip")
+            source_path = desired_config.get("source_path")
+
+            if nfs_configuration_exists:
+                self.log(
+                    "Successfully verified the creation of NFS configuration with server_ip '{0}' and "
+                    "source_path '{1}' in the Cisco Catalyst Center.".format(server_ip, source_path),
+                    "INFO"
+                )
+            else:
+                self.log(
+                    "The playbook input for NFS configuration with server_ip '{0}' and source_path '{1}' does not align with "
+                    "the Cisco Catalyst Center, indicating that the creation task may not have executed successfully.".format(
+                        server_ip, source_path
+                    ),
+                    "WARNING"
+                )
+
+        if self.want.get("backup"):
+            self.log("Verifying backup creation results", "DEBUG")
+            self.get_have()
+            self.log("Current State (have): {0}".format(str(self.have)), "INFO")
+            self.log("Desired State (want): {0}".format(str(self.want)), "INFO")
+
+            backup_exists = self.have.get("backup_exists")
+            backup = self.want.get("backup", [])[0]
+            name = backup.get("name")
+            scope = backup.get("scope")
+
+            if backup_exists:
+                self.log("Waiting for backup creation to complete on backend", "DEBUG")
+                time.sleep(90)
+                self.log(
+                    "Successfully verified the creation of backup with name '{0}' and scope '{1}' "
+                    "in the Cisco Catalyst Center.".format(name, scope),
+                    "INFO"
+                )
+            else:
+                self.log(
+                    "The playbook input for backup with name '{0}' and scope '{1}' does not align with the "
+                    "Cisco Catalyst Center, indicating that the creation task may not have executed successfully.".format(
+                        name, scope
+                    ),
+                    "WARNING"
+                )
+
+        if self.want.get("backup_storage_configuration"):
+            self.log("Verifying backup configuration creation/update results", "DEBUG")
+            self.get_have()
+            self.log("Current State (have): {0}".format(str(self.have)), "INFO")
+            self.log("Desired State (want): {0}".format(str(self.want)), "INFO")
+
+            backup_config_exists = self.have.get("backup_configuration_exists")
+            backup_config = self.want.get("backup_storage_configuration", [])[0]
+            nfs_details = backup_config.get("nfs_details", {})
+            server_ip = nfs_details.get("server_ip")
+            source_path = nfs_details.get("source_path")
+
+            if backup_config_exists:
+                self.log(
+                    "Successfully verified the creation of backup configuration with server_ip '{0}' and "
+                    "source_path '{1}' in the Cisco Catalyst Center.".format(server_ip, source_path),
+                    "INFO"
+                )
+            else:
+                self.log(
+                    "The playbook input for backup configuration with server_ip '{0}' and source_path '{1}' does not align with the "
+                    "Cisco Catalyst Center, indicating that the creation task may not have executed successfully.".format(
+                        server_ip, source_path
+                    ),
+                    "WARNING"
+                )
+
+        self.log("Completed verification of merged configuration changes", "INFO")
+        return self
+
+    def verify_diff_deleted(self):
+        """
+        Verifies the successful deletion of NFS configuration and backup
+        from Cisco Catalyst Center by comparing the desired state with the current state.
+ + Returns: + self: Returns the instance after performing verification and logging results. + + Description: + - For each configuration type marked for deletion (NFS, backup), fetches the current state. + - Compares the current state (have) against the desired state (want). + - Logs confirmation if the configuration is no longer present, verifying successful deletion. + - Logs a warning if the configuration is still present, indicating the deletion may have failed. + - Introduces a delay for backup verification to allow for asynchronous cleanup on the backend. + """ + self.log("Starting verification of deleted configuration changes in Catalyst Center", "INFO") + + if self.want.get("nfs_configuration"): + self.log("Verifying NFS configuration deletion results", "DEBUG") + self.get_have() + self.log("Current State (have): {0}".format(str(self.have)), "INFO") + self.log("Desired State (want): {0}".format(str(self.want)), "INFO") + + nfs_config_exists = self.have.get("nfs_configuration_exists") + nfs_details = self.want.get("nfs_configuration", [])[0] + server_ip = nfs_details.get("server_ip") + source_path = nfs_details.get("source_path") + + if not nfs_config_exists: + self.log( + "NFS configuration with server_ip '{0}' and source_path '{1}' " + "has been successfully deleted from Cisco Catalyst Center.".format(server_ip, source_path), + "INFO" + ) + else: + self.log( + "NFS configuration with server_ip '{0}' and source_path '{1}' still exists in Cisco Catalyst Center, " + "indicating that the deletion task may not have executed successfully.".format(server_ip, source_path), + "WARNING" + ) + + if self.want.get("backup"): + self.log("Waiting for backup deletion to complete on backend", "DEBUG") + time.sleep(30) + self.get_have() + self.log("Current State (have): {0}".format(str(self.have)), "INFO") + self.log("Desired State (want): {0}".format(str(self.want)), "INFO") + + backup_exists = self.have.get("backup_exists") + self.log("Backup exists: {0}".format(backup_exists), "DEBUG") + backup_name = self.want.get("backup", [])[0].get("name") + + if not backup_exists: + self.log( + "The backup '{0}' is not present in Cisco Catalyst Center " + "and its deletion has been verified.".format(backup_name) + ) + else: + self.log( + "The playbook input for backup '{0}' does not align with Cisco Catalyst " + "Center, indicating that the deletion task may not have executed successfully." + .format(backup_name), + "WARNING" + ) + + self.log("Completed verification of deleted configuration changes", "INFO") + return self + + def update_messages(self): + """ + Consolidates and logs messages for backup and restore operations including NFS + configurations, backup configurations, create backup, and restore operations. + Ensures no duplicates and builds a clean response. + + Returns: + self (object): The updated instance with populated result and msg. 
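+
+            Note:
+                'self.result["changed"]' is set to true only when at least one create, update,
+                delete, restore, or failure message was recorded; "already exists" messages
+                alone do not mark a change.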
+ """ + self.result["changed"] = False + result_msg_list = [] + no_update_list = [] + + if self.created_nfs_config: + msg = "NFS Configuration(s) '{0}' created successfully in Cisco Catalyst Center.".format( + "', '".join(self.created_nfs_config) + ) + result_msg_list.append(msg) + + if self.already_exists_nfs_config: + msg = "NFS Configuration(s) '{0}' already exist in Cisco Catalyst Center.".format( + "', '".join(self.already_exists_nfs_config) + ) + no_update_list.append(msg) + + if self.deleted_nfs_config: + msg = "NFS Configuration(s) '{0}' deleted successfully from Cisco Catalyst Center.".format( + "', '".join(self.deleted_nfs_config) + ) + result_msg_list.append(msg) + + if self.created_backup_config: + msg = "Backup Configuration(s) '{0}' created successfully in Cisco Catalyst Center.".format( + "', '".join(self.created_backup_config) + ) + result_msg_list.append(msg) + + if self.already_exists_backup_config: + msg = "Backup Configuration(s) '{0}' already exist in Cisco Catalyst Center.".format( + "', '".join(self.already_exists_backup_config) + ) + no_update_list.append(msg) + + if self.updated_backup_config: + msg = "Backup Configuration(s) '{0}' updated successfully in Cisco Catalyst Center.".format( + "', '".join(self.updated_backup_config) + ) + result_msg_list.append(msg) + + if self.backup: + msg = "Backup(s) '{0}' created successfully in Cisco Catalyst Center.".format( + "', '".join(self.backup) + ) + result_msg_list.append(msg) + + if self.backup_failed: + msg = "Backup(s) '{0}' creation failed in Cisco Catalyst Center.".format( + "', '".join(self.backup_failed) + ) + result_msg_list.append(msg) + + if self.deleted_backup: + backup_details = self.want.get("backup", []) + delete_all = backup_details[0].get("delete_all_backup", False) + + if delete_all: + msg = "All Backup(s) '{0}' deleted successfully from Cisco Catalyst Center.".format( + "', '".join(self.deleted_backup) + ) + result_msg_list.append(msg) + else: + msg = "Backup(s) '{0}' deleted successfully from Cisco Catalyst Center.".format( + "', '".join(self.deleted_backup) + ) + result_msg_list.append(msg) + + if self.delete_backup_failed: + msg = "Backup(s) '{0}' deletion failed in Cisco Catalyst Center.".format( + "', '".join(self.delete_backup_failed) + ) + result_msg_list.append(msg) + + if self.restored_backup: + msg = "Backup(s) '{0}' restored successfully in Cisco Catalyst Center.".format( + "', '".join(self.restored_backup) + ) + result_msg_list.append(msg) + + if result_msg_list and no_update_list: + self.result["changed"] = True + self.msg = "{0} {1}".format( + " ".join(result_msg_list), " ".join(no_update_list) + ) + elif result_msg_list: + self.result["changed"] = True + self.msg = " ".join(result_msg_list) + elif no_update_list: + self.msg = " ".join(no_update_list) + + self.log(self.msg, "INFO") + self.result["response"] = self.msg + self.result["msg"] = self.msg + + return self + + +def main(): + """ main entry point for module execution """ + element_spec = {'dnac_host': {'required': True, 'type': 'str'}, + 'dnac_port': {'type': 'str', 'default': '443'}, + 'dnac_username': {'type': 'str', 'default': 'admin', 'aliases': ['user']}, + 'dnac_password': {'type': 'str', 'no_log': True}, + 'dnac_verify': {'type': 'bool', 'default': True}, + 'dnac_version': {'type': 'str', 'default': '2.2.3.3'}, + 'dnac_debug': {'type': 'bool', 'default': False}, + 'dnac_log_level': {'type': 'str', 'default': 'WARNING'}, + "dnac_log_file_path": {"type": 'str', "default": 'dnac.log'}, + "dnac_log_append": {"type": 'bool', 
"default": True}, + 'dnac_log': {'type': 'bool', 'default': False}, + 'validate_response_schema': {'type': 'bool', 'default': True}, + 'config_verify': {'type': 'bool', "default": True}, + 'dnac_api_task_timeout': {'type': 'int', "default": 1200}, + 'dnac_task_poll_interval': {'type': 'int', "default": 2}, + 'config': {'required': True, 'type': 'list', 'elements': 'dict'}, + 'state': {'default': 'merged', 'choices': ["merged", "deleted"]} + } + + module = AnsibleModule(argument_spec=element_spec, supports_check_mode=False) + ccc_backup_restore = BackupRestore(module) + state = ccc_backup_restore.params.get("state") + + current_version = ccc_backup_restore.get_ccc_version() + min_supported_version = "3.1.3.0" + + if ccc_backup_restore.compare_dnac_versions(current_version, min_supported_version) < 0: + ccc_backup_restore.status = "failed" + ccc_backup_restore.msg = ( + "The specified version '{0}' does not support the 'Backup and restore' feature. " + "Supported version(s) start from '{1}' onwards.".format(current_version, min_supported_version) + ) + ccc_backup_restore.log(ccc_backup_restore.msg, "ERROR") + ccc_backup_restore.check_return_status() + + if state not in ccc_backup_restore.supported_states: + ccc_backup_restore.status = "invalid" + ccc_backup_restore.msg = "State {0} is invalid".format(state) + ccc_backup_restore.check_return_status() + + ccc_backup_restore.validate_input().check_return_status() + config_verify = ccc_backup_restore.params.get("config_verify") + + for config in ccc_backup_restore.validated_config: + ccc_backup_restore.reset_values() + ccc_backup_restore.get_want(config).check_return_status() + ccc_backup_restore.get_have().check_return_status() + + ccc_backup_restore.get_diff_state_apply[state](config) + + if config_verify: + ccc_backup_restore.verify_diff_state_apply[state]().check_return_status() + + ccc_backup_restore.update_messages() + module.exit_json(**ccc_backup_restore.result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/fabric_devices_info_workflow_manager.py b/plugins/modules/fabric_devices_info_workflow_manager.py new file mode 100644 index 0000000000..6a13bbaa02 --- /dev/null +++ b/plugins/modules/fabric_devices_info_workflow_manager.py @@ -0,0 +1,4364 @@ +# !/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2025, Cisco Systems +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type +__author__ = ("Priyadharshini B", "Madhan Sankaranarayanan") + +DOCUMENTATION = r""" +--- +module: fabric_devices_info_workflow_manager +short_description: > + Comprehensive fabric device information gathering module for Cisco Catalyst Center with advanced filtering and output capabilities. + +description: + - Retrieves comprehensive fabric device information from Cisco Catalyst Center using flexible, user-defined filtering criteria. + - Supports device identification through fabric site hierarchy and optional fabric device role filtering for targeted information retrieval. + - Enables selective information retrieval across six categories are fabric configuration details, Layer 2/3 handoff configurations, device onboarding status, + connected neighbor devices, health metrics, and active issues. + - Implements robust data collection with configurable retry mechanisms, timeout handling, and polling intervals for reliable operation in enterprise + environments. 
+ - Provides flexible file output capabilities using the C(output_file_info) parameter with support for JSON and YAML formats, configurable + file modes (overwrite or append), and optional timestamp inclusion. + - When C(output_file_info) is specified, results are written to the designated file. otherwise, results are returned + in the standard Ansible module output. + - Returns structured data for each requested information category, or an empty result set when no devices match + the specified filter criteria after exhausting all retry attempts. + - Operates as a read-only facts/info module ensuring safe execution in check mode without modifying device configurations. + +version_added: "6.32.0" +extends_documentation_fragment: + - cisco.dnac.workflow_manager_params + +author: + - Priyadharshini B (@pbalaku2) + - Madhan Sankaranarayanan (@madhansansel) + +options: + config_verify: + description: Set to true to verify the Cisco Catalyst Center after applying the playbook config. + type: bool + default: false + state: + description: The desired state of the configuration after module execution. + type: str + choices: ["gathered"] + default: gathered + config: + description: List of dictionaries specifying fabric device query parameters. + type: list + elements: dict + required: true + suboptions: + fabric_devices: + description: + - Defines fabric device filtering criteria to retrieve information from Software-Defined Access (SDA) fabric sites. + - Each device entry must include the fabric_site_hierarchy parameter to identify the fabric site. + - Optional device_identifier parameter provides additional filtering capabilities within the fabric site. + type: list + elements: dict + suboptions: + fabric_site_hierarchy: + description: + - Hierarchical path of the fabric site to query for fabric device information. + - Must be an existing site configured as a Software-Defined Access (SDA) fabric site in Cisco Catalyst Center. + - Site path must follow the full hierarchical structure (e.g., "Global/Region/Building/Floor"). + - All fabric devices within this site hierarchy will be included unless further filtered by other parameters. + - Site hierarchy paths must match exactly as configured in Cisco Catalyst Center's site management structure. + type: str + required: true + fabric_device_role: + description: + - Optional filter to restrict fabric device information retrieval to specific fabric roles. + - When specified, only fabric devices with the matching role will have their information retrieved. + - If omitted, all fabric devices within the specified fabric site hierarchy are included. + - Role-based filtering improves performance for large fabric deployments by reducing the scope of devices processed. + type: str + required: false + choices: + - CONTROL_PLANE_NODE # SDA control plane devices managing fabric overlay + - BORDER_NODE # Fabric border devices connecting to external networks + - EDGE_NODE # Fabric edge devices connecting endpoints + - EXTENDED_NODE # Fabric extended nodes for specific deployment scenarios + - WIRELESS_CONTROLLER_NODE # Wireless controllers in fabric deployments + device_identifier: + description: + - Optional list of device identification criteria to further filter fabric devices within the specified fabric site. + - Provides granular control over which fabric devices have their information retrieved. + - If omitted, all fabric devices within the fabric site hierarchy (and optional role filter) are processed. 
+ - Multiple identification methods can be combined for comprehensive device targeting. + - Only devices that are both fabric-enabled and match the identifier criteria will be processed. + - For IP-based identification, specify either ip_address (for individual IPs) OR ip_address_range (for IP ranges), + not both in the same device_identifier entry. + - When multiple identification parameters (ip_address, hostname, serial_number) are specified in the same entry, + they must all refer to the same physical device for proper validation. + - Use separate device_identifier entries when targeting different devices with different identification methods. + type: list + elements: dict + suboptions: + ip_address: + description: + - List of management IP addresses to identify specific fabric devices within specified fabric site. + - Each IP address must correspond to a managed device in the Cisco Catalyst Center inventory. + - Only devices with matching IP addresses that are also fabric-enabled will have their information retrieved. + - IP addresses must be valid IPv4 addresses in dotted decimal notation. + - Cannot be used together with ip_address_range parameter - choose one identification method per device_identifier entry. + - Mutually exclusive with ip_address_range - specify either ip_address OR ip_address_range, not both. + type: list + elements: str + required: false + ip_address_range: + description: + - IP address range specification for bulk device identification within specified fabric sites. + - Format "start_ip-end_ip" (e.g., "192.168.1.1-192.168.1.50") for contiguous IP ranges. + - Range is automatically expanded into individual IP addresses for processing. + - Only fabric-enabled devices within the specified range will have their information retrieved. + - Useful for targeting entire subnets or network segments within fabric deployments. + - Cannot be used together with ip_address parameter - choose one identification method per device_identifier entry. + - Mutually exclusive with ip_address - specify either ip_address_range OR ip_address, not both. + type: str + required: false + serial_number: + description: + - List of device serial numbers to identify specific fabric devices. + - Each serial number must match exactly as recorded in Cisco Catalyst Center device inventory. + - Only devices with matching serial numbers that are also fabric-enabled will have their information retrieved. + - Serial numbers are case-sensitive and must match the format used by the device manufacturer. + type: list + elements: str + required: false + hostname: + description: + - List of device hostnames to identify specific fabric devices. + - Each hostname must match exactly as configured in Cisco Catalyst Center device inventory. + - Only devices with matching hostnames that are also fabric-enabled will have their information retrieved. + - Hostnames are case-sensitive and must match the exact device hostname configuration. + type: list + elements: str + required: false + timeout: + description: + - Maximum time in seconds to wait for device information retrieval operations to complete. + - Applied to each individual device lookup operation during the filtering process. + - If device information retrieval fails within this timeout period, the operation will retry based on the 'retries' parameter. + - Longer timeouts may be needed for environments with slower network connectivity or larger device inventories. 
+ - If timeout is greater than (retries * interval), the operation will continue retrying until the timeout period ends. + - Total operation time is bounded by the timeout value regardless of retry configuration. + type: int + default: 120 + retries: + description: + - Number of retry attempts for device information retrieval operations when initial attempts fail. + - Applied to each individual device lookup and fabric device filtering operation. + - Higher retry counts improve reliability in environments with intermittent connectivity or high API load. + - Total operation time is affected by retries combined with timeout and interval settings. + - Actual retry attempts may be less than specified if timeout period is reached first. + type: int + default: 3 + interval: + description: + - Time in seconds to wait between retry attempts for device information retrieval operations. + - Applied as a delay between failed attempts during device lookup and fabric filtering processes. + - Combined with timeout and retries to determine total operation duration. + - If (retries * interval) exceeds timeout, retries will continue until timeout is reached. + - Longer intervals help reduce API load on Cisco Catalyst Center during retry operations. + - Should be balanced with timeout settings to avoid excessively long operation times. + type: int + default: 10 + requested_info: + description: + - List of fabric device information types to retrieve for each identified fabric device. + - If omitted or empty, all available information categories will be retrieved by default. + - Selective information retrieval improves performance and reduces API load for large fabric deployments. + - Each information type corresponds to specific Cisco Catalyst Center APIs and data sources. + type: list + elements: str + choices: + - fabric_info # Fabric configuration details, device roles, and fabric site associations + - handoff_info # Layer 2/3 handoff configurations for border and control plane nodes + - onboarding_info # Device provisioning status, port assignments, port channels and SSID details for wireless devices + - connected_devices_info # Neighbor device information via CDP/LLDP discovery protocols + - device_health_info # Health metrics including CPU, memory, temperature, and performance data + - device_issues_info # Active alerts, issues, and problems detected on fabric devices + output_file_info: + description: + - Controls file output generation for fabric device information retrieval results. + - When provided, saves retrieved device information to the specified file + along with returning the data in standard Ansible module output. + - Supports flexible file formatting, writing modes, and optional timestamp inclusion for audit purposes. + - Enables automated reporting and data archival workflows for fabric device monitoring operations. + type: dict + suboptions: + file_path: + description: + - Absolute path to the output file without file extension. + - File extension is automatically appended based on the selected file format (.json or .yaml). + - Directory structure will be created automatically if it does not exist. + - Path must be writable by the user executing the Ansible playbook. + type: str + required: true + file_format: + description: + - Output data format for the generated file. + - Determines file structure and extension applied to the file path. + - YAML format provides better human readability while JSON offers programmatic parsing advantages. 
+ - Format selection affects file extension and data serialization method. + type: str + default: yaml + choices: + - json + - yaml + file_mode: + description: + - File writing mode determining how data is written to the target file. + - Use 'w' to overwrite existing file content or 'a' to append new data to existing content. + - Append mode enables incremental data collection across multiple playbook runs. + - Overwrite mode ensures clean data sets for each execution. + type: str + default: w + choices: + - w + - a + timestamp: + description: + - Controls inclusion of data retrieval timestamp in the output file content. + - When enabled, adds the data collection timestamp as the first entry for audit trail purposes. + - Useful for tracking when fabric device information was collected in automated workflows. + - Timestamp format follows "YYYY-MM-DD HH:MM:SS" standard format. + type: bool + default: false + +requirements: +- dnacentersdk >= 2.9.3 +- python >= 3.9.19 + +notes: +- This is a facts/info module that only retrieves information and does not modify any device configurations or network state. +- Writing to a local file is for reporting, archival, and audit purposes only and does not affect the state of any managed devices. +- Module is safe to use in check mode as it performs read-only operations against Cisco Catalyst Center APIs. +- Fabric device filtering automatically identifies SDA fabric-enabled devices from the specified fabric site hierarchy. +- The fabric_site_hierarchy parameter is required and must reference an existing SDA fabric site in Cisco Catalyst Center. +- Device identification through device_identifier parameters provides granular control over which fabric devices are processed. +- Information retrieval is optimized based on device capabilities - + SSID details are only retrieved for wireless controllers, handoff information is role-specific. +- Retry mechanisms with configurable timeout, retry count, and polling intervals ensure reliable data collection in enterprise-scale deployments. +- Requires Cisco Catalyst Center version 2.3.7.9 or later for fabric device information retrieval functionality. +- File output supports both JSON and YAML formats with flexible writing modes (overwrite/append) and optional timestamp inclusion for audit trails. +- Module handles mixed wired and wireless fabric environments automatically, applying appropriate API calls based on device type detection. 
+ +- SDK Methods used are + - devices.Devices.get_device_list + - sda.Sda.get_fabric_devices + - sda.Sda.get_fabric_sites + - sda.Sda.get_fabric_devices_layer3_handoffs_with_sda_transit + - sda.Sda.get_fabric_devices_layer3_handoffs_with_ip_transit + - sda.Sda.get_fabric_devices_layer2_handoffs + - devices.Devices.get_interface_info_by_id + - devices.Devices.get_connected_device_detail + - devices.Devices.devices + - issues.Issues.issues + - sda.Sda.get_provisioned_wired_device + - sda.Sda.get_port_assignments + - wireless.Wireless.get_ssid_details_for_specific_wireless_controller + +- Paths used are + - GET/dna/intent/api/v1/network-device + - GET/dna/intent/api/v1/sda/fabricDevices + - GET/dna/intent/api/v1/sda/fabricSites + - GET/dna/intent/api/v1/sda/fabricDevices/layer3Handoffs/sdaTransits + - GET/dna/intent/api/v1/sda/fabricDevices/layer3Handoffs/ipTransits + - GET/dna/intent/api/v1/sda/fabricDevices/layer2Handoffs + - GET/dna/intent/api/v1/interface/network-device/{deviceId} + - GET/dna/intent/api/v1/network-device/{deviceUuid}/interface/{interfaceUuid}/neighbor + - GET/dna/intent/api/v1/device-health + - GET/dna/intent/api/v1/issues + - GET/dna/intent/api/v1/business/sda/provision-device + - GET/dna/intent/api/v1/sda/portAssignments + - GET/dna/intent/api/v1/wireless/controller/{networkDeviceId}/ssidDetails +""" + +EXAMPLES = r""" + +# Case 1: Retrieves all information for devices that are part of the fabric, from Cisco Catalyst Center. +- name: Get Fabric device information from Cisco Catalyst Center + hosts: localhost + connection: local + vars_files: + - "credentials.yml" + tasks: + - name: Gather detailed facts for specific fabric devices + cisco.dnac.fabric_devices_info_workflow_manager: + dnac_host: "{{ dnac_host }}" + dnac_username: "{{ dnac_username }}" + dnac_password: "{{ dnac_password }}" + dnac_verify: "{{ dnac_verify }}" + dnac_port: "{{ dnac_port }}" + dnac_version: "{{ dnac_version }}" + dnac_debug: "{{ dnac_debug }}" + dnac_log: true + dnac_log_level: DEBUG + dnac_api_task_timeout: 1000 + dnac_task_poll_interval: 1 + state: gathered + config: + - fabric_devices: + - fabric_site_hierarchy: "Global/rishipat_area/Fabric-area-1" # Mandatory parameter + fabric_device_role: "CONTROL_PLANE_NODE" + device_identifier: + - ip_address: ["192.168.200.69"] + - serial_number: ["FJC272121AG"] + - hostname: ["SJ-BN-9300.cisco.local"] + timeout: 30 + retries: 3 + interval: 10 + output_file_info: + file_path: /Users/priyadharshini/Downloads/fabric_device_info + file_format: yaml + file_mode: a + timestamp: true + +# Case 2: Retrieves specific information for devices that are part of the fabric, from Cisco Catalyst Center. 
+- name: Get Fabric device information from Cisco Catalyst Center + hosts: localhost + connection: local + vars_files: + - "credentials.yml" + tasks: + - name: Gather detailed facts for specific fabric devices + cisco.dnac.fabric_devices_info_workflow_manager: + dnac_host: "{{ dnac_host }}" + dnac_username: "{{ dnac_username }}" + dnac_password: "{{ dnac_password }}" + dnac_verify: "{{ dnac_verify }}" + dnac_port: "{{ dnac_port }}" + dnac_version: "{{ dnac_version }}" + dnac_debug: "{{ dnac_debug }}" + dnac_log: true + dnac_log_level: DEBUG + dnac_api_task_timeout: 1000 + dnac_task_poll_interval: 1 + state: gathered + config: + - fabric_devices: + - fabric_site_hierarchy: "Global/rishipat_area/Fabric-area-1" # Mandatory parameter + fabric_device_role: "CONTROL_PLANE_NODE" + device_identifier: + - ip_address: ["192.168.200.69"] + - serial_number: ["FJC272121AG"] + - hostname: ["SJ-BN-9300.cisco.local"] + timeout: 30 + retries: 3 + interval: 10 + requested_info: + - fabric_info + - handoff_info + - onboarding_info + - connected_devices_info + - device_health_info + - device_issues_info + output_file_info: + file_path: /Users/priyadharshini/Downloads/fabric_device_info + file_format: json + file_mode: w + timestamp: true + +# Case 3: Retrieves all information for devices that are part of the fabric, from Cisco Catalyst Center. +- name: Get Fabric device information from Cisco Catalyst Center + hosts: localhost + connection: local + vars_files: + - "credentials.yml" + tasks: + - name: Gather detailed facts for specific fabric devices + cisco.dnac.fabric_devices_info_workflow_manager: + dnac_host: "{{ dnac_host }}" + dnac_username: "{{ dnac_username }}" + dnac_password: "{{ dnac_password }}" + dnac_verify: "{{ dnac_verify }}" + dnac_port: "{{ dnac_port }}" + dnac_version: "{{ dnac_version }}" + dnac_debug: "{{ dnac_debug }}" + dnac_log: true + dnac_log_level: DEBUG + dnac_api_task_timeout: 1000 + dnac_task_poll_interval: 1 + state: gathered + config: + - fabric_devices: + - fabric_site_hierarchy: "Global/rishipat_area/Fabric-area-1" # Mandatory parameter + fabric_device_role: "CONTROL_PLANE_NODE" + device_identifier: + - ip_address: ["192.168.200.69"] + - serial_number: ["FJC272121AG"] + - hostname: ["SJ-BN-9300.cisco.local"] + timeout: 30 + retries: 3 + interval: 10 + requested_info: + - all + output_file_info: + file_path: /Users/priyadharshini/Downloads/fabric_device_info + file_format: yaml + file_mode: a + timestamp: true +""" + +RETURN = r""" + +# Case 1: Successfully retrieved fabric information for devices that are part of the fabric, from Cisco Catalyst Center +response_fabric_info: + description: + - Fabric information for filtered fabric devices + - Returned for each fabric device matching the filters. 
+ returned: always + type: list + + sample: + { + "response": [ + "The fabric devices filtered from the network devices are: ['204.1.2.2']", + { + "fabric_info": [ + { + "device_ip": "204.1.2.2", + "fabric_details": [ + { + "borderDeviceSettings": { + "borderTypes": [ + "LAYER_3" + ], + "layer3Settings": { + "borderPriority": 10, + "importExternalRoutes": false, + "isDefaultExit": true, + "localAutonomousSystemNumber": "5", + "prependAutonomousSystemCount": 0 + } + }, + "deviceRoles": [ + "BORDER_NODE", + "CONTROL_PLANE_NODE", + "EDGE_NODE" + ], + "fabricId": "c9fda934-a212-4a1b-be5f-f391d2ff8863", + "id": "9294625f-52d4-485f-9d36-5abcfa4f863f", + "networkDeviceId": "e5cc9398-afbf-40a2-a8b1-e9cf0635c28a" + } + ] + } + ] + } + ], + "status": "success" + } + +# Case 2: Successfully retrieved handoff info for devices that are part of the fabric, from Cisco Catalyst Center +response_fabric_devices_layer3_handoffs_sda_info: + description: + - Handoff information for filtered fabric devices. + - Returned for each fabric device matching the filters. + returned: always + type: list + + "sample": { + "response": [ + "The fabric devices filtered from the network devices are: ['91.1.1.2']", + [ + { + "fabric_devices_layer3_handoffs_sda_info": [ + { + "device_ip": "91.1.1.2", + "handoff_layer3_sda_transit_info": [ + { + "connectedToInternet": true, + "fabricId": "6ea62e10-cc4b-4f67-8251-d0939fdd4ad8", + "isMulticastOverTransitEnabled": false, + "networkDeviceId": "36680b59-39b2-446b-8ceb-5a1e157b5799", + "transitNetworkId": "02f92f56-e9c8-4534-b7f1-e06635061de9" + } + ] + } + ] + } + ], + [ + { + "fabric_devices_layer3_handoffs_ip_info": [ + { + "device_ip": "91.1.1.2", + "handoff_layer3_ip_transit_info": [ + { + "externalConnectivityIpPoolName": "BorderHandOff_sub", + "fabricId": "6ea62e10-cc4b-4f67-8251-d0939fdd4ad8", + "id": "f10250af-bd72-4175-ad9b-ea2831e74a15", + "interfaceName": "TenGigabitEthernet1/0/2", + "localIpAddress": "204.1.16.69/30", + "localIpv6Address": "2004:1:16::1:0:45/126", + "networkDeviceId": "36680b59-39b2-446b-8ceb-5a1e157b5799", + "remoteIpAddress": "204.1.16.70/30", + "remoteIpv6Address": "2004:1:16::1:0:46/126", + "tcpMssAdjustment": 0, + "transitNetworkId": "bbf16d41-031b-4061-b9b6-ae75768ae196", + "virtualNetworkName": "DEFAULT_VN", + "vlanId": 3000 + }, + { + "externalConnectivityIpPoolName": "BorderHandOff_sub", + "fabricId": "6ea62e10-cc4b-4f67-8251-d0939fdd4ad8", + "id": "3cd81271-4621-40fd-aac7-8b8499127c0c", + "interfaceName": "TenGigabitEthernet1/0/2", + "localIpAddress": "204.1.16.73/30", + "localIpv6Address": "2004:1:16::1:0:49/126", + "networkDeviceId": "36680b59-39b2-446b-8ceb-5a1e157b5799", + "remoteIpAddress": "204.1.16.74/30", + "remoteIpv6Address": "2004:1:16::1:0:4a/126", + "tcpMssAdjustment": 0, + "transitNetworkId": "bbf16d41-031b-4061-b9b6-ae75768ae196", + "virtualNetworkName": "Fabric_VN", + "vlanId": 3001 + }, + { + "externalConnectivityIpPoolName": "BorderHandOff_sub", + "fabricId": "6ea62e10-cc4b-4f67-8251-d0939fdd4ad8", + "id": "cdad28e7-8df2-432d-8550-666a9fcfc21c", + "interfaceName": "TenGigabitEthernet1/0/2", + "localIpAddress": "204.1.16.77/30", + "localIpv6Address": "2004:1:16::1:0:4d/126", + "networkDeviceId": "36680b59-39b2-446b-8ceb-5a1e157b5799", + "remoteIpAddress": "204.1.16.78/30", + "remoteIpv6Address": "2004:1:16::1:0:4e/126", + "tcpMssAdjustment": 0, + "transitNetworkId": "bbf16d41-031b-4061-b9b6-ae75768ae196", + "virtualNetworkName": "INFRA_VN", + "vlanId": 3002 + }, + { + "externalConnectivityIpPoolName": "BorderHandOff_sub", + 
"fabricId": "6ea62e10-cc4b-4f67-8251-d0939fdd4ad8", + "id": "8711bdb5-7a92-4ab0-a7d7-b4053e1db84c", + "interfaceName": "TenGigabitEthernet1/0/2", + "localIpAddress": "204.1.16.81/30", + "localIpv6Address": "2004:1:16::1:0:51/126", + "networkDeviceId": "36680b59-39b2-446b-8ceb-5a1e157b5799", + "remoteIpAddress": "204.1.16.82/30", + "remoteIpv6Address": "2004:1:16::1:0:52/126", + "tcpMssAdjustment": 0, + "transitNetworkId": "bbf16d41-031b-4061-b9b6-ae75768ae196", + "virtualNetworkName": "IntraSubnet_VN", + "vlanId": 3003 + }, + { + "externalConnectivityIpPoolName": "BorderHandOff_sub", + "fabricId": "6ea62e10-cc4b-4f67-8251-d0939fdd4ad8", + "id": "66b48881-e72f-44cc-aedb-6819af25bd27", + "interfaceName": "TenGigabitEthernet1/0/2", + "localIpAddress": "204.1.16.85/30", + "localIpv6Address": "2004:1:16::1:0:55/126", + "networkDeviceId": "36680b59-39b2-446b-8ceb-5a1e157b5799", + "remoteIpAddress": "204.1.16.86/30", + "remoteIpv6Address": "2004:1:16::1:0:56/126", + "tcpMssAdjustment": 0, + "transitNetworkId": "bbf16d41-031b-4061-b9b6-ae75768ae196", + "virtualNetworkName": "SGT_Port_test", + "vlanId": 3004 + }, + { + "externalConnectivityIpPoolName": "BorderHandOff_sub", + "fabricId": "6ea62e10-cc4b-4f67-8251-d0939fdd4ad8", + "id": "6dd7d005-74aa-4762-a59e-1c280a975425", + "interfaceName": "TenGigabitEthernet1/0/2", + "localIpAddress": "204.1.16.89/30", + "localIpv6Address": "2004:1:16::1:0:59/126", + "networkDeviceId": "36680b59-39b2-446b-8ceb-5a1e157b5799", + "remoteIpAddress": "204.1.16.90/30", + "remoteIpv6Address": "2004:1:16::1:0:5a/126", + "tcpMssAdjustment": 0, + "transitNetworkId": "bbf16d41-031b-4061-b9b6-ae75768ae196", + "virtualNetworkName": "VN1", + "vlanId": 3005 + }, + { + "externalConnectivityIpPoolName": "BorderHandOff_sub", + "fabricId": "6ea62e10-cc4b-4f67-8251-d0939fdd4ad8", + "id": "a13167ae-d900-4048-92a6-0d41bd1bd531", + "interfaceName": "TenGigabitEthernet1/0/2", + "localIpAddress": "204.1.16.93/30", + "localIpv6Address": "2004:1:16::1:0:5d/126", + "networkDeviceId": "36680b59-39b2-446b-8ceb-5a1e157b5799", + "remoteIpAddress": "204.1.16.94/30", + "remoteIpv6Address": "2004:1:16::1:0:5e/126", + "tcpMssAdjustment": 0, + "transitNetworkId": "bbf16d41-031b-4061-b9b6-ae75768ae196", + "virtualNetworkName": "VN2", + "vlanId": 3006 + }, + { + "externalConnectivityIpPoolName": "BorderHandOff_sub", + "fabricId": "6ea62e10-cc4b-4f67-8251-d0939fdd4ad8", + "id": "932cd9d7-9067-4224-ab1d-922a7cd79b5b", + "interfaceName": "TenGigabitEthernet1/0/2", + "localIpAddress": "204.1.16.97/30", + "localIpv6Address": "2004:1:16::1:0:61/126", + "networkDeviceId": "36680b59-39b2-446b-8ceb-5a1e157b5799", + "remoteIpAddress": "204.1.16.98/30", + "remoteIpv6Address": "2004:1:16::1:0:62/126", + "tcpMssAdjustment": 0, + "transitNetworkId": "bbf16d41-031b-4061-b9b6-ae75768ae196", + "virtualNetworkName": "VN3", + "vlanId": 3007 + }, + { + "externalConnectivityIpPoolName": "BorderHandOff_sub", + "fabricId": "6ea62e10-cc4b-4f67-8251-d0939fdd4ad8", + "id": "9c09c4a8-5a7f-4b06-ac28-4d895293cfe7", + "interfaceName": "TenGigabitEthernet1/0/2", + "localIpAddress": "204.1.16.101/30", + "localIpv6Address": "2004:1:16::1:0:65/126", + "networkDeviceId": "36680b59-39b2-446b-8ceb-5a1e157b5799", + "remoteIpAddress": "204.1.16.102/30", + "remoteIpv6Address": "2004:1:16::1:0:66/126", + "tcpMssAdjustment": 0, + "transitNetworkId": "bbf16d41-031b-4061-b9b6-ae75768ae196", + "virtualNetworkName": "VN4", + "vlanId": 3008 + }, + { + "externalConnectivityIpPoolName": "BorderHandOff_sub", + "fabricId": 
"6ea62e10-cc4b-4f67-8251-d0939fdd4ad8", + "id": "df69abf3-266a-4678-84d2-ca8d9340b4c2", + "interfaceName": "TenGigabitEthernet1/0/2", + "localIpAddress": "204.1.16.105/30", + "localIpv6Address": "2004:1:16::1:0:69/126", + "networkDeviceId": "36680b59-39b2-446b-8ceb-5a1e157b5799", + "remoteIpAddress": "204.1.16.106/30", + "remoteIpv6Address": "2004:1:16::1:0:6a/126", + "tcpMssAdjustment": 0, + "transitNetworkId": "bbf16d41-031b-4061-b9b6-ae75768ae196", + "virtualNetworkName": "VN5", + "vlanId": 3009 + }, + { + "externalConnectivityIpPoolName": "BorderHandOff_sub", + "fabricId": "6ea62e10-cc4b-4f67-8251-d0939fdd4ad8", + "id": "d95e8a82-7a71-4f4a-a31a-85385c1e1ef8", + "interfaceName": "TenGigabitEthernet1/0/2", + "localIpAddress": "204.1.16.109/30", + "localIpv6Address": "2004:1:16::1:0:6d/126", + "networkDeviceId": "36680b59-39b2-446b-8ceb-5a1e157b5799", + "remoteIpAddress": "204.1.16.110/30", + "remoteIpv6Address": "2004:1:16::1:0:6e/126", + "tcpMssAdjustment": 0, + "transitNetworkId": "bbf16d41-031b-4061-b9b6-ae75768ae196", + "virtualNetworkName": "VN6", + "vlanId": 3010 + }, + { + "externalConnectivityIpPoolName": "BorderHandOff_sub", + "fabricId": "6ea62e10-cc4b-4f67-8251-d0939fdd4ad8", + "id": "27171568-3f08-4f13-8991-a8904bc7e2a6", + "interfaceName": "TenGigabitEthernet1/0/2", + "localIpAddress": "204.1.16.113/30", + "localIpv6Address": "2004:1:16::1:0:71/126", + "networkDeviceId": "36680b59-39b2-446b-8ceb-5a1e157b5799", + "remoteIpAddress": "204.1.16.114/30", + "remoteIpv6Address": "2004:1:16::1:0:72/126", + "tcpMssAdjustment": 0, + "transitNetworkId": "bbf16d41-031b-4061-b9b6-ae75768ae196", + "virtualNetworkName": "VN7", + "vlanId": 3011 + }, + { + "externalConnectivityIpPoolName": "BorderHandOff_sub", + "fabricId": "6ea62e10-cc4b-4f67-8251-d0939fdd4ad8", + "id": "bb704a7d-8988-4d8c-80e5-4c02bb9ab042", + "interfaceName": "TenGigabitEthernet1/0/2", + "localIpAddress": "204.1.16.117/30", + "localIpv6Address": "2004:1:16::1:0:75/126", + "networkDeviceId": "36680b59-39b2-446b-8ceb-5a1e157b5799", + "remoteIpAddress": "204.1.16.118/30", + "remoteIpv6Address": "2004:1:16::1:0:76/126", + "tcpMssAdjustment": 0, + "transitNetworkId": "bbf16d41-031b-4061-b9b6-ae75768ae196", + "virtualNetworkName": "WiredVNFB1", + "vlanId": 3012 + }, + { + "externalConnectivityIpPoolName": "BorderHandOff_sub", + "fabricId": "6ea62e10-cc4b-4f67-8251-d0939fdd4ad8", + "id": "8d814e72-25af-490d-8f69-dec10af9e790", + "interfaceName": "TenGigabitEthernet1/0/2", + "localIpAddress": "204.1.16.121/30", + "localIpv6Address": "2004:1:16::1:0:79/126", + "networkDeviceId": "36680b59-39b2-446b-8ceb-5a1e157b5799", + "remoteIpAddress": "204.1.16.122/30", + "remoteIpv6Address": "2004:1:16::1:0:7a/126", + "tcpMssAdjustment": 0, + "transitNetworkId": "bbf16d41-031b-4061-b9b6-ae75768ae196", + "virtualNetworkName": "WiredVNFBLayer2", + "vlanId": 3013 + }, + { + "externalConnectivityIpPoolName": "BorderHandOff_sub", + "fabricId": "6ea62e10-cc4b-4f67-8251-d0939fdd4ad8", + "id": "b01aa3a2-61c8-4179-a568-6dcdbafe993f", + "interfaceName": "TenGigabitEthernet1/0/2", + "localIpAddress": "204.1.16.125/30", + "localIpv6Address": "2004:1:16::1:0:7d/126", + "networkDeviceId": "36680b59-39b2-446b-8ceb-5a1e157b5799", + "remoteIpAddress": "204.1.16.126/30", + "remoteIpv6Address": "2004:1:16::1:0:7e/126", + "tcpMssAdjustment": 0, + "transitNetworkId": "bbf16d41-031b-4061-b9b6-ae75768ae196", + "virtualNetworkName": "WiredVNStatic", + "vlanId": 3014 + }, + { + "externalConnectivityIpPoolName": "BorderHandOff_sub", + "fabricId": 
"6ea62e10-cc4b-4f67-8251-d0939fdd4ad8", + "id": "a4f61e60-b75c-4bcd-b7c4-e3bd68ec324d", + "interfaceName": "TenGigabitEthernet1/0/2", + "localIpAddress": "204.1.16.129/30", + "localIpv6Address": "2004:1:16::1:0:81/126", + "networkDeviceId": "36680b59-39b2-446b-8ceb-5a1e157b5799", + "remoteIpAddress": "204.1.16.130/30", + "remoteIpv6Address": "2004:1:16::1:0:82/126", + "tcpMssAdjustment": 0, + "transitNetworkId": "bbf16d41-031b-4061-b9b6-ae75768ae196", + "virtualNetworkName": "WirelessVNFB", + "vlanId": 3015 + }, + { + "externalConnectivityIpPoolName": "BorderHandOff_sub", + "fabricId": "6ea62e10-cc4b-4f67-8251-d0939fdd4ad8", + "id": "43761af5-509f-4d07-9d2c-8b09f6ba2114", + "interfaceName": "TenGigabitEthernet1/0/2", + "localIpAddress": "204.1.16.133/30", + "localIpv6Address": "2004:1:16::1:0:85/126", + "networkDeviceId": "36680b59-39b2-446b-8ceb-5a1e157b5799", + "remoteIpAddress": "204.1.16.134/30", + "remoteIpv6Address": "2004:1:16::1:0:86/126", + "tcpMssAdjustment": 0, + "transitNetworkId": "bbf16d41-031b-4061-b9b6-ae75768ae196", + "virtualNetworkName": "WirelessVNFGuest", + "vlanId": 3016 + } + ] + } + ] + } + ], + [ + { + "fabric_devices_layer2_handoffs_info": [ + { + "device_ip": "91.1.1.2", + "handoff_layer2_info": [] + } + ] + } + ] + ], + "status": "success" + } + +# Case 3: Successfully retrieved issues for devices that are part of the fabric, from Cisco Catalyst Center +response_device_issues_info: + description: + - Issue information for filtered fabric devices. + - Returned for each fabric device matching the filters. + returned: always + type: list + + sample: { + "response": [ + "The fabric devices filtered from the network devices are: ['204.1.2.2']", + [ + { + "device_issues_info": [ + { + "device_ip": "204.1.2.2", + "issue_details": [ + { + "aiDriven": "No", + "category": "Connected", + "clientMac": null, + "deviceId": "e5cc9398-afbf-40a2-a8b1-e9cf0635c28a", + "deviceRole": "", + "issueId": "4eec8a72-65ff-45ae-89be-f0437eae778e", + "issue_occurence_count": 1703, + "last_occurence_time": 1750856863468, + "name": "AAA Server '172.23.241.245' state on Edge device 'abhitest' is DEAD.", + "priority": "P1", + "siteId": "", + "status": "active" + }, + { + "aiDriven": "No", + "category": "User Defined", + "clientMac": null, + "deviceId": "e5cc9398-afbf-40a2-a8b1-e9cf0635c28a", + "deviceRole": "", + "issueId": "80ba94eb-15d3-48c2-a3f4-20bf99551217", + "issue_occurence_count": 5, + "last_occurence_time": 1750789583288, + "name": "NON_AUTHORITATIVE_CLOCK", + "priority": "P2", + "siteId": "", + "status": "active" + } + ] + } + ] + } + ] + ], + "status": "success" + } + +# Case 4: Successfully retrieved health info for devices that are part of the fabric, from Cisco Catalyst Center +response_device_health_info: + description: + - Health information for filtered fabric devices. + - Returned for each fabric device matching the filters. 
+ returned: always + type: list + + sample: { + "response": [ + "The fabric devices filtered from the network devices are: ['204.1.2.2']", + [ + { + "device_health_info": [ + { + "device_ip": "204.1.2.2", + "health_details": [ + { + "airQualityHealth": {}, + "avgTemperature": 4350.0, + "band": {}, + "clientCount": {}, + "cpuHealth": 10, + "cpuUlitilization": 2.75, + "cpuUtilization": 2.75, + "deviceFamily": "SWITCHES_AND_HUBS", + "deviceType": "Cisco Catalyst 9300 Switch", + "freeMemoryBufferHealth": -1, + "freeTimerScore": -1, + "interDeviceLinkAvailFabric": 10, + "interDeviceLinkAvailHealth": 100, + "interfaceLinkErrHealth": 10, + "interferenceHealth": {}, + "ipAddress": "204.1.2.2", + "issueCount": 2, + "location": "Global/USA/New York/NY_BLD1", + "macAddress": "90:88:55:07:59:00", + "maxTemperature": 5700.0, + "memoryUtilization": 50, + "memoryUtilizationHealth": 10.0, + "model": "Cisco Catalyst 9300 Switch", + "name": "abhitest", + "noiseHealth": {}, + "osVersion": "17.12.4", + "overallHealth": 1, + "packetPoolHealth": -1, + "reachabilityHealth": "REACHABLE", + "utilizationHealth": {}, + "uuid": "e5cc9398-afbf-40a2-a8b1-e9cf0635c28a", + "wanLinkUtilization": -1.0, + "wqePoolsHealth": -1 + } + ] + } + ] + } + ] + ], + "status": "success" + } + +# Case 5: Successfully retrieved connected device info for devices that are part of the fabric, from Cisco Catalyst Center +response_connected_device_info: + description: + - Connected device information for filtered fabric devices. + - Returned for each fabric device matching the filters. + returned: always + type: list + + sample: { + "response": [ + "The fabric devices filtered from the network devices are: ['204.1.2.2']", + [ + { + "connected_device_info": [ + { + "connected_device_details": [ + { + "capabilities": [ + "ROUTER", + "TB_BRIDGE" + ], + "neighborDevice": "AP345D.A80E.20B4", + "neighborPort": "GigabitEthernet0" + }, + { + "capabilities": [ + "IGMP_CONDITIONAL_FILTERING", + "ROUTER", + "SWITCH" + ], + "neighborDevice": "NY-BN-9300", + "neighborPort": "TenGigabitEthernet1/1/2" + }, + { + "capabilities": [ + "ROUTER", + "TB_BRIDGE" + ], + "neighborDevice": "AP6849.9275.0FD0", + "neighborPort": "GigabitEthernet0" + }, + { + "capabilities": [ + "ROUTER", + "TB_BRIDGE" + ], + "neighborDevice": "AP6CD6.E369.49B4", + "neighborPort": "GigabitEthernet0" + }, + { + "capabilities": [ + "ROUTER", + "TB_BRIDGE" + ], + "neighborDevice": "AP34B8.8315.7C6C", + "neighborPort": "GigabitEthernet0" + }, + { + "capabilities": [ + "HOST" + ], + "neighborDevice": "IAC-TSIM", + "neighborPort": "TenGigabitEthernet0/0/2" + }, + { + "capabilities": [ + "IGMP_CONDITIONAL_FILTERING", + "ROUTER", + "SWITCH" + ], + "neighborDevice": "NY-BN-9300", + "neighborPort": "TenGigabitEthernet2/1/2" + } + ], + "device_ip": "204.1.2.2" + } + ] + } + ] + ], + "status": "success" + } + +# Case 6: Successfully retrieved onboarding info for devices that are part of the fabric, from Cisco Catalyst Center +response_onboarding_info: + description: + - Onboarding information for filtered fabric devices. + - Returned for each fabric device matching the filters. 
+ returned: always + type: list + sample: { + "response": [ + "The fabric devices filtered from the network devices are: ['204.192.5.2']", + [ + { + "device_onboarding_info": [ + { + "device_ip": "204.192.5.2", + "port_details": [] + } + ] + } + ], + [ + { + "ssid_info": [ + { + "device_ip": "204.192.5.2", + "ssid_details": [ + { + "adminStatus": true, + "l2Security": "open", + "l3Security": "web_auth", + "managed": true, + "radioPolicy": "2.4GHz + 5GHz", + "ssidName": "ARUBA_SSIDtb17", + "wlanId": 28, + "wlanProfileName": "ARUBA_SSID_profile" + }, + { + "adminStatus": true, + "l2Security": "wpa2_enterprise", + "l3Security": "open", + "managed": true, + "radioPolicy": "2.4GHz + 5GHz + 6GHz", + "ssidName": "CiscoSensorProvisioning", + "wlanId": 1, + "wlanProfileName": "CiscoSensorProvisioning" + }, + { + "adminStatus": true, + "l2Security": "open", + "l3Security": "open", + "managed": true, + "radioPolicy": "2.4GHz + 5GHz", + "ssidName": "GUEST2tb17", + "wlanId": 26, + "wlanProfileName": "GUEST2_profile" + }, + { + "adminStatus": true, + "l2Security": "open", + "l3Security": "web_auth", + "managed": true, + "radioPolicy": "2.4GHz + 5GHz", + "ssidName": "Guest_passthrough_inttb17", + "wlanId": 18, + "wlanProfileName": "Guest_passthrough_int_profile" + }, + { + "adminStatus": true, + "l2Security": "open", + "l3Security": "open", + "managed": true, + "radioPolicy": "2.4GHz", + "ssidName": "GUESTtb17", + "wlanId": 20, + "wlanProfileName": "GUEST_profile" + }, + { + "adminStatus": true, + "l2Security": "wpa2_enterprise", + "l3Security": "web_auth", + "managed": true, + "radioPolicy": "2.4GHz + 5GHz", + "ssidName": "Guest_webauthinternaltb17", + "wlanId": 22, + "wlanProfileName": "Guest_webauthinternal_profile" + }, + { + "adminStatus": true, + "l2Security": "open", + "l3Security": "web_auth", + "managed": true, + "radioPolicy": "2.4GHz + 5GHz", + "ssidName": "Guest_webpassthroughtb17", + "wlanId": 19, + "wlanProfileName": "Guest_webpassthrough_profile" + }, + { + "adminStatus": true, + "l2Security": "open", + "l3Security": "open", + "managed": true, + "radioPolicy": "2.4GHz + 5GHz", + "ssidName": "OPENtb17", + "wlanId": 23, + "wlanProfileName": "OPEN_profile" + }, + { + "adminStatus": true, + "l2Security": "wpa2_enterprise", + "l3Security": "open", + "managed": true, + "radioPolicy": "2.4GHz + 5GHz", + "ssidName": "posturetb17", + "wlanId": 21, + "wlanProfileName": "posture_profile" + }, + { + "adminStatus": true, + "l2Security": "wpa2_enterprise", + "l3Security": "open", + "managed": true, + "radioPolicy": "2.4GHz + 5GHz", + "ssidName": "Radius_ssidtb17", + "wlanId": 17, + "wlanProfileName": "Radius_ssid_profile" + }, + { + "adminStatus": true, + "l2Security": "wpa2_enterprise", + "l3Security": "open", + "managed": true, + "radioPolicy": "2.4GHz + 5GHz", + "ssidName": "Random_mactb17", + "wlanId": 29, + "wlanProfileName": "Random_mac_profile" + }, + { + "adminStatus": true, + "l2Security": "wpa2_personal", + "l3Security": "open", + "managed": true, + "radioPolicy": "2.4GHz + 5GHz", + "ssidName": "Single5KBandtb17", + "wlanId": 27, + "wlanProfileName": "Single5KBand_profile" + }, + { + "adminStatus": true, + "l2Security": "wpa2_enterprise", + "l3Security": "open", + "managed": true, + "radioPolicy": "2.4GHz + 5GHz", + "ssidName": "SSIDDot1XIndiatb17", + "wlanId": 30, + "wlanProfileName": "SSIDDot1XIndia_profile" + }, + { + "adminStatus": true, + "l2Security": "wpa2_enterprise", + "l3Security": "open", + "managed": true, + "radioPolicy": "2.4GHz + 5GHz", + "ssidName": "SSIDDUAL BANDtb17", + 
"wlanId": 25, + "wlanProfileName": "SSIDDUAL BAND_profile" + }, + { + "adminStatus": false, + "l2Security": "wpa2_enterprise", + "l3Security": "open", + "managed": true, + "radioPolicy": "2.4GHz + 5GHz", + "ssidName": "SSIDSchedulertb17", + "wlanId": 24, + "wlanProfileName": "SSIDScheduler_profile" + } + ] + } + ] + } + ], + [ + { + "provision_status_info": [ + { + "device_ip": "204.192.5.2", + "provision_status": { + "description": "Wired Provisioned device detail retrieved successfully.", + "deviceManagementIpAddress": "204.192.5.2", + "siteNameHierarchy": "Global/USA/SAN JOSE/BLD23", + "status": "success" + } + } + ] + } + ] + ], + "status": "success" + } + +# Case 7: Successfully retrieved all info for devices that are part of the fabric, from Cisco Catalyst Center +response_all_info: + description: + - All fabric related information for filtered fabric devices. + - Returned for each fabric device matching the filters. + returned: always + type: list + sample: { + "response": [ + "The fabric devices filtered from the network devices are: ['204.1.2.2', '204.192.6.200']", + [ + { + "fabric_info": [ + { + "device_ip": "204.1.2.2", + "fabric_details": [ + { + "borderDeviceSettings": { + "borderTypes": ["LAYER_3"], + "layer3Settings": { + "borderPriority": 10, + "importExternalRoutes": false, + "isDefaultExit": true, + "localAutonomousSystemNumber": "5", + "prependAutonomousSystemCount": 0 + } + }, + "deviceRoles": [ + "BORDER_NODE", + "CONTROL_PLANE_NODE", + "EDGE_NODE" + ], + "fabricId": "c9fda934-a212-4a1b-be5f-f391d2ff8863", + "id": "9294625f-52d4-485f-9d36-5abcfa4f863f", + "networkDeviceId": "e5cc9398-afbf-40a2-a8b1-e9cf0635c28a" + } + ] + } + ] + } + ], + [ + { + "device_issues_info": [ + { + "device_ip": "204.1.2.2", + "issue_details": [ + { + "aiDriven": "No", + "category": "Connected", + "clientMac": null, + "deviceId": "e5cc9398-afbf-40a2-a8b1-e9cf0635c28a", + "deviceRole": "", + "issueId": "4eec8a72-65ff-45ae-89be-f0437eae778e", + "issue_occurence_count": 1703, + "last_occurence_time": 1750856863468, + "name": "AAA Server '172.23.241.245' state on Edge device 'abhitest' is DEAD.", + "priority": "P1", + "siteId": "", + "status": "active" + }, + { + "aiDriven": "No", + "category": "User Defined", + "clientMac": null, + "deviceId": "e5cc9398-afbf-40a2-a8b1-e9cf0635c28a", + "deviceRole": "", + "issueId": "80ba94eb-15d3-48c2-a3f4-20bf99551217", + "issue_occurence_count": 5, + "last_occurence_time": 1750789583288, + "name": "NON_AUTHORITATIVE_CLOCK", + "priority": "P2", + "siteId": "", + "status": "active" + } + ] + } + ] + } + ], + [ + { + "device_health_info": [ + { + "device_ip": "204.1.2.2", + "health_details": [ + { + "airQualityHealth": {}, + "avgTemperature": 4350.0, + "band": {}, + "clientCount": {}, + "cpuHealth": 10, + "cpuUlitilization": 2.75, + "cpuUtilization": 2.75, + "deviceFamily": "SWITCHES_AND_HUBS", + "deviceType": "Cisco Catalyst 9300 Switch", + "freeMemoryBufferHealth": -1, + "freeTimerScore": -1, + "interDeviceLinkAvailFabric": 10, + "interDeviceLinkAvailHealth": 100, + "interfaceLinkErrHealth": 10, + "interferenceHealth": {}, + "ipAddress": "204.1.2.2", + "issueCount": 2, + "location": "Global/USA/New York/NY_BLD1", + "macAddress": "90:88:55:07:59:00", + "maxTemperature": 5700.0, + "memoryUtilization": 50, + "memoryUtilizationHealth": 10.0, + "model": "Cisco Catalyst 9300 Switch", + "name": "abhitest", + "noiseHealth": {}, + "osVersion": "17.12.4", + "overallHealth": 1, + "packetPoolHealth": -1, + "reachabilityHealth": "REACHABLE", + "utilizationHealth": {}, + 
"uuid": "e5cc9398-afbf-40a2-a8b1-e9cf0635c28a", + "wanLinkUtilization": -1.0, + "wqePoolsHealth": -1 + } + ] + } + ] + } + ], + [ + { + "fabric_devices_layer3_handoffs_sda_info": [ + { + "device_ip": "204.1.2.2", + "handoff_info": [] + } + ] + } + ], + [ + { + "fabric_devices_layer3_handoffs_ip_info": [ + { + "device_ip": "204.1.2.2", + "handoff_info": [] + } + ] + } + ], + [ + { + "fabric_devices_layer2_handoffs_info": [ + { + "device_ip": "204.1.2.2", + "handoff_info": [] + } + ] + } + ], + [ + { + "connected_device_info": [ + { + "connected_device_details": [ + { + "capabilities": [ + "IGMP_CONDITIONAL_FILTERING", + "ROUTER", + "SWITCH" + ], + "neighborDevice": "NY-BN-9300", + "neighborPort": "TenGigabitEthernet2/1/2" + }, + { + "capabilities": [ + "IGMP_CONDITIONAL_FILTERING", + "ROUTER", + "SWITCH" + ], + "neighborDevice": "NY-BN-9300", + "neighborPort": "TenGigabitEthernet1/1/2" + }, + { + "capabilities": [ + "ROUTER", + "TB_BRIDGE" + ], + "neighborDevice": "AP6849.9275.0FD0", + "neighborPort": "GigabitEthernet0" + }, + { + "capabilities": [ + "ROUTER", + "TB_BRIDGE" + ], + "neighborDevice": "AP6CD6.E369.49B4", + "neighborPort": "GigabitEthernet0" + }, + { + "capabilities": [ + "ROUTER", + "TB_BRIDGE" + ], + "neighborDevice": "AP34B8.8315.7C6C", + "neighborPort": "GigabitEthernet0" + }, + { + "capabilities": ["HOST"], + "neighborDevice": "IAC-TSIM", + "neighborPort": "TenGigabitEthernet0/0/2" + }, + { + "capabilities": [ + "ROUTER", + "TB_BRIDGE" + ], + "neighborDevice": "AP345D.A80E.20B4", + "neighborPort": "GigabitEthernet0" + } + ], + "device_ip": "204.1.2.2" + } + ] + } + ] + ], + "status": "success" + } + +# Case 8: If no fabric devices is found +response_info: + description: + - Returned when no fabric devices match the provided filters. + returned: always + type: list + + sample: { + "response":[ + "No fabric devices found for the given filters." + ] + } +""" + +from ansible_collections.cisco.dnac.plugins.module_utils.dnac import ( + DnacBase, +) +from ansible.module_utils.basic import AnsibleModule + +try: + import yaml +except ImportError: + yaml = None +import time +import os +import json +import ipaddress +from datetime import datetime + +from ansible_collections.cisco.dnac.plugins.module_utils.validation import ( + validate_list_of_dicts,) + + +class FabricDevicesInfo(DnacBase): + def __init__(self, module): + super().__init__(module) + self.supported_states = ['gathered'] + self.total_response = [] + + def validate_input(self): + """ + Validate the playbook configuration for fabric device structure and integrity. + + This method ensures that the provided 'config' attribute adheres to the expected format for fabric device + processing. It validates the presence and types of required fields, checks for duplicates, and ensures + consistency in user-provided values such as requested information categories. + + Args: + self: The instance of the class that contains the 'config' attribute to be validated. + + Returns: + The method returns the current instance with updated attributes: + - self.msg: A descriptive message indicating the outcome of the validation process. + - self.status: The result of the validation ('success' or 'failed'). + - self.validated_config: A cleaned and validated configuration if validation succeeds. + + Validations Performed: + - 'config' must be a list of dictionaries. + - Each dictionary must contain the key 'fabric_devices' mapped to a list. 
+ - Each 'fabric_device' must be a dictionary containing at least one of: + 'ip_address', 'hostname', 'serial_number', 'device_role', or 'site_hierarchy'. + - All values in those fields (if present) must be lists of strings. + - 'requested_info', if provided, must be a list of allowed strings. + - Validates 'timeout', 'retries', and 'interval' as non-negative integers if specified. + - Ensures 'output_file_path' is a string if provided. + - Detects and prevents duplicate IP addresses, hostnames, or serial numbers across devices. + """ + + config_spec = { + "fabric_devices": { + "type": "list", + "elements": "dict", + "fabric_site_hierarchy": { + "type": "str", + "required": True + }, + "fabric_device_role": { + "type": "str", + "required": False, + "allowed_values": [ + "CONTROL_PLANE_NODE", + "BORDER_NODE", + "EDGE_NODE", + "EXTENDED_NODE", + "WIRELESS_CONTROLLER_NODE" + ] + }, + "device_identifier": { + "type": "list", + "elements": "dict", + "ip_address": { + "type": "list", + "elements": "str", + "required": False + }, + "ip_address_range": { + "type": "str", + "required": False + }, + "serial_number": { + "type": "list", + "elements": "str", + "required": False + }, + "hostname": { + "type": "list", + "elements": "str", + "required": False + } + }, + "timeout": { + "type": "int", + "default": 120, + }, + "retries": { + "type": "int", + "default": 3, + }, + "interval": { + "type": "int", + "default": 10, + }, + "requested_info": { + "type": "list", + "elements": "str", + "allowed_values": [ + "fabric_info", + "handoff_info", + "onboarding_info", + "connected_devices_info", + "device_health_info", + "device_issues_info" + ] + }, + "output_file_info": { + "type": "dict", + "file_path": { + "type": "str", + }, + "file_format": { + "type": "str", + "default": "yaml", + "allowed_values": ["json", "yaml"] + }, + "file_mode": { + "type": "str", + "default": "w", + "allowed_values": ["w", "a"] + }, + "timestamp": { + "type": "bool", + "default": False + } + } + } + } + try: + valid_config, invalid_params = validate_list_of_dicts(self.config, config_spec) + + if invalid_params: + self.msg = "Fabric devices configuration validation failed with invalid parameters: {0}".format( + invalid_params + ) + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + + self.validated_config = valid_config + + self.log("Fabric devices configuration validation completed successfully", "INFO") + self.log(self.config) + self.log( + "Validated {0} fabric device configuration section(s) for workflow processing".format( + (valid_config) + ), + "DEBUG" + ) + return self + + except Exception as validation_exception: + self.msg = "Fabric devices configuration validation encountered an error: {0}".format( + str(validation_exception) + ) + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + + def get_want(self, config): + """ + Extract the desired state ('want') from a fabric devices playbook block. + + Args: + self (object): An instance of a class interacting with Cisco Catalyst Center. + config (dict): A dictionary containing the playbook configuration, expected to include + a list of fabric devices under the 'fabric_devices' key. + + Returns: + self: The current instance of the class with the 'want' attribute populated + based on the validated fabric device data from the playbook. + Description: + This method processes the 'fabric_devices' section of the provided configuration and + validates its structure and content. 
Specifically, it performs the following steps: + + - Checks that the 'fabric_devices' key exists and is not empty. + - Validates that each device entry includes at least one of the following: + 'ip_address', 'hostname', 'serial_number', 'device_role', or 'site_hierarchy'. + - If 'requested_info' is provided for a device, verifies that all values are among + the allowed set: + - all + - fabric_info + - handoff_info + - onboarding_info + - connected_devices_info + - device_health_info + - device_issues_info + Upon successful validation, the fabric device data is stored in the instance's 'want' + attribute for use in subsequent processing. + """ + self.log("Extracting desired fabric devices information workflow state from playbook configuration", "DEBUG") + self.log("Processing configuration sections for comprehensive workflow validation", "DEBUG") + + want = {} + fabric_devices = config.get("fabric_devices") + + want["fabric_devices"] = config.get("fabric_devices") + + required_device_keys = [ + "fabric_site_hierarchy" + ] + allowed_return_values = { + "all", + "fabric_info", + "handoff_info", + "onboarding_info", + "connected_devices_info", + "device_health_info", + "device_issues_info", + } + allowed_device_identifier_filters = {"ip_address", "hostname", "serial_number", "ip_address_range"} + allowed_field = { + "fabric_site_hierarchy", "fabric_device_role", "device_identifier", + "timeout", "retries", "interval", "requested_info", "output_file_info" + } + allowed_fabric_device_roles = {"CONTROL_PLANE_NODE", "EDGE_NODE", "BORDER_NODE", "WIRELESS_CONTROLLER_NODE", "EXTENDED_NODE"} + allowed_output_file_info_keys = {"file_path", "file_format", "file_mode", "timestamp"} + allowed_file_formats = {"json", "yaml"} + allowed_file_modes = {"a", "w"} + + for config in self.config: + if "fabric_devices" not in config: + self.msg = "'fabric_devices' key is missing in the config block" + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + + for idx, device in enumerate(config["fabric_devices"]): + self.log("Processing device entry {0}: {1}".format(idx + 1, device), "DEBUG") + for key in device: + if key not in allowed_field: + self.msg = "'{0}' is not a valid key in fabric device entry. Allowed keys are: {1}".format( + key, ", ".join(sorted(allowed_field)) + ) + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + + if not any(device.get(key) for key in required_device_keys): + self.log("Device index {0} missing required identification keys: {1}".format(idx + 1, required_device_keys), "ERROR") + self.msg = ( + "Each fabric device must contain at least one of: {0}." 
+ .format(", ".join(required_device_keys)) + ) + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + + if "fabric_device_role" in device: + if device["fabric_device_role"] not in allowed_fabric_device_roles: + self.msg = ( + "'fabric_device_role' must be one of: {0}" + .format(", ".join(sorted(allowed_fabric_device_roles))) + ) + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + + for numeric in ("timeout", "retries", "interval"): + if numeric in device and device[numeric] < 0: + self.msg = "'{0}' must be a non-negative integer".format(numeric) + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + + valid_keys_found = set() + identifiers = device.get("device_identifier", []) + if identifiers: + all_identifier_keys = set() + for identifier in identifiers: + self.log("Processing device_identifier: {0}".format(identifier), "DEBUG") + all_identifier_keys.update(identifier.keys()) + + for key in identifier: + self.log(key) + if key in allowed_device_identifier_filters: + valid_keys_found.add(key) + self.log(valid_keys_found) + else: + self.msg = ( + "Invalid or unrecognized key '{0}' found in device_identifier. " + "Allowed keys are: {1}".format( + key, ", ".join(sorted(allowed_device_identifier_filters)) + ) + ) + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + + if "ip_address" in all_identifier_keys and "ip_address_range" in all_identifier_keys: + self.msg = ( + "Both 'ip_address' and 'ip_address_range' are specified across device_identifier entries. " + "Please specify only one of them." + ) + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + + if not valid_keys_found: + self.msg = ( + "Each 'device_identifier' list must contain at least one valid key among: {0}." + .format(", ".join(allowed_device_identifier_filters)) + ) + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + + if "requested_info" in device and device["requested_info"] is not None: + self.log("Applying requested_info for device index {0}".format(idx + 1), "DEBUG") + return_value = device["requested_info"] + for value_name in return_value: + if value_name not in allowed_return_values: + self.log( + "Invalid requested_info '{0}' in device index {1}." + "Valid options: {2}".format(value_name, idx, allowed_return_values), "ERROR" + ) + self.msg = ( + "'{0}' is not a valid return value. Allowed values are: {1}" + .format(value_name, sorted(allowed_return_values)) + ) + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + + if "output_file_info" in device: + output_file_info = device["output_file_info"] + if output_file_info is None: + continue + + file_format = output_file_info.get("file_format", "yaml") + file_mode = output_file_info.get("file_mode", "w") + timestamp = output_file_info.get("timestamp", False) + + output_file_info["file_format"] = file_format + output_file_info["file_mode"] = file_mode + output_file_info["timestamp"] = timestamp + + for key in output_file_info: + if key not in allowed_output_file_info_keys: + self.msg = "'{0}' is not a valid key in 'output_file_info'. 
Allowed keys are: {1}".format( + key, sorted(allowed_output_file_info_keys) + ) + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + + if output_file_info["file_format"] not in allowed_file_formats: + self.msg = "'file_format' must be one of: {0}".format(", ".join(sorted(allowed_file_formats))) + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + + if output_file_info["file_mode"] not in allowed_file_modes: + self.msg = "'file_mode' must be one of: {0}".format(", ".join(sorted(allowed_file_modes))) + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + + self.want = want + self.log(want, "DEBUG") + self.log("Fabric devices information workflow desired state extraction completed successfully", "DEBUG") + + def get_diff_gathered(self, config): + """ + Processes the device configuration and retrieves requested information for each fabric device. + + Args: + self (object): An instance of the class interacting with Cisco Catalyst Center APIs. + config (dict): A dictionary containing the playbook configuration, including a list of + fabric devices and the specific types of information to be retrieved + (via the 'requested_info' key). + + Returns: + self: The current instance with the 'msg' and 'total_response' attributes populated + based on the API responses for the requested device information. + + Description: + This method retrieves fabric-related information of fabric devices + for a list of network devices provided in the playbook. For each device in the + input, it performs the following: + + - Determines which categories of information are requested, including: + - fabric_info + - handoff_info (Layer 2, Layer 3 SDA, Layer 3 IP) + - onboarding_info + - connected_devices_info + - device_health_info + - device_issues_info + """ + self.log("Starting device info retrieval for all device entries", "INFO") + + fabric_devices = config.get("fabric_devices", []) + combined_fabric_data = {} + + for device_cfg in fabric_devices: + self.log("Processing device configuration entry with parameters: {0}".format(list(device_cfg.keys())), "DEBUG") + filtered_config = {} + for field_name, field_value in device_cfg.items(): + if field_name != "requested_info": + filtered_config[field_name] = field_value + + self.log("Filtered config (excluding requested_info): {0}".format(filtered_config), "DEBUG") + self.log("Extracted device identification parameters: {0}".format(list(filtered_config.keys())), "DEBUG") + requested_info = device_cfg.get("requested_info", []) + + if not requested_info: + all_info_requested = True + self.log("No specific information types requested - retrieving all available information categories", "DEBUG") + else: + all_info_requested = "all" in requested_info + self.log("Specific information types requested: {0}".format(requested_info), "DEBUG") + + fabric_info = all_info_requested or "fabric_info" in requested_info + handoff_info = all_info_requested or "handoff_info" in requested_info + onboarding_info = all_info_requested or "onboarding_info" in requested_info + connected_devices_info = all_info_requested or "connected_devices_info" in requested_info + device_health_info = all_info_requested or "device_health_info" in requested_info + device_issues_info = all_info_requested or "device_issues_info" in requested_info + + self.log(""" + Requested: + fabric_info: {0} + handoff_info: {1} + onboarding_info: {2} + connected_devices_info: {3} + device_health_info: {4} + device_issues_info: {5} 
+ """.format( + fabric_info, + handoff_info, + onboarding_info, + connected_devices_info, + device_health_info, + device_issues_info + ), "DEBUG") + fabric_site_hierarchy = device_cfg.get("fabric_site_hierarchy") + fabric_exists, fabric_id = self.is_fabric_site(fabric_site_hierarchy) + device_ids = self.get_device_id(filtered_config) + filtered_fabric_devices = self.filter_fabric_devices(filtered_config) + self.log("Filtered fabric devices after applying given filters: {0}".format(filtered_fabric_devices), "DEBUG") + + if not fabric_exists: + self.msg = "The specified site hierarchy '{0}' is not a fabric site.".format( + device_cfg.get("fabric_site_hierarchy") + ) + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + if not filtered_fabric_devices: + self.msg = "No fabric devices found for the given filters." + self.total_response.append(self.msg) + break + else: + self.total_response.append("The fabric devices filtered from the network devices are: {0}".format(list(filtered_fabric_devices.keys()))) + + if fabric_info: + self.log("Retrieving fabric configuration details and device roles for {0} fabric devices".format(len(filtered_fabric_devices)), "DEBUG") + fabric_info_result = self.get_fabric_info(filtered_fabric_devices) + self.total_response.append(fabric_info_result) + combined_fabric_data["fabric_info"] = fabric_info_result + + if device_issues_info: + self.log("Retrieving active device issues and alerts for {0} fabric devices".format(len(filtered_fabric_devices)), "DEBUG") + device_issues_result = self.get_device_issues_info(filtered_fabric_devices) + self.total_response.append(device_issues_result) + combined_fabric_data["device_issues_info"] = device_issues_result + + if handoff_info: + self.log("Retrieving Layer 2/3 handoff configurations for fabric border and control plane nodes", "DEBUG") + self.log("Retrieving Layer 3 SDA handoff configurations for fabric devices", "DEBUG") + handoff_layer3_sda_result = self.get_handoff_layer3_sda_info(filtered_fabric_devices) + self.total_response.append(handoff_layer3_sda_result) + combined_fabric_data["handoff_layer3_sda_info"] = handoff_layer3_sda_result + + self.log("Retrieving Layer 3 IP transit handoff configurations for fabric devices", "DEBUG") + handoff_layer3_ip_result = self.get_handoff_layer3_ip_info(filtered_fabric_devices) + self.total_response.append(handoff_layer3_ip_result) + combined_fabric_data["handoff_layer3_ip_info"] = handoff_layer3_ip_result + + self.log("Retrieving Layer 2 handoff configurations for fabric devices", "DEBUG") + handoff_layer2_result = self.get_handoff_layer2_info(filtered_fabric_devices) + self.total_response.append(handoff_layer2_result) + combined_fabric_data["handoff_layer2_info"] = handoff_layer2_result + if connected_devices_info: + self.log("Retrieving connected neighbor device information via interface for {0} fabric devices".format(len(filtered_fabric_devices)), "DEBUG") + connected_devices_result = self.get_connected_device_details_from_interfaces(filtered_fabric_devices) + self.total_response.append(connected_devices_result) + combined_fabric_data["connected_devices_info"] = connected_devices_result + + if device_health_info: + self.log("Retrieving health metrics and performance data for {0} fabric devices".format(len(filtered_fabric_devices)), "DEBUG") + device_health_result = self.get_device_health_info(filtered_fabric_devices) + self.total_response.append(device_health_result) + combined_fabric_data["device_health_info"] = device_health_result + + if 
onboarding_info: + self.log("Retrieving device onboarding status and port assignment details for {0} fabric devices".format(len(fabric_devices)), "DEBUG") + self.log("Retrieving device onboarding and port assignment information", "DEBUG") + onboarding_info_result = self.get_port_details(filtered_fabric_devices) + self.total_response.append(onboarding_info_result) + combined_fabric_data["onboarding_info"] = onboarding_info_result + + self.log("Retrieving device onboarding status and port channel details for {0} fabric devices".format(len(fabric_devices)), "DEBUG") + self.log("Retrieving device onboarding and port channel information", "DEBUG") + port_channel_info_result = self.get_port_channels(filtered_fabric_devices) + self.total_response.append(port_channel_info_result) + combined_fabric_data["port_channel_info"] = port_channel_info_result + + self.log("Retrieving SSID configuration details for wireless fabric devices", "DEBUG") + ssid_info_result = self.get_ssid_details(filtered_fabric_devices) + self.total_response.append(ssid_info_result) + combined_fabric_data["ssid_info"] = ssid_info_result + + self.log("Retrieving device provision status and deployment state information", "DEBUG") + provision_status_result = self.get_provision_status(filtered_fabric_devices) + self.total_response.append(provision_status_result) + combined_fabric_data["provision_status_info"] = provision_status_result + + if config.get("fabric_devices"): + output_file_info = config["fabric_devices"][0].get("output_file_info") + + if output_file_info: + self.log("Processing file output configuration for fabric device information export: {0}".format(output_file_info), "INFO") + self.write_device_info_to_file({"output_file_info": output_file_info}) + self.log("Fabric device information successfully written to output file", "INFO") + + if self.total_response: + self.log("Fabric device information retrieval workflow completed successfully with {0} response entries".format(len(fabric_devices)), "INFO") + self.msg = self.total_response + self.set_operation_result("success", False, self.msg, "INFO") + + def is_fabric_site(self, site_hierarchy): + """ + Determines whether a given site hierarchy is configured as a Software-Defined Access (SDA) fabric site. + + This method validates the existence of a site hierarchy in Cisco Catalyst Center and checks + if it has been configured as an SDA fabric site. + + Args: + site_hierarchy (str): The hierarchical path of the site to validate as a fabric site. + Format: "Global/Area/Building/Floor" or similar hierarchical structure. + Must be an existing site in Cisco Catalyst Center. + + Returns: + tuple: A tuple containing two elements: + - bool: True if the site is configured as a fabric site, False otherwise. + - str or None: The fabric site ID if the site is a fabric site, None otherwise. 
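+
+        Example:
+            A minimal illustrative call; the site hierarchy shown is a sample value,
+            not a reference to any particular deployment:
+
+                fabric_exists, fabric_id = self.is_fabric_site("Global/USA/SAN JOSE/BLD23")
+                if fabric_exists:
+                    # fabric_id is the SDA fabric site ID and can be passed on to
+                    # fabric-scoped lookups such as the get_fabric_devices call used
+                    # later in this module.
+                    pass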
+ + """ + self.log("Checking if site hierarchy '{0}' is a fabric site".format(site_hierarchy), "DEBUG") + site_exists, site_id = self.get_site_id(site_hierarchy) + + if not site_exists: + self.msg = "The specified site hierarchy '{0}' does not exist.".format(site_hierarchy) + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + + try: + limit = 500 + offset = 1 + fabric_site_id = None + + self.log("Checking fabric sites for site_id: {0}".format(site_id), "DEBUG") + + while True: + response = self.dnac._exec( + family="sda", + function="get_fabric_sites", + params={"site_id": site_id, "offset": offset, "limit": limit} + ) + + self.log("Received API response from 'get_fabric_sites': {0}".format(response), "DEBUG") + + fabric_sites = response.get("response", []) + self.log("Retrieved {0} fabric site(s) for site_id: {1}".format(len(fabric_sites), site_id), "DEBUG") + + if fabric_sites: + fabric_site_id = fabric_sites[0].get("id") + self.log( + "The site hierarchy '{0}' (siteId: {1}) is a Fabric site with Fabric ID: {2}".format( + site_hierarchy, site_id, fabric_site_id + ), + "INFO" + ) + return True, fabric_site_id + + if len(fabric_sites) < limit: + self.log("No more fabric sites returned (less than limit {0}).".format(limit), "DEBUG") + break + + offset += limit + + self.log( + "The site hierarchy '{0}' (siteId: {1}) is NOT a Fabric site.".format(site_hierarchy, site_id), + "INFO" + ) + return False, None + + except Exception as e: + self.msg = "Error occurred while checking fabric site: {0}".format(str(e)) + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + return False, None + + def get_device_id(self, filtered_config): + """ + Retrieves device UUIDs from Cisco Catalyst Center based on device identifier parameters. + + This method processes device identification criteria from the configuration and maps network + devices to their corresponding UUIDs. It supports multiple identification methods and only + considers devices that are managed and reachable in the Catalyst Center inventory. + Logic Implementation: + The method implements two distinct logical operations based on the structure of device_identifiers: + + Logic Implementation: + The method implements two distinct logical operations based on the structure of device_identifiers: + + AND Logic (Single Entry with Multiple Keys): + - Triggered when: len(device_identifiers) == 1 AND len(device_identifiers[0].keys()) > 1 + - Behavior: Devices must match ALL specified criteria within the single entry + - Example: [{"ip_address": ["192.168.1.1"], "hostname": ["switch01"]}] + - Result: Returns only devices that have IP 192.168.1.1 AND hostname switch01 + - Implementation: Uses set intersection to find devices matching all criteria + + OR Logic (Multiple Entries): + - Triggered when: Multiple device_identifier entries are provided + - Behavior: Devices matching ANY of the entries are included + - Example: [{"ip_address": ["192.168.1.1"]}, {"hostname": ["switch02"]}] + - Result: Returns devices that have IP 192.168.1.1 OR hostname switch02 + - Implementation: Accumulates devices from each entry independently + + Args: + filtered_config (dict): Configuration dictionary containing device identification parameters. + + Returns: + dict or None: A dictionary mapping device IP addresses to their UUIDs for managed devices. + Returns None if no device_identifier section is found in configuration. 
+ """ + + self.log("Starting device UUID mapping retrieval from 'device_identifier' entries", "INFO") + + device_identifiers = filtered_config.get("device_identifier", []) + if not device_identifiers: + self.msg = "No 'device_identifier' section found in configuration. Skipping device ID retrieval." + self.log(self.msg, "WARNING") + return None + + param_key_map = { + "ip_address": "managementIpAddress", + "serial_number": "serialNumber", + "hostname": "hostname" + } + + ip_uuid_map = {} + timeout = filtered_config.get("timeout", 120) + retries = filtered_config.get("retries", 3) + interval = filtered_config.get("interval", 10) + + # Detect logic type: AND or OR + is_and_logic = len(device_identifiers) == 1 and len(device_identifiers[0].keys()) > 1 + logic_type = "AND" if is_and_logic else "OR" + self.log("Detected device_identifier logic type: {0}".format(logic_type), "DEBUG") + + if is_and_logic: + identifier = device_identifiers[0] + self.log("Processing AND logic for identifiers: {0}".format(identifier), "DEBUG") + + combined_devices = None + for key, values in identifier.items(): + if not values: + continue + if not isinstance(values, list): + values = [values] + + expanded_values = [] + + for value in values: + if key == "ip_address_range": + try: + start_ip, end_ip = value.split("-") + start = ipaddress.IPv4Address(start_ip.strip()) + end = ipaddress.IPv4Address(end_ip.strip()) + expanded_values.extend([ + str(ipaddress.IPv4Address(i)) + for i in range(int(start), int(end) + 1) + ]) + self.log( + "Expanded IP range '{0}' into {1} IPs".format(value, len(expanded_values)), + "DEBUG" + ) + except Exception as e: + self.log("Invalid IP range '{0}': {1}".format(value, str(e)), "ERROR") + else: + expanded_values.append(value) + + param_key = param_key_map.get(key) + matched_devices = [] + + missing_ips = [] + + for ip_or_value in expanded_values: + params = {param_key_map.get(key, "managementIpAddress"): ip_or_value} + attempt = 0 + start_time = time.time() + device_found = False + + while attempt < retries or (time.time() - start_time) < timeout: + self.log("Attempt {0} - Calling API with params: {1}".format(attempt + 1, params), "DEBUG") + try: + response = self.dnac._exec( + family="devices", + function="get_device_list", + params=params + ) + devices = response.get("response", []) + self.log("Received API response for {0}={1}: {2}".format(key, ip_or_value, response), "DEBUG") + managed_devices = [ + device for device in devices + if device.get("collectionStatus") == "Managed" + or device.get("reachabilityStatus") == "Reachable" + ] + if managed_devices: + matched_devices.extend(managed_devices) + device_found = True + break + except Exception as e: + self.log("API call failed for {0}={1}: {2}".format(key, value, str(e)), "WARNING") + attempt += 1 + time.sleep(interval) + + if not device_found: + missing_ips.append(ip_or_value) + + if missing_ips: + display_value = "IP(s) not found: {}".format(", ".join(missing_ips)) + self.msg = ( + "No managed devices found for the following identifiers: {0}. " + "Device(s) may be unreachable, unmanaged, or not present in Catalyst Center inventory." 
+ ).format(display_value) + self.set_operation_result("success", False, self.msg, "INFO") + if self.msg not in self.total_response: + self.total_response.append(self.msg) + + if combined_devices is None: + combined_devices = matched_devices + else: + combined_devices = [ + device for device in combined_devices if any( + device.get("instanceUuid") == managed_device.get("instanceUuid") for managed_device in matched_devices + ) + ] + + for device in combined_devices or []: + uuid = device.get("instanceUuid") + ip = device.get("managementIpAddress") + if uuid and ip: + ip_uuid_map[ip] = uuid + + if not combined_devices: + self.msg = ( + "No managed devices found matching all specified identifiers " + "({0}).".format(list(identifier.keys())) + ) + self.set_operation_result("success", False, self.msg, "INFO") + self.total_response.append(self.msg) + + else: + for idx, identifier in enumerate(device_identifiers, start=1): + self.log("Processing OR logic entry #{0}: {1}".format(idx, identifier), "DEBUG") + + for key, values in identifier.items(): + if not values: + continue + if not isinstance(values, list): + values = [values] + + expanded_values = [] + + for value in values: + if key == "ip_address_range": + try: + start_ip, end_ip = value.split("-") + start = ipaddress.IPv4Address(start_ip.strip()) + end = ipaddress.IPv4Address(end_ip.strip()) + expanded_values.extend([ + str(ipaddress.IPv4Address(i)) + for i in range(int(start), int(end) + 1) + ]) + self.log( + "Expanded IP range '{0}' into {1} IPs".format(value, len(expanded_values)), + "DEBUG" + ) + except Exception as e: + self.log("Invalid IP range '{0}': {1}".format(value, str(e)), "ERROR") + else: + expanded_values.append(value) + + missing_ips = [] + + for ip_or_value in expanded_values: + params = {param_key_map.get(key, "managementIpAddress"): ip_or_value} + attempt = 0 + attempt = 0 + start_time = time.time() + device_found = False + + while attempt < retries or (time.time() - start_time) < timeout: + self.log("Attempt {0} - Calling API with params: {1}".format(attempt + 1, params), "DEBUG") + try: + response = self.dnac._exec( + family="devices", + function="get_device_list", + params=params + ) + devices = response.get("response", []) + self.log("Received API response for {0}={1}: {2}".format(key, ip_or_value, response), "DEBUG") + managed_devices = [ + device for device in devices + if device.get("collectionStatus") == "Managed" + or device.get("reachabilityStatus") == "Reachable" + ] + if managed_devices: + for device in managed_devices: + uuid = device.get("instanceUuid") + ip = device.get("managementIpAddress") + if uuid and ip: + ip_uuid_map[ip] = uuid + device_found = True + break + except Exception as e: + self.log("API call failed for {0}={1}: {2}".format(key, value, str(e)), "WARNING") + attempt += 1 + time.sleep(interval) + + if not device_found: + missing_ips.append(ip_or_value) + + if missing_ips: + display_value = ", ".join(missing_ips) + self.msg = ( + "No managed devices found for the following {0}(s): {1}. " + "Device(s) may be unreachable, unmanaged, or not present in Catalyst Center inventory." 
+ ).format(key, display_value) + self.set_operation_result("success", False, self.msg, "INFO") + if self.msg not in self.total_response: + self.total_response.append(self.msg) + + total_devices = len(ip_uuid_map) + self.log("Device UUID mapping completed — mapped {0} managed devices.".format(total_devices), "INFO") + + return ip_uuid_map + + def filter_fabric_devices(self, filtered_config): + """ + Filters network devices to identify which ones are part of a Software-Defined Access (SDA) fabric site. + + This method retrieves all fabric devices from a specified fabric site and cross-references them + with the provided device identifiers to determine which devices are actually fabric-enabled. + It supports optional role-based filtering to narrow results to specific fabric device roles. + + Args: + filtered_config (dict): Configuration dictionary containing device identification parameters. + + Returns: + dict: A dictionary mapping device IP addresses to their corresponding UUIDs for devices + that are both managed and part of the fabric site. Returns None if an error occurs. + """ + self.log("Starting comprehensive fabric device filtering", "INFO") + site_hierarchy = self.want["fabric_devices"][0].get("fabric_site_hierarchy") + fabric_exists, fabric_id = self.is_fabric_site(site_hierarchy) + device_ids = self.get_device_id(filtered_config) + + if filtered_config.get("device_identifier") and not device_ids: + self.log( + "Device identifiers were specified in configuration but no matching device UUIDs were found. " + "Skipping fabric filtering.", "WARNING" + ) + return None + + fabric_device_role = self.want["fabric_devices"][0].get("fabric_device_role") + + timeout = filtered_config.get("timeout", 120) + retries = filtered_config.get("retries", 3) + interval = filtered_config.get("interval", 10) + + filtered_devices = {} + start_time = time.time() + attempt = 0 + + if fabric_exists: + self.log("Retrieving fabric devices for fabric ID: {0}".format(fabric_id), "DEBUG") + + while attempt < retries and (time.time() - start_time) < timeout: + try: + limit = 500 + offset = 1 + fabric_devices = [] + + params = {"fabric_id": fabric_id, "offset": offset, "limit": limit} + if fabric_device_role: + params["device_roles"] = fabric_device_role + self.log( + "Applying role-based filtering for role: '{0}' in API request".format(fabric_device_role), + "DEBUG" + ) + self.log("Initial API params for fabric devices retrieval: {0}".format(params), "DEBUG") + + while True: + response = self.dnac._exec( + family="sda", + function="get_fabric_devices", + params=params + ) + + self.log("Received API response from 'get_fabric_devices': {0}".format(response), "DEBUG") + + devices = response.get("response", []) + + if devices: + fabric_devices.extend(devices) + + if len(devices) < limit: + self.log("No more fabric devices returned (less than limit {0}).".format(limit), "DEBUG") + break + + offset += limit + params["offset"] = offset + + self.log("Total fabric devices retrieved: {0}".format(len(fabric_devices)), "INFO") + + filtered_devices = {} + if device_ids: + for ip, uuid in device_ids.items(): + for device in fabric_devices: + fabric_id = device.get("fabricId") + if device.get("networkDeviceId") == uuid: + filtered_devices[ip] = fabric_id + self.log( + "Device {0} (UUID: {1}) included as part of fabric site '{2}'.".format( + ip, uuid, site_hierarchy + ), + "DEBUG" + ) + else: + for device in fabric_devices: + uuid = device.get("networkDeviceId") + if uuid: + ip_map = self.get_device_ips_from_device_ids([uuid]) + 
if ip_map and isinstance(ip_map, dict):
+                                ip = list(ip_map.values())[0]
+                                if ip:
+                                    filtered_devices[ip] = fabric_id
+                                    self.log(
+                                        "Device {0} (UUID: {1}) included as part of fabric site '{2}'.".format(
+                                            ip, uuid, site_hierarchy
+                                        ),
+                                        "DEBUG"
+                                    )
+                    if filtered_devices:
+                        self.log("Fabric devices successfully filtered on attempt {0}".format(attempt + 1), "INFO")
+                        break
+
+                    if attempt < retries and (time.time() - start_time) < timeout:
+                        self.log(
+                            "No matching fabric devices found in attempt {0}. Retrying in {1} seconds...".format(
+                                attempt + 1, interval
+                            ),
+                            "WARNING"
+                        )
+                        time.sleep(interval)
+                    attempt += 1
+
+                    total_filtered = len(filtered_devices)
+                    self.log(
+                        "Filtered down to {0} fabric devices after applying site{1} criteria.".format(
+                            total_filtered,
+                            " and role" if fabric_device_role else ""
+                        ),
+                        "INFO",
+                    )
+
+                    if not filtered_devices:
+                        self.msg = "No devices from the provided identifiers are part of the specified fabric site with the given criteria."
+                        self.set_operation_result("success", False, self.msg, "INFO").check_return_status()
+
+                except Exception as e:
+                    self.msg = "Error occurred while retrieving/filtering fabric devices: {0}".format(str(e))
+                    self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status()
+                    return None
+            return filtered_devices
+
+    def get_fabric_info(self, filtered_fabric_devices):
+        """
+        Retrieve comprehensive fabric configuration details for specified fabric devices from Catalyst Center.
+
+        This method queries the Catalyst Center SDA API to collect detailed fabric-specific information
+        for each provided fabric device. It iterates through the filtered fabric devices to retrieve
+        complete fabric configuration metadata including device roles, fabric site associations, and
+        SDA-specific attributes such as device types, border/edge/control plane roles, and fabric ID mappings.
+
+        Args:
+            filtered_fabric_devices (dict): Mapping of device management IP addresses to their fabric IDs.
+                Contains only devices that have been confirmed as members of the specified fabric site.
+                Each IP address represents a managed device that exists in both the network inventory
+                and the fabric site configuration.
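+                Example value (illustrative; the IP and fabric ID below are placeholders):
+                    {"192.168.1.10": "fabric-site-uuid-1"}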
+ + Returns: + list: Structured fabric related information results in standardized format: + [ + { + "fabric_info": [ + { + "device_ip": "192.168.1.1", + "fabric_details": [fabric_records] or [] or "Error: " + } + ] + } + ] + """ + self.log("Retrieving comprehensive fabric configuration details for fabric device inventory", "INFO") + self.log("Processing fabric information for {0} fabric devices".format(len(filtered_fabric_devices)), "DEBUG") + + fabric_device_role = self.want["fabric_devices"][0].get("fabric_device_role") + device_identifier = self.want["fabric_devices"][0].get("device_identifier") + + fabric_info_list = [] + devices_with_fabric_info = 0 + devices_with_errors = 0 + + self.log("Querying fabric device information for filtered fabric devices from Cisco Catalyst Center", "DEBUG") + + for index, (ip, fabric_id) in enumerate(filtered_fabric_devices.items()): + ip_device_uuid_map = self.get_device_ids_from_device_ips([ip]) + for ip, device_uuid in ip_device_uuid_map.items(): + self.log("Processing fabric device {0}/{1}: IP={2}".format( + index + 1, len(filtered_fabric_devices), ip + ), "DEBUG") + try: + params = {"fabric_id": fabric_id} + if fabric_device_role: + params["device_roles"] = fabric_device_role + self.log( + "Applying role-based filtering for role: '{0}' in API request".format(fabric_device_role), + "DEBUG" + ) + if device_identifier: + params["network_device_id"] = device_uuid + self.log( + "Added 'network_device_id' to API params for device {0}: {1}".format(ip, device_uuid), + "DEBUG" + ) + self.log("Initial API params for fabric devices retrieval: {0}".format(params), "DEBUG") + + response = self.dnac._exec( + family="sda", + function="get_fabric_devices", + params=params + ) + fabric_data = response.get("response", []) + self.log( + "Received API response from 'get_fabric_devices' for device {0}: {1}".format( + ip, response + ), + "DEBUG" + ) + + filtered_fabric_data = [ + device for device in fabric_data + if device.get("networkDeviceId") == device_uuid + ] + + if filtered_fabric_data: + devices_with_fabric_info += 1 + self.log("Fabric details found for device_ip: {0}.".format(ip), "INFO") + fabric_info_list.append({ + "device_ip": ip, + "fabric_details": filtered_fabric_data + }) + self.log("Successfully retrieved fabric configuration for device {0}".format(ip), "DEBUG") + else: + self.log("No fabric details found for device_ip: {0}".format(ip), "WARNING") + + except Exception as api_err: + devices_with_errors += 1 + error_message = "Failed to retrieve fabric information for device {0}: {1}".format(ip, str(api_err)) + self.log(error_message, "ERROR") + fabric_info_list.append({ + "device_ip": ip, + "fabric_details": "Error: {0}".format(api_err) + }) + continue + + result = [{"fabric_info": fabric_info_list}] + + self.log("Completed fabric info retrieval for filtered fabric devices. 
Total devices processed: {0}".format(len(fabric_info_list)), "INFO") + self.log("Fabric info result: {0}".format(result), "DEBUG") + + total_fabric_devices = len(filtered_fabric_devices) + self.log( + "Fabric information retrieval completed - processed {0}/{1} fabric devices successfully".format( + devices_with_fabric_info, total_fabric_devices + ), + "INFO" + ) + if devices_with_errors > 0: + self.log("Warning: {0} devices encountered errors during fabric information retrieval".format(devices_with_errors), "WARNING") + + if devices_with_fabric_info > 0: + self.log( + "Fabric information successfully retrieved for devices: {0}".format( + [info["device_ip"] for info in fabric_info_list if not isinstance(info["fabric_details"], str)] + ), + "DEBUG" + ) + return result + + def get_device_issues_info(self, filtered_fabric_devices): + """ + Retrieve current device issues and alerts for fabric devices from Cisco Catalyst Center. + + This method queries the Catalyst Center Issues API to collect active issues, alerts, and + health problems for each provided fabric device. It provides comprehensive troubleshooting + information including critical alerts, warnings, and operational issues that may affect + fabric device performance and SDA functionality. + + Args: + filtered_fabric_devices (dict): Mapping of device management IP addresses to their fabric IDs. + Contains only devices that have been confirmed as members of the specified fabric site. + Each IP address represents a managed device that exists in both the network inventory + and the fabric site configuration. + + Return: + list: Structured device issues information results in standardized format: + [ + { + "device_issues_info": [ + { + "device_ip": "192.168.1.1", + "issue_details": [issue_records] or [] or "Error: " + }, + { + "device_ip": "192.168.1.2", + "issue_details": [issue_records] or [] or "Error: " + } + ] + } + ] + """ + self.log("Retrieving current device issues and alerts for fabric device troubleshooting", "INFO") + self.log( + "Processing device issues information for {0} fabric devices ".format( + len(filtered_fabric_devices) + ), + "DEBUG" + ) + + issue_info_list = [] + devices_processed = 0 + devices_with_issues = 0 + devices_without_issues = 0 + devices_with_errors = 0 + + for index, (ip, fabric_id) in enumerate(filtered_fabric_devices.items()): + ip_device_uuid_map = self.get_device_ids_from_device_ips([ip]) + for ip, device_uuid in ip_device_uuid_map.items(): + self.log("Processing fabric device {0}/{1}: IP={2}".format( + index + 1, len(filtered_fabric_devices), ip + ), "DEBUG") + devices_processed += 1 + self.log("Retrieving issue information for fabric device {0}".format(ip), "DEBUG") + try: + response = self.dnac._exec( + family="issues", + function="issues", + params={"device_id": device_uuid} + ) + issue_data = response.get("response", []) + self.log( + "Received API response from 'issues' for device {0}: {1}".format( + ip, response + ), + "DEBUG" + ) + + if issue_data: + devices_with_issues += 1 + self.log("Active issues found for fabric device {0} - retrieved {1} issue records".format(ip, len(issue_data)), "INFO") + issue_info_list.append({ + "device_ip": ip, + "issue_details": issue_data + }) + + else: + devices_without_issues += 1 + self.log("No active issues found for fabric device {0} - device status healthy".format(ip), "DEBUG") + issue_info_list.append({ + "device_ip": ip, + "issue_details": [] + }) + + except Exception as api_err: + devices_with_errors += 1 + self.msg = "Failed to retrieve device issues 
for fabric device {0}: {1}".format(ip, str(api_err)) + issue_info_list.append({ + "device_ip": ip, + "issue_details": "Error: {0}".format(str(api_err)) + }) + + result = [{"device_issues_info": issue_info_list}] + + self.log("Completed device info retrieval. Total devices processed: {0}".format(len(issue_info_list)), "INFO") + + total_fabric_devices = len(filtered_fabric_devices) + self.log( + "Device issues retrieval completed - processed {0}/{1} fabric devices successfully".format( + devices_processed, total_fabric_devices + ), + "INFO", + ) + if devices_with_issues > 0: + self.log("Fabric devices with active issues: {0}".format(devices_with_issues), "WARNING") + + if devices_without_issues > 0: + self.log("Fabric devices with healthy status (no issues): {0}".format(devices_without_issues), "INFO") + + if devices_with_errors > 0: + self.log("Warning: {0} devices encountered errors during issue information retrieval".format(devices_with_errors), "WARNING") + + self.log("Aggregated device‑issues info: {0}".format(result), "DEBUG") + + return result + + def get_transit_name_by_id(self, transit_id): + """ + Retrieve the human-readable transit network name for a given transit network identifier. + + This method queries the Cisco Catalyst Center SDA API to resolve transit network IDs + into their corresponding descriptive names for enhanced readability and reporting. + Transit networks are used in fabric handoff configurations to enable inter-fabric + and external connectivity in SDA deployments. + + Args: + transit_id (str): The unique identifier (UUID) of the transit network. + Must be a valid transit network ID that exists in the Catalyst Center SDA configuration. + + Returns: + str or None: The descriptive name of the transit network if found, otherwise None. 
+
+            - Success: Returns the transit network name (e.g., "MPLS_WAN_Transit", "Internet_Transit")
+            - Not Found: Returns None when the transit ID doesn't exist or has no name configured
+            - Error: Returns None when the API call fails or encounters exceptions
+        """
+        self.log("Starting transit network name retrieval for transit_id: {0}".format(transit_id), "DEBUG")
+
+        if not isinstance(transit_id, str) or len(transit_id.strip()) == 0:
+            self.log("Invalid transit_id format provided: {0} - returning None".format(transit_id), "WARNING")
+            return None
+
+        try:
+            self.log("Querying Catalyst Center for transit network details with ID: {0}".format(transit_id), "DEBUG")
+            response = self.dnac._exec(
+                family="sda",
+                function="get_transit_networks",
+                params={"id": transit_id}
+            )
+            transit_info = response.get("response", [])
+            self.log("Received API response for 'get_transit_networks' with transit_id {0}: {1}".format(transit_id, response), "DEBUG")
+
+            if not transit_info:
+                self.log("No transit network information found for transit_id: {0}".format(transit_id), "DEBUG")
+                return None
+
+            transit_name = transit_info[0].get("name", None)
+
+            if not transit_name:
+                self.log("Transit network found but no name configured for transit_id: {0}".format(transit_id), "WARNING")
+                return None
+
+            self.log("Successfully retrieved transit network name: '{0}' for ID: {1}".format(transit_name, transit_id), "INFO")
+            return transit_name
+
+        except Exception as e:
+            self.msg = "Failed to retrieve transit name for transit_id {0}: {1}".format(transit_id, str(e))
+            self.log(self.msg, "ERROR")
+            self.set_operation_result("failed", False, self.msg, "ERROR")
+            return None
+
+    def get_handoff_layer3_sda_info(self, filtered_fabric_devices):
+        """
+        Retrieve Layer 3 SDA (Software-Defined Access) handoff configurations for fabric inter-site connectivity.
+
+        This method queries the Catalyst Center SDA API to collect Layer 3 SDA transit handoff configurations
+        for fabric devices that enable inter-fabric site communication and SDA overlay routing. It provides
+        detailed information about SDA transit connections, LISP mappings, and fabric-to-fabric routing
+        configurations essential for multi-site SDA deployments.
+
+        Args:
+            filtered_fabric_devices (dict): Mapping of device management IP addresses to their fabric IDs.
+                Contains only devices that have been confirmed as members of the specified fabric site.
+                Each IP address represents a managed device that exists in both the network inventory
+                and the fabric site configuration.
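+                Example call (illustrative; the IP and fabric ID are placeholders):
+                    handoffs = self.get_handoff_layer3_sda_info({"192.168.1.10": "fabric-site-uuid-1"})
+                    # handoffs[0]["fabric_devices_layer3_handoffs_sda_info"] then holds one entry per device.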
+ + Returns: + list: Structured Layer 3 SDA handoff information results in standardized format: + [ + { + "fabric_devices_layer3_handoffs_sda_info": [ + { + "device_ip": "192.168.1.1", + "handoff_layer3_sda_transit_info": [handoff_records] or [] or "Error: " + }, + { + "device_ip": "192.168.1.2", + "handoff_layer3_sda_transit_info": [handoff_records] or [] or "Error: " + } + ] + } + ] + """ + self.log("Retrieving Layer 3 SDA handoff configurations for fabric inter-site connectivity", "INFO") + self.log("Processing Layer 3 SDA handoff information for {0} devices across fabric sites".format(len(filtered_fabric_devices)), "DEBUG") + + device_identifier = self.want["fabric_devices"][0].get("device_identifier") + + all_handoff_layer3_sda_list = [] + processed_device_ips = set() + devices_processed = 0 + devices_with_handoffs = 0 + devices_without_handoffs = 0 + devices_with_errors = 0 + + for index, (ip, fabric_id) in enumerate(filtered_fabric_devices.items()): + ip_device_uuid_map = self.get_device_ids_from_device_ips([ip]) + for ip, device_uuid in ip_device_uuid_map.items(): + self.log( + "Processing layer3 sda handoff info for device {0}/{1}: " + "IP: {2}".format(index + 1, len(filtered_fabric_devices), ip), + "DEBUG" + ) + processed_device_ips.add(ip) + devices_processed += 1 + + self.log("Retrieving Layer 3 SDA handoff configuration for fabric device {0} in fabric site {1}".format(ip, fabric_id), "DEBUG") + + try: + params = {"fabric_id": fabric_id} + if device_identifier or fabric_id: + params["network_device_id"] = device_uuid + self.log( + "Added 'network_device_id' parameter for device {0}: {1}".format(ip, device_uuid), + "DEBUG" + ) + response = self.dnac._exec( + family="sda", + function="get_fabric_devices_layer3_handoffs_with_sda_transit", + params=params + ) + layer3_sda_handoff_data = response.get("response", []) + self.log( + "Received API response for 'get_fabric_devices_layer3_handoffs_with_sda_transit' for IP {0}: {1}".format( + ip, response + ), + "DEBUG" + ) + if layer3_sda_handoff_data: + for handoff in layer3_sda_handoff_data: + transit_id = handoff.get("transitNetworkId") + handoff["transitName"] = self.get_transit_name_by_id(transit_id) + devices_with_handoffs += 1 + self.log( + "Layer 3 SDA handoff configuration found for fabric device {0} - " + "retrieved {1} handoff records".format( + ip, len(layer3_sda_handoff_data) + ), + "INFO" + ) + all_handoff_layer3_sda_list.append({ + "device_ip": ip, + "handoff_layer3_sda_transit_info": layer3_sda_handoff_data + }) + + else: + devices_without_handoffs += 1 + self.log( + "No Layer 3 SDA handoff configuration found for fabric device {0} - " + "device may not be configured for inter-fabric routing".format( + ip + ), + "DEBUG" + ) + all_handoff_layer3_sda_list.append({ + "device_ip": ip, + "handoff_layer3_sda_transit_info": [] + }) + + except Exception as api_err: + devices_with_errors += 1 + self.msg = "Exception occurred while getting L3 SDA hand-off info for device {0}: {1}".format(ip, api_err) + all_handoff_layer3_sda_list.append({ + "device_ip": ip, + "handoff_layer3_sda_transit_info": "Error: {0}".format(api_err) + }) + + result = [{"fabric_devices_layer3_handoffs_sda_info": all_handoff_layer3_sda_list}] + + total_fabric_devices = len(filtered_fabric_devices) + self.log( + "Layer 3 SDA handoff configuration retrieval completed - " + "processed {0}/{1} fabric devices successfully".format( + devices_processed, + total_fabric_devices + ), + "INFO" + ) + if devices_with_handoffs > 0: + self.log("Fabric devices with Layer 3 SDA 
handoff configurations: {0}".format(devices_with_handoffs), "INFO") + + if devices_without_handoffs > 0: + self.log("Fabric devices without Layer 3 SDA handoff configurations: {0}".format(devices_without_handoffs), "INFO") + + if devices_with_errors > 0: + self.log("Warning: {0} devices encountered errors during Layer 3 SDA handoff configuration retrieval".format(devices_with_errors), "WARNING") + + self.log("Completed L3 SDA hand-off info retrieval. Total devices processed: {0}".format(len(all_handoff_layer3_sda_list)), "INFO") + self.log("Aggregated L3 SDA hand-off info: {0}".format(result), "DEBUG") + + return result + + def get_handoff_layer3_ip_info(self, filtered_fabric_devices): + """ + Retrieve Layer 3 IP transit handoff configurations for fabric external connectivity and routing. + + This method queries the Catalyst Center SDA API to collect Layer 3 IP transit handoff configurations + for fabric devices that enable external network connectivity beyond the SDA fabric boundary. It provides + detailed information about IP transit connections, external routing configurations, and fabric-to-external + network handoff settings essential for enterprise WAN integration and internet connectivity. + + Args: + filtered_fabric_devices (dict): Mapping of device management IP addresses to their fabric IDs. + Contains only devices that have been confirmed as members of the specified fabric site. + Each IP address represents a managed device that exists in both the network inventory + and the fabric site configuration. + + Returns: + list: Structured Layer 3 IP handoff information results in standardized format: + [ + { + "fabric_devices_layer3_handoffs_ip_info": [ + { + "device_ip": "192.168.1.1", + "handoff_layer3_ip_transit_info": [handoff_records] or [] or "Error: " + }, + { + "device_ip": "192.168.1.2", + "handoff_layer3_ip_transit_info": [handoff_records] or [] or "Error: " + } + ] + } + ] + """ + self.log("Retrieving Layer 3 IP handoff configurations for fabric external connectivity", "INFO") + self.log("Processing Layer 3 IP handoff information for {0} devices across fabric sites".format(len(filtered_fabric_devices)), "DEBUG") + + device_identifier = self.want["fabric_devices"][0].get("device_identifier") + + all_handoff_layer3_ip_info_list = [] + processed_device_ips = set() + devices_processed = 0 + devices_with_handoffs = 0 + devices_without_handoffs = 0 + devices_with_errors = 0 + + for index, (ip, fabric_id) in enumerate(filtered_fabric_devices.items()): + ip_device_uuid_map = self.get_device_ids_from_device_ips([ip]) + for ip, device_uuid in ip_device_uuid_map.items(): + self.log( + "Retrieving Layer 3 IP handoff configuration for fabric device {0}".format(ip), "DEBUG") + processed_device_ips.add(ip) + devices_processed += 1 + + try: + params = {"fabric_id": fabric_id} + + if device_identifier or fabric_id: + params["network_device_id"] = device_uuid + self.log( + "Added 'network_device_id' parameter for device {0}: {1}".format(ip, device_uuid), + "DEBUG" + ) + response = self.dnac._exec( + family="sda", + function="get_fabric_devices_layer3_handoffs_with_ip_transit", + params=params + ) + layer3_ip_handoff_data = response.get("response", []) + self.log( + "Received API response for 'get_fabric_devices_layer3_handoffs_with_ip_transit' for IP {0}: {1}".format( + ip, response + ), + "DEBUG" + ) + if layer3_ip_handoff_data: + for handoff in layer3_ip_handoff_data: + transit_id = handoff.get("transitNetworkId") + handoff["transitName"] = self.get_transit_name_by_id(transit_id) + 
devices_with_handoffs += 1 + self.log( + "Layer 3 IP handoff configuration found for fabric device {0} - " + "retrieved {1} handoff records".format( + ip, len(layer3_ip_handoff_data) + ), + "INFO" + ) + all_handoff_layer3_ip_info_list.append({ + "device_ip": ip, + "handoff_layer3_ip_transit_info": layer3_ip_handoff_data + }) + else: + devices_without_handoffs += 1 + self.log( + "No Layer 3 IP handoff configuration found for fabric device {0} - " + "device may not be configured for external IP connectivity".format( + ip + ), + "DEBUG" + ) + all_handoff_layer3_ip_info_list.append({ + "device_ip": ip, + "handoff_layer3_ip_transit_info": [] + }) + + except Exception as api_err: + devices_with_errors += 1 + self.msg = "Failed to retrieve Layer 3 IP handoff configuration for fabric device {0}: {1}".format(ip, str(api_err)) + all_handoff_layer3_ip_info_list.append({ + "device_ip": ip, + "handoff_layer3_ip_transit_info": "Error: {0}".format(api_err) + }) + + result = [{"fabric_devices_layer3_handoffs_ip_info": all_handoff_layer3_ip_info_list}] + + total_fabric_devices = len(filtered_fabric_devices) + self.log( + "Layer 3 IP handoff configuration retrieval completed - " + "processed {0}/{1} fabric devices successfully".format( + devices_processed, + total_fabric_devices + ), + "INFO" + ) + if devices_with_handoffs > 0: + self.log("Fabric devices with Layer 3 IP handoff configurations: {0}".format(devices_with_handoffs), "INFO") + + if devices_without_handoffs > 0: + self.log("Fabric devices without Layer 3 IP handoff configurations: {0}".format(devices_without_handoffs), "INFO") + + if devices_with_errors > 0: + self.log("Warning: {0} devices encountered errors during Layer 3 IP handoff configuration retrieval".format(devices_with_errors), "WARNING") + + self.log("Completed L3 IP hand-off info retrieval. Total devices processed: {0}".format(len(all_handoff_layer3_ip_info_list)), "INFO") + self.log("Aggregated L3 IP hand-off info: {0}".format(result), "DEBUG") + + return result + + def get_handoff_layer2_info(self, filtered_fabric_devices): + """ + Retrieve Layer 2 handoff configurations for fabric edge connectivity and VLAN bridging. + + This method queries the Catalyst Center SDA API to collect Layer 2 handoff configurations + for fabric devices that enable traditional VLAN-based connectivity and bridging between + SDA fabric and legacy network segments. It provides detailed information about Layer 2 + handoff interfaces, VLAN mappings, and bridging configurations essential for hybrid + network environments and gradual SDA migration scenarios. + + Args: + filtered_fabric_devices (dict): Mapping of device management IP addresses to their fabric IDs. + Contains only devices that have been confirmed as members of the specified fabric site. + Each IP address represents a managed device that exists in both the network inventory + and the fabric site configuration. 
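+                Example value (illustrative; the IP and fabric ID are placeholders):
+                    {"192.168.1.12": "fabric-site-uuid-1"}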
+ + Returns: + list: A list with a single dictionary containing Layer 2 handoff information: + [ + { + "fabric_devices_layer2_handoffs_info": [ + { + "device_ip": "192.168.1.2", + "handoff_layer2_info": [handoff_records] or [] or "Error: " + }, + { + "device_ip": "192.168.1.2", + "handoff_layer2_info": [handoff_records] or [] or "Error: " + } + ] + } + ] + """ + self.log("Retrieving Layer 2 handoff configurations for fabric edge connectivity", "INFO") + self.log("Processing Layer 2 handoff information for {0} devices across fabric sites".format(len(filtered_fabric_devices)), "DEBUG") + + device_identifier = self.want["fabric_devices"][0].get("device_identifier") + + all_handoff_layer2_info_list = [] + processed_device_ips = set() + devices_processed = 0 + devices_with_handoffs = 0 + devices_without_handoffs = 0 + devices_with_errors = 0 + + for index, (ip, fabric_id) in enumerate(filtered_fabric_devices.items()): + ip_device_uuid_map = self.get_device_ids_from_device_ips([ip]) + for ip, device_uuid in ip_device_uuid_map.items(): + self.log( + "Retrieving Layer 2 handoff configuration for fabric device {0} ".format(ip), "DEBUG") + devices_processed += 1 + processed_device_ips.add(ip) + + try: + params = {"fabric_id": fabric_id} + + if device_identifier or fabric_id: + params["network_device_id"] = device_uuid + self.log( + "Added 'network_device_id' parameter for device {0}: {1}".format(ip, device_uuid), + "DEBUG" + ) + response = self.dnac._exec( + family="sda", + function="get_fabric_devices_layer2_handoffs", + params=params + ) + layer2_handoff_data = response.get("response", []) + self.log( + "Received API response for 'get_fabric_devices_layer2_handoffs' for IP {0}: {1}".format( + ip, response + ), + "DEBUG" + ) + if layer2_handoff_data: + for handoff in layer2_handoff_data: + transit_id = handoff.get("transitNetworkId") + handoff["transitName"] = self.get_transit_name_by_id(transit_id) + devices_with_handoffs += 1 + self.log( + "Layer 2 handoff configuration found for fabric device {0} - " + "retrieved {1} handoff records".format( + ip, len(layer2_handoff_data) + ), + "INFO" + ) + all_handoff_layer2_info_list.append({ + "device_ip": ip, + "handoff_layer2_info": layer2_handoff_data + }) + + else: + devices_without_handoffs += 1 + self.log( + "No Layer 2 handoff configuration found for fabric device {0} - " + "device may not be configured for Layer 2 edge connectivity".format( + ip + ), + "DEBUG" + ) + all_handoff_layer2_info_list.append({ + "device_ip": ip, + "handoff_layer2_info": [] + }) + + except Exception as api_err: + devices_with_errors += 1 + self.msg = "Failed to retrieve Layer 2 handoff configuration for fabric device {0}: {1}".format(ip, str(api_err)) + self.log(self.msg, "ERROR") + all_handoff_layer2_info_list.append({ + "device_ip": ip, + "handoff_layer2_info": "Error: {0}".format(api_err) + }) + continue + + result = [{"fabric_devices_layer2_handoffs_info": all_handoff_layer2_info_list}] + + total_fabric_devices = len(filtered_fabric_devices) + self.log( + "Layer 2 handoff configuration retrieval completed - " + "processed {0}/{1} fabric devices successfully".format( + devices_processed, + total_fabric_devices + ), + "INFO" + ) + if devices_with_handoffs > 0: + self.log("Fabric devices with Layer 2 handoff configurations: {0}".format(devices_with_handoffs), "INFO") + + if devices_without_handoffs > 0: + self.log("Fabric devices without Layer 2 handoff configurations: {0}".format(devices_without_handoffs), "INFO") + + if devices_with_errors > 0: + self.log("Warning: {0} 
devices encountered errors during Layer 2 handoff configuration retrieval".format(devices_with_errors), "WARNING") + + self.log("Completed L2 hand-off info retrieval. Total devices processed: {0}".format(len(all_handoff_layer2_info_list)), "INFO") + self.log("Aggregated L2 hand-off info: {0}".format(result), "DEBUG") + + return result + + def get_interface_ids_per_device(self, filtered_fabric_devices): + """ + Retrieve interface identifiers for fabric devices to enable interface-based operations and connectivity analysis. + + This method queries the Catalyst Center Device API to collect comprehensive interface inventory + information for each specified fabric device. It retrieves interface UUIDs and metadata that + are essential for subsequent operations such as connected device discovery, interface health + monitoring, and network topology mapping within SDA fabric environments. + + Args: + filtered_fabric_devices (dict): Mapping of device management IP addresses to their fabric IDs. + Contains only devices that have been confirmed as members of the specified fabric site. + Each IP address represents a managed device that exists in both the network inventory + and the fabric site configuration. + + Returns: + dict: A dictionary mapping device IP addresses to sets of interface UUIDs: + { + "192.168.1.1": {"interface-uuid-1", "interface-uuid-2", "interface-uuid-3"}, + "192.168.1.2": {"interface-uuid-4", "interface-uuid-5"}, + } + """ + self.log("Retrieving interface identifiers for fabric device interface inventory and management", "INFO") + self.log( + "Processing interface discovery for {0} fabric devices".format( + len(filtered_fabric_devices) + ), + "DEBUG" + ) + + device_interfaces_map = {} + device_interfaces_map = {} + devices_processed = 0 + devices_with_interfaces = 0 + devices_without_interfaces = 0 + interfaces_without_ids = 0 + devices_with_errors = 0 + total_interfaces_discovered = 0 + + for index, (ip, fabric_id) in enumerate(filtered_fabric_devices.items()): + self.log("Processing interface discovery for device {0}/{1}: IP: {2}".format(index + 1, len(filtered_fabric_devices), ip), "DEBUG") + ip_device_uuid_map = self.get_device_ids_from_device_ips([ip]) + for ip, device_uuid in ip_device_uuid_map.items(): + devices_processed += 1 + self.log("Retrieving interface information for fabric device {0}".format(ip), "DEBUG") + + try: + self.log("Fetching interfaces for device: {0}".format(ip), "DEBUG") + + response = self.dnac._exec( + family="devices", + function="get_interface_info_by_id", + params={"device_id": device_uuid} + ) + self.log("Received API response for interface query on device {0}".format(ip), "DEBUG") + interface_response_data = response.get("response", []) + self.log( + "Interface query completed for device {0} - found {1} interface records".format( + ip, + len(interface_response_data) + ), + "DEBUG" + ) + self.log("Received API response for 'get_interface_info_by_id' for device {0}: {1}".format(ip, response), "DEBUG") + + interface_ids = set() + for interface in interface_response_data: + interface_id = interface.get("id") + if interface_id: + interface_ids.add(interface_id) + else: + interfaces_without_ids += 1 + self.log( + "Interface record missing UUID identifier for device {0} - skipping interface".format( + ip + ), + "WARNING" + ) + device_interfaces_map[ip] = interface_ids + total_interfaces_discovered += len(interface_ids) + + if interface_ids: + devices_with_interfaces += 1 + self.log( + "Successfully mapped {0} interface identifiers for fabric device 
{1}".format( + len(interface_ids), + ip + ), + "DEBUG" + ) + else: + devices_without_interfaces += 1 + self.log( + "No interface identifiers found for fabric device {0} - " + "device may have no configured interfaces".format(ip), + "WARNING" + ) + if interfaces_without_ids > 0: + self.log( + "Warning: {0} interface records for device {1} were missing " + "UUID identifiers".format( + interfaces_without_ids, + ip + ), + "WARNING" + ) + + except Exception as e: + devices_with_errors += 1 + self.msg = "Failed to retrieve interface information for fabric device {0}: {1}".format(ip, str(e)) + self.log(self.msg, "ERROR") + + total_fabric_devices = len(filtered_fabric_devices) + successful_devices = len(device_interfaces_map) + + self.log( + "Interface identifier retrieval completed - " + "processed {0}/{1} fabric devices successfully".format( + successful_devices, + total_fabric_devices + ), + "INFO" + ) + + if devices_with_interfaces > 0: + self.log("Fabric devices with interface identifiers: {0}".format(devices_with_interfaces), "INFO") + + if devices_without_interfaces > 0: + self.log("Fabric devices without interface identifiers: {0}".format(devices_without_interfaces), "INFO") + + if devices_with_errors > 0: + self.log("Warning: {0} devices encountered errors during interface retrieval".format(devices_with_errors), "WARNING") + + self.log("Total interface identifiers discovered across all fabric devices: {0}".format(total_interfaces_discovered), "INFO") + + return device_interfaces_map + + def get_connected_device_details_from_interfaces(self, filtered_fabric_devices): + """ + Discover connected device topology for fabric devices through comprehensive interface-level analysis. + + This method performs extensive connected device discovery by querying each interface of specified + fabric devices to identify neighboring devices, endpoints, and network attachments. It processes + interface-level connectivity data to provide complete visibility into fabric device interconnections, + attached endpoints, and network topology relationships essential for fabric network management + and troubleshooting operations. + + Args: + filtered_fabric_devices (dict): Mapping of device management IP addresses to their fabric IDs. + Contains only devices that have been confirmed as members of the specified fabric site. + Each IP address represents a managed device that exists in both the network inventory + and the fabric site configuration. 
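+                Example value (illustrative; the IPs and fabric ID are placeholders):
+                    {"192.168.1.1": "fabric-site-uuid-1", "192.168.1.2": "fabric-site-uuid-1"}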
+ + Returns: + list: Structured connected device topology information in standardized format: + [ + { + "connected_device_info": [ + { + "device_ip": "192.168.1.1", + "connected_device_details": [connected_device_records] or "Error: " + }, + { + "device_ip": "192.168.1.2", + "connected_device_details": [connected_device_records] or "Error: " + } + ] + } + ] + """ + self.log("Discovering connected device topology for fabric device interface inventory", "INFO") + self.log("Processing connected device discovery for {0} fabric devices".format(len(filtered_fabric_devices)), "DEBUG") + + connected_info_list = [] + devices_with_connections = 0 + devices_without_connections = 0 + devices_with_errors = 0 + + self.log("Retrieving interface inventories for fabric devices to enable connected device discovery", "DEBUG") + device_interfaces_map = self.get_interface_ids_per_device(filtered_fabric_devices) + + if not device_interfaces_map: + self.log("No interface mappings available for fabric devices - unable to perform connected device discovery", "WARNING") + return [{"connected_device_info": []}] + + self.log("Processing connected device discovery across {0} fabric devices with interface inventories".format(len(device_interfaces_map)), "DEBUG") + + for index, (ip_address, interface_ids) in enumerate(device_interfaces_map.items()): + ip_device_uuid_map = self.get_device_ids_from_device_ips([ip_address]) + interface_count = len(interface_ids) + device_id = ip_device_uuid_map[ip_address] + interfaces_with_connections = 0 + connected_device_details = [] + + for interface_id in interface_ids: + self.log("Querying connected devices for interface {0} on device {1}".format(interface_id, ip_address), "DEBUG") + try: + response = self.dnac._exec( + family="devices", + function="get_connected_device_detail", + params={ + "device_uuid": device_id, + "interface_uuid": interface_id + } + ) + interface_connected_data = response.get("response", {}) + self.log("Received API response for IP {0}, interface {1}: {2}".format(ip_address, interface_id, response), "DEBUG") + + if interface_connected_data: + interfaces_with_connections += 1 + self.log("Connected device details found for {0}:{1}".format(ip_address, interface_id), "INFO") + connected_info_list.append({ + "device_ip": ip_address, + "connected_device_details": [interface_connected_data] + }) + else: + self.log("No connected device found for {0}:{1}".format(ip_address, interface_id), "DEBUG") + connected_info_list.append({ + "device_ip": ip_address, + "connected_device_details": [] + }) + + except Exception as e: + devices_with_errors += 1 + self.log("Failed to fetch connected device info for {0}: due to {1}".format(ip_address, str(e)), "ERROR") + connected_info_list.append({ + "device_ip": ip_address, + "connected_device_details": "Error: {0}".format(e) + }) + + result = [{"connected_device_info": connected_info_list}] + + total_fabric_devices = len(filtered_fabric_devices) + successful_devices = len(connected_info_list) + + self.log( + "Connected device topology discovery completed - " + "processed {0}/{1} fabric devices with {2} total interfaces".format( + successful_devices, + total_fabric_devices, + interface_count + ), + "INFO" + ) + if devices_with_connections > 0: + self.log("Fabric devices with connected device discoveries: {0}".format(devices_with_connections), "INFO") + + if devices_without_connections > 0: + self.log("Fabric devices with no connected devices: {0}".format(devices_without_connections), "INFO") + + if devices_with_errors > 0: + 
self.log("Warning: {0} devices encountered errors during connected device discovery".format(devices_with_errors), "WARNING") + + self.log("Total connected devices discovered across fabric topology: {0}".format(connected_info_list), "INFO") + + self.log("Completed connected device info retrieval. Total devices processed: {0}".format(len(connected_info_list)), "INFO") + self.log("Final aggregated connected device info: {0}".format(result), "DEBUG") + + return result + + def get_device_health_info(self, filtered_fabric_devices): + """ + Retrieve comprehensive health metrics and performance data for specified fabric devices from Catalyst Center. + + This method queries the Catalyst Center device health API to collect detailed health information + including CPU utilization, memory usage, device scores, and overall health status for each + provided fabric device. It implements pagination to handle large device inventories and filters + results to match only the specified fabric devices for targeted health monitoring. + + Args: + filtered_fabric_devices (dict): Mapping of device management IP addresses to their fabric IDs. + Contains only devices that have been confirmed as members of the specified fabric site. + Each IP address represents a managed device that exists in both the network inventory + and the fabric site configuration. + + Description: + - Makes an API call to fetch all network device health data. + - Filters the returned data to match the list of input fabric device IPs. + - If health data is found, it's included in the results. + - If not, adds a fallback message indicating no health info found for that device. + + Returns: + list: Structured device health information results in standardized format: + [ + { + "device_health_info": [ + { + "device_ip": "192.168.1.1", + "health_details": {health_metrics_object} or {} + }, + { + "device_ip": "192.168.1.2", + "health_details": {health_metrics_object} or {} + } + ] + } + ] + """ + self.log("Retrieving comprehensive health metrics and performance data for fabric device monitoring", "INFO") + self.log("Processing health information for {0} fabric devices from enterprise device inventory".format(len(filtered_fabric_devices)), "DEBUG") + + health_info_list = [] + processed_device_ips = set() + health_data_list = [] + + self.log("Implementing pagination to retrieve comprehensive device health inventory with 500 device limit per request", "DEBUG") + try: + limit = 500 + offset = 1 + total_pages_processed = 0 + + while True: + total_pages_processed += 1 + self.log("Retrieving device health data page {0} with offset: {1}, limit: {2}".format(total_pages_processed, offset, limit), "DEBUG") + response = self.dnac._exec( + family="devices", + function="devices", + params={'offset': offset, 'limit': limit} + ) + self.log("Received API response from 'devices' for device: {0}".format(response), "DEBUG") + + page_data = response.get("response", []) + health_data_list.extend(page_data) + + if len(page_data) < limit: + break + + offset += limit + self.log("Successfully retrieved health data for {0} total devices from Catalyst Center".format(len(health_data_list)), "INFO") + + devices_with_health_data = 0 + devices_without_health_data = 0 + + if health_data_list: + self.log("Filtering device health data to match {0} specified fabric devices".format(len(filtered_fabric_devices)), "DEBUG") + for device_data in health_data_list: + device_ip = device_data.get("ipAddress") + if device_ip in filtered_fabric_devices.keys() and device_ip not in 
processed_device_ips:
+                        devices_with_health_data += 1
+                        processed_device_ips.add(device_ip)
+                        self.log("Health metrics found for fabric device {0}".format(device_ip), "DEBUG")
+                        health_info_list.append({
+                            "device_ip": device_ip,
+                            "health_details": device_data
+                        })
+                for fabric_device_ip in filtered_fabric_devices.keys():
+                    if fabric_device_ip not in processed_device_ips:
+                        devices_without_health_data += 1
+                        health_info_list.append({
+                            "device_ip": fabric_device_ip,
+                            "health_details": {}
+                        })
+                        self.log("No health information found for fabric device {0}".format(fabric_device_ip), "WARNING")
+            else:
+                self.log("No device health data retrieved from Catalyst Center - all fabric devices will have empty health details", "WARNING")
+                for fabric_device_ip in filtered_fabric_devices:
+                    devices_without_health_data += 1
+                    health_info_list.append({
+                        "device_ip": fabric_device_ip,
+                        "health_details": {}
+                    })
+
+        except Exception as api_err:
+            self.msg = "Critical failure during device health information retrieval: {0}".format(str(api_err))
+            self.log(self.msg, "ERROR")
+            devices_with_health_data = len(processed_device_ips)
+            devices_without_health_data = 0
+            for fabric_device_ip in filtered_fabric_devices:
+                if fabric_device_ip not in processed_device_ips:
+                    devices_without_health_data += 1
+                    health_info_list.append({
+                        "device_ip": fabric_device_ip,
+                        "health_details": "Error: {0}".format(str(api_err))
+                    })
+
+        result = [{"device_health_info": health_info_list}]
+
+        self.log("Completed health info retrieval. Total devices processed: {0}".format(len(health_info_list)), "INFO")
+
+        total_fabric_devices = len(filtered_fabric_devices)
+        self.log(
+            "Device health information retrieval completed - processed {0}/{1} "
+            "fabric devices successfully".format(
+                len(health_info_list), total_fabric_devices
+            ),
+            "INFO"
+        )
+        if devices_with_health_data > 0:
+            self.log("Fabric devices with health metrics available: {0}".format(devices_with_health_data), "INFO")
+
+        if devices_without_health_data > 0:
+            self.log("Fabric devices without health data: {0}".format(devices_without_health_data), "WARNING")
+
+        self.log("Aggregated device-health info: {0}".format(result), "DEBUG")
+
+        return result
+
+    def get_dev_type(self, ip_address):
+        """
+        Determine device infrastructure type classification for network device analysis and management.
+
+        This method queries the Catalyst Center device inventory to classify a network device
+        as either wired or wireless infrastructure based on its device family attributes.
+        The classification is essential for applying appropriate configuration templates,
+        monitoring policies, and management workflows specific to device infrastructure types.
+
+        Args:
+            ip_address (str): Management IP address of the network device requiring type classification.
+                Format: "192.168.1.1"
+                Must be a valid, reachable device IP address in Catalyst Center inventory.
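+
+        Example (illustrative; the IP below is a placeholder):
+            device_type = self.get_dev_type("192.168.1.1")
+            # Expected values: "wired" for switches/routers, "wireless" for
+            # wireless controllers, or None if the family cannot be classified.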
+ + Returns: + str or None: Device infrastructure type classification: + - 'wired': Traditional network infrastructure (switches, routers) + - 'wireless': Wireless infrastructure (controllers, access points) + - None: Device type cannot be determined, device not found, or API failure + """ + self.log("Determining device infrastructure type classification for network device management", "INFO") + self.log("Processing device type determination for IP address: {0}".format(ip_address), "DEBUG") + + try: + dev_response = self.dnac_apply["exec"]( + family="devices", + function="get_network_device_by_ip", + params={"ip_address": ip_address}, + ) + + self.log( + "Received API response from 'get_network_device_by_ip' API for IP {0} is {1}".format( + ip_address, str(dev_response) + ), + "DEBUG", + ) + + dev_dict = dev_response.get("response", {}) + if not dev_dict: + self.log( + "Invalid response received from the API 'get_network_device_by_ip'. 'response' is empty or missing.", + "WARNING", + ) + return None + + device_family = dev_dict.get("family") + self.log("Device family identified as '{0}' for infrastructure type classification".format(device_family), "DEBUG") + + if not device_family: + self.log("Device family is missing in the response.", "WARNING") + return None + + if device_family == "Wireless Controller": + device_type = "wireless" + elif device_family in ["Switches and Hubs", "Routers"]: + device_type = "wired" + else: + device_type = None + + self.log("The device type is {0}".format(device_type), "INFO") + + return device_type + + except Exception as e: + self.msg = "The Device - {0} not present in the Cisco Catalyst Center.".format( + ip_address + ) + self.set_operation_result("failed", False, self.msg, "ERROR") + + return None + + def get_ssid_details(self, filtered_fabric_devices): + """ + Fetch SSID details for fabric-enabled wireless devices from Cisco Catalyst Center. + + For each fabric device identified by IP, this method retrieves SSID (wireless network) configuration + information if the device is a wireless WLC. It uses the provided IP-to-UUID mapping to query Catalyst Center. + + Each entry in the returned list contains the device's IP and its SSID details, if available. + + The retrieved SSID details include key fields such as: + - ssid_name (e.g., "Corporate_WiFi") + - wlan_profile_name + - security_type + - interface_mappings + - authentication method + - VLAN, etc. (depends on the API structure) + + Args: + filtered_fabric_devices (dict): Mapping of device management IP addresses to their fabric IDs. + Contains only devices that have been confirmed as members of the specified fabric site. + Each IP address represents a managed device that exists in both the network inventory + and the fabric site configuration. + + Returns: + list: A list with a single dictionary: + [ + { + "ssid_info": [ + { + "device_ip": , + "ssid_details": + }, + ] + } + ] + + Note: + SSID details are only applicable to wireless controllers. Non-wireless devices + are processed but marked as not applicable for SSID configuration retrieval. 
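+
+        Example (illustrative; the IP and fabric ID are placeholders):
+            ssid_result = self.get_ssid_details({"192.168.1.20": "fabric-site-uuid-1"})
+            # Wireless controllers return their SSID records under "ssid_details";
+            # non-wireless devices are reported with an explanatory note instead.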
+ """ + self.log("Retrieving wireless SSID configuration details for fabric wireless infrastructure management", "INFO") + self.log("Processing SSID configuration for {0} fabric devices requiring wireless network analysis".format(len(filtered_fabric_devices)), "DEBUG") + + all_ssid_info_list = [] + devices_processed = 0 + wireless_devices_found = 0 + non_wireless_devices_found = 0 + devices_with_ssid_data = 0 + devices_without_ssid_data = 0 + devices_with_errors = 0 + + for index, (ip, fabric_id) in enumerate(filtered_fabric_devices.items()): + ip_device_uuid_map = self.get_device_ids_from_device_ips([ip]) + device_id = ip_device_uuid_map[ip] + self.log("Processing SSID configuration for device {0}/{1}: IP: {2}".format(index + 1, len(filtered_fabric_devices), ip), "DEBUG") + devices_processed += 1 + self.log("Processing SSID configuration analysis for fabric device {0}".format(ip), "DEBUG") + device_type = self.get_dev_type(ip) + self.log("Device {0} is identified as '{1}'".format(ip, device_type), "DEBUG") + + if device_type != "wireless": + non_wireless_devices_found += 1 + self.log( + "Skipping SSID retrieval for device {0} - " + "device type '{1}' does not support SSID configuration".format( + ip, + device_type + ), + "DEBUG" + ) + all_ssid_info_list.append({ + "device_ip": ip, + "ssid_details": "The device is not wireless; therefore, SSID information retrieval is not applicable." + }) + continue + wireless_devices_found += 1 + self.log("Retrieving SSID configuration for wireless controller {0}".format(ip), "DEBUG") + + try: + response = self.dnac._exec( + family="wireless", + function="get_ssid_details_for_specific_wireless_controller", + params={"network_device_id": device_id} + ) + ssid_data = response.get("response", []) + self.log( + "Received API response from 'get_ssid_details_for_specific_wireless_controller' " + "for device {0}: {1}".format(ip, response), + "DEBUG" + ) + if ssid_data: + devices_with_ssid_data += 1 + all_ssid_info_list.append({ + "device_ip": ip, + "ssid_details": ssid_data + }) + self.log("SSID configuration found for wireless controller {0} - retrieved {1} SSID records".format(ip, len(ssid_data)), "INFO") + else: + devices_without_ssid_data += 1 + all_ssid_info_list.append({ + "device_ip": ip, + "ssid_details": "No SSID info found" + }) + self.log( + "No SSID configuration found for wireless controller {0} - " + "controller may not have configured SSIDs".format( + ip + ), + "DEBUG" + ) + except Exception as api_err: + devices_with_errors += 1 + self.msg = "Failed to retrieve SSID configuration for wireless controller {0}: {1}".format(ip, str(api_err)) + self.log(self.msg, "ERROR") + + all_ssid_info_list.append({ + "device_ip": ip, + "ssid_details": "Error: {0}".format(api_err) + }) + + result = [{"ssid_info": all_ssid_info_list}] + + total_fabric_devices = len(filtered_fabric_devices) + self.log( + "Wireless SSID configuration retrieval completed - " + "processed {0}/{1} fabric devices successfully".format( + devices_processed, + total_fabric_devices + ), + "INFO" + ) + if wireless_devices_found > 0: + self.log("Wireless controllers identified for SSID analysis: {0}".format(wireless_devices_found), "INFO") + + if non_wireless_devices_found > 0: + self.log("Non-wireless devices skipped for SSID analysis: {0}".format(non_wireless_devices_found), "INFO") + + if devices_with_ssid_data > 0: + self.log("Wireless controllers with SSID configurations: {0}".format(devices_with_ssid_data), "INFO") + + if devices_without_ssid_data > 0: + self.log("Wireless controllers 
without SSID configurations: {0}".format(devices_without_ssid_data), "INFO")
+
+        if devices_with_errors > 0:
+            self.log("Warning: {0} devices encountered errors during SSID configuration retrieval".format(devices_with_errors), "WARNING")
+
+        self.log("Completed SSID info retrieval. Total devices processed: {0}".format(len(all_ssid_info_list)), "INFO")
+        self.log("Final aggregated SSID info: {0}".format(result), "DEBUG")
+
+        return result
+
+    def get_provision_status(self, filtered_fabric_devices):
+        """
+        Fetch provisioning status details for fabric-enabled devices from Cisco Catalyst Center.
+
+        For each device identified as a fabric device, this method uses its management IP address
+        to retrieve provisioning status information via Catalyst Center APIs.
+
+        Each entry in the returned list contains the device's IP and its provisioning status, if available.
+
+        The retrieved provisioning status may include key fields such as:
+        - deviceRole (e.g., Border Node, Edge Node)
+        - provisioningState (e.g., provisioned, failed, in-progress)
+        - fabricStatus (e.g., enabled, disabled)
+        - siteHierarchy
+        - fabricDomain
+
+        Args:
+            filtered_fabric_devices (dict): Mapping of device management IP addresses to their fabric IDs.
+                Contains only devices that have been confirmed as members of the specified fabric site.
+                Each IP address represents a managed device that exists in both the network inventory
+                and the fabric site configuration.
+
+        Returns:
+            list: A list with a single dictionary:
+                [
+                    {
+                        "provision_status_info": [
+                            {
+                                "device_ip": <device-ip>,
+                                "provision_status": <provisioning status details>
+                            },
+                        ]
+                    }
+                ]
+        Note:
+            Provisioning status provides insights into fabric device readiness, role assignments,
+            and current state within the SDA fabric infrastructure for operational monitoring.
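+
+        Example (illustrative; the IP is a placeholder):
+            provision_result = self.get_provision_status({"192.168.1.30": "fabric-site-uuid-1"})
+            # provision_result[0]["provision_status_info"][0]["provision_status"] holds the raw
+            # 'get_provisioned_wired_device' response for that device, or {} if none was found.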
+ """ + self.log("Retrieving fabric device provisioning status for lifecycle management and health monitoring", "INFO") + self.log("Processing provisioning status for {0} fabric devices".format(len(filtered_fabric_devices)), "DEBUG") + + all_provision_status_info_list = [] + devices_processed = 0 + devices_with_provisioning_status = 0 + devices_without_provisioning_status = 0 + devices_with_errors = 0 + + for index, (ip, fabric_id) in enumerate(filtered_fabric_devices.items()): + devices_processed += 1 + self.log( + "Processing provision status info for device {0}/{1}: " + "IP: {2})".format(index + 1, len(filtered_fabric_devices), ip), + "DEBUG" + ) + try: + self.log("Fetching provision status for device: {0}".format(ip), "DEBUG") + response = self.dnac._exec( + family="sda", + function="get_provisioned_wired_device", + params={"device_management_ip_address": ip} + ) + provision_data = response + self.log("Received API response from 'get_provisioned_wired_device' for device {0}: {1}".format(ip, response), "DEBUG") + + if provision_data: + devices_with_provisioning_status += 1 + all_provision_status_info_list.append({ + "device_ip": ip, + "provision_status": provision_data + }) + self.log("Provisioning status found for fabric device {0} - device is provisioned in fabric".format(ip), "INFO") + else: + devices_without_provisioning_status += 1 + self.log("No provisioning status found for device IP: {0}".format(ip), "DEBUG") + all_provision_status_info_list.append({ + "device_ip": ip, + "provision_status": {} + }) + self.log("No provisioning status found for fabric device {0} - device may not be provisioned or not found".format(ip), "DEBUG") + + except Exception as api_err: + devices_with_errors += 1 + self.msg = "Failed to retrieve provisioning status for fabric device {0}: {1}".format(ip, str(api_err)) + self.log(self.msg, "ERROR") + all_provision_status_info_list.append({ + "device_ip": ip, + "provision_status": "Error: {0}".format(api_err) + }) + + result = [{"provision_status_info": all_provision_status_info_list}] + + total_fabric_devices = len(filtered_fabric_devices) + self.log( + "Fabric device provisioning status retrieval completed - " + "processed {0}/{1} fabric devices successfully".format( + devices_processed, + total_fabric_devices + ), + "INFO" + ) + if devices_with_provisioning_status > 0: + self.log("Fabric devices with provisioning status indicating successful fabric provisioning: {0}".format(devices_with_provisioning_status), "INFO") + + if devices_without_provisioning_status > 0: + self.log( + "Fabric devices without provisioning status indicating potential " + "provisioning issues: {0}".format( + devices_without_provisioning_status + ), + "INFO" + ) + + if devices_with_errors > 0: + self.log("Warning: {0} devices encountered errors during provisioning status retrieval".format(devices_with_errors), "WARNING") + + self.log("Aggregated provision status info: {0}".format(result), "DEBUG") + return result + + def get_port_details(self, filtered_fabric_devices): + """ + Retrieve SDA port assignment configurations for fabric device onboarding and provisioning analysis. + + This method queries the Catalyst Center SDA API to collect port assignment details for fabric + devices, providing insights into device onboarding status, port configurations, and SDA + provisioning workflows essential for fabric lifecycle management. + + Args: + filtered_fabric_devices (dict): Mapping of device management IP addresses to their fabric IDs. 
+ Contains only devices that have been confirmed as members of the specified fabric site. + + Returns: + list: A list with a single dictionary containing port assignment information: + [ + { + "device_onboarding_info": [ + { + "device_ip": "192.168.1.1", + "port_details": [port_assignment_records] or [] or "Error: " + } + ] + } + ] + + Note: + Port assignment details include interface mappings, VLAN assignments, security group + configurations, and SDA provisioning status for comprehensive onboarding analysis. + """ + self.log("Retrieving fabric device onboarding information for lifecycle management and troubleshooting", "INFO") + self.log("Processing onboarding status for {0} fabric devices across fabric sites".format(len(filtered_fabric_devices)), "DEBUG") + + device_identifier = self.want["fabric_devices"][0].get("device_identifier") + + all_onboarding_info_list = [] + devices_processed = 0 + devices_with_onboarding_data = 0 + devices_without_onboarding_data = 0 + devices_with_errors = 0 + + for index, (ip, fabric_id) in enumerate(filtered_fabric_devices.items()): + ip_device_uuid_map = self.get_device_ids_from_device_ips([ip]) + for ip, device_uuid in ip_device_uuid_map.items(): + devices_processed += 1 + self.log( + "Processing onboarding device detail for device {0}/{1}: " + "IP: {2}".format(index + 1, len(filtered_fabric_devices), ip), + "DEBUG" + ) + try: + params = {"fabric_id": fabric_id} + + if device_identifier or fabric_id: + params["network_device_id"] = device_uuid + self.log( + "Added 'network_device_id' parameter for device {0}: {1}".format(ip, device_uuid), + "DEBUG" + ) + response = self.dnac._exec( + family="sda", + function="get_port_assignments", + params=params + ) + onboarding_data = response.get("response", []) + self.log( + "Received API response from 'get_port_assignments' for device {0}: {1}".format( + ip, response + ), + "DEBUG" + ) + if onboarding_data: + devices_with_onboarding_data += 1 + self.log("Onboarding data found for device IP: {0}".format(ip), "INFO") + all_onboarding_info_list.append({ + "device_ip": ip, + "port_assignment_details": onboarding_data + }) + else: + devices_without_onboarding_data += 1 + self.log("No onboarding data found for device IP: {0}".format(ip), "DEBUG") + all_onboarding_info_list.append({ + "device_ip": ip, + "port_assignment_details": [] + }) + continue + + except Exception as api_err: + devices_with_errors += 1 + self.msg = "Exception occurred while getting port assignment details for device {0}: {1}".format(ip, api_err) + all_onboarding_info_list.append({ + "device_ip": ip, + "port_assignment_details": "Error: {0}".format(api_err) + }) + + result = [{"port_assignment_info": all_onboarding_info_list}] + + total_fabric_devices = len(filtered_fabric_devices) + self.log( + "Fabric device onboarding information retrieval completed - " + "processed {0}/{1} fabric devices successfully".format( + devices_processed, + total_fabric_devices + ), + "INFO" + ) + + if devices_with_onboarding_data > 0: + self.log("Fabric devices with onboarding data indicating successful fabric integration: {0}".format(devices_with_onboarding_data), "INFO") + + if devices_without_onboarding_data > 0: + self.log("Fabric devices without onboarding data indicating potential onboarding issues: {0}".format(devices_without_onboarding_data), "INFO") + + if devices_with_errors > 0: + self.log("Warning: {0} devices encountered errors during onboarding information retrieval".format(devices_with_errors), "WARNING") + + self.log("Completed onboarding info retrieval. 
Total devices processed: {0}".format(len(all_onboarding_info_list)), "INFO") + self.log("Aggregated device-onboarding info: {0}".format(result), "DEBUG") + + return result + + def get_port_channels(self, filtered_fabric_devices): + """ + Retrieve SDA port channel configurations for fabric device interface aggregation and redundancy analysis. + + This method queries the Catalyst Center SDA API to collect port channel details for fabric + devices, providing insights into interface aggregation configurations, VLAN assignments, and + connected device information essential for fabric network redundancy and bandwidth management. + + Args: + filtered_fabric_devices (dict): Mapping of device management IP addresses to their fabric IDs. + Contains only devices that have been confirmed as members of the specified fabric site. + + Returns: + list: A list with a single dictionary containing port channel information: + [ + { + "device_onboarding_info": [ + { + "device_ip": "192.168.1.1", + "port_channel_details": [port_channel_records] or [] or "Error: " + } + ] + } + ] + + """ + self.log("Retrieving fabric device onboarding information for lifecycle management and troubleshooting", "INFO") + self.log("Processing port channel details for {0} fabric devices across fabric sites".format(len(filtered_fabric_devices)), "DEBUG") + + device_identifier = self.want["fabric_devices"][0].get("device_identifier") + + self.log( + "Port channel retrieval configuration - device_identifier specified: {0}".format( + bool(device_identifier) + ), + "DEBUG" + ) + + all_port_channel_info_list = [] + + statistics = { + 'devices_processed': 0, + 'devices_with_port_channels': 0, + 'devices_without_port_channels': 0, + 'devices_with_errors': 0, + 'total_port_channels_retrieved': 0, + 'total_api_calls': 0 + } + + self.log( + "Beginning port channel data collection across {0} fabric devices".format( + len(filtered_fabric_devices) + ), + "INFO" + ) + + for index, (ip, fabric_id) in enumerate(filtered_fabric_devices.items()): + self.log( + "Processing outer loop for device {0}/{1} - " + "IP: {2}, Fabric ID: {3}".format( + index + 1, len(filtered_fabric_devices), ip, fabric_id + ), + "DEBUG" + ) + ip_device_uuid_map = self.get_device_ids_from_device_ips([ip]) + + if not ip_device_uuid_map or ip not in ip_device_uuid_map: + self.log( + "Failed to retrieve device UUID for IP {0} - skipping port channel retrieval".format( + ip + ), + "WARNING" + ) + statistics['devices_with_errors'] += 1 + all_port_channel_info_list.append({ + "device_ip": ip, + "port_channel_details": "Error: Unable to retrieve device UUID" + }) + continue + + for ip, device_uuid in ip_device_uuid_map.items(): + statistics['devices_processed'] += 1 + + self.log( + "Processing inner loop for device {0} - UUID: {1}".format( + ip, device_uuid + ), + "DEBUG" + ) + + self.log( + "Initiating port channel data retrieval for fabric device {0}".format(ip), + "DEBUG" + ) + try: + params = {"fabric_id": fabric_id} + + if device_identifier or fabric_id: + params["network_device_id"] = device_uuid + self.log( + "Added 'network_device_id' parameter for device {0}: {1}".format(ip, device_uuid), + "DEBUG" + ) + statistics['total_api_calls'] += 1 + response = self.dnac._exec( + family="sda", + function="get_port_channels", + params=params + ) + + if not response or not isinstance(response, dict): + self.log( + "Invalid API response structure for device {0} - " + "expected dict, got: {1}".format( + ip, type(response).__name__ + ), + "WARNING" + ) + statistics['devices_with_errors'] += 
1 + all_port_channel_info_list.append({ + "device_ip": ip, + "port_channel_details": "Error: Invalid API response structure" + }) + continue + + port_channel_data = response.get("response", []) + self.log( + "Received API response from 'get_port_channels' for device {0}: {1}".format( + ip, response + ), + "DEBUG" + ) + + if not isinstance(port_channel_data, list): + self.log( + "Unexpected response data type for device {0} - " + "expected list, got: {1}".format( + ip, type(port_channel_data).__name__ + ), + "WARNING" + ) + statistics['devices_with_errors'] += 1 + all_port_channel_info_list.append({ + "device_ip": ip, + "port_channel_details": "Error: Unexpected response data format" + }) + continue + self.log( + "Received API response from 'get_port_channels' for device {0}: {1}".format( + ip, response + ), + "DEBUG" + ) + if port_channel_data: + statistics['devices_with_port_channels'] += 1 + statistics['total_port_channels_retrieved'] += len(port_channel_data) + + self.log( + "Port channel configuration found for fabric device {0} - " + "retrieved {1} port channel records".format( + ip, len(port_channel_data) + ), + "INFO" + ) + + all_port_channel_info_list.append({ + "device_ip": ip, + "port_channel_details": port_channel_data + }) + + else: + statistics['devices_without_port_channels'] += 1 + + self.log( + "No port channel configuration found for fabric device {0} - " + "device may not have configured port channels".format(ip), + "DEBUG" + ) + + all_port_channel_info_list.append({ + "device_ip": ip, + "port_channel_details": [] + }) + continue + + except Exception as api_err: + devices_with_errors += 1 + self.msg = "Exception occurred while getting port assignment details for device {0}: {1}".format(ip, api_err) + all_port_channel_info_list.append({ + "device_ip": ip, + "port_channel_details": "Error: {0}".format(api_err) + }) + continue + + result = [{"port_channel_info": all_port_channel_info_list}] + + self.log( + "Port channel configuration retrieval completed - " + "devices processed: {0}, with port channels: {1}, " + "without port channels: {2}, with errors: {3}".format( + statistics['devices_processed'], + statistics['devices_with_port_channels'], + statistics['devices_without_port_channels'], + statistics['devices_with_errors'] + ), + "INFO" + ) + + self.log( + "Port channel retrieval statistics - " + "total API calls: {0}, total port channels retrieved: {1}".format( + statistics['total_api_calls'], + statistics['total_port_channels_retrieved'] + ), + "INFO" + ) + + if statistics['devices_with_port_channels'] > 0: + self.log( + "Fabric devices with port channel configurations indicating " + "successful interface aggregation: {0}".format( + statistics['devices_with_port_channels'] + ), + "INFO" + ) + + if statistics['devices_without_port_channels'] > 0: + self.log( + "Fabric devices without port channel configurations: {0}".format( + statistics['devices_without_port_channels'] + ), + "INFO" + ) + + if statistics['devices_with_errors'] > 0: + self.log( + "Warning: {0} devices encountered errors during port channel " + "configuration retrieval - check individual device logs for details".format( + statistics['devices_with_errors'] + ), + "WARNING" + ) + + successful_devices = [ + entry["device_ip"] for entry in all_port_channel_info_list + if isinstance(entry["port_channel_details"], list) and entry["port_channel_details"] + ] + + if successful_devices: + self.log( + "Successfully retrieved port channel configurations for devices: {0}".format( + successful_devices + ), + "DEBUG" + 
) + + self.log( + "Port channel configuration retrieval operation completed for {0} " + "fabric devices with {1} total device entries processed".format( + len(filtered_fabric_devices), len(all_port_channel_info_list) + ), + "INFO" + ) + + self.log( + "Final aggregated port channel information result: {0}".format(result), + "DEBUG" + ) + + return result + + def write_device_info_to_file(self, filtered_config): + """ + Write collected fabric device information to a specified file with comprehensive format support and error handling. + + This method provides robust file output capabilities for fabric device data with support for multiple + formats (JSON/YAML), file modes (overwrite/append), automatic directory creation, timestamp insertion, + and comprehensive error handling with detailed logging for operational traceability. + + Parameters: + export_configuration (dict): Configuration dictionary containing file output specifications. + Required structure: + { + "output_output_file_info": { + "file_path": str, # Absolute path without extension (required) + "file_format": str, # "json" or "yaml" (default: "yaml") + "file_mode": str, # "w" (overwrite) or "a" (append) (default: "w") + "timestamp": bool # Include download timestamp (default: False) + }, + "data": dict # Optional: specific data to write (uses self.total_response if not provided) + } + + Returns: + self: The current instance with updated internal state reflecting the file operation results. + + Raises: + Exception: Critical errors during file operations, directory creation, or data serialization + are logged but do not raise exceptions to maintain operational continuity. + """ + self.log("Starting Device Information File Export Operation", "INFO") + + output_file_info = filtered_config.get("output_file_info", {}) + self.log("File info received: {0}".format(output_file_info), "DEBUG") + + target_file_path = output_file_info.get("file_path") + output_file_format = output_file_info.get("file_format", "yaml").lower().strip() + file_write_mode = output_file_info.get("file_mode", "w").lower().strip() + include_timestamp_flag = output_file_info.get("timestamp", False) + + if not target_file_path: + self.log("No file_path specified in output_file_info", "ERROR") + return self + + full_path_with_ext = "{0}.{1}".format(target_file_path, output_file_format) + + try: + os.makedirs(os.path.dirname(full_path_with_ext), exist_ok=True) + except Exception as e: + self.log("Error creating directories for path: {0} — {1}".format(full_path_with_ext, e), "ERROR") + return self + + try: + if isinstance(self.total_response, list): + new_data = self.total_response[:] + else: + new_data = [self.total_response] + + if include_timestamp_flag: + timestamp_entry = {"Downloaded_at": datetime.now().strftime("%Y-%m-%d %H:%M:%S")} + new_data_with_timestamp = [timestamp_entry] + new_data + else: + new_data_with_timestamp = new_data + + if file_write_mode == "a" and os.path.exists(full_path_with_ext): + try: + with open(full_path_with_ext, "r") as f: + if output_file_format == "json": + existing_data = json.load(f) + else: + existing_data = yaml.safe_load(f) + + if existing_data is None: + existing_data = [] + elif not isinstance(existing_data, list): + existing_data = [existing_data] + + except Exception: + self.log("Failed to read existing file.", "WARNING") + existing_data = [] + + data_to_write = existing_data + new_data_with_timestamp + + else: + data_to_write = new_data_with_timestamp + + with open(full_path_with_ext, "w") as f: + if output_file_format == 
"json": + json.dump(data_to_write, f, indent=2) + else: + yaml.dump(data_to_write, f, default_flow_style=False) + + self.log("Successfully wrote device info to file: {0}".format(full_path_with_ext), "INFO") + + except Exception as e: + self.log("Failed to write device info to file {0}: {1}".format(full_path_with_ext, e), "ERROR") + + return self + + +def main(): + """ main entry point for module execution + """ + element_spec = {'dnac_host': {'required': True, 'type': 'str'}, + 'dnac_port': {'type': 'str', 'default': '443'}, + 'dnac_username': {'type': 'str', 'default': 'admin', 'aliases': ['user']}, + 'dnac_password': {'type': 'str', 'no_log': True}, + 'dnac_verify': {'type': 'bool', 'default': True}, + 'dnac_version': {'type': 'str', 'default': '2.2.3.3'}, + 'dnac_debug': {'type': 'bool', 'default': False}, + 'dnac_log_level': {'type': 'str', 'default': 'WARNING'}, + "dnac_log_file_path": {"type": 'str', "default": 'dnac.log'}, + "dnac_log_append": {"type": 'bool', "default": True}, + 'dnac_log': {'type': 'bool', 'default': False}, + 'validate_response_schema': {'type': 'bool', 'default': True}, + 'config_verify': {'type': 'bool', "default": False}, + 'dnac_api_task_timeout': {'type': 'int', "default": 1200}, + 'dnac_task_poll_interval': {'type': 'int', "default": 2}, + 'config': {'required': True, 'type': 'list', 'elements': 'dict'}, + 'state': {'default': 'gathered', 'choices': ['gathered']} + } + + module = AnsibleModule(argument_spec=element_spec, supports_check_mode=True) + ccc_fabric_device_info = FabricDevicesInfo(module) + state = ccc_fabric_device_info.params.get("state") + + current_version = ccc_fabric_device_info.get_ccc_version() + min_supported_version = "2.3.7.9" + + if ccc_fabric_device_info.compare_dnac_versions(current_version, min_supported_version) < 0: + ccc_fabric_device_info.status = "failed" + ccc_fabric_device_info.msg = ( + "The specified version '{0}' does not support the 'fabric device info workflow' feature. 
" + "Supported version(s) start from '{1}' onwards.".format(current_version, min_supported_version) + ) + ccc_fabric_device_info.log(ccc_fabric_device_info.msg, "ERROR") + ccc_fabric_device_info.check_return_status() + + if state not in ccc_fabric_device_info.supported_states: + ccc_fabric_device_info.status = "invalid" + ccc_fabric_device_info.msg = "State {0} is invalid".format(state) + ccc_fabric_device_info.check_return_status() + + ccc_fabric_device_info.validate_input().check_return_status() + + for config in ccc_fabric_device_info.validated_config: + ccc_fabric_device_info.reset_values() + ccc_fabric_device_info.get_want(config) + ccc_fabric_device_info.get_diff_state_apply[state](config) + + module.exit_json(**ccc_fabric_device_info.result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/ise_radius_integration_workflow_manager.py b/plugins/modules/ise_radius_integration_workflow_manager.py index 512c08109a..bbf2915d09 100644 --- a/plugins/modules/ise_radius_integration_workflow_manager.py +++ b/plugins/modules/ise_radius_integration_workflow_manager.py @@ -6,7 +6,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -__author__ = ["Muthu Rakesh, Madhan Sankaranarayanan"] +__author__ = ["Muthu Rakesh, Madhan Sankaranarayanan, Archit Soni"] DOCUMENTATION = r""" --- module: ise_radius_integration_workflow_manager @@ -24,7 +24,7 @@ extends_documentation_fragment: - cisco.dnac.workflow_manager_params author: Muthu Rakesh (@MUTHU-RAKESH-27) Madhan Sankaranarayanan - (@madhansansel) + (@madhansansel) Archit Soni (@koderchit) options: config_verify: description: Set to True to verify the Cisco Catalyst @@ -1390,16 +1390,21 @@ def get_want(self, config): self.status = "success" return self - def check_ise_server_integration_status(self, ip_address): + def wait_for_ise_integration_status(self, ip_address): """ - Check whether the Cisco ISE server is ready for the accepting the user authentication certificate. + Wait for the Cisco ISE server to complete its integration with Cisco Catalyst Center. + + This method continuously polls the Cisco ISE server integration status until it reaches + a terminal state (`WAITING_USER_INPUT` or `COMPLETE`). If the process exceeds the + allowed timeout period (30 seconds) or an exception occurs during status retrieval, + the method logs the failure and terminates execution using `fail_and_exit()`. Parameters: - ip_address (str) - The IP address of the Cisco ISE server. - self - The current object with updated desired Authentication Policy Server information. + ip_address (str): The IP address of the Cisco ISE server being integrated. Returns: - self - The current object with updated desired Authentication Policy Server information. + str: The final integration status of the Cisco ISE server (WAITING_USER_INPUT or COMPLETE). + The function does not return if it fails — it calls `fail_and_exit()` to fail and exit the module. """ start_time = time.time() @@ -1410,32 +1415,34 @@ def check_ise_server_integration_status(self, ip_address): function="cisco_ise_server_integration_status", op_modifies=True, ) + overall_status = cisco_ise_status.get("overallStatus") except Exception as msg: - self.msg = "Exception occurred while checking the status of the Cisco ISE server with IP address '{ip}'.".format( - ip=ip_address + self.msg = ( + f"Exception occurred while checking the status of the Cisco ISE server " + f"with IP address '{ip_address}'. 
Error: {msg}" ) + self.fail_and_exit(self.msg) + + self.log(f"Current ISE server status: '{overall_status}'", "WARNING") - overall_status = cisco_ise_status.get("overallStatus") - statuses = ["WAITING_USER_INPUT", "COMPLETE"] - if overall_status in statuses: + if overall_status in ["WAITING_USER_INPUT", "COMPLETE"]: self.log( - "The status of the Cisco ISE server is '{status}'".format( - status=overall_status - ) + f"The Cisco ISE server status is '{overall_status}'. Breaking the loop.", + "INFO", ) break - if (time.time() - start_time) >= 10: + if (time.time() - start_time) >= 30: self.msg = ( - "The Cisco Catalyst Center took more than 10 seconds to accept " - "the PxGrid certificate of the Cisco ISE server with ." + f"The Cisco Catalyst Center took more than 10 seconds to accept " + f"the PxGrid certificate of the Cisco ISE server with IP '{ip_address}'." ) - self.status = "failed" - break + self.fail_and_exit(self.msg) time.sleep(1) - return self + self.log(f"Final ISE server status: '{overall_status}'", "INFO") + return overall_status def accept_cisco_ise_server_certificate(self, ipAddress, trusted_server): """ @@ -1470,6 +1477,11 @@ def accept_cisco_ise_server_certificate(self, ipAddress, trusted_server): self.status = "failed" return self + self.log( + "Calling 'accept_cisco_ise_server_certificate_for_cisco_ise_server_integration' API with payload - " + f"id: {cisco_ise_id}, isCertAcceptedByUser: {trusted_server}", + "INFO", + ) response = self.dnac._exec( family="system_settings", function="accept_cisco_ise_server_certificate_for_cisco_ise_server_integration", @@ -1703,7 +1715,7 @@ def update_auth_policy_server(self, authentication_policy_server): ] self.log( "Desired State for Authentication and Policy Server for the IP '{0}' (want): {1}".format( - ip_address, auth_server_params + ip_address, self.pprint(auth_server_params) ), "DEBUG", ) @@ -1738,8 +1750,31 @@ def update_auth_policy_server(self, authentication_policy_server): if is_ise_server: trusted_server = self.want.get("trusted_server") - self.check_ise_server_integration_status(ip_address) - self.accept_cisco_ise_server_certificate(ip_address, trusted_server) + ise_radius_integration_status = ( + self.wait_for_ise_integration_status(ip_address) + ) + if ise_radius_integration_status == "WAITING_USER_INPUT": + self.log( + "Cisco ISE server is waiting for user input to accept the certificate.", + "INFO", + ) + self.log( + f"Calling API to accept Cisco ISE server certificate for IP '{ip_address}' with trusted_server={trusted_server}", + "INFO", + ) + self.accept_cisco_ise_server_certificate( + ip_address, trusted_server + ) + + elif ise_radius_integration_status == "COMPLETE": + self.log( + "Cisco ISE server integration is already complete. No user certificate acceptance required.", + "INFO", + ) + else: + self.msg = f"Unexpected Cisco ISE server integration status '{ise_radius_integration_status}' for IP '{ip_address}'." + self.fail_and_exit(self.msg) + ise_integration_wait_time = self.want.get( "ise_integration_wait_time" ) @@ -1920,11 +1955,38 @@ def update_auth_policy_server(self, authentication_policy_server): trusted_server_msg = "" if is_ise_server_enabled: + self.log( + "Cisco ISE server is enabled. 
Checking if it certificate acceptance processing.", + "DEBUG", + ) trusted_server = self.want.get("trusted_server") state = have_auth_server_details.get("state") if state != "ACTIVE": - self.check_ise_server_integration_status(ip_address) - self.accept_cisco_ise_server_certificate(ip_address, trusted_server) + ise_radius_integration_status = ( + self.wait_for_ise_integration_status(ip_address) + ) + if ise_radius_integration_status == "WAITING_USER_INPUT": + self.log( + "Cisco ISE server is waiting for user input to accept the certificate.", + "INFO", + ) + self.log( + f"Calling API to accept Cisco ISE server certificate for IP '{ip_address}' with trusted_server={trusted_server}", + "INFO", + ) + self.accept_cisco_ise_server_certificate( + ip_address, trusted_server + ) + + elif ise_radius_integration_status == "COMPLETE": + self.log( + "Cisco ISE server integration is already complete. No user certificate acceptance required.", + "INFO", + ) + else: + self.msg = f"Unexpected Cisco ISE server integration status '{ise_radius_integration_status}' for IP '{ip_address}'." + self.fail_and_exit(self.msg) + ise_integration_wait_time = self.want.get( "ise_integration_wait_time" ) diff --git a/plugins/modules/lan_automation_workflow_manager.py b/plugins/modules/lan_automation_workflow_manager.py index 8e69eeec00..7619a6c736 100644 --- a/plugins/modules/lan_automation_workflow_manager.py +++ b/plugins/modules/lan_automation_workflow_manager.py @@ -5,7 +5,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -__author__ = "Luna Aliaj, Madhan Sankaranarayanan" +__author__ = "Luna Aliaj, Madhan Sankaranarayanan, Archit Soni" DOCUMENTATION = r""" --- module: lan_automation_workflow_manager @@ -31,6 +31,7 @@ author: - Luna Aliaj (@majlona) - Madhan Sankaranarayanan (@madhansansel) + - Archit Soni (@koderchit) options: dnac_api_task_timeout: description: The maximum time to wait for a task @@ -307,6 +308,101 @@ device. type: str required: true + port_channel: + description: > + Configuration to create, update, or delete Port Channels between two LAN + Automated devices in Cisco Catalyst Center. Port Channels aggregate + multiple physical links between devices to provide increased bandwidth + and redundancy. + type: list + elements: dict + suboptions: + source_device_management_ip_address: + description: > + Management IP address of the source device. At least one device + identifier (IP address, MAC address, or serial number) must be + provided for the source device. The device must be LAN Automated + and in Reachable and Managed state in Cisco Catalyst Center inventory. + type: str + required: false + source_device_mac_address: + description: > + MAC address of the source device. Alternative to management IP + address or serial number for device identification. The device + must be LAN Automated and in Reachable and Managed state in + Cisco Catalyst Center inventory. + type: str + required: false + source_device_serial_number: + description: > + Serial number of the source device. Alternative to management IP + address or MAC address for device identification. The device must + be LAN Automated and in Reachable and Managed state in Cisco + Catalyst Center inventory. + type: str + required: false + destination_device_management_ip_address: + description: > + Management IP address of the destination device. At least one device + identifier (IP address, MAC address, or serial number) must be + provided for the destination device. 
The device must be LAN Automated + and in Reachable and Managed state in Cisco Catalyst Center inventory. + type: str + required: false + destination_device_mac_address: + description: > + MAC address of the destination device. Alternative to management IP + address or serial number for device identification. The device must + be LAN Automated and in Reachable and Managed state in Cisco + Catalyst Center inventory. + type: str + required: false + destination_device_serial_number: + description: > + Serial number of the destination device. Alternative to management + IP address or MAC address for device identification. The device must + be LAN Automated and in Reachable and Managed state in Cisco + Catalyst Center inventory. + type: str + required: false + port_channel_number: + description: > + - This value is system-assigned during creation and user provided value will be ignored. + Catalyst Center will automatically provide a unique number upon creation. + - Can be used for update operations to target a specific existing + Port Channel for modification (adding/removing interfaces). + - Can be used for delete operations to identify the specific + Port Channel to remove from the device pair. + - When used for update/delete operations, eliminates the need to + specify existing interface links for Port Channel identification. + type: int + required: false + links: + description: > + - List of physical interface links to include in the Port Channel. + - Required for create operations - at least one link must be specified. + - Optional for update operations - adds/removes links from existing + Port Channel. + - All links must be between the same source and destination devices. + - Interface names must match exact device interface nomenclature. + type: list + elements: dict + suboptions: + source_port: + description: > + Interface name on the source device (e.g., 'GigabitEthernet1/0/1', + 'TenGigabitEthernet1/0/1'). Must be a valid, available interface + on the source device that is not already part of another Port Channel. + type: str + required: true + destination_port: + description: + Interface name on the destination device (e.g., 'GigabitEthernet1/0/1', + 'TenGigabitEthernet1/0/1'). Must be a valid, available interface + on the destination device that is not already part of another + Port Channel. + type: str + required: true requirements: - dnacentersdk >= 2.9.2 - python >= 3.9 @@ -337,17 +433,69 @@ the playbook will keep running until the state of the device is active or reached the timeout value. + - Port Channel operations require both source and destination devices + to be LAN Automated devices in Reachable and Managed state within + Cisco Catalyst Center inventory. + - For the source device, at least one of identifier must be provided from management + IP address, MAC address, or serial number. + - For the destination device, at least one of identifier must be provided from management + IP address, MAC address, or serial number when performing + create or update operations, and is recommended when targeting a + specific Port Channel for deletion. + - When multiple device identifiers are provided for the same device, + precedence order is serial_number > management_ip_address > mac_address. + - Port Channel link constraints - each Port Channel must maintain + between 2 and 8 physical links. Operations creating Port Channels + with fewer than 2 or more than 8 links will fail validation. 
+ - Port Channel identification for updates can use either existing + link specifications or port_channel_number parameter. When both + are provided, port_channel_number takes precedence for identification. + - Link isolation requirement - physical links cannot be shared between + multiple Port Channels. Each link belongs exclusively to one + Port Channel configuration. + - Source and destination terminology is used for configuration + consistency only. Both devices function as equal peers in the + resulting Port Channel aggregation. + - Port Channel deletion behavior varies based on provided parameters. + When deleting Port Channels without specifying individual links, + if port_channel_number is provided, only that specific Port Channel + will be deleted. If both endpoints are provided without + port_channel_number, all Port Channels between those devices will + be deleted. If only source endpoint is provided, all Port Channels + from that source device will be deleted. + - Port Channel deletion behavior when deleting individual links - + removing links that would result in fewer than 2 remaining links + will automatically delete the entire Port Channel. Operations that + would leave exactly 1 link will fail validation as Port Channels + require minimum 2 links for proper operation. + - Port Channel operations integrate with existing LAN Automation + device lifecycle management and appear in standard Catalyst Center + interface topology views. + - When updating Port Channels, at least one existing link must be + provided to identify the Port Channel between the same endpoints + unless port_channel_number is specified for direct identification. + - Links from different Port Channels cannot be mixed during update + operations. Each physical link can belong to only one Port Channel + at any given time. 
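An illustrative helper, not the module's implementation, showing how the rules documented above can be expressed in code: the serial_number > management_ip_address > mac_address precedence and the 2-8 link constraint. The function names are chosen for readability only; the option keys match the playbook suboptions documented above.

    def resolve_device_identifier(entry, side="source"):
        # Documented precedence when several identifiers are given for one device:
        # serial number first, then management IP address, then MAC address.
        for suffix in ("serial_number", "management_ip_address", "mac_address"):
            value = entry.get("{0}_device_{1}".format(side, suffix))
            if value:
                return suffix, value
        return None, None

    def validate_port_channel_links(links):
        # Documented constraint: every Port Channel keeps between 2 and 8 physical links.
        if not 2 <= len(links) <= 8:
            raise ValueError(
                "Port Channels require between 2 and 8 links, got {0}".format(len(links))
            )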
+ - SDK Method used are - ccc_lan_automation.lanautomation.lan_automation_start_v2 - ccc_lan_automation.lanautomation.lan_automation_stop - ccc_lan_automation.lanautomation.lan_automation_device_update - ccc_lan_automation.lanautomation.lan_automation_active_sessions - ccc_lan_automation.lanautomation.lan_automation_status - ccc_lan_automation.lanautomation.lan_automation_log - ccc_lan_automation.devices.get_device_list ccc_lan_automation.devices.get_interface_details - ccc_lan_automation.deviceonboardingpnp.authorize_device - ccc_lan_automation.deviceonboardingpnp.get_device_list + lan_automation.LanAutomation.lan_automation_start_v2 + lan_automation.LanAutomation.lan_automation_stop + lan_automation.LanAutomation.lan_automation_device_update + lan_automation.LanAutomation.lan_automation_active_sessions + lan_automation.LanAutomation.lan_automation_status + lan_automation.LanAutomation.lan_automation_log + lan_automation.LanAutomation.get_port_channels + lan_automation.LanAutomation.create_a_new_port_channel_between_devices + lan_automation.LanAutomation.add_a_lan_automated_link_to_a_port_channel + lan_automation.LanAutomation.delete_port_channel + lan_automation.LanAutomation.remove_a_link_from_port_channel + devices.Devices.get_interface_details + devices.Devices.get_device_list + device_onboarding_pnp.DeviceOnboardingPnp.authorize_device + device_onboarding_pnp.DeviceOnboardingPnp.get_device_list """ + EXAMPLES = r""" --- - name: Start a LAN Automation session without waiting @@ -396,6 +544,7 @@ device_management_ip_address: "204.1.1.11" launch_and_wait: false pnp_authorization: false + - name: Start a LAN Automation session with device authorization and waiting for the task to complete cisco.dnac.lan_automation_workflow_manager: @@ -445,6 +594,7 @@ device_serial_number_authorization: - "FJC27172JDW" - "FJC2721261A" + - name: Stop a LAN Automation session cisco.dnac.lan_automation_workflow_manager: dnac_host: "{{dnac_host}}" @@ -462,6 +612,7 @@ discovered_device_site_name_hierarchy: "Global/USA/SAN JOSE" primary_device_management_ip_address: "204.1.1.1" + - name: Update loopback for LAN Automated devices cisco.dnac.lan_automation_workflow_manager: dnac_host: "{{dnac_host}}" @@ -481,6 +632,7 @@ new_loopback0_ip_address: "91.1.2.6" - device_management_ip_address: "204.1.2.163" new_loopback0_ip_address: "91.1.2.5" + - name: Update hostname for LAN Automated devices cisco.dnac.lan_automation_workflow_manager: dnac_host: "{{dnac_host}}" @@ -500,6 +652,7 @@ new_host_name: "SR-LAN-9300-im1" - device_management_ip_address: "91.1.1.6" new_host_name: "Test" + - name: Add link for LAN Automated devices cisco.dnac.lan_automation_workflow_manager: dnac_host: "{{dnac_host}}" @@ -520,6 +673,7 @@ destination_device_management_ip_address: "204.1.1.4" destination_device_interface_name: "HundredGigE1/0/5" ip_pool_name: "underlay_sj" + - name: Delete link between LAN Automated devices cisco.dnac.lan_automation_workflow_manager: dnac_host: "{{dnac_host}}" @@ -539,6 +693,7 @@ source_device_interface_name: "HundredGigE1/0/2" destination_device_management_ip_address: "204.1.1.4" destination_device_interface_name: "HundredGigE1/0/5" + - name: Apply loopback and hostname updates for LAN Automated devices cisco.dnac.lan_automation_workflow_manager: @@ -562,6 +717,244 @@ new_host_name: "SR-LAN-9300-SJ" - device_management_ip_address: "204.1.1.5" new_host_name: "SR-LAN-9500-SJ" + +- name: Create a new Port Channel using Management IP address device identification + cisco.dnac.lan_automation_workflow_manager: + 
dnac_host: "{{dnac_host}}" + dnac_username: "{{dnac_username}}" + dnac_password: "{{dnac_password}}" + dnac_verify: "{{dnac_verify}}" + dnac_port: "{{dnac_port}}" + dnac_version: "{{dnac_version}}" + dnac_debug: "{{dnac_debug}}" + dnac_log: true + config_verify: false + state: merged + config: + - port_channel: + - source_device_management_ip_address: 10.1.1.1 + destination_device_management_ip_address: 20.1.1.1 + links: + - source_port: GigabitEthernet1/0/1 + destination_port: GigabitEthernet2/0/1 + - source_port: GigabitEthernet1/0/2 + destination_port: GigabitEthernet2/0/2 + +- name: Create Port Channel using MAC address device identification + cisco.dnac.lan_automation_workflow_manager: + dnac_host: "{{dnac_host}}" + dnac_username: "{{dnac_username}}" + dnac_password: "{{dnac_password}}" + dnac_verify: "{{dnac_verify}}" + dnac_port: "{{dnac_port}}" + dnac_version: "{{dnac_version}}" + dnac_debug: "{{dnac_debug}}" + dnac_log: true + config_verify: false + state: merged + config: + - port_channel: + - source_device_mac_address: aa:bb:cc:dd:ee:01 + destination_device_mac_address: aa:bb:cc:dd:ee:02 + links: + - source_port: TenGigabitEthernet1/0/1 + destination_port: TenGigabitEthernet1/0/1 + - source_port: TenGigabitEthernet1/0/2 + destination_port: TenGigabitEthernet1/0/2 + +- name: Create Port Channel using serial number device identification + cisco.dnac.lan_automation_workflow_manager: + dnac_host: "{{dnac_host}}" + dnac_username: "{{dnac_username}}" + dnac_password: "{{dnac_password}}" + dnac_verify: "{{dnac_verify}}" + dnac_port: "{{dnac_port}}" + dnac_version: "{{dnac_version}}" + dnac_debug: "{{dnac_debug}}" + dnac_log: true + config_verify: false + state: merged + config: + - port_channel: + - source_device_serial_number: FCW2140L056 + destination_device_serial_number: FCW2140L057 + links: + - source_port: FortyGigabitEthernet1/0/1 + destination_port: FortyGigabitEthernet1/0/1 + +# Provide at least one existing link to identify the Port Channel. +- name: Update existing Port Channel by providing at least one existing link + cisco.dnac.lan_automation_workflow_manager: + dnac_host: "{{dnac_host}}" + dnac_username: "{{dnac_username}}" + dnac_password: "{{dnac_password}}" + dnac_verify: "{{dnac_verify}}" + dnac_port: "{{dnac_port}}" + dnac_version: "{{dnac_version}}" + dnac_debug: "{{dnac_debug}}" + dnac_log: true + config_verify: false + state: merged + config: + - port_channel: + - source_device_management_ip_address: 10.1.1.1 + destination_device_management_ip_address: 20.1.1.1 + links: + # Existing link already part of the Port Channel + - source_port: GigabitEthernet1/0/1 + destination_port: GigabitEthernet2/0/1 + # New link to be added + - source_port: GigabitEthernet1/0/10 + destination_port: GigabitEthernet2/0/10 + +- name: Update a Port Channel using port_channel_number + # No need to specify existing links when port_channel_number is provided. 
+ cisco.dnac.lan_automation_workflow_manager: + dnac_host: "{{dnac_host}}" + dnac_username: "{{dnac_username}}" + dnac_password: "{{dnac_password}}" + dnac_verify: "{{dnac_verify}}" + dnac_port: "{{dnac_port}}" + dnac_version: "{{dnac_version}}" + dnac_debug: "{{dnac_debug}}" + dnac_log: true + config_verify: false + state: merged + config: + - port_channel: + - source_device_management_ip_address: 10.1.1.1 + destination_device_management_ip_address: 20.1.1.1 + port_channel_number: 1 + links: + - source_port: GigabitEthernet1/0/10 + destination_port: GigabitEthernet2/0/10 + +- name: Delete all Port Channels between two devices + cisco.dnac.lan_automation_workflow_manager: + dnac_host: "{{dnac_host}}" + dnac_username: "{{dnac_username}}" + dnac_password: "{{dnac_password}}" + dnac_verify: "{{dnac_verify}}" + dnac_port: "{{dnac_port}}" + dnac_version: "{{dnac_version}}" + dnac_debug: "{{dnac_debug}}" + dnac_log: true + config_verify: false + state: deleted + config: + - port_channel: + - source_device_management_ip_address: 10.1.1.1 + destination_device_management_ip_address: 20.1.1.1 + +- name: Delete a specific link from a Port Channel + cisco.dnac.lan_automation_workflow_manager: + dnac_host: "{{dnac_host}}" + dnac_username: "{{dnac_username}}" + dnac_password: "{{dnac_password}}" + dnac_verify: "{{dnac_verify}}" + dnac_port: "{{dnac_port}}" + dnac_version: "{{dnac_version}}" + dnac_debug: "{{dnac_debug}}" + dnac_log: true + config_verify: false + state: deleted + config: + - port_channel: + - source_device_management_ip_address: 10.1.1.1 + destination_device_management_ip_address: 20.1.1.1 + links: + - source_port: GigabitEthernet1/0/1 + destination_port: GigabitEthernet2/0/1 # This link will be removed from its associated Port Channel. + +- name: Delete an entire Port Channel between two devices by specifying all the links + cisco.dnac.lan_automation_workflow_manager: + dnac_host: "{{dnac_host}}" + dnac_username: "{{dnac_username}}" + dnac_password: "{{dnac_password}}" + dnac_verify: "{{dnac_verify}}" + dnac_port: "{{dnac_port}}" + dnac_version: "{{dnac_version}}" + dnac_debug: "{{dnac_debug}}" + dnac_log: true + config_verify: false + state: deleted + config: + - port_channel: + - source_device_management_ip_address: 10.1.1.1 + destination_device_management_ip_address: 20.1.1.1 + links: + - source_port: GigabitEthernet1/0/1 + destination_port: GigabitEthernet2/0/1 + - source_port: GigabitEthernet1/0/2 + destination_port: GigabitEthernet2/0/2 + - source_port: GigabitEthernet1/0/3 + destination_port: GigabitEthernet2/0/3 + +- name: Delete an entire Port Channel between two devices by specifying the port_channel_number + cisco.dnac.lan_automation_workflow_manager: + dnac_host: "{{dnac_host}}" + dnac_username: "{{dnac_username}}" + dnac_password: "{{dnac_password}}" + dnac_verify: "{{dnac_verify}}" + dnac_port: "{{dnac_port}}" + dnac_version: "{{dnac_version}}" + dnac_debug: "{{dnac_debug}}" + dnac_log: true + config_verify: false + state: deleted + config: + - port_channel: + - source_device_management_ip_address: 10.1.1.1 + destination_device_management_ip_address: 20.1.1.1 + port_channel_number: 1 + +- name: Delete all Port Channels originating from a source device + cisco.dnac.lan_automation_workflow_manager: + dnac_host: "{{dnac_host}}" + dnac_username: "{{dnac_username}}" + dnac_password: "{{dnac_password}}" + dnac_verify: "{{dnac_verify}}" + dnac_port: "{{dnac_port}}" + dnac_version: "{{dnac_version}}" + dnac_debug: "{{dnac_debug}}" + dnac_log: true + config_verify: false + state: 
deleted + config: + - port_channel: + - source_device_management_ip_address: 10.1.1.1 + # Deletes every Port Channel from source device 10.1.1.1, regardless of destination. + +- name: Complex Port Channel operations with multiple configurations + cisco.dnac.lan_automation_workflow_manager: + dnac_host: "{{dnac_host}}" + dnac_username: "{{dnac_username}}" + dnac_password: "{{dnac_password}}" + dnac_verify: "{{dnac_verify}}" + dnac_port: "{{dnac_port}}" + dnac_version: "{{dnac_version}}" + dnac_debug: "{{dnac_debug}}" + dnac_log: true + config_verify: false + state: merged + config: + - port_channel: + # First Port Channel between devices A and B + - source_device_management_ip_address: "10.1.1.1" + destination_device_management_ip_address: "20.1.1.1" + links: + - source_port: "GigabitEthernet1/0/1" + destination_port: "GigabitEthernet2/0/1" + - source_port: "GigabitEthernet1/0/2" + destination_port: "GigabitEthernet2/0/2" + # Second Port Channel between devices A and C + - source_device_management_ip_address: "10.1.1.1" + destination_device_management_ip_address: "30.1.1.1" + links: + - source_port: "GigabitEthernet1/0/3" + destination_port: "GigabitEthernet3/0/1" + - source_port: "GigabitEthernet1/0/4" + destination_port: "GigabitEthernet3/0/2" """ RETURN = r""" dnac_response: @@ -579,10 +972,11 @@ """ from ansible.module_utils.basic import AnsibleModule -from ansible_collections.cisco.dnac.plugins.module_utils.dnac import ( - DnacBase, +from ansible_collections.cisco.dnac.plugins.module_utils.dnac import DnacBase +from ansible_collections.cisco.dnac.plugins.module_utils.validation import ( validate_list_of_dicts, ) + import time @@ -601,6 +995,13 @@ def __init__(self, module): self.updated_hostname, self.no_hostname_updated = [], [] self.added_link, self.no_link_added = [], [] self.deleted_link, self.no_link_deleted = [], [] + self.port_channel_created = [] + self.port_channel_deleted, self.no_port_channel_deleted = [], [] + self.link_added_to_port_channel, self.link_not_added_to_port_channel = [], [] + self.link_removed_from_port_channel, self.link_not_removed_from_port_channel = ( + [], + [], + ) self.supported_states = ["merged", "deleted"] def validate_input(self): @@ -638,165 +1039,194 @@ def validate_input(self): "type": "dict", "required": False, "elements": "dict", - "options": { - "discovered_device_site_name_hierarchy": { + "discovered_device_site_name_hierarchy": { + "type": "str", + "required": True, + }, + "primary_device_management_ip_address": { + "type": "str", + "required": True, + }, + "primary_device_interface_names": { + "type": "list", + "required": True, + "elements": "str", + }, + "peer_device_management_ip_address": { + "type": "str", + "required": False, + }, + "ip_pools": { + "type": "list", + "required": True, + "elements": "dict", + "ip_pool_name": {"type": "str", "required": True}, + "ip_pool_role": { "type": "str", "required": True, + "choices": ["MAIN_POOL", "PHYSICAL_LINK_POOL"], + }, + }, + "multicast_enabled": { + "type": "bool", + "required": False, + "default": False, + }, + "host_name_prefix": {"type": "str", "required": False}, + "redistribute_isis_to_bgp": { + "type": "bool", + "required": False, + "default": False, + }, + "isis_domain_pwd": {"type": "str", "required": False}, + "discovery_level": { + "type": "int", + "required": False, + "default": 2, + }, + "discovery_timeout": {"type": "int", "required": False}, + "discovery_devices": { + "type": "list", + "required": False, + "elements": "dict", + "device_serial_number": {"type": "str", 
"required": True}, + "device_host_name": {"type": "str", "required": False}, + "device_site_name_hierarchy": { + "type": "str", + "required": False, }, - "primary_device_management_ip_address": { + "device_management_ip_address": { + "type": "str", + "required": False, + }, + }, + "launch_and_wait": { + "type": "bool", + "required": False, + "default": False, + }, + "pnp_authorization": { + "type": "bool", + "required": False, + "default": False, + }, + "device_serial_number_authorization": { + "type": "list", + "required": False, + "elements": "str", + }, + }, + "lan_automated_device_update": { + "type": "dict", + "required": False, + "elements": "dict", + "loopback_update_device_list": { + "type": "list", + "required": False, + "elements": "dict", + "device_management_ip_address": { "type": "str", "required": True, }, - "primary_device_interface_names": { - "type": "list", + "new_loopback0_ip_address": { + "type": "str", "required": True, - "elements": "str", }, - "peer_device_management_ip_address": { + }, + "hostname_update_devices": { + "type": "list", + "required": False, + "elements": "dict", + "device_management_ip_address": { "type": "str", - "required": False, + "required": True, }, - "ip_pools": { - "type": "list", + "new_host_name": {"type": "str", "required": True}, + }, + "link_add": { + "type": "dict", + "required": False, + "source_device_management_ip_address": { + "type": "str", "required": True, - "elements": "dict", - "options": { - "ip_pool_name": {"type": "str", "required": True}, - "ip_pool_role": { - "type": "str", - "required": True, - "choices": ["MAIN_POOL", "PHYSICAL_LINK_POOL"], - }, - }, }, - "multicast_enabled": { - "type": "bool", - "required": False, - "default": False, + "source_device_interface_name": { + "type": "str", + "required": True, }, - "host_name_prefix": {"type": "str", "required": False}, - "redistribute_isis_to_bgp": { - "type": "bool", - "required": False, - "default": False, + "destination_device_management_ip_address": { + "type": "str", + "required": True, }, - "isis_domain_pwd": {"type": "str", "required": False}, - "discovery_level": { - "type": "integer", - "required": False, - "default": 2, + "destination_device_interface_name": { + "type": "str", + "required": True, }, - "discovery_timeout": {"type": "integer", "required": False}, - "discovery_devices": { - "type": "list", - "required": False, - "elements": "dict", - "options": { - "device_serial_number": {"type": "str", "required": True}, - "device_host_name": {"type": "str", "required": False}, - "device_site_name_hierarchy": { - "type": "str", - "required": False, - }, - "device_management_ip_address": { - "type": "str", - "required": False, - }, - }, + "ip_pool_name": {"type": "str", "required": True}, + }, + "link_delete": { + "type": "dict", + "required": False, + "source_device_management_ip_address": { + "type": "str", + "required": True, }, - "launch_and_wait": { - "type": "bool", - "required": False, - "default": False, + "source_device_interface_name": { + "type": "str", + "required": True, }, - "pnp_authorization": { - "type": "bool", - "required": False, - "default": False, + "destination_device_management_ip_address": { + "type": "str", + "required": True, }, - "device_serial_number_authorization": { - "type": "list", - "required": False, - "elements": "str", + "destination_device_interface_name": { + "type": "str", + "required": True, }, }, }, - "lan_automated_device_update": { - "type": "dict", - "required": False, + "port_channel": { + "type": "list", "elements": 
"dict", - "options": { - "loopback_update_device_list": { - "type": "list", - "required": False, - "elements": "dict", - "options": { - "device_management_ip_address": { - "type": "str", - "required": True, - }, - "new_loopback0_ip_address": { - "type": "str", - "required": True, - }, - }, - }, - "hostname_update_devices": { - "type": "list", - "required": False, - "elements": "dict", - "options": { - "device_management_ip_address": { - "type": "str", - "required": True, - }, - "new_host_name": {"type": "str", "required": True}, - }, - }, - "link_add": { - "type": "dict", - "required": False, - "options": { - "source_device_management_ip_address": { - "type": "str", - "required": True, - }, - "source_device_interface_name": { - "type": "str", - "required": True, - }, - "destination_device_management_ip_address": { - "type": "str", - "required": True, - }, - "destination_device_interface_name": { - "type": "str", - "required": True, - }, - "ip_pool_name": {"type": "str", "required": True}, - }, + "source_device_management_ip_address": { + "type": "str", + "required": False, + }, + "source_device_mac_address": { + "type": "str", + "required": False, + }, + "source_device_serial_number": { + "type": "str", + "required": False, + }, + "destination_device_management_ip_address": { + "type": "str", + "required": False, + }, + "destination_device_mac_address": { + "type": "str", + "required": False, + }, + "destination_device_serial_number": { + "type": "str", + "required": False, + }, + "port_channel_number": { + "type": "int", + "required": False, + }, + "links": { + "type": "list", + "elements": "dict", + "required": False, + "source_port": { + "type": "str", + "required": True, }, - "link_delete": { - "type": "dict", - "required": False, - "options": { - "source_device_management_ip_address": { - "type": "str", - "required": True, - }, - "source_device_interface_name": { - "type": "str", - "required": True, - }, - "destination_device_management_ip_address": { - "type": "str", - "required": True, - }, - "destination_device_interface_name": { - "type": "str", - "required": True, - }, - }, + "destination_port": { + "type": "str", + "required": True, }, }, }, @@ -815,16 +1245,14 @@ def validate_input(self): return self self.validated_config = valid_lan_automation - self.msg = "Successfully validated playbook configuration parameters using 'validate_input': {0}".format( - str(valid_lan_automation) - ) - self.log(str(self.msg), "INFO") + self.msg = f"Successfully validated playbook configuration parameters using 'validate_input': {self.pprint(valid_lan_automation)}." + self.log(self.msg, "INFO") self.status = "success" return self def get_have(self, config): """ - Retrieve the current LAN Automation session details from Cisco Catalyst Center. + Retrieve the current LAN Automation session details and port channel configurations from Cisco Catalyst Center. This method checks for active LAN Automation sessions, retrieves their details, maps session IDs to the primary IP addresses of seed devices, and stores the information in the class instance. @@ -836,6 +1264,13 @@ def get_have(self, config): - activeSessions (bool): Indicates if active LAN Automation sessions are present. - activeSessionIds (list): List of IDs for the active LAN Automation sessions. - session_to_ip_map (dict): Maps session IDs to their corresponding primary IP addresses. + - port_channel (list[list[dict] | None]): Extracted port channel configurations for each + playbook-defined port channel config. 
+ - Each element corresponds to one config from the playbook. + - Each element is either: + • list[dict] → one or more matching port channel configs + • None → if no match was found for that config + - If the playbook does not define any port channel configs, this is []. """ have = { @@ -876,28 +1311,462 @@ def get_have(self, config): "INFO", ) + have["port_channel"] = self.extract_port_channel_config_from_catalyst_center() + self.have = have self.log("Current State (self.have): {}".format(str(self.have)), "INFO") self.msg = "Successfully retrieved the details from the Cisco Catalyst Center" self.status = "success" return self - def map_active_sessions_to_primary_ip( - self, lan_automation_sessions, lan_automation_session_ids + def get_port_channels( + self, + source_device_management_ip_address, + destination_device_management_ip_address, + port_channel_number, ): """ - Map active LAN Automation session IDs to their respective primary seed device IP addresses. - This method associates each session ID from the active LAN Automation sessions with its - corresponding primary seed device's management IP address. - Args: - - lan_automation_sessions (list): List of dictionaries containing session details for all active LAN - Automation sessions. - - lan_automation_session_ids (list): List of active LAN Automation session IDs retrieved from Cisco - Catalyst Center. - Returns: - - dict: A dictionary mapping session IDs to their corresponding primary seed device IP address. - If no primary IP address is found for a session ID, its value is set to `None`. - """ + Retrieve Port Channel configurations between two devices from Cisco Catalyst Center. + + Parameters: + source_device_management_ip_address (str): Management IP address of the source device. + destination_device_management_ip_address (str): Management IP address of the destination device. If None, + retrieves Port Channels from the source device to all connected devices. + port_channel_number (int): Specific Port Channel number to filter results. If None, returns all Port Channels between the devices. + + Returns: + list or None: List of Port Channel configuration dictionaries if found, None if no configurations exist. + Each dictionary contains Port Channel details including device information, + Port Channel numbers, and link configurations. + + Description: + Calls the Cisco Catalyst Center API to retrieve Port Channel configurations between two specified devices. + The method constructs a payload with source and destination device IP addresses and makes an API call + to 'get_port_channels'. If a specific port_channel_number is provided, it filters the results to return + only configurations matching that Port Channel number. The method handles API response validation, + logs detailed information about the retrieval process, and returns None if no configurations are found + or if an error occurs during the API call. 
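A minimal sketch of the retrieval just described (assumptions: "dnac" stands in for the module's authenticated API client wrapper, and the IP addresses and Port Channel number are placeholders; the payload keys and the "device1PortChannelNumber" field mirror those used by this method):

    payload = {
        "device1_management_ipaddress": "10.1.1.1",   # source device
        "device2_management_ipaddress": "20.1.1.1",   # destination device
    }
    response = dnac._exec(family="lan_automation", function="get_port_channels", params=payload)
    port_channels = (response or {}).get("response") or []
    # The API does not yet filter by Port Channel number, so filtering happens client-side:
    matching = [pc for pc in port_channels if pc.get("device1PortChannelNumber") == 1]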
+ """ + + self.log( + f"Initiating Port Channel retrieval - Source IP: '{source_device_management_ip_address}', " + f"Destination IP: '{destination_device_management_ip_address}', Port Channel Number: '{port_channel_number}'", + "INFO", + ) + + if not source_device_management_ip_address: + self.log( + "Source device management IP address is required to fetch Port Channel configurations.", + "ERROR", + ) + return None + + if not self.is_valid_ipv4(source_device_management_ip_address): + self.msg = ( + f"Invalid source device IP address format: {source_device_management_ip_address}", + "ERROR", + ) + self.fail_and_exit(self.msg) + + payload = { + "device1_management_ipaddress": source_device_management_ip_address, + "device2_management_ipaddress": destination_device_management_ip_address, + # port_channel_number is not passed in payload, as fetching with port_channel_number is not available yet. To be added here later. + } + + if destination_device_management_ip_address: + self.log( + f"Processing Port Channel configuration: Source device '{source_device_management_ip_address}' -> " + f"Destination device '{destination_device_management_ip_address}'", + "DEBUG", + ) + if not self.is_valid_ipv4(destination_device_management_ip_address): + self.msg = ( + f"Invalid destination device IP address format: {destination_device_management_ip_address}".format( + destination_device_management_ip_address + ), + "ERROR", + ) + self.fail_and_exit(self.msg) + else: + self.log( + f"Processing Port Channel configuration: All Port Channels from source device '{source_device_management_ip_address}'", + "DEBUG", + ) + + if port_channel_number: + self.log( + f"Applying Port Channel number filter: '{port_channel_number}'", "DEBUG" + ) + else: + self.log( + "No Port Channel number filter applied - retrieving all Port Channels", + "DEBUG", + ) + + # Note about API limitation + self.log( + "Note: port_channel_number filtering is done client-side as API doesn't support server-side filtering yet", + "DEBUG", + ) + + try: + self.log( + f"Calling 'get_port_channels' API with payload: {self.pprint(payload)}", + "DEBUG", + ) + response = self.dnac_apply["exec"]( + family="lan_automation", function="get_port_channels", params=payload + ) + + # Check if the response is empty + self.log( + f"API call completed. Received API response from 'get_port_channels' for for Source IP: '{source_device_management_ip_address}', " + f"Destination IP: '{destination_device_management_ip_address}': {response}", + "DEBUG", + ) + + if not response: + self.msg = ( + f"No port channel configurations retrieved for Source IP: '{source_device_management_ip_address}', " + f"Destination IP: '{destination_device_management_ip_address}', Response is empty." 
+ ) + self.log(self.msg, "DEBUG") + return None + + port_channel_info = response.get("response") + + self.log( + f"Retrieved Port Channel configurations for Source IP: {source_device_management_ip_address}, " + f"Destination IP: {destination_device_management_ip_address}: {self.pprint(port_channel_info)}", + "INFO", + ) + if not port_channel_number: + self.log( + "port_channel_number is not provided, returning all Port Channel configurations.", + "INFO", + ) + return port_channel_info + + self.log( + f"Successfully retrieved {len(port_channel_info)} Port Channel configuration(s) for " + f"source device: {source_device_management_ip_address}", + "INFO", + ) + filtered_info = [ + info + for info in port_channel_info + if info.get("device1PortChannelNumber") == port_channel_number + ] + + if not filtered_info: + self.msg = ( + f"No Port Channel configurations found for Source IP: '{source_device_management_ip_address}', " + f"Destination IP: '{destination_device_management_ip_address}', Port Channel Number: '{port_channel_number}'." + ) + self.log(self.msg, "DEBUG") + return None + + self.log( + f"Filtered Port Channel configurations: {self.pprint(filtered_info)}", + "DEBUG", + ) + return filtered_info + + except Exception as e: + self.msg = ( + f"Exception occurred while retrieving Port Channel details - " + f"Source IP: '{source_device_management_ip_address}', " + f"Destination IP: '{destination_device_management_ip_address}', " + f"Port Channel Number: '{port_channel_number}', " + f"Error: '{str(e)}'" + ) + self.fail_and_exit(self.msg) + + def format_port_channels_have_configs(self, port_channel_configs): + """ + Format port channel configurations into a standardized structure. + + Parameters: + port_channel_configs (list[dict]): + List of dictionaries containing raw port channel configurations. + Each configuration may include device IPs, port channel numbers, IDs, + and associated port channel member links. + + Returns: + list[dict]: + List of formatted port channel configuration dictionaries. + Each dictionary contains source/destination device IPs, + port channel number, ID, and associated links. + + Description: + This method processes a list of raw port channel configurations + and reformats them into a structured format suitable for further + workflows. Each port channel entry includes device management IPs, + port channel metadata, and member link mappings (source/destination ports). + Logging is performed at multiple stages to aid debugging. 
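+
+        Example (illustrative only; device IPs, the port channel id, and interface names are hypothetical sample values):
+            raw = [{"device1ManagementIPAddress": "204.1.2.1",
+                    "device2ManagementIPAddress": "204.1.2.2",
+                    "portChannelNumber": 1, "id": "pc-id-1",
+                    "portChannelMembers": [{"device1Interface": "TwoGigabitEthernet1/0/1",
+                                            "device2Interface": "TwoGigabitEthernet1/0/2"}]}]
+            formatted = self.format_port_channels_have_configs(raw)
+            # formatted -> [{"sourceDeviceManagementIPAddress": "204.1.2.1",
+            #                "destinationDeviceManagementIPAddress": "204.1.2.2",
+            #                "portChannelNumber": 1, "id": "pc-id-1",
+            #                "links": [{"sourcePort": "TwoGigabitEthernet1/0/1",
+            #                           "destinationPort": "TwoGigabitEthernet1/0/2"}]}]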
+ """ + self.log( + f"Formatting Port Channel configuration: {self.pprint(port_channel_configs)}", + "DEBUG", + ) + + if not port_channel_configs: + self.log("No Port Channel configurations to format.", "INFO") + return [] + + formatted_configs = [] + for idx, port_channel_config in enumerate(port_channel_configs, start=1): + self.log( + f"Processing port channel config #{idx}: {self.pprint(port_channel_config)}", + "DEBUG", + ) + + formatted_config = { + "sourceDeviceManagementIPAddress": port_channel_config.get( + "device1ManagementIPAddress" + ), + "destinationDeviceManagementIPAddress": port_channel_config.get( + "device2ManagementIPAddress" + ), + "portChannelNumber": port_channel_config.get("portChannelNumber"), + "id": port_channel_config.get("id"), + } + self.log( + f"Extracted config details: " + f"Source={formatted_config['sourceDeviceManagementIPAddress']}, " + f"Destination={formatted_config['destinationDeviceManagementIPAddress']}, " + f"PortChannel={formatted_config['portChannelNumber']}, " + f"ID={formatted_config['id']}", + "DEBUG", + ) + + formatted_config["links"] = [ + { + "sourcePort": link.get("device1Interface"), + "destinationPort": link.get("device2Interface"), + } + for link in port_channel_config.get("portChannelMembers", []) + ] + + formatted_configs.append(formatted_config) + + self.log( + f"Formatted {len(formatted_configs)} Port Channel configuration(s): " + f"{self.pprint(formatted_configs)}", + "DEBUG", + ) + return formatted_configs + + def extract_port_channel_config_from_catalyst_center(self): + """ + Extract port channel configurations from Catalyst Center. + + Parameters: + None + + Returns: + list[list[dict] | None]: + - The outer list corresponds to each port channel config defined + in the playbook (`want["port_channel"]`). + - Each element in the outer list can be: + • list[dict] → one or more matching configs from Catalyst Center + • None → if no matching config was found for that playbook entry + - If the playbook does not define any port channel configs, + an empty list [] is returned. + + Description: + This method compares the desired port channel configurations defined + in the playbook (`want["port_channel"]`) with existing port channel + configurations fetched from Catalyst Center. + + Behavior: + - If no port channel configs are defined in the playbook, an empty list [] is returned. + - If `port_channel_number` is provided, exactly one config is expected and returned + inside a list (or None if no match). + - If only `links` are provided, exactly one config is matched and returned + inside a list (or None if no match). + - If neither are provided, all existing configs for the given device pair + are returned inside a list. + - If multiple configs match the same set of links, the method raises an error. + - Logs are generated at INFO and DEBUG levels for traceability. + """ + + port_channel_configs = self.want.get("port_channel", []) + self.log( + f"Initializing extraction of port channel configurations from Catalyst Center " + f"for playbook port_channel config: {self.pprint(port_channel_configs)}", + "DEBUG", + ) + + if not port_channel_configs: + self.log( + "No port_channel configuration provided in the playbook. 
" + "No extraction needed.", + "INFO", + ) + return [] + + have_port_channel_configs = [] + + for idx, port_channel_config in enumerate(port_channel_configs, start=1): + self.log( + f"[Config {idx}] Extracting details from Catalyst Center for playbook config: {self.pprint(port_channel_config)}", + "INFO", + ) + + source_device_management_ip_address = port_channel_config.get( + "sourceDeviceManagementIPAddress" + ) + destination_device_management_ip_address = port_channel_config.get( + "destinationDeviceManagementIPAddress" + ) + port_channel_number = port_channel_config.get("portChannelNumber") + + self.log( + f"[Config {idx}] Fetching configs with Source={source_device_management_ip_address}, " + f"Destination={destination_device_management_ip_address}, " + f"PortChannel={port_channel_number}", + "DEBUG", + ) + fetched_port_channel_configs = self.get_port_channels( + source_device_management_ip_address, + destination_device_management_ip_address, + port_channel_number, + ) + + if not fetched_port_channel_configs: + self.log( + f"No matching port channel configurations found in Catalyst Center for [Config {idx}].", + "INFO", + ) + have_port_channel_configs.append(None) + continue + + self.log( + f"[Config {idx}] Retrieved {len(fetched_port_channel_configs)} raw configs.", + "DEBUG", + ) + self.log( + f"Retrieved {len(fetched_port_channel_configs)} raw port channel configurations from Catalyst Center for [Config {idx}]", + "DEBUG", + ) + formatted_port_channel_config_list = self.format_port_channels_have_configs( + fetched_port_channel_configs + ) + want_links = port_channel_config.get("links") + + if want_links: + self.log( + f"[Config {idx}] Links provided in playbook: {self.pprint(want_links)}", + "INFO", + ) + if port_channel_number: + self.log( + f"[Config {idx}] port_channel_number provided, keeping all fetched configs.", + "INFO", + ) + have_port_channel_config = formatted_port_channel_config_list + # if formatted_port_channel_config_list is not empty and port_channel_number is provided, there should be only one config in the list. + self.log( + f"[Config {idx}] Matching configs: {self.pprint(have_port_channel_config)}", + "DEBUG", + ) + else: + self.log( + f"[Config {idx}] port_channel_number not provided, attempting to find the relevant port channel with the provided links.", + "INFO", + ) + link_matched_port_channel_config_index_set = set() + for want_link in want_links: + for i, have_port_channel_config in enumerate( + formatted_port_channel_config_list + ): + for have_link in have_port_channel_config.get("links", []): + if want_link.get("sourcePort") == have_link.get( + "sourcePort" + ) and want_link.get("destinationPort") == have_link.get( + "destinationPort" + ): + link_matched_port_channel_config_index_set.add(i) + self.log( + f"[Config {idx}] Link {want_link} matches with existing config index {i}: " + f"{self.pprint(have_port_channel_config)}", + "DEBUG", + ) + + if not link_matched_port_channel_config_index_set: + self.log( + f"[Config {idx}] No existing Port Channel config found by matching links provided in config.", + "DEBUG", + ) + have_port_channel_config = None + + elif len(link_matched_port_channel_config_index_set) > 1: + matched_config_indices = ", ".join( + map( + str, + sorted(link_matched_port_channel_config_index_set), + ) + ) + self.msg = ( + f"[Config {idx}] Links provided in playbooks belongs to multiple existing Port " + f"Channel configurations: {self.pprint(formatted_port_channel_config_list)}." 
+ f"Matching indices: {matched_config_indices}" + ) + self.fail_and_exit(self.msg) + else: + matched_config_index = ( + link_matched_port_channel_config_index_set.pop() + ) + self.log( + f"[Config {idx}] Link matched Port Channel config at index: '{matched_config_index}' " + f"Matched Config: {self.pprint(formatted_port_channel_config_list[matched_config_index])}", + "DEBUG", + ) + have_port_channel_config = [ + formatted_port_channel_config_list[matched_config_index] + ] + else: + self.log( + f"[Config {idx}] No links provided in the playbook configuration, keeping all {len(formatted_port_channel_config_list)} configs.", + "INFO", + ) + have_port_channel_config = formatted_port_channel_config_list + + have_port_channel_configs.append(have_port_channel_config) + self.log( + f"[Config {idx}] Final extracted configs: {self.pprint(have_port_channel_config)}", + "DEBUG", + ) + + self.log( + f"Retrieved existing Port Channel configurations (Have): {self.pprint(have_port_channel_configs)}", + "DEBUG", + ) + return have_port_channel_configs + + def map_active_sessions_to_primary_ip( + self, lan_automation_sessions, lan_automation_session_ids + ): + """ + Map active LAN Automation session IDs to their respective primary seed device IP addresses. + This method associates each session ID from the active LAN Automation sessions with its + corresponding primary seed device's management IP address. + Args: + - lan_automation_sessions (list): List of dictionaries containing session details for all active LAN + Automation sessions. + - lan_automation_session_ids (list): List of active LAN Automation session IDs retrieved from Cisco + Catalyst Center. + Returns: + - dict: A dictionary mapping session IDs to their corresponding primary seed device IP address. + If no primary IP address is found for a session ID, its value is set to `None`. + """ + + self.log( + f"Initiating session ID to primary IP mapping for {len(lan_automation_sessions or [])} sessions and {len(lan_automation_session_ids or [])} IDs", + "DEBUG", + ) session_id_to_primary_ip = {} @@ -1167,43 +2036,744 @@ def get_site_details(self, site_name_hierarchy): return site_exists - def get_want(self, config): + def get_device_ip_by_device_identifier( + self, device_identifier, device_identifier_value + ): """ - Collects and validates the desired state of LAN automation and device updates based on the provided - configuration. - Args: - self (object): An instance of a class used for interacting with Cisco Catalyst Center. - config (dict): A configuration dictionary containing details about LAN automation sessions and device - updates. + Retrieve the management IP address of a device given a specific identifier. + + Parameters: + device_identifier (str): + The type of identifier used to search for the device. Possible values: + - "ip_address" + - "mac_address" + - "serial_number" + device_identifier_value (str): + The value of the identifier to match for the device. + Returns: - self (object): The instance of the class with the updated `want` attribute containing the validated desired - state of LAN automation and device update processes. + str | None: + - The management IP address of the device if found. + - None if no device matches the given identifier and value. 
+ Description: - This function processes the given configuration to gather and validate details about: - - LAN Automation: It collects information on LAN automation sessions, ensuring that all required parameters - such as IP pools, management IP addresses, and discovery devices are provided and valid. - - Device Updates: It checks the configuration for details about devices that require updates, such as - hostname updates, loopback interfaces, and link configurations, validating the provided - IP addresses and device roles. - If any required parameters are missing or invalid, the function logs an error message and updates the - status accordingly. On successful collection of all parameters, it logs the desired state and sets the - status to success. + This method maps the given identifier type to the corresponding API field, + queries the Cisco Catalyst Center for the device using the 'get_device_list' API, + and retrieves the management IP address. Detailed DEBUG logs are generated + for the payload, API response, and any errors encountered. If the API call + fails or no device is found, the method either logs the issue or exits the + program with a failure message. """ - want = {} - missing_params = [] - state = self.params.get("state") + self.log( + f"Retrieving device IP for device with {device_identifier}: '{device_identifier_value}'.", + "DEBUG", + ) + if not device_identifier: + self.log( + "Device identifier type is required for device IP retrieval", "ERROR" + ) + return None - if state == "merged": - want["lan_automation"], lan_auto_missing_params = ( - self.extract_lan_automation(config) + if not device_identifier_value: + self.log( + "Device identifier value is required for device IP retrieval", "ERROR" ) - missing_params.extend(lan_auto_missing_params) + return None - want["lan_automated_device_update"], lan_auto_device_missing_params = ( - self.extract_lan_automated_device_update(config) + valid_identifiers = ["ip_address", "mac_address", "serial_number"] + if device_identifier not in valid_identifiers: + self.log( + "Invalid device identifier type: {0}. Valid types are: {1}".format( + device_identifier, ", ".join(valid_identifiers) + ), + "ERROR", ) - missing_params.extend(lan_auto_device_missing_params) + return None + + device_identifier_api_name_dict = { + "ip_address": "managementIpAddress", + "mac_address": "macAddress", + "serial_number": "serialNumber", + } + api_field = device_identifier_api_name_dict.get(device_identifier) + payload = {f"{api_field}": device_identifier_value} + self.log( + f"Constructed payload for API request: {self.pprint(payload)}", "DEBUG" + ) + try: + response = self.dnac._exec( + family="devices", + function="get_device_list", + params=payload, + ) + + self.log( + f"Received API response from 'get_device_list' for the device with {device_identifier}: '{device_identifier_value}' is : {str(response)}", + "DEBUG", + ) + + if not response: + self.log( + f"No response received from 'get_device_list' API for {device_identifier}: {device_identifier_value}", + "WARNING", + ) + return None + + response_data = response.get("response") + if not response_data: + self.msg = f"No device found with {device_identifier}: '{device_identifier_value}'." 
+ self.log(self.msg, "DEBUG") + return None + + if not isinstance(response_data, list) or len(response_data) == 0: + self.log( + "Empty or invalid device list returned for {0}: {1}".format( + device_identifier, device_identifier_value + ), + "INFO", + ) + return None + + device_data = response_data[0] + device_ip = device_data.get("managementIpAddress") + + self.log( + f"Device IP for {device_identifier} '{device_identifier_value}' found: {device_ip}", + "INFO", + ) + + return device_ip + + except Exception as e: + error_message = str(e) + self.msg = f"Failed to retrieve device IP for {device_identifier}: '{device_identifier_value}'. Error: {error_message}" + self.fail_and_exit(self.msg) + + def resolve_device_ip(self, device_type, serial_number, ip_address, mac_address): + """ + Resolve the management IP address of a device using serial number, IP address, or MAC address. + + Parameters: + device_type (str): + The type of device (source or destination). + serial_number (str | None): + The serial number of the device. If provided, this is used as the primary method to retrieve the IP. + ip_address (str | None): + The IP address of the device. Used if serial number is not provided. + mac_address (str | None): + The MAC address of the device. Used if neither serial number nor IP address are provided. + + Returns: + str: + The resolved management IP address of the device. If the device cannot be found using the provided + identifier(s), the method exits with an error. + + Description: + This method attempts to resolve the management IP address of a device in the following order: + 1. Serial number (highest priority) + 2. Provided IP address + 3. MAC address (lowest priority) + + - Logs are generated at DEBUG level for every step. + - If a device cannot be found using the provided identifier, the method calls `fail_and_exit`. + - Internally, it uses `get_device_ip_by_device_identifier` to query Cisco DNA Center. + """ + self.log( + f"Starting device IP resolution for {device_type} device using available identifiers", + "DEBUG", + ) + + self.log( + "Resolution parameters - Serial: {0}, IP: {1}, MAC: {2}".format( + serial_number or "None", ip_address or "None", mac_address or "None" + ), + "DEBUG", + ) + + if not device_type: + self.log( + "Device type parameter is required for device IP resolution", "ERROR" + ) + self.msg = "Device type cannot be empty for IP resolution" + self.fail_and_exit(self.msg) + + resolved_ip_address = None + + if serial_number: + self.log( + f"{device_type.capitalize()} device management serial number is provided: {serial_number}", + "DEBUG", + ) + self.log("Retrieving IP address using serial number...", "DEBUG") + ip_address = self.get_device_ip_by_device_identifier( + "serial_number", serial_number + ) + if not ip_address: + self.log( + f"Failed to resolve IP address using serial number: {serial_number}", + "ERROR", + ) + self.msg = ( + f"Failed to retrieve device details using serial number: {serial_number}. " + "Please check if serial number is correct." 
+ ) + self.fail_and_exit(self.msg) + + self.log( + f"Successfully resolved IP address using serial number {serial_number}: {ip_address}", + "INFO", + ) + + elif ip_address: + self.log( + f"{device_type.capitalize()} device management IP address is provided: {ip_address}", + "DEBUG", + ) + self.log("Validating provided IP address...", "DEBUG") + original_ip_address = ip_address + ip_address = self.get_device_ip_by_device_identifier( + "ip_address", ip_address + ) + if not ip_address: + self.log( + f"Failed to resolve IP address using IP address: {original_ip_address}", + "ERROR", + ) + self.msg = ( + f"Failed to retrieve device details using IP address: {original_ip_address}. " + "Please check if IP address is correct." + ) + self.fail_and_exit(self.msg) + else: + self.log( + f"Successfully validated the IP address: {ip_address}", "DEBUG" + ) + + elif mac_address: + self.log( + f"{device_type.capitalize()} device management MAC address is provided: {mac_address}", + "DEBUG", + ) + self.log("Retrieving IP address using MAC address...", "DEBUG") + ip_address = self.get_device_ip_by_device_identifier( + "mac_address", mac_address + ) + if not ip_address: + self.log( + f"Failed to resolve IP address using MAC address: {mac_address}", + "ERROR", + ) + self.msg = ( + f"Failed to retrieve device details using MAC address: {mac_address}. " + "Please check if MAC address is correct." + ) + self.fail_and_exit(self.msg) + + self.log( + f"Successfully resolved IP address using MAC address {mac_address}: {ip_address}", + "INFO", + ) + else: + self.msg = ( + f"None of 'management_ip_address', 'mac_address' or 'serial_number' " + f"are provided for the {device_type} device. Atleast one of them is required." + ) + self.fail_and_exit(self.msg) + + return ip_address + + def validate_port_channel_number(self, port_channel_config, idx, state): + """ + Validate port channel number parameter based on state. + Args: + port_channel_config (dict): Port channel configuration + idx (int): Configuration index for logging + state (str): Current operation state + Returns: + int or None: Validated port channel number + """ + self.log( + f"Validating port channel number for configuration {idx} in state '{state}'", + "DEBUG", + ) + port_channel_number = port_channel_config.get("port_channel_number") + + if state == "merged": + if not port_channel_number: + self.log( + "Configuration {0}: No port_channel_number provided - new Port Channel will be " + "created if none exists between endpoints".format(idx), + "INFO", + ) + return None + + self.log( + "Configuration {0}: port_channel_number provided: {1} - will be used for " + "updating existing Port Channel".format(idx, port_channel_number), + "INFO", + ) + return port_channel_number + + elif state == "deleted": + if not port_channel_number: + self.log( + "Configuration {0}: No port_channel_number provided - all Port Channels " + "between specified endpoints will be deleted".format(idx), + "INFO", + ) + return None + + self.log( + "Configuration {0}: port_channel_number provided: {1} - only specified " + "Port Channel will be deleted".format(idx, port_channel_number), + "INFO", + ) + return port_channel_number + + return None + + def validate_port_channel_links( + self, port_channel_config, idx, state, missing_params + ): + """ + Validate port channel links parameter based on state. 
+ Args: + port_channel_config (dict): Port channel configuration + idx (int): Configuration index for logging + state (str): Current operation state + missing_params (list): List to collect missing parameters + Returns: + list or None: Validated links configuration + """ + + self.log( + f"Validating port channel links for configuration {idx} in state '{state}'", + "DEBUG", + ) + + links = port_channel_config.get("links") + + if state == "merged": + if not links: + error_msg = ( + "Configuration {0}: Missing links parameter for merged state - " + "at least one link must be specified".format(idx) + ) + self.log(error_msg, "ERROR") + missing_params.append(error_msg) + return None + else: + self.log( + "Configuration {0}: Links provided for validation: {1}".format( + idx, len(links) if isinstance(links, list) else "invalid" + ), + "INFO", + ) + + # Validate links structure + self.validate_links(links) + return links + + elif state == "deleted": + if not links: + self.log( + "Configuration {0}: No links provided for deleted state - " + "all links will be targeted".format(idx), + "INFO", + ) + return None + else: + self.log( + "Configuration {0}: Links provided for deletion: {1}".format( + idx, len(links) if isinstance(links, list) else "invalid" + ), + "INFO", + ) + + # Validate links structure + self.validate_links(links) + return links + + return None + + def _validate_and_resolve_source_device( + self, port_channel_config, idx, missing_params + ): + """ + Validate and resolve source device IP address. + Args: + port_channel_config (dict): Port channel configuration + idx (int): Configuration index for logging + missing_params (list): List to collect missing parameters + Returns: + str or None: Resolved source device IP address + """ + source_ip = port_channel_config.get("source_device_management_ip_address") + source_mac = port_channel_config.get("source_device_management_mac_address") + source_serial = port_channel_config.get( + "source_device_management_serial_number" + ) + + if any([source_ip, source_mac, source_serial]): + self.log( + f"Configuration {idx}: Source device identifiers provided - resolving IP address", + "DEBUG", + ) + + resolved_ip = self.resolve_device_ip( + "source", source_serial, source_ip, source_mac + ) + + if resolved_ip: + self.log( + f"Configuration {idx}: Successfully resolved source device IP: {resolved_ip}", + "INFO", + ) + return resolved_ip + else: + error_msg = f"Configuration {idx}: Failed to resolve source device IP" + self.log(error_msg, "ERROR") + missing_params.append(error_msg) + return None + else: + error_msg = ( + f"Configuration {idx}: Missing source device identifiers - at least one of " + "'source_device_management_ip_address', 'source_device_management_mac_address' " + "or 'source_device_management_serial_number' is required" + ) + self.log(error_msg, "ERROR") + missing_params.append(error_msg) + return None + + def _validate_and_resolve_destination_device( + self, port_channel_config, idx, state, missing_params + ): + """ + Validate and resolve destination device IP address based on state. 
+ Args: + port_channel_config (dict): Port channel configuration + idx (int): Configuration index for logging + state (str): Current operation state + missing_params (list): List to collect missing parameters + Returns: + str or None: Resolved destination device IP address + """ + dest_ip = port_channel_config.get("destination_device_management_ip_address") + dest_mac = port_channel_config.get("destination_device_mac_address") + dest_serial = port_channel_config.get("destination_device_serial_number") + + has_destination_identifiers = any([dest_ip, dest_mac, dest_serial]) + + if state == "merged": + if has_destination_identifiers: + self.log( + f"Configuration {idx}: Destination device identifiers provided - resolving IP address", + "DEBUG", + ) + + resolved_ip = self.resolve_device_ip( + "destination", dest_serial, dest_ip, dest_mac + ) + + if resolved_ip: + self.log( + f"Configuration {idx}: Successfully resolved destination device IP: {resolved_ip}", + "INFO", + ) + return resolved_ip + else: + error_msg = ( + f"Configuration {idx}: Failed to resolve destination device IP" + ) + self.log(error_msg, "ERROR") + missing_params.append(error_msg) + return None + else: + error_msg = ( + f"Configuration {idx}: Missing destination device identifiers for merged state - " + f"at least one of 'destination_device_management_ip_address', " + f"'destination_device_mac_address' or 'destination_device_serial_number' " + f"is required" + ) + self.log(error_msg, "ERROR") + missing_params.append(error_msg) + return None + + elif state == "deleted": + if not has_destination_identifiers: + self.log( + f"Configuration {idx}: No destination device identifiers provided for deleted state - " + f"all port channels from source device will be targeted", + "INFO", + ) + return None + else: + self.log( + f"Configuration {idx}: Destination device identifiers provided - resolving IP address", + "DEBUG", + ) + + resolved_ip = self.resolve_device_ip( + "destination", dest_serial, dest_ip, dest_mac + ) + + if resolved_ip: + self.log( + f"Configuration {idx}: Successfully resolved destination device IP: {resolved_ip}", + "INFO", + ) + return resolved_ip + else: + error_msg = ( + f"Configuration {idx}: Failed to resolve destination device IP" + ) + self.log(error_msg, "ERROR") + missing_params.append(error_msg) + return None + + return None + + def validate_and_extract_port_channel_config_from_playbook(self, config): + """ + Validate and extract port channel configurations from the playbook. + + Parameters: + config (dict): + The playbook configuration containing port channel information and + device details. + + Returns: + tuple[list[dict], list[str]]: + - want_port_channel_configs (list[dict]): + A list of validated and resolved port channel configuration dictionaries. + Each dictionary may include: + - source_device_management_ip_address (str) + - destination_device_management_ip_address (str | None) + - port_channel_number (int | None) + - links (list[dict] | None) + - missing_params (list[str]): + A list of parameters that were required but missing from the playbook config. + Examples: + - Missing source device identifiers + - Missing destination device identifiers (for merged state) + - Missing links (for merged state) + + Description: + This method performs the following: + - Iterates through port channel configurations defined in the playbook. + - Resolves device IP addresses using serial number, IP, or MAC address for both + source and destination devices. 
+ - Validates optional parameters such as `port_channel_number` and `links` according + to the desired state ('merged' or 'deleted'). + - Generates INFO and DEBUG logs for every validation and resolution step. + - Collects any missing required parameters into `missing_params`. + - Returns a list of fully prepared port channel configurations along with any + missing parameter information. + """ + + self.log( + f"Validating and Extracting port channel configuration for the config: {self.pprint(config)}", + "DEBUG", + ) + port_channel_configs = config.get("port_channel") + + if not port_channel_configs: + self.log("No port channel configuration provided in the playbook.", "INFO") + return [], [] + + if not isinstance(port_channel_configs, list): + self.log( + f"Invalid port_channel format - expected list, got: {type(port_channel_configs).__name__}", + "ERROR", + ) + return [], ["Invalid port_channel format - expected list"] + + self.log( + f"Found {len(port_channel_configs)} port channel configurations to process", + "INFO", + ) + + missing_params = [] + want_port_channel_configs = [] + state = self.params.get("state") + + self.log(f"Processing port channel configurations for state: {state}", "DEBUG") + + # Validate and collect port channel parameters + for idx, port_channel_config in enumerate(port_channel_configs, start=1): + self.log( + f"[PortChannel {idx}] Processing playbook config: {self.pprint(port_channel_config)}", + "DEBUG", + ) + if not isinstance(port_channel_config, dict): + error_msg = ( + "Invalid port channel config at index {0} - expected dict".format( + idx + ) + ) + self.log(error_msg, "ERROR") + missing_params.append(error_msg) + continue + + want_port_channel_config = {} + + # Source end point validation, one of the device identifiers is required + want_port_channel_config["source_device_management_ip_address"] = ( + self._validate_and_resolve_source_device( + port_channel_config, idx, missing_params + ) + ) + + # Destination end point validation, based on state as destination end points details are conditionally optional + want_port_channel_config["destination_device_management_ip_address"] = ( + self._validate_and_resolve_destination_device( + port_channel_config, idx, state, missing_params + ) + ) + + # optional parameter, validating according to state + want_port_channel_config["port_channel_number"] = ( + self.validate_port_channel_number(port_channel_config, idx, state) + ) + + # optional parameter, validating according to state + want_port_channel_config["links"] = self.validate_port_channel_links( + port_channel_config, idx, state, missing_params + ) + + want_port_channel_configs.append(want_port_channel_config) + self.log( + f"[PortChannel {idx}] Final extracted config: {self.pprint(want_port_channel_config)}", + "DEBUG", + ) + + self.log( + f"All extracted port channel configurations: {self.pprint(want_port_channel_configs)}", + "INFO", + ) + if missing_params: + self.log(f"Missing required parameters: {missing_params}", "ERROR") + + return want_port_channel_configs, missing_params + + def validate_links(self, links): + """ + Validate provided port channel links to ensure no duplicate source or destination ports. + + Parameters: + links (list[dict]): + A list of dictionaries, each containing: + - source_port (str): The source port of the link. + - destination_port (str): The destination port of the link. 
+ + Returns: + None: + Raises an error via `fail_and_exit` if any source port is mapped to multiple + destination ports, or any destination port is mapped to multiple source ports. + + Description: + This method performs the following: + - Iterates through each link in the provided list. + - Ensures that each source port maps to only one destination port. + - Ensures that each destination port maps to only one source port. + - Logs DEBUG messages at the start and end of validation. + - Raises an error and exits if duplicate mappings are detected. + """ + + self.log( + f"Validating provided links for duplicate source or destination ports. Links: {self.pprint(links)}", + "DEBUG", + ) + + source_to_destination_port_mapping = {} + destination_to_source_port_mapping = {} + + for idx, link in enumerate(links, start=1): + src = link.get("source_port") + dst = link.get("destination_port") + self.log( + f"[Link {idx}] Checking mapping: source={src}, destination={dst}", + "DEBUG", + ) + + if src not in source_to_destination_port_mapping: + source_to_destination_port_mapping[src] = dst + self.log( + f"[Link {idx}] Source port {src} mapped to destination {dst}", + "DEBUG", + ) + else: + self.msg = ( + f"Link {idx}] Source port : {src} is mapped to multiple destination ports\n" + f"Destination Port 1: {source_to_destination_port_mapping[src]}, Destination Port 2: {dst}\n" + "A Source Port can only be mapped to a single Destination Port" + ) + self.fail_and_exit(self.msg) + + if dst not in destination_to_source_port_mapping: + destination_to_source_port_mapping[dst] = src + self.log( + f"[Link {idx}] Destination port {dst} mapped to source {src}", + "DEBUG", + ) + else: + self.msg = ( + f"Destination port : {dst} is mapped to multiple source ports\n" + f"Source Port 1: {destination_to_source_port_mapping[dst]}, Source Port 2: {src}\n" + "A Destination Port can only be mapped to a single Source Port" + ) + self.fail_and_exit(self.msg) + + self.log( + "No duplicate source or destination ports found in the provided links.", + "DEBUG", + ) + return + + def get_want(self, config): + """ + Collects and validates the desired state of LAN automation, device updates, and port channel configurations + based on the provided configuration. + + Args: + self (object): An instance of a class used for interacting with Cisco Catalyst Center. + config (dict): A configuration dictionary containing details about LAN automation sessions, device + updates, and port channel configurations. + + Returns: + self (object): The instance of the class with the updated `want` attribute containing the validated desired + state of LAN automation, device update processes, and port channel configurations. + + Description: + This function processes the given configuration to gather and validate details about: + - LAN Automation: It collects information on LAN automation sessions, ensuring that all required parameters + such as IP pools, management IP addresses, and discovery devices are provided and valid. + - Device Updates: It checks the configuration for details about devices that require updates, such as + hostname updates, loopback interfaces, and link configurations, validating the provided + IP addresses and device roles. + - Port Channels: It validates and extracts port channel configurations from the playbook, resolving source + and destination device IP addresses, port channel numbers, and links. Ensures that all + required parameters are present according to the desired state ('merged' or 'deleted'). 
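+
+        Example 'port_channel' entry as received from the playbook (illustrative only; the addresses,
+        port channel number, and interface names are hypothetical sample values):
+            # config.get("port_channel") is expected to look like:
+            [
+                {
+                    "source_device_management_ip_address": "204.1.2.1",
+                    "destination_device_management_ip_address": "204.1.2.2",
+                    "port_channel_number": 1,
+                    "links": [
+                        {"source_port": "TwoGigabitEthernet1/0/1", "destination_port": "TwoGigabitEthernet1/0/2"},
+                        {"source_port": "TwoGigabitEthernet1/0/3", "destination_port": "TwoGigabitEthernet1/0/4"},
+                    ],
+                }
+            ]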
+ If any required parameters are missing or invalid, the function logs an error message and updates the + status accordingly. On successful collection of all parameters, it logs the desired state and sets the + status to success. + """ + + want = {} + missing_params = [] + state = self.params.get("state") + + if state == "merged": + want["lan_automation"], lan_auto_missing_params = ( + self.extract_lan_automation(config) + ) + missing_params.extend(lan_auto_missing_params) + + want["lan_automated_device_update"], lan_auto_device_missing_params = ( + self.extract_lan_automated_device_update(config) + ) + missing_params.extend(lan_auto_device_missing_params) elif state == "deleted": want["lan_automation"], lan_auto_missing_params = ( @@ -1211,19 +2781,23 @@ def get_want(self, config): ) missing_params.extend(lan_auto_missing_params) + want["port_channel"], port_channel_missing_params = ( + self.validate_and_extract_port_channel_config_from_playbook(config) + ) + missing_params.extend(port_channel_missing_params) + if missing_params: - missing_msg = ( + self.msg = ( "The following required parameters are missing or invalid: " + ", ".join(missing_params) ) - self.log(missing_msg, "ERROR") - self.module.fail_json(msg=missing_msg, response=[]) + self.log(self.msg, "ERROR") + self.fail_and_exit(self.msg) want = self.update_dict_keys_to_camel_case(want) - self.want = want self.msg = "Successfully collected all parameters from playbook for comparison" - self.log("Desired State (want): {0}".format(str(self.want)), "INFO") + self.log(f"Desired State (want): {self.pprint(self.want)}", "INFO") self.status = "success" return self @@ -1257,7 +2831,11 @@ def update_dict_keys_to_camel_case(self, input_data): keys where predefined camelCase strings are used instead of the default conversion. """ - keys_to_preserve = ["lan_automation", "lan_automated_device_update"] + keys_to_preserve = [ + "lan_automation", + "lan_automated_device_update", + "port_channel", + ] specific_key_mappings = { "primary_device_management_ip_address": "primaryDeviceManagmentIPAddress", @@ -2109,18 +3687,124 @@ def fail_with_error(self, error_message): self.log(error_message, "ERROR") self.module.fail_json(msg=error_message, response=[]) + def _build_port_channel_messages(self, items, template, result_msg_list): + """ + Helper function to format port channel messages and append them to the result list. + + Parameters: + items (list): List of port channel dictionaries containing source, destination, and links. + template (str): Message template to format the result string. + result_msg_list (list): The list to which formatted messages will be appended. + + Returns: + None + + Description: + Iterates over the provided list of port channels, extracts the source IP, + destination IP, and associated links, and formats them into a message using + the given template. The formatted message is appended to the result list. + """ + + for port_channel in items: + source_device_ip = port_channel.get("sourceDeviceManagementIPAddress") + destination_device_ip = port_channel.get( + "destinationDeviceManagementIPAddress" + ) + links = port_channel.get("links", []) + msg = template.format( + source=source_device_ip, + destination=destination_device_ip, + links=self.pprint(links), + ) + result_msg_list.append(msg) + self.log(f"Appended message: {msg}", "DEBUG") + + def build_port_channel_result_messages(self): + """ + Build and return result messages for all port channel operations. 
+ + Parameters: + None + + Returns: + list: A list of formatted messages summarizing port channel operations. + + Description: + This method consolidates messages for port channel creation, deletion, + link addition, and link removal operations. It uses a mapping of + instance attributes to predefined message templates. For each attribute + that contains results, it delegates message construction to the helper + function `_build_port_channel_messages`. + """ + message_map = { + "port_channel_created": ( + "Port channel created successfully between source device '{source}' " + "and destination device '{destination}' with links: {links}." + ), + "port_channel_deleted": ( + "Port channel deleted successfully between source device '{source}' " + "and destination device '{destination}' with links: {links}." + ), + "no_port_channel_deleted": ( + "No port channel found to delete between source device '{source}' " + "and destination device '{destination}' with links: {links}. No update needed." + ), + "link_added_to_port_channel": ( + "Links added successfully to the port channel between source device '{source}' " + "and destination device '{destination}'. Added links: {links}." + ), + "link_not_added_to_port_channel": ( + "Links were not added to the port channel between source device '{source}' " + "and destination device '{destination}' as they already exist. Links: {links}." + ), + "link_removed_from_port_channel": ( + "Links removed successfully from the port channel between source device '{source}' " + "and destination device '{destination}'. Removed links: {links}." + ), + "link_not_removed_from_port_channel": ( + "Links were not removed from the port channel between source device '{source}' " + "and destination device '{destination}' as they do not exist. Links: {links}." + ), + } + + result_msg_list = [] + self.log("Building result messages for port channel operations", "INFO") + + for attr, template in message_map.items(): + items = getattr(self, attr, []) + if items: + self.log( + f"Processing attribute: {attr} with {len(items)} items", "DEBUG" + ) + self._build_port_channel_messages(items, template, result_msg_list) + + self.log( + f"Completed building result messages for port channel operations. Total messages: {len(result_msg_list)}", + "INFO", + ) + return result_msg_list + def update_lan_auto_messages(self): """ - Updates and logs messages based on the status of LAN automation start, completion, stop and loopbacks, - hostnames, and links updates. + Updates and logs messages based on the status of LAN automation start, completion, stop, + and loopbacks, hostnames, links updates, and port channel operations. + Returns: self (object): Returns the current instance of the class with updated `result` and `msg` attributes. + Description: - This method compiles status messages related to various aspects of LAN automation, including - session start, completion, and updates to loopbacks, hostnames, and links. It checks the status - flags and constructs a message list that is logged and returned. The result attribute is updated - to indicate whether any changes occurred during the automation process. + This method compiles status messages related to various aspects of LAN automation, including: + - LAN Automation Sessions: start, completion, and stop statuses. + - Loopbacks and Hostnames: updates applied or not needed. + - Links: added, deleted, or no action needed for device links. + - Port Channels: creation, deletion, link addition or removal, and no-action-needed scenarios. 
+ + For each of these categories, the method checks the corresponding status flags, constructs messages, + logs them, and appends them to a result message list. The `result` attribute is updated to indicate + whether any changes occurred during the automation process. Finally, the messages are compiled + into `self.msg` and logged with the operation result. """ + self.result["changed"] = False result_msg_list = [] if self.started_lan_automation: @@ -2202,6 +3886,9 @@ def update_lan_auto_messages(self): no_link_deleted_msg = "Provided links {} did not need any deletion from Cisco Catalyst Center." result_msg_list.append(no_link_deleted_msg) + port_channel_msgs = self.build_port_channel_result_messages() + result_msg_list.extend(port_channel_msgs) + if ( self.updated_loopback or self.updated_hostname @@ -2210,6 +3897,10 @@ def update_lan_auto_messages(self): or self.started_lan_automation or self.completed_lan_automation or self.stopped_lan_automation + or self.port_channel_created + or self.port_channel_deleted + or self.link_added_to_port_channel + or self.link_removed_from_port_channel ): self.result["changed"] = True @@ -2220,10 +3911,11 @@ def update_lan_auto_messages(self): def verify_diff_merged(self, config): """ - Manages LAN Automation tasks in Cisco Catalyst Center based on the provided configuration. - Args: + Manages LAN Automation tasks and Port Channel operations in Cisco Catalyst Center based on the provided configuration. + + Args: self (object): An instance of a class used for interacting with Cisco Catalyst Center. - config (dict): A dictionary containing the configuration details for LAN automation. The structure includes: + config (dict): A dictionary containing the configuration details for LAN automation and port channels. The structure includes: - 'lan_automation': Dictionary with details for starting a LAN automation session, including: - 'primaryDeviceManagmentIPAddress': The IP address of the primary device. - 'launchAndWait': Boolean indicating if the session should be launched and waited upon. @@ -2232,19 +3924,26 @@ def verify_diff_merged(self, config): - 'hostnameUpdateDevices': List of dictionaries for hostname updates. - 'linkAdd': Dictionary for link addition details. - 'linkDelete': Dictionary for link deletion details. + - 'port_channel': List of port channel configurations to be verified or updated, including: + - 'source_device_management_ip_address' and 'destination_device_management_ip_address' + - 'links': List of links associated with the port channel. + - 'port_channel_number': Optional port channel number. + Returns: self (object): Returns the current instance of the class with updated attributes for created, - updated, and no-update status of LAN automation tasks. + updated, and no-update status of LAN automation tasks and port channel operations. + Description: - This method orchestrates LAN Automation operations by: + This method orchestrates LAN Automation operations and port channel verification by: - Initiating a LAN automation session if specified in the configuration, checking for existing - sessions, and logging the result. + sessions, and logging the result. - Filtering device updates to determine which changes are necessary before performing updates. - Performing updates to devices based on the filtered details for loopback IP addresses, - hostnames, links to be added, and links to be deleted. - - Verifying the results of the updates, logging successes and warnings as appropriate. + hostnames, links to be added, and links to be deleted. 
+ - Verifying port channel configurations and ensuring the correct port channels exist, links are added or removed, + and logging successes and warnings as appropriate. - Ensuring all required tasks are executed and their statuses are checked to facilitate smooth - playbook execution. + playbook execution. """ lan_automation = self.want.get("lan_automation", {}) if lan_automation: @@ -2293,6 +3992,87 @@ def verify_diff_merged(self, config): self.process_link_addition(lan_devices.get("linkAdd", {})) self.process_link_deletion(lan_devices.get("linkDelete", {})) + port_channel = self.want.get("port_channel", {}) + if port_channel: + self.get_have(config) + self.log(f"Current State (have): {self.pprint(self.have)}", "INFO") + + self.log("Verifying Port Channel configurations in merged state.", "INFO") + self.verify_diff_merged_port_channel(port_channel) + return self + + def verify_diff_merged_port_channel(self, port_channel): + """ + Verifies port channel configurations in the merged state. + + Parameters: + port_channel (list[dict]): List of desired port channel configurations from the playbook. Each dictionary may include: + - 'source_device_management_ip_address' + - 'destination_device_management_ip_address' + - 'links': List of links in the port channel. + - 'port_channel_number': Optional port channel number. + + Returns: + self (object): Returns the current instance of the class after verifying port channel configurations. + + Description: + This method iterates through the desired port channel configurations and performs the following: + - Logs the start of verification for each port channel configuration. + - Retrieves the corresponding existing port channel configuration from `self.have`. + - Checks if the port channel configuration exists; if not, raises an error. + - Compares the existing configuration with the desired configuration using `port_channel_config_needs_update`. + - If an update is needed but the configuration is expected to be already applied, raises an error. + - Logs debug messages for each verification step and indicates if the configuration is up to date. + """ + + self.log( + f"Verifying port channel configurations in merged state: {self.pprint(port_channel)}", + "INFO", + ) + + for i, want_port_channel_config in enumerate(port_channel): + self.log( + f"Verifying port channel configuration at '{i}' index: {self.pprint(want_port_channel_config)}", + "DEBUG", + ) + + have_port_channel_configs = self.have.get("port_channel")[i] + self.log( + f"Corresponding existing port channel configurations at '{i}' index: {self.pprint(have_port_channel_configs)}", + "DEBUG", + ) + + if not have_port_channel_configs: + self.msg = ( + f"Port channel configuration verification failed at index '{i}'. " + "No existing port channel configuration found. Expected port channel to be created but it doesn't exist." + ) + self.fail_and_exit(self.msg) + else: + self.log( + "Existing port channel configuration found. Verifying if updates were applied.", + "DEBUG", + ) + # In case of merged, there'll only one config if it exists. + have_port_channel_config = have_port_channel_configs[0] + needs_update, updated_port_channel_config = ( + self.port_channel_config_needs_update( + want_port_channel_config, have_port_channel_config + ) + ) + if needs_update: + self.msg = ( + f"Port channel configuration verification failed at index '{i}'. " + f"Configuration needs update but expected it to be already updated. 
" + f"Config that needs update: {self.pprint(updated_port_channel_config)}" + ) + self.fail_and_exit(self.msg) + else: + self.log( + "Port channel configuration verification passed. Configuration is up to date as expected.", + "DEBUG", + ) + return self def process_loopback_updates(self, loopback_updates): @@ -2490,6 +4270,8 @@ def get_diff_merged(self): - Starting a LAN Automation session. - Stopping the session if needed. - Updating LAN Automated devices based on the filtered configuration received. + - Managing Port Channel configurations by creating, updating, or deleting port channels + according to the desired state in the playbook. It ensures all required tasks are present, executes them, and checks their status to facilitate smooth playbook execution. Returns: @@ -2506,8 +4288,9 @@ def get_diff_merged(self): of the operations and any issues encountered. - The filtered updates are passed to the corresponding API calls to update the LAN automated devices as necessary. + - Port channel configurations specified in `self.want["port_channel"]` are processed for creation, + update, or deletion, and the status of these operations is verified. """ - action_plan = { "start_lan_automation": ( self.start_lan_auto, @@ -2521,6 +4304,7 @@ def get_diff_merged(self): lan_automation = self.want.get("lan_automation", {}) update_device = self.want.get("lan_automated_device_update", {}) + port_channel = self.want.get("port_channel", {}) self.log("LAN Automation settings: {}".format(lan_automation), "DEBUG") self.log("Device update settings: {}".format(update_device), "DEBUG") @@ -2614,6 +4398,440 @@ def get_diff_merged(self): else: self.log("No updates required after filtering.", "INFO") + if port_channel: + self.log( + f"Processing port channel configuration: {self.pprint(port_channel)}", + "DEBUG", + ) + self.get_diff_merged_port_channel(port_channel).check_return_status() + + return self + + def port_channel_config_needs_update( + self, want_port_channel_config, have_port_channel_config + ): + """ + Determines if a given port channel configuration requires an update based on the desired state + in the playbook and the existing configuration in Cisco Catalyst Center. + + Parameters: + want_port_channel_config (dict): The desired port channel configuration from the playbook. + Expected keys: + - sourceDeviceManagementIPAddress (str): Source device IP. + - destinationDeviceManagementIPAddress (str | None): Destination device IP. + - links (list[dict] | None): List of link mappings for the port channel. + have_port_channel_config (dict | None): The existing port channel configuration retrieved + from Catalyst Center. Same structure as `want_port_channel_config`. + + Returns: + tuple: + - bool: True if an update is required, False otherwise. + - dict | None: The updated port channel configuration containing only the links that + need to be added or removed. None if no update is needed. + + Description: + - Compares the desired port channel configuration (`want_port_channel_config`) with the + existing configuration (`have_port_channel_config`). + - Validates source-to-destination port mappings to ensure no duplicates exist. + - Determines which links need to be added (for 'merged' state) or removed (for 'deleted' state). + - Validates that the resulting number of links does not exceed 8 or fall below 2. + - Logs DEBUG messages at key points: starting comparison, desired vs existing configs, + links requiring update, and validation results. 
+ - Exits with an error if any constraints are violated or invalid state is provided. + """ + + state = self.params.get("state") + self.log( + f"Comparing if port channel configuration needs update in '{state}' state.", + "DEBUG", + ) + self.log( + f"Desired port channel config: {self.pprint(want_port_channel_config)}", + "DEBUG", + ) + self.log( + f"Existing port channel config: {self.pprint(have_port_channel_config)}", + "DEBUG", + ) + + if not have_port_channel_config: + self.log( + "No existing port channel configuration found. Update needed.", "DEBUG" + ) + return True, want_port_channel_config + + # prechecks before comparision + want_links = want_port_channel_config.get("links") + want_source_to_destination_port_mapping = {} + want_destination_to_source_port_mapping = {} + for link_idx, want_link in enumerate(want_links, start=1): + for want_source_port, want_destination_port in want_link.items(): + want_source_to_destination_port_mapping[want_source_port] = ( + want_destination_port + ) + want_destination_to_source_port_mapping[want_destination_port] = ( + want_source_port + ) + self.log( + f"Mapped desired link {link_idx}: {want_source_port} -> {want_destination_port}", + "DEBUG", + ) + + self.log( + "Starting duplicate port validation against existing port channel configuration.", + "DEBUG", + ) + have_links = have_port_channel_config.get("links", []) + for have_link in have_links: + have_src = have_link.get("sourcePort") + have_dst = have_link.get("destinationPort") + if ( + have_src in want_source_to_destination_port_mapping + and have_dst != want_source_to_destination_port_mapping[have_src] + ): + self.msg = ( + f"Duplicate source port '{have_src}' found in existing port channel configuration.\n" + f"Existing Link - Source: {have_src}, Destination: {have_dst}\n" + f"Desired Link - Source: {have_src}, Destination: {want_source_to_destination_port_mapping[have_src]}\n" + "One source port can only map to one destination port." + ) + self.fail_and_exit(self.msg) + + updated_required_links = [] + + self.log("Determining links that require update.", "DEBUG") + if state == "merged": + for want_link in want_links: + if want_link not in have_links: + updated_required_links.append(want_link) + elif state == "deleted": + for have_link in have_links: + if have_link in want_links: + updated_required_links.append(have_link) + else: + self.msg = f"Invalid state '{state}' provided. Supported states are 'merged' and 'deleted'." + self.fail_and_exit(self.msg) + # The update API is not available, instead 2 seperate APIs for Add and delete are available, so only storing the new links to be added/removed. + + if not updated_required_links: + self.log( + "Port channel configuration is up to date. No update needed.", "DEBUG" + ) + return False, None + + self.log( + f"Port channel configuration needs update. Links that requires update: {self.pprint(updated_required_links)}", + "DEBUG", + ) + + self.log( + "Checking if updated links exceed 8 (max) or fall below 2 (min) allowed links per port channel.", + "DEBUG", + ) + + if state == "merged": + total_links_after_merge = len(updated_required_links) + len(have_links) + if total_links_after_merge > 8: + self.log( + "Link count validation failed - would exceed maximum of 8 links", + "ERROR", + ) + self.msg = ( + f"Port channel configuration update would result in more than 8 links. " + f"Maximum allowed links per port channel is 8. 
Current links: {len(have_links)}, " + f"Links to be added: {len(updated_required_links)}, Total would be: {total_links_after_merge}" + ) + self.fail_and_exit(self.msg) + + elif state == "deleted": + remaining_links_after_deletion = len(have_links) - len( + updated_required_links + ) + if remaining_links_after_deletion == 1: + self.log( + "Link count validation failed - would result in less than 2 links", + "ERROR", + ) + self.msg = ( + f"Port channel configuration update would result in less than 2 links. " + f"Minimum required links per port channel is 2. Current links: {len(have_links)}, " + f"Links to be removed: {len(updated_required_links)}, Total would be: {remaining_links_after_deletion}" + ) + self.fail_and_exit(self.msg) + + self.log("Port channel link count validation passed.", "DEBUG") + + updated_port_channel_config = { + "sourceDeviceManagementIPAddress": want_port_channel_config.get( + "sourceDeviceManagementIPAddress" + ), + "destinationDeviceManagementIPAddress": want_port_channel_config.get( + "destinationDeviceManagementIPAddress" + ), + "links": updated_required_links, + "id": have_port_channel_config.get("id"), + } + self.log( + f"Port channel configuration comparison completed - update required with {len(updated_required_links)} links", + "DEBUG", + ) + self.log( + f"Updated port channel configuration: {self.pprint(updated_port_channel_config)}", + "DEBUG", + ) + + return True, updated_port_channel_config + + def create_port_channel(self, port_channel_config): + """ + Creates a new port channel between two devices using the provided configuration. + + Parameters: + port_channel_config (dict): Configuration dictionary containing: + - sourceDeviceManagementIPAddress (str): IP of the source device. + - destinationDeviceManagementIPAddress (str): IP of the destination device. + - links (list[dict]): List of link mappings with 'sourcePort' and 'destinationPort'. + + Returns: + self: Returns the current instance with updated `port_channel_created` list. + + Description: + - Logs the initiation of the port channel creation process. + - Constructs the payload for the API call based on source/destination IPs and links. + - Calls the API to create the port channel and retrieves the task ID. + - Logs the API call payload at DEBUG level. + - Checks if the task ID is returned; if not, exits with an error. + - Monitors the task status and logs success upon completion. + - Appends the created port channel configuration to `self.port_channel_created`. 
+ """ + + self.log( + f"Initiating port channel creation with configuration: {self.pprint(port_channel_config)}", + "INFO", + ) + source_device_management_ip = port_channel_config.get( + "sourceDeviceManagementIPAddress" + ) + destination_device_management_ip = port_channel_config.get( + "destinationDeviceManagementIPAddress" + ) + + create_port_channel_payload = { + "device1ManagementIPAddress": source_device_management_ip, + "device2ManagementIPAddress": destination_device_management_ip, + "portChannelMembers": [ + { + "device1Interface": link.get("sourcePort"), + "device2Interface": link.get("destinationPort"), + } + for link in port_channel_config.get("links", []) + ], + } + + task_name = "create_a_new_port_channel_between_devices" + + self.log( + f"Calling '{task_name}' API with payload: {self.pprint(create_port_channel_payload)}", + "DEBUG", + ) + task_id = self.get_taskid_post_api_call( + "lan_automation", task_name, create_port_channel_payload + ) + + if not task_id: + self.msg = f"Unable to retrieve the task_id for the task '{task_name}' for the port channel'." + self.fail_and_exit(self.msg) + + success_msg = ( + f"Port channel between the source device '{source_device_management_ip}' and " + f"destination device '{destination_device_management_ip}' created successfully." + ) + self.get_task_status_from_tasks_by_id(task_id, task_name, success_msg) + self.log( + f"Port channel creation process completed for devices: {source_device_management_ip} -> {destination_device_management_ip}", + "INFO", + ) + + self.port_channel_created.append(port_channel_config) + return self + + def add_lan_automated_link_to_a_port_channel(self, port_channel_config): + """ + Adds new links to an existing port channel between two devices. + + Parameters: + port_channel_config (dict): Configuration dictionary containing: + - id (str): ID of the existing port channel. + - sourceDeviceManagementIPAddress (str): IP of the source device. + - destinationDeviceManagementIPAddress (str): IP of the destination device. + - links (list[dict]): List of link mappings to be added with 'sourcePort' and 'destinationPort'. + + Returns: + self: Returns the current instance with updated `link_added_to_port_channel` list. + + Description: + - Logs the initiation of the port channel update process. + - Constructs the payload for adding links to the port channel. + - Calls the API and retrieves the task ID for adding links. + - Logs DEBUG information about the API payload and task ID retrieval. + - Monitors the task status and logs a success message upon completion. + - Appends the updated port channel configuration to `self.link_added_to_port_channel`. 
+ """ + + source_device_management_ip = port_channel_config.get( + "sourceDeviceManagementIPAddress" + ) + destination_device_management_ip = port_channel_config.get( + "destinationDeviceManagementIPAddress" + ) + self.log( + f"Initiating update of port channel between source device: '{source_device_management_ip}' and " + f"destination device: '{destination_device_management_ip}' with config: {self.pprint(port_channel_config)}", + "INFO", + ) + + if not source_device_management_ip: + self.log( + "Source device management IP address is required for link addition", + "ERROR", + ) + self.msg = ( + "Source device management IP address is missing from configuration" + ) + self.fail_and_exit(self.msg) + + if not destination_device_management_ip: + self.log( + "Destination device management IP address is required for link addition", + "ERROR", + ) + self.msg = ( + "Destination device management IP address is missing from configuration" + ) + self.fail_and_exit(self.msg) + + links = port_channel_config.get("links", []) + if not links or not isinstance(links, list): + self.log( + "Valid links list is required for port channel link addition", "ERROR" + ) + self.msg = "Links configuration is missing or invalid" + self.fail_and_exit(self.msg) + + update_port_channel_payload = { + "id": port_channel_config.get("id"), + "portChannelMembers": [ + { + "device1Interface": link.get("sourcePort"), + "device2Interface": link.get("destinationPort"), + } + for link in port_channel_config.get("links", []) + ], + } + + task_name = "add_a_lan_automated_link_to_a_port_channel" + + self.log( + f"Calling '{task_name}' API with payload: {self.pprint(update_port_channel_payload)}", + "DEBUG", + ) + task_id = self.get_taskid_post_api_call( + "lan_automation", task_name, update_port_channel_payload + ) + if not task_id: + self.msg = ( + f"Unable to retrieve the task_id for the task '{task_name}' for the port channel " + f"between the source device '{source_device_management_ip}' and destination device '{destination_device_management_ip}''." + ) + self.fail_and_exit(self.msg) + + success_msg = ( + f"Port channel between the source device '{source_device_management_ip}' and " + f"destination device '{destination_device_management_ip}' updated successfully with links: {self.pprint(port_channel_config.get('links'))}." + ) + + self.get_task_status_from_tasks_by_id(task_id, task_name, success_msg) + self.log( + f"Port channel links added successfully to port channel ID '{port_channel_config.get('id')}'.", + "INFO", + ) + + self.link_added_to_port_channel.append(port_channel_config) + return self + + def get_diff_merged_port_channel(self, port_channel): + """ + Processes port channel configurations in the 'merged' state and ensures they are created or updated as needed. + + Parameters: + port_channel (list[dict]): List of desired port channel configurations to be processed. + + Returns: + self: Returns the current instance with updated port channel status lists. + + Description: + - Iterates through each desired port channel configuration. + - Logs the desired configuration at each index. + - Compares with existing configurations in `self.have`. + - Creates new port channels if none exist. + - Updates existing port channels if links need to be added. + - Logs the status of each configuration, including updates or no-change scenarios. 
+ """ + + self.log( + f"Processing port channel configurations in merged state: {self.pprint(port_channel)}", + "INFO", + ) + + for i, want_port_channel_config in enumerate(port_channel): + self.log( + f"Processing port channel configuration at '{i}' index: {self.pprint(want_port_channel_config)}", + "DEBUG", + ) + + have_port_channel_configs = self.have.get("port_channel")[i] + self.log( + f"Existing port channel configurations at index '{i}': {self.pprint(have_port_channel_configs)}", + "DEBUG", + ) + + if not have_port_channel_configs: + self.log( + "No existing port channel configuration found. Creating new port channel.", + "DEBUG", + ) + self.create_port_channel(want_port_channel_config) + else: + self.log( + "Existing port channel configuration found. Checking for updates.", + "DEBUG", + ) + have_port_channel_config = have_port_channel_configs[0] + # In case of merged, there'll only one config if it exists. + needs_update, updated_port_channel_config = ( + self.port_channel_config_needs_update( + want_port_channel_config, have_port_channel_config + ) + ) + if needs_update: + self.log( + "Port channel configuration needs update. Creating updated port channel.", + "DEBUG", + ) + self.add_lan_automated_link_to_a_port_channel( + updated_port_channel_config + ) + else: + self.log( + "Port channel configuration is up to date. No update needed.", + "DEBUG", + ) + self.link_not_added_to_port_channel.append(want_port_channel_config) + + self.log( + "Completed processing all port channel configurations in merged state.", + "INFO", + ) return self def filter_updates(self, update_device): @@ -3598,6 +5816,15 @@ def get_diff_deleted(self): Cisco Catalyst Center API to stop the session. It logs the outcome, indicating success or failure, and updates the internal state based on the API response. """ + + port_channel = self.want.get("port_channel", {}) + if port_channel: + self.log( + f"Processing port channel configuration: {self.pprint(port_channel)}", + "DEBUG", + ) + self.get_diff_deleted_port_channel(port_channel).check_return_status() + if not self.want.get("lan_automation"): self.log( "LAN automation configuration not found in 'want'. Exiting the method.", @@ -3697,6 +5924,378 @@ def get_diff_deleted(self): return self + def process_link_deletion_in_port_channel( + self, want_port_channel_config, have_port_channel_config + ): + """ + Processes the deletion of links in a port channel and determines whether the entire port channel + or specific links need to be removed. + + Parameters: + want_port_channel_config (dict): Desired port channel configuration specifying links to delete. + have_port_channel_config (dict): Existing port channel configuration from which links will be removed. + + Returns: + None: Updates internal state lists to reflect deletions or no-change scenarios. + + Description: + - Compares desired links to delete against existing port channel links. + - Determines if updates are required using `port_channel_config_needs_update`. + - Logs debug and info messages at every step to provide detailed traceability. + - Deletes the entire port channel if all links are being removed. + - Deletes specific links if only a subset needs removal. + - Updates `link_not_removed_from_port_channel` list if no changes are required. 
+ """ + + self.log( + "Starting port channel link deletion processing for specified configuration", + "DEBUG", + ) + self.log( + f"Desired port channel configuration for link deletion: {self.pprint(want_port_channel_config)}", + "DEBUG", + ) + self.log( + f"Existing port channel configuration for link deletion: {self.pprint(have_port_channel_config)}", + "DEBUG", + ) + want_links = want_port_channel_config.get("links", []) + self.log( + f"Links are provided in the configuration for deletion. Checking if updates are required. Links to be deleted: {self.pprint(want_links)}", + "DEBUG", + ) + + needs_update, updated_port_channel_config = ( + self.port_channel_config_needs_update( + want_port_channel_config, have_port_channel_config + ) + ) + + if needs_update: + self.log("Port channel links need update.", "DEBUG") + self.log( + "Determining whether all links are being removed, which would result in full port channel deletion.", + "DEBUG", + ) + + have_links_len = len(have_port_channel_config.get("links", [])) + deleted_links_len = len(updated_port_channel_config.get("links", [])) + + if have_links_len == deleted_links_len: + self.log( + "All links are being removed. Deleting the entire port channel.", + "DEBUG", + ) + self.delete_lan_automated_port_channel(updated_port_channel_config) + else: + self.log( + f"Deleting specified links from the port channel. Links to remove: {self.pprint(updated_port_channel_config.get('links', []))}", + "INFO", + ) + self.delete_lan_automated_link_from_a_port_channel( + updated_port_channel_config + ) + else: + self.log("Port channel links are up to date. No update needed.", "DEBUG") + self.link_not_removed_from_port_channel.append(want_port_channel_config) + + self.log( + f"Completed processing link deletion for port channel: {self.pprint(want_port_channel_config)}", + "DEBUG", + ) + return + + def process_delete_port_channel(self, have_port_channel_config): + """ + Triggers the deletion of the port channel via API and logs the operation. + + Parameters: + have_port_channel_config (dict): A dictionary containing the current port channel configuration details, including: + - 'id' (str): The unique identifier of the port channel. + - 'portChannelNumber' (int): The port channel number. + - 'sourceDeviceManagementIPAddress' (str): IP address of the source device. + - 'destinationDeviceManagementIPAddress' (str): IP address of the destination device. + + Returns: + None + + Description: + - Logs the current port channel configuration that is targeted for deletion. + - Prepares the payload required for the deletion API call. + - Calls `delete_lan_automated_port_channel` to delete the port channel. + - Logs both the initiation and confirmation of the deletion process. 
+ """ + self.log( + "Starting complete port channel deletion process for specified configuration", + "DEBUG", + ) + + destination_device_management_ip = have_port_channel_config.get( + "destinationDeviceManagementIPAddress" + ) + source_device_management_ip = have_port_channel_config.get( + "sourceDeviceManagementIPAddress" + ) + port_channel_number = have_port_channel_config.get("portChannelNumber") + self.log( + f"Deleting port channel configuration: {self.pprint(have_port_channel_config)}", + "DEBUG", + ) + updated_port_channel_config = { + "id": have_port_channel_config.get("id"), + "portChannelNumber": port_channel_number, + "destinationDeviceManagementIPAddress": destination_device_management_ip, + "sourceDeviceManagementIPAddress": source_device_management_ip, + "links": have_port_channel_config.get("links", []), + } + + self.log( + f"Constructed port channel deletion configuration: {self.pprint(updated_port_channel_config)}", + "DEBUG", + ) + self.log( + f"Executing port channel deletion API call for port channel ID: {updated_port_channel_config.get('id')}", + "DEBUG", + ) + self.delete_lan_automated_port_channel(updated_port_channel_config) + + def delete_lan_automated_port_channel(self, port_channel_config): + """ + Deletes a specific port channel from Cisco Catalyst Center using the provided configuration. + + Parameters: + port_channel_config (dict): A dictionary containing the port channel details to be deleted, including: + - 'id' (str): Unique identifier of the port channel. + - 'sourceDeviceManagementIPAddress' (str): IP address of the source device. + - 'destinationDeviceManagementIPAddress' (str): IP address of the destination device. + + Returns: + self: Returns the current instance after initiating deletion and logging the result. + + Description: + - Logs the port channel configuration that is about to be deleted. + - Constructs the payload required for the deletion API call. + - Calls the LAN automation API to delete the port channel and retrieves the task ID. + - Raises an error if the task ID cannot be retrieved. + - Waits for task completion and logs a success message upon deletion. + - Updates the internal `port_channel_deleted` list to track deleted configurations. + """ + + self.log( + f"Initiating deletion of port channel with configuration: {self.pprint(port_channel_config)}", + "DEBUG", + ) + + source_device_management_ip = port_channel_config.get( + "sourceDeviceManagementIPAddress" + ) + destination_device_management_ip = port_channel_config.get( + "destinationDeviceManagementIPAddress" + ) + + delete_port_channel_payload = { + "id": port_channel_config.get("id"), + } + + task_name = "delete_port_channel" + self.log( + f"Calling '{task_name}' API with payload: {self.pprint(delete_port_channel_payload)}", + "DEBUG", + ) + + task_id = self.get_taskid_post_api_call( + "lan_automation", task_name, delete_port_channel_payload + ) + if not task_id: + self.msg = f"Unable to retrieve the task_id for the task '{task_name}' for the port channel'." + self.fail_and_exit(self.msg) + + success_msg = ( + f"Port channel between the source device '{source_device_management_ip}' and " + f"destination device '{destination_device_management_ip}' deleted successfully." 
+ ) + self.get_task_status_from_tasks_by_id(task_id, task_name, success_msg) + self.port_channel_deleted.append(port_channel_config) + return self + + def delete_lan_automated_link_from_a_port_channel(self, port_channel_config): + """ + Deletes specific links from an existing port channel in Cisco Catalyst Center. + + Parameters: + port_channel_config (dict): A dictionary containing the port channel details and links to delete, including: + - 'id' (str): Unique identifier of the port channel. + - 'sourceDeviceManagementIPAddress' (str): IP address of the source device. + - 'destinationDeviceManagementIPAddress' (str): IP address of the destination device. + - 'links' (list[dict]): List of dictionaries containing links to be removed. Each dictionary should have: + - 'sourcePort' (str): Source interface in the port channel. + - 'destinationPort' (str): Destination interface in the port channel. + + Returns: + self: Returns the current instance after initiating link deletion and logging the result. + + Description: + - Logs the port channel and link configuration that is about to be deleted. + - Constructs the payload required for the API call to remove links from the port channel. + - Calls the LAN automation API and retrieves the task ID for the deletion operation. + - Raises an error if the task ID cannot be retrieved. + - Waits for task completion and logs a success message upon deletion. + - Updates the internal `link_removed_from_port_channel` list to track deleted links. + """ + + source_device_management_ip = port_channel_config.get( + "sourceDeviceManagementIPAddress" + ) + destination_device_management_ip = port_channel_config.get( + "destinationDeviceManagementIPAddress" + ) + + self.log( + f"Initiating deletion of links from port channel between source device: '{source_device_management_ip}' " + f"and destination device: '{destination_device_management_ip}", + "DEBUG", + ) + + delete_link_payload = { + "id": port_channel_config.get("id"), + "portChannelMembers": [ + { + "device1Interface": link.get("sourcePort"), + "device2Interface": link.get("destinationPort"), + } + for link in port_channel_config.get("links", []) + ], + } + + task_name = "remove_a_link_from_port_channel" + + self.log( + f"Calling '{task_name}' API with payload: {self.pprint(delete_link_payload)}", + "DEBUG", + ) + task_id = self.get_taskid_post_api_call( + "lan_automation", task_name, delete_link_payload + ) + if not task_id: + self.msg = f"Unable to retrieve the task_id for the task '{task_name}' for the port channel'." + self.fail_and_exit(self.msg) + + success_msg = ( + f"Links from port channel between the source device '{source_device_management_ip}' " + f"and destination device '{destination_device_management_ip}' deleted successfully." + ) + + self.get_task_status_from_tasks_by_id(task_id, task_name, success_msg) + self.link_removed_from_port_channel.append(port_channel_config) + return self + + def get_diff_deleted_port_channel(self, port_channel): + """ + Processes port channel configurations in the 'deleted' state and deletes port channels or specific links + from existing port channels in Cisco Catalyst Center as necessary. + + Parameters: + port_channel (list[dict]): A list of port channel configurations to process for deletion. Each dictionary + can include: + - 'sourceDeviceManagementIPAddress' (str): IP address of the source device. + - 'destinationDeviceManagementIPAddress' (str): IP address of the destination device. + - 'portChannelNumber' (str/int): Port channel number. 
+ - 'links' (list[dict]): List of links to delete, where each dictionary contains: + - 'sourcePort' (str): Source interface. + - 'destinationPort' (str): Destination interface. + + Returns: + self: Returns the current instance after processing deletions and logging results. + + Description: + - Iterates through each port channel configuration provided in the 'deleted' state. + - Checks if an existing port channel configuration is present in `self.have`. + - If no existing configuration is found, logs the information and marks no deletion required. + - If links are specified, deletes only the specified links using `process_link_deletion_in_port_channel`. + - If only a port channel number is provided without specific links, deletes the entire port channel using `process_delete_port_channel`. + - If only destination device IP is provided, deletes all port channels between the source and destination device. + - If only source device IP is provided, deletes all port channels associated with the source device. + - Maintains detailed DEBUG logs for all decisions and actions taken. + - Updates internal tracking lists like `no_port_channel_deleted` as applicable. + """ + + self.log( + f"Processing port channel configurations in deleted state: {self.pprint(port_channel)}", + "INFO", + ) + + for i, want_port_channel_config in enumerate(port_channel): + self.log( + f"Processing port channel configuration at '{i}' index: {self.pprint(want_port_channel_config)}", + "DEBUG", + ) + + have_port_channel_config_list = self.have.get("port_channel")[i] + self.log( + f"Corresponding existing port channel configurations at '{i}' index: {self.pprint(have_port_channel_config_list)}", + "DEBUG", + ) + + if not have_port_channel_config_list: + self.log( + "No existing port channel configuration found. No operations required.", + "DEBUG", + ) + self.no_port_channel_deleted.append(want_port_channel_config) + continue + + self.log( + "Existing port channel configuration found. Checking for updates.", + "DEBUG", + ) + source_device_management_ip = want_port_channel_config.get( + "sourceDeviceManagementIPAddress" + ) + destination_device_management_ip = want_port_channel_config.get( + "destinationDeviceManagementIPAddress" + ) + links = want_port_channel_config.get("links") + port_channel_number = want_port_channel_config.get("portChannelNumber") + + if links: + self.log( + f"Links are provided in the configuration for deletion. " + f"Processing link deletion for source device '{source_device_management_ip}' " + f"and destination device '{destination_device_management_ip}'. Links: {self.pprint(links)}", + "DEBUG", + ) + have_port_channel_config = have_port_channel_config_list[0] + self.process_link_deletion_in_port_channel( + want_port_channel_config, have_port_channel_config + ) + elif port_channel_number: + self.log( + f"Port channel number is provided: '{port_channel_number}'. Deleting the entire port channel.", + "DEBUG", + ) + have_port_channel_config = have_port_channel_config_list[0] + self.process_delete_port_channel(have_port_channel_config) + elif destination_device_management_ip: + self.log( + "Source and Destination device management IP is provided without specific links or port channel number. 
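The precedence applied to each 'deleted'-state entry, as described above, can be summarized as a small dispatcher (hypothetical standalone sketch):

def deletion_scope(entry):
    # Precedence: explicit links > port channel number > source/destination pair > source only.
    if entry.get("links"):
        return "remove the listed links (the whole port channel if every link is listed)"
    if entry.get("portChannelNumber"):
        return "delete that specific port channel"
    if entry.get("destinationDeviceManagementIPAddress"):
        return "delete all port channels between the source and destination devices"
    return "delete all port channels on the source device"

print(deletion_scope({"sourceDeviceManagementIPAddress": "204.1.2.10"}))
print(deletion_scope({"sourceDeviceManagementIPAddress": "204.1.2.10", "portChannelNumber": 2}))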
" + f"Deleting all the port channel between the source device: '{source_device_management_ip}' " + f"and destination device: '{destination_device_management_ip}'.", + "DEBUG", + ) + for have_port_channel_config in have_port_channel_config_list: + self.process_delete_port_channel(have_port_channel_config) + else: + self.log( + f"No links, port channel number, or destination device provided. " + f"Deleting all port channels associated with source device '{source_device_management_ip}'", + "DEBUG", + ) + for have_port_channel_config in have_port_channel_config_list: + self.process_delete_port_channel(have_port_channel_config) + + self.log("Completed processing all port channel deletions.", "INFO") + return self + def check_stop_session(self, seed_ip_address, session_id, max_wait_time=1800): """ Periodically checks if the LAN Automation session has been stopped. @@ -3757,19 +6356,160 @@ def check_stop_session(self, seed_ip_address, session_id, max_wait_time=1800): self.set_operation_result("failed", False, self.msg, "CRITICAL") return self + def verify_diff_deleted_port_channel(self, port_channel): + """ + Verifies that port channel configurations in deleted state have been properly removed. + + Parameters: + port_channel (list[dict]): List of port channel configurations intended for deletion. + Each dict may contain: + - sourceDeviceManagementIPAddress (str): Source device IP. + - destinationDeviceManagementIPAddress (str): Destination device IP. + - portChannelNumber (str/int): Port channel number. + - links (list[dict]): Links associated with the port channel. + + Returns: + self: Logs verification results and raises errors if deletions were not applied as expected. + + Description: + - Logs the verification start and end at INFO level. + - For each port channel configuration: + - DEBUG logs the configuration and the existing state. + - Checks if links, port channel number, or destination IP are provided and verifies deletions accordingly. + - Raises an error if expected deletions were not applied. + - Updates internal verification status and ensures traceability. + - Ensures every branch of verification has logging to trace the decision process and results. + """ + self.log( + f"Verifying port channel configurations in deleted state: {self.pprint(port_channel)}", + "INFO", + ) + + for i, want_port_channel_config in enumerate(port_channel): + self.log( + f"Verifying port channel configuration at '{i}' index: {self.pprint(want_port_channel_config)}", + "DEBUG", + ) + + have_port_channel_config_list = self.have.get("port_channel")[i] + self.log( + f"Corresponding existing port channel configurations at '{i}' index: {self.pprint(have_port_channel_config_list)}", + "DEBUG", + ) + + if not have_port_channel_config_list: + self.log( + "No existing port channel configuration found. Verification passed.", + "DEBUG", + ) + continue + + self.log( + "Existing port channel configuration found. Verifying if deletions were applied correctly.", + "DEBUG", + ) + source_device_management_ip = want_port_channel_config.get( + "sourceDeviceManagementIPAddress" + ) + destination_device_management_ip = want_port_channel_config.get( + "destinationDeviceManagementIPAddress" + ) + links = want_port_channel_config.get("links") + port_channel_number = want_port_channel_config.get("portChannelNumber") + + if links: + self.log( + "Links were provided in the configuration for deletion. 
Verifying if links were properly removed.", + "DEBUG", + ) + have_port_channel_config = have_port_channel_config_list[0] + needs_update, updated_port_channel_config = ( + self.port_channel_config_needs_update( + want_port_channel_config, have_port_channel_config + ) + ) + if needs_update: + self.msg = ( + f"Port channel verification failed at index '{i}'. " + f"Links that should have been deleted are still present: {self.pprint(have_port_channel_config.get('links'))}" + ) + self.fail_and_exit(self.msg) + else: + self.log( + "Port channel link deletion verification passed. Links were properly removed.", + "DEBUG", + ) + elif port_channel_number: + self.log( + f"Port channel number provided: '{port_channel_number}'. Verifying deletion.", + "DEBUG", + ) + self.msg = ( + f"Port channel verification failed at index '{i}'. Port channel with number '{port_channel_number}' should " + f"have been deleted but still exists. Source IP: '{source_device_management_ip}', " + f"Destination IP: '{destination_device_management_ip}', Existing details: {self.pprint(have_port_channel_config_list)}" + ) + self.fail_and_exit(self.msg) + # Since have_port_channel_config_list is not empty, the port channel still exists and deletion failed + + elif destination_device_management_ip: + self.log( + f"Destination device IP provided: '{destination_device_management_ip}'. Verifying deletion of all port channels between " + f"source '{source_device_management_ip}' and destination '{destination_device_management_ip}'.", + "DEBUG", + ) + self.msg = ( + f"Port channel verification failed at index '{i}'. Port channels between source device '{source_device_management_ip}' " + f"and destination device '{destination_device_management_ip}' should " + f"have been deleted but still exist: {self.pprint(have_port_channel_config_list)}" + ) + self.fail_and_exit(self.msg) + # If we reach here, have_port_channel_config_list is not empty, so deletion failed + else: + self.log( + f"No specific links, port channel number, or destination device provided. " + f"Verifying deletion of all port channels from source device '{source_device_management_ip}'.", + "DEBUG", + ) + self.msg = ( + f"Port channel verification failed at index '{i}'. Port channels from source device '{source_device_management_ip}' " + f"should have been deleted but still exist: {self.pprint(have_port_channel_config_list)}" + ) + self.fail_and_exit(self.msg) + # If we still have any port channel configs from this source device, deletion failed + + self.log("Completed verification of all port channel deletions.", "INFO") + return self + def verify_diff_deleted(self, config): """ Verifies the presence of an active LAN Automation session for the specified seed IP address in Cisco Catalyst - Center. + Center and checks the deletion status of port channels. + Args: config (dict): Configuration details to be verified. + Returns: self: The current instance of the class used for interacting with Cisco Catalyst Center. + Description: - This method checks for an active LAN automation session associated with the primary device management IP address. - If found, it logs the session ID; otherwise, it logs a message indicating that no session is active for the - specified seed IP. + - Checks for an active LAN automation session associated with the primary device management IP address. + Logs the session ID if found, otherwise logs that no session is active for the specified seed IP. 
+ - Verifies the deletion of port channels if port channel configuration is present in the requested configuration. + Logs details of verification for traceability and raises errors if deletions were not applied as expected. """ + + self.get_have(config) + self.log(f"Current State (have): {self.pprint(self.have)}", "INFO") + + port_channel = self.want.get("port_channel", {}) + if port_channel: + self.log( + f"Verifying port channel deletion in deleted state: {self.pprint(port_channel)}", + "DEBUG", + ) + self.verify_diff_deleted_port_channel(port_channel).check_return_status() + if not self.want.get("lan_automated_device_update"): self.log( "LAN automated device update is not requested. Exiting verification.", @@ -3787,9 +6527,6 @@ def verify_diff_deleted(self, config): "DEBUG", ) - self.get_have(config) - self.log("Current State (have): {0}".format(str(self.have)), "INFO") - session_to_ip_mapping = self.have.get("session_to_ip_map", {}) self.log("Session to IP mapping: {}".format(session_to_ip_mapping), "DEBUG") @@ -3873,8 +6610,8 @@ def main(): for config in ccc_lan_automation.validated_config: ccc_lan_automation.reset_values() - ccc_lan_automation.get_have(config).check_return_status() ccc_lan_automation.get_want(config).check_return_status() + ccc_lan_automation.get_have(config).check_return_status() ccc_lan_automation.get_diff_state_apply[state]().check_return_status() if config_verify: ccc_lan_automation.verify_diff_state_apply[state]( diff --git a/plugins/modules/network_devices_info_workflow_manager.py b/plugins/modules/network_devices_info_workflow_manager.py new file mode 100644 index 0000000000..d88adca9c6 --- /dev/null +++ b/plugins/modules/network_devices_info_workflow_manager.py @@ -0,0 +1,4410 @@ +# !/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2025, Cisco Systems +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type +__author__ = ("Karthick S N", "Madhan Sankaranarayanan") + +DOCUMENTATION = r""" +--- +module: network_devices_info_workflow_manager +short_description: Gather facts about network devices from Cisco Catalyst Center (facts/info module) using flexible filters. + +description: + - Gathers detailed facts (information) about network devices managed by Cisco Catalyst Center using flexible user-defined filters. + - Supports filtering by management IP, MAC address, hostname, serial number, software type, + software version, role, device type, family, and site hierarchy. + - Allows selection of specific device information types, such as device details, interfaces, + VLANs, line cards, supervisor cards, POE, module count, connected devices, configuration, + summary, polling interval, stack, and link mismatch details. + - Handles query retries, timeouts, and polling intervals for robust data collection. + - Supports output to a file using the C(output_file_info) option. Output can be JSON or YAML, + with user-defined file path, file mode (overwrite or append), and optional timestamp. + - If C(output_file_info) is provided, results are written to the file; otherwise, results are + returned in the Ansible output. + - Returns structured results for each requested information type, or an empty list if + no devices match the filters after all retries. + - This module is tagged as a facts/info module and is safe to use in check mode. 
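The file-output behaviour described above (extension derived from file_format, overwrite or append mode, optional collection timestamp) can be sketched roughly as follows. The exact on-disk layout, in particular how the timestamp entry is represented, is an assumption rather than the module's implementation:

import json
import time

import yaml  # PyYAML, assumed to be available


def write_output_file(data, file_path, file_format="yaml", file_mode="w", timestamp=False):
    # The extension is appended automatically based on the selected format.
    path = "{0}.{1}".format(file_path, "json" if file_format == "json" else "yaml")
    payload = {"timestamp": time.strftime("%Y-%m-%d %H:%M:%S"), "data": data} if timestamp else data
    with open(path, file_mode) as handle:  # 'w' overwrites, 'a' appends
        if file_format == "json":
            json.dump(payload, handle, indent=2)
            handle.write("\n")
        else:
            yaml.safe_dump(payload, handle, default_flow_style=False)
    return path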
+ +version_added: "6.31.0" +extends_documentation_fragment: + - cisco.dnac.workflow_manager_params +author: + - Karthick S N (@karthick-s-n) + - Madhan Sankaranarayanan (@madhansansel) + +options: + config_verify: + description: Set to True to verify the Cisco Catalyst Center after applying the playbook config. + type: bool + default: False + state: + description: The desired state of the configuration after module execution. + type: str + choices: ["gathered"] + default: gathered + config: + description: + - List of dictionaries specifying network device query parameters. + - Each dictionary must contain a C(network_devices) list with at least one unique identifier + (such as management IP, MAC address, hostname, or serial number) per device. + type: list + elements: dict + required: true + suboptions: + network_devices: + description: + - Contains filters and configuration for retrieving network devices information. + - Requires at least one device identification or filtering criterion. + type: list + elements: dict + suboptions: + site_hierarchy: + description: + - Site hierarchy path for filtering devices by location. + type: str + device_type: + description: + - Device type filter for specific device models. + - Examples include "Cisco Catalyst 9300 Switch", "Cisco Catalyst 9400 Switch". + type: str + choices: + - Cisco Catalyst 9300 Switch + - Cisco Catalyst 9400 Switch + - Cisco Catalyst 9500 Switch + - Cisco Catalyst C9500-48Y4C Switch + - Cisco 3800E Unified Access Point + - Cisco Catalyst 9130AXI Unified Access Point + - Cisco Catalyst 9800-L-C Wireless Controller + - Cisco Catalyst 9115AXI Unified Access Point + - Cisco Catalyst Wireless 9164I Unified Access Point + - Cisco Wireless 9176D1 Access Point # Additional options may be found in the API documentation. + device_role: + description: + - Device role filter for network function-based filtering. + - Common roles include ACCESS, DISTRIBUTION, CORE, WAN, WLC, DATA_CENTER. + type: str + choices: + - ACCESS + - DISTRIBUTION + - CORE + - WAN + - WLC + - DATA_CENTER # Additional options may be found in the API documentation. + device_family: + description: + - Device family filter for device category-based filtering. + - Examples include "Switches and Hubs", "Routers", "Wireless Controller". + type: str + choices: + - Switches and Hubs + - Routers + - Wireless Controller + - Unified AP + - Sensors # Additional options may be found in the API documentation. + software_version: + description: + - Software version filter for specific software releases. + - Format example "16.12.05", "17.6.1". + type: str + os_type: + description: + - Operating system type filter for software platform filtering. + - Common types include IOS-XE, IOS, IOS-XR, NX-OS, ASA, FTD. + type: str + choices: + - IOS-XE + - IOS + - IOS-XR + - NX-OS + - ASA + - FTD + - IOS-XE SD-WAN # Additional options may be found in the API documentation. + device_identifier: + description: + - Optional list of device identification criteria to further filter network devices. + - Provides granular control over which network devices have their information retrieved. + - Multiple identification methods can be combined for comprehensive device targeting. + - Only devices that are both network-enabled and match the identifier criteria will be processed. + - When multiple identification parameters (ip_address, hostname, serial_number, mac_address) are specified in the same entry, + they must all refer to the same physical device for proper validation. 
+ - Use separate device_identifier entries when targeting different devices with different identification methods. + type: list + elements: dict + suboptions: + ip_address: + description: + - List of management IP addresses to identify specific network devices. + - Each IP address must correspond to a managed device in the Cisco Catalyst Center inventory. + - Only devices with matching IP addresses will have their information retrieved. + - IP addresses must be valid IPv4 addresses in dotted decimal notation. + type: list + elements: str + mac_address: + description: + - List of device MAC addresses to identify specific network devices. + - Each MAC address must correspond to a managed device in the Cisco Catalyst Center inventory. + - Only devices with matching MAC addresses will have their information retrieved. + - MAC addresses should be in standard format (e.g., "aa:bb:cc:dd:ee:ff"). + type: list + elements: str + serial_number: + description: + - List of device serial numbers to identify specific network devices. + - Each serial number must match exactly as recorded in Cisco Catalyst Center device inventory. + - Only devices with matching serial numbers will have their information retrieved. + - Serial numbers are case-sensitive and must match the format used by the device manufacturer. + type: list + elements: str + hostname: + description: + - List of device hostnames to identify specific network devices. + - Each hostname must match exactly as configured in Cisco Catalyst Center device inventory. + - Only devices with matching hostnames will have their information retrieved. + - Hostnames are case-sensitive and must match the exact device hostname configuration. + type: list + elements: str + timeout: + description: + - Maximum time in seconds to wait for device information retrieval operations to complete. + - Applied to each individual device lookup operation during the filtering process. + - If device information retrieval fails within this timeout period, the operation will retry based on the 'retries' parameter. + - Longer timeouts may be needed for environments with slower network connectivity or larger device inventories. + - If timeout is greater than (retries * interval), the operation will continue retrying until the timeout period ends. + - Total operation time is bounded by the timeout value regardless of retry configuration. + type: int + default: 120 + retries: + description: + - Number of retry attempts for device information retrieval operations when initial attempts fail. + - Applied to each individual device lookup filtering operation. + - Higher retry counts improve reliability in environments with intermittent connectivity or high API load. + - Total operation time is affected by retries combined with timeout and interval settings. + - Actual retry attempts may be less than specified if timeout period is reached first. + type: int + default: 3 + interval: + description: + - Time in seconds to wait between retry attempts for device information retrieval operations. + - Applied as a delay between failed attempts during device lookup filtering processes. + - Combined with timeout and retries to determine total operation duration. + - If (retries * interval) exceeds timeout, retries will continue until timeout is reached. + - Longer intervals help reduce API load on Cisco Catalyst Center during retry operations. + - Should be balanced with timeout settings to avoid excessively long operation times. 
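One plausible reading of how timeout, retries, and interval combine is a deadline-bounded retry loop: retries stop as soon as data is returned, the retry budget is spent, or the overall timeout expires, whichever happens first. The lookup callable and loop below are illustrative, not the module's internals:

import time


def fetch_with_retries(fetch, timeout=120, retries=3, interval=10):
    # 'fetch' is any callable returning data on success and a falsy value on failure.
    deadline = time.time() + timeout
    attempts = 0
    while attempts <= retries and time.time() < deadline:
        result = fetch()
        if result:
            return result
        attempts += 1
        # Never sleep past the overall deadline.
        time.sleep(min(interval, max(0.0, deadline - time.time())))
    return None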
+ type: int + default: 10 + requested_info: + description: + - List of device information types to retrieve. + - If set to ['all'], retrieves all available information categories. + - If specific types are listed, only those will be retrieved. + - If omitted, defaults to all information types. + type: list + elements: str + choices: + - all # Retrieves all available information of all choices below + - device_interfaces_by_range_info #Retrieves interface details by specified range + - device_info #Retrieves basic device details of hostname, model, serial number, OS version + - interface_info #Retrieves interface details such as status, speed, duplex, and MAC address + - interface_vlan_info #Retrieves VLAN information for each interface + - line_card_info #Retrieves line card details for modular devices + - supervisor_card_info #Retrieves supervisor card details for modular devices + - poe_info #Retrieves Power over Ethernet (PoE) information for interfaces + - module_count_info #Retrieves the count of installed modules + - connected_device_info #Retrieves information about devices connected to the specified device + - device_config_info #Retrieves the running configuration of the specified device + - device_summary_info #Retrieves a summary of the specified device's information + - device_polling_interval_info #Retrieves the polling interval configuration for the specified device + - device_stack_info #Retrieves stack information for stackable devices + - device_link_mismatch_info #Retrieves details of link mismatches speed/duplex/VLAN issues + output_file_info: + description: + - Controls file output generation for network device information retrieval results. + - When provided, saves retrieved device information to the specified file + along with returning the data in standard Ansible module output. + - Supports flexible file formatting, writing modes, and optional timestamp inclusion for audit purposes. + - Enables automated reporting and data archival workflows for network device monitoring operations. + type: dict + suboptions: + file_path: + description: + - Absolute path to the output file without file extension. + - File extension is automatically appended based on the selected file format (.json or .yaml). + - Directory structure will be created automatically if it does not exist. + - Path must be writable by the user executing the Ansible playbook. + type: str + required: true + file_format: + description: + - Output data format for the generated file. + - Determines file structure and extension applied to the file path. + - YAML format provides better human readability while JSON offers programmatic parsing advantages. + - Format selection affects file extension and data serialization method. + type: str + default: yaml + choices: + - json + - yaml + file_mode: + description: + - File writing mode determining how data is written to the target file. + - Use 'w' to overwrite existing file content or 'a' to append new data to existing content. + - Append mode enables incremental data collection across multiple playbook runs. + - Overwrite mode ensures clean data sets for each execution. + type: str + default: w + choices: + - w + - a + timestamp: + description: + - Controls inclusion of data retrieval timestamp in the output file content. + - When enabled, adds the data collection timestamp as the first entry for audit trail purposes. + - Useful for tracking when network device information was collected in automated workflows. 
+ - Timestamp format follows "YYYY-MM-DD HH:MM:SS" standard format. + type: bool + default: false + +requirements: + - dnacentersdk >= 2.9.3 + - python >= 3.9.19 +notes: + - This is a facts/info module, it only retrieves information and does not modify any device or configuration. + - Writing to a local file is for reporting/archival purposes only and does not affect the state of any managed device. + - Safe to use in check mode. + - SDK Methods used are + - devices.Devices.get_device_list + - devices.Devices.get_device_interface_vlans + - devices.Devices.get_device_interfaces_by_specified_range + - devices.Devices.get_linecard_details + - devices.Devices.inventory_insight_device_link_mismatch + - devices.Devices.get_stack_details_for_device + - devices.Devices.get_device_config_by_id + - devices.Devices.get_polling_interval_by_id + - devices.Devices.get_supervisor_card_detail + - devices.Devices.poe_details + - devices.Devices.get_connected_device_detail + - devices.Devices.get_interface_info_by_id + - devices.Devices.get_module_count + - devices.Devices.get_network_device_by_ip + - devices.Devices.get_device_summary + + - Paths used are + - GET/dna/intent/api/v1/network-device + - GET/dna/intent/api/v1/network-device/{id}/vlan + - GET/dna/intent/api/v1/interface/network-device/{deviceId}/{startIndex}/{recordsToReturn} + - GET/dna/intent/api/v1/network-device/{deviceUuid}/line-card + - GET/dna/intent/api/v1/network-device/insight/{siteId}/device-link + - GET/dna/intent/api/v1/network-device/{deviceId}/stack + - GET/dna/intent/api/v1/network-device/{networkDeviceId}/config + - GET/dna/intent/api/v1/network-device/{id}/collection-schedule + - GET/dna/intent/api/v1/network-device/{id}/brief + - GET/dna/intent/api/v1/network-device/{deviceUuid}/supervisor-card + - GET/dna/intent/api/v1/network-device/{deviceUuid}/poe + - GET/dna/intent/api/v1/network-device/{deviceUuid}/interface/{interfaceUuid}/neighbor + - GET/dna/intent/api/v1/interface/network-device/{deviceId} + - GET/dna/intent/api/v1/network-device/module/count + - GET/dna/intent/api/v1/network-device/ip-address/{ipAddress} +""" + +EXAMPLES = r""" +# 1 Example Playbook to gather specific network device information from Cisco Catalyst Center +--- +- name: Get Specific Network devices information on Cisco Catalyst Center + hosts: localhost + connection: local + vars_files: + - "credentials.yml" + tasks: + - name: Gather detailed facts for specific network devices + cisco.dnac.network_devices_info_workflow_manager: + dnac_host: "{{ dnac_host }}" + dnac_username: "{{ dnac_username }}" + dnac_password: "{{ dnac_password }}" + dnac_verify: "{{ dnac_verify }}" + dnac_port: "{{ dnac_port }}" + dnac_version: "{{ dnac_version }}" + dnac_debug: "{{ dnac_debug }}" + dnac_log: true + dnac_log_level: DEBUG + config_verify: false + dnac_api_task_timeout: 1000 + dnac_task_poll_interval: 1 + state: queried + config: + - network_devices: + - site_hierarchy: Global/USA/SAN JOSE + device_type: "Cisco Catalyst 9300 Switch" + device_role: "ACCESS" + device_family: "Switches and Hubs" + software_version: "17.12.1" + os_type: "IOS-XE" + device_identifier: + - ip_address: ["204.1.2.2"] + - serial_number: ["FCW2137L0SB"] + - hostname: ["SJ-BN-9300.cisco.local"] + - mac_address: ["90:88:55:90:26:00"] + timeout: 60 + retries: 3 + interval: 10 + requested_info: + - device_info + - interface_info + - interface_vlan_info + - line_card_info + - supervisor_card_info + - poe_info + - module_count_info + - connected_device_info + - device_interfaces_by_range_info + - 
device_config_info + - device_summary_info + - device_polling_interval_info + - device_stack_info + - device_link_mismatch_info + output_file_info: + file_path: /Users/priyadharshini/Downloads/info + file_format: json + file_mode: w + timestamp: true + +# 2 Example Playbook to gather all network device information from Cisco Catalyst Center +- name: Get All Network devices information on Cisco Catalyst Center + hosts: localhost + connection: local + vars_files: + - "credentials.yml" + tasks: + - name: Gather detailed facts for all network devices + cisco.dnac.network_devices_info_workflow_manager: + dnac_host: "{{ dnac_host }}" + dnac_username: "{{ dnac_username }}" + dnac_password: "{{ dnac_password }}" + dnac_verify: "{{ dnac_verify }}" + dnac_port: "{{ dnac_port }}" + dnac_version: "{{ dnac_version }}" + dnac_debug: "{{ dnac_debug }}" + dnac_log: true + dnac_log_level: DEBUG + config_verify: false + dnac_api_task_timeout: 1000 + dnac_task_poll_interval: 1 + state: queried + config: + - network_devices: + - site_hierarchy: Global/USA/SAN JOSE + device_type: "Cisco Catalyst 9300 Switch" + device_role: "ACCESS" + device_family: "Switches and Hubs" + software_version: "17.12.1" + os_type: "IOS-XE" + device_identifier: + - ip_address: ["204.1.2.2"] + - serial_number: ["FCW2137L0SB"] + - hostname: ["SJ-BN-9300.cisco.local"] + - mac_address: ["90:88:55:90:26:00"] + timeout: 60 + retries: 3 + interval: 10 + requested_info: + - all + output_file_info: + file_path: /Users/priyadharshini/Downloads/info + file_format: json + file_mode: w + timestamp: true +""" + +RETURN = r""" + +#Case 1: Successful Retrieval of Device Info +response_device_info: + description: + - Device information for network devices, including family, type, software version, serial number, and more. + - Returned for each device matching the query. 
+ returned: always + type: dict + sample: { + "response": [{ + "family": "Switches and Hubs", + "type": "Cisco Catalyst 9300 Switch", + "lastUpdateTime": 1750896739913, + "macAddress": "0c:75:bd:42:db:80", + "deviceSupportLevel": "Supported", + "softwareType": "IOS-XE", + "softwareVersion": "17.2.1", + "serialNumber": "FJC2335S09F", + "inventoryStatusDetail": "", + "collectionInterval": "Global Default", + "dnsResolvedManagementAddress": "204.1.2.3", + "lastManagedResyncReasons": "Periodic", + "managementState": "Managed", + "pendingSyncRequestsCount": "0", + "reasonsForDeviceResync": "Periodic", + "reasonsForPendingSyncRequests": "", + "syncRequestedByApp": "", + "upTime": "63 days, 19:36:43.08", + "roleSource": "MANUAL", + "lastUpdated": "2025-06-26 00:12:19", + "interfaceCount": "0", + "apManagerInterfaceIp": "", + "bootDateTime": "2025-04-23 04:36:19", + "collectionStatus": "Managed", + "hostname": "test123", + "locationName": null, + "managementIpAddress": "204.1.2.3", + "platformId": "C9300-48UXM", + "reachabilityFailureReason": "", + "reachabilityStatus": "Reachable", + "series": "Cisco Catalyst 9300 Series Switches", + "snmpContact": "", + "snmpLocation": "", + "associatedWlcIp": "", + "apEthernetMacAddress": null, + "errorCode": null, + "errorDescription": null, + "lastDeviceResyncStartTime": "2025-06-26 00:11:45", + "lineCardCount": "0", + "lineCardId": "", + "managedAtleastOnce": false, + "memorySize": "NA", + "tagCount": "0", + "tunnelUdpPort": null, + "uptimeSeconds": 5528803, + "vendor": "Cisco", + "waasDeviceMode": null, + "description": "Cisco IOS Software [Amsterdam], Catalyst L3 Switch Software (CAT9K_IOSXE), + Version 17.2.1, RELEASE SOFTWARE (fc4) Technical Support: http://www.cisco.com/techsupport + Copyright (c) 1986-2020 by Cisco Systems, Inc. Compiled Thu 26-Mar-20 03:29 by mcpre netconf enabled", + "location": null, + "role": "ACCESS", + "instanceUuid": "e62e6405-13e4-4f1b-ae1c-580a28a96a88", + "instanceTenantId": "66e48af26fe687300375675e", + "id": "e62e6405-13e4-4f1b-ae1c-580a28a96a88" + }], + "version": "string" + } + +#Case 2: Successful Retrieval of Device Interface VLAN info +response_device_interface_vlan_info: + description: Details of the response containing VLAN information for device interfaces. + returned: always + type: dict + sample: { + "response": [ + { + "interfaceName": "GigabitEthernet0/1", + "ipAddress": "192.168.10.25", + "mask": 24, + "networkAddress": "192.168.10.0", + "numberOfIPs": 254, + "prefix": "192.168.10.0/24", + "vlanNumber": 10, + "vlanType": "Data" + } + ], + "version": "string" + } + +#Case 3: Successful Retrieval of Device Interfaces by Specified Range +response_device_interfaces_range: + description: Details of the response containing device interface information retrieved by a specified range. 
+ returned: always + type: dict + sample: { + "response": [ + { + "addresses": [], + "adminStatus": "UP", + "className": null, + "deviceId": "e62e6405-13e4-4f1b-ae1c-580a28a96a88", + "duplex": "FullDuplex", + "ifIndex": "73", + "interfaceType": "Physical", + "ipv4Address": null, + "ipv4Mask": null, + "isisSupport": "false", + "lastIncomingPacketTime": null, + "lastOutgoingPacketTime": 1750896368000, + "lastUpdated": null, + "macAddress": "0c:75:bd:42:db:c1", + "mappedPhysicalInterfaceId": null, + "mappedPhysicalInterfaceName": null, + "mediaType": null, + "mtu": "9100", + "nativeVlanId": "1", + "ospfSupport": "false", + "pid": "C9300-48UXM", + "portMode": "access", + "portName": "AppGigabitEthernet1/0/1", + "portType": "Ethernet Port", + "serialNo": "FJC2335S09F", + "series": "Cisco Catalyst 9300 Series Switches", + "speed": "1000000", + "status": "up", + "vlanId": "1", + "voiceVlan": "", + "description": "", + "name": null, + "instanceUuid": "c9c638b6-4627-4a2e-be25-05f6e487bfcf", + "instanceTenantId": "66e48af26fe687300375675e", + "id": "c9c638b6-4627-4a2e-be25-05f6e487bfcf" + } + ], + "version": "string" + } + +#Case 4: Successful Retrieval of Linecard Details +response_linecard_details: + description: Details of the response containing linecard information for the device. + returned: always + type: dict + sample: { + "response": [ + { + "serialno": "SN123456789", + "partno": "PN987654321", + "switchno": "SW-001-A1", + "slotno": "Slot-04" + } + ], + "version": "string" + } + +#Case 5: Successful Retrieval of Inventory Insight Device Link Mismatch API +response_inventory_insight_link_mismatch: + description: Details of the response containing device link mismatch information from Inventory Insight API. + returned: always + type: dict + sample: { + "response": [ + { + "endPortAllowedVlanIds": "10,20,30", + "endPortNativeVlanId": "10", + "startPortAllowedVlanIds": "10,20,30", + "startPortNativeVlanId": "10", + "linkStatus": "up", + "endDeviceHostName": "switch-nyc-01", + "endDeviceId": "device-1001", + "endDeviceIpAddress": "192.168.1.10", + "endPortAddress": "GigabitEthernet1/0/24", + "endPortDuplex": "full", + "endPortId": "endport-1001", + "endPortMask": "255.255.255.0", + "endPortName": "Gi1/0/24", + "endPortPepId": "pep-ep-1001", + "endPortSpeed": "1000Mbps", + "startDeviceHostName": "router-dc-01", + "startDeviceId": "device-2001", + "startDeviceIpAddress": "192.168.1.1", + "startPortAddress": "GigabitEthernet0/1", + "startPortDuplex": "full", + "startPortId": "startport-2001", + "startPortMask": "255.255.255.0", + "startPortName": "Gi0/1", + "startPortPepId": "pep-sp-2001", + "startPortSpeed": "1000Mbps", + "lastUpdated": "2025-06-26T10:15:00Z", + "numUpdates": 15, + "avgUpdateFrequency": 4.0, + "type": "ethernet-link", + "instanceUuid": "123e4567-e89b-12d3-a456-426614174000", + "instanceTenantId": "tenant-xyz123" + } + ], + "version": "string" + } + +#Case 6: Successful Retrieval of Stack Details for Device +response_stack_details: + description: Details of the response containing stack information for the device. 
+ returned: always + type: dict + sample: { + "response": { + "device_stack_info": [ + { + "deviceId": "e62e6405-13e4-4f1b-ae1c-580a28a96a88", + "stackSwitchInfo": [ + { + "hwPriority": 0, + "macAddress": "0c:75:bd:42:db:80", + "numNextReload": 1, + "role": "ACTIVE", + "softwareImage": "17.02.01", + "stackMemberNumber": 1, + "state": "READY", + "switchPriority": 1, + "entPhysicalIndex": "1000", + "serialNumber": "FJC2335S09F", + "platformId": "C9300-48UXM" + } + ], + "stackPortInfo": [ + { + "isSynchOk": "Yes", + "name": "StackSub-St1-1", + "switchPort": "1/1", + "neighborPort": "NONE", + "nrLinkOkChanges": 0, + "stackCableLengthInfo": "NO_CABLE", + "stackPortOperStatusInfo": "DOWN", + "linkActive": false, + "linkOk": false + }, + { + "isSynchOk": "Yes", + "name": "StackSub-St1-2", + "switchPort": "1/2", + "neighborPort": "NONE", + "nrLinkOkChanges": 0, + "stackCableLengthInfo": "NO_CABLE", + "stackPortOperStatusInfo": "DOWN", + "linkActive": false, + "linkOk": false + } + ], + "svlSwitchInfo": null + } + ] + }, + "version": "string" + } + +#Case 7: Successful Retrieval of Device Config +response_device_config: + description: Details of the response containing the device configuration as a string. + returned: always + type: dict + sample: { + "response": "Building Configuration Operation Successful", + "version": "string" + } + +#Case 8: Successful Retrieval of Polling Interval +response_polling_interval: + description: Details of the response containing the polling interval value. + returned: always + type: dict + sample: { + "device_polling_interval_info": [ + 86400 + ], + "version": "string" + } + +#Case 9: Successful Retrieval of Device Summary +response_device_summary: + description: Details of the response containing a summary of the device. + returned: always + type: dict + sample: { + "response": { + "id": "e62e6405-13e4-4f1b-ae1c-580a28a96a88", + "role": "ACCESS", + "roleSource": "MANUAL" + }, + "version": "string" + } + +#Case 10: Successful Retrieval of Supervisor Card Detail +response_supervisor_card_detail: + description: Details of the response containing supervisor card information for the device. + returned: always + type: dict + sample: { + "response": [ + { + "serialno": "SN1234567890", + "partno": "PN9876543210", + "switchno": "SW-01", + "slotno": "3" + } + ], + "version": "string" + } + +#Case 11: Successful Retrieval of POE Details +response_poe_details: + description: Details of the response containing Power over Ethernet (POE) statistics. + returned: always + type: dict + sample: { + "response": { + "powerAllocated": "525", + "powerConsumed": "0", + "powerRemaining": "525" + }, + "version": "string" + } + +#Case 12: Successful Retrieval of Connected Device Detail +response_connected_device_detail: + description: Details of the response containing information about a connected neighbor device. + returned: always + type: dict + sample: { + "response": { + "neighborDevice": "DC-T-9300", + "neighborPort": "TenGigabitEthernet1/1/8", + "capabilities": [ + "IGMP_CONDITIONAL_FILTERING", + "ROUTER", + "SWITCH" + ] + }, + "version": "string" + } + +#Case 13: Successful Retrieval of Interface Info +response_interface_info: + description: Details of the response containing interface information for a device. 
+ returned: always + type: dict + sample: { + "response": [ + { + "addresses": [], + "adminStatus": "UP", + "className": null, + "deviceId": "e62e6405-13e4-4f1b-ae1c-580a28a96a88", + "duplex": "FullDuplex", + "ifIndex": "73", + "interfaceType": "Physical", + "ipv4Address": null, + "ipv4Mask": null, + "isisSupport": "false", + "lastIncomingPacketTime": null, + "lastOutgoingPacketTime": 1750896368000, + "lastUpdated": null, + "macAddress": "0c:75:bd:42:db:c1", + "mappedPhysicalInterfaceId": null, + "mappedPhysicalInterfaceName": null, + "mediaType": null, + "mtu": "9100", + "nativeVlanId": "1", + "ospfSupport": "false", + "pid": "C9300-48UXM", + "portMode": "access", + "portName": "AppGigabitEthernet1/0/1", + "portType": "Ethernet Port", + "serialNo": "FJC2335S09F", + "series": "Cisco Catalyst 9300 Series Switches", + "speed": "1000000", + "status": "up", + "vlanId": "1", + "voiceVlan": "", + "description": "", + "name": null, + "instanceUuid": "c9c638b6-4627-4a2e-be25-05f6e487bfcf", + "instanceTenantId": "66e48af26fe687300375675e", + "id": "c9c638b6-4627-4a2e-be25-05f6e487bfcf" + } + ], + "version": "string" + } + +#Case 14: Successful Retrieval of Module Count +response_module_count: + description: Details of the response containing the count of modules. + returned: always + type: dict + sample: { + "module_count_info": [ + 3 + ], + "version": "string" + } + +#Case 15: Successful Retrieval of Network Device by IP +response_network_device_by_ip: + description: Details of the response containing network device information retrieved by IP address. + returned: always + type: dict + sample: { + "response": { + "apManagerInterfaceIp": "10.10.10.15", + "associatedWlcIp": "10.10.10.1", + "bootDateTime": "2025-06-20T09:30:00Z", + "collectionInterval": "300", + "collectionStatus": "success", + "errorCode": "0", + "errorDescription": "", + "family": "Cisco Aironet", + "hostname": "AP-Office-23", + "id": "ap-12345", + "instanceTenantId": "tenant-001", + "instanceUuid": "a1b2c3d4-e5f6-7890-1234-56789abcdef0", + "interfaceCount": "6", + "inventoryStatusDetail": "Active", + "lastUpdateTime": 1687700000, + "lastUpdated": "2025-06-25T10:00:00Z", + "lineCardCount": "1", + "lineCardId": "lc-001", + "location": "Building 1, Floor 2", + "locationName": "HQ Floor 2", + "macAddress": "00:1A:2B:3C:4D:5E", + "managementIpAddress": "10.10.10.15", + "memorySize": "2048MB", + "platformId": "AIR-AP2800", + "reachabilityFailureReason": "", + "reachabilityStatus": "reachable", + "role": "Access Point", + "roleSource": "auto-discovery", + "serialNumber": "FTX12345678", + "series": "2800", + "snmpContact": "admin@example.com", + "snmpLocation": "Data Center Rack 5", + "softwareType": "IOS-XE", + "softwareVersion": "17.6.1", + "tagCount": "4", + "tunnelUdpPort": "4500", + "type": "wireless-ap", + "upTime": "3 days, 5 hours", + "waasDeviceMode": "N/A", + "dnsResolvedManagementAddress": "ap-office23.example.com", + "apEthernetMacAddress": "00:1A:2B:3C:4D:5E", + "vendor": "Cisco", + "reasonsForPendingSyncRequests": "", + "pendingSyncRequestsCount": "0", + "reasonsForDeviceResync": "", + "lastDeviceResyncStartTime": "2025-06-24T08:00:00Z", + "uptimeSeconds": 277200, + "managedAtleastOnce": true, + "deviceSupportLevel": "Gold", + "managementState": "Managed", + "description": "Office wireless access point on Floor 2" + }, + "version": "string" + } +""" + + +from ansible_collections.cisco.dnac.plugins.module_utils.dnac import ( + DnacBase, +) +from ansible.module_utils.basic import AnsibleModule +import json +import time +import 
os +import ipaddress +try: + import yaml +except ImportError: + yaml = None +from datetime import datetime + +from ansible_collections.cisco.dnac.plugins.module_utils.validation import ( + validate_list_of_dicts,) + + +class NetworkDevicesInfo(DnacBase): + """Class containing member attributes for network_devices_info_workflow_manager module""" + def __init__(self, module): + super().__init__(module) + self.supported_states = ['gathered'] + self.total_response = [] + + def validate_input(self): + """ + Validate the playbook configuration for device information retrieval and data integrity. + + This method performs strict type checks, required field validation, duplicate detection, + and default value population to ensure the playbook configuration is correctly structured + and ready for further processing or API interactions. + + It validates that: + - The configuration exists and is a list. + - Each item in the list conforms to the expected schema defined in `config_spec`. + - Default values are applied where necessary. + - Invalid Args are detected and reported. + + Args: + self (object): An instance of the class handling Cisco Catalyst Center operations, + containing the `config` attribute to validate. + + Returns: + self: The current instance with updated attributes: + - self.msg (str): Status message indicating validation success or failure. + - self.status (str): Either "success" or "failed", based on validation result. + - self.validated_config (list): A sanitized, validated version of the playbook configuration, + if validation succeeds. + """ + self.log("Initiating comprehensive input validation for network devices information workflow configuration", "INFO") + + config_spec = { + "network_devices": { + "type": "list", + "elements": "dict", + "site_hierarchy": { + "type": "str", + "required": False + }, + "device_type": { + "type": "str", + "required": False + }, + "device_role": { + "type": "str", + "required": False, + }, + "device_family": { + "type": "str", + "required": False + }, + "software_version": { + "type": "str", + "required": False + }, + "os_type": { + "type": "str", + "required": False, + }, + "device_identifier": { + "type": "list", + "elements": "dict", + "ip_address": { + "type": "list", + "elements": "str", + "required": False + }, + "serial_number": { + "type": "list", + "elements": "str", + "required": False + }, + "hostname": { + "type": "list", + "elements": "str", + "required": False + }, + "mac_address": { + "type": "list", + "elements": "str", + "required": False + } + }, + "timeout": { + "type": "int", + "default": 120 + }, + "retries": { + "type": "int", + "default": 3 + }, + "interval": { + "type": "int", + "default": 10 + }, + "requested_info": { + "type": "list", + "elements": "str", + "allowed_values": [ + "device_info", + "interface_info", + "interface_vlan_info", + "line_card_info", + "supervisor_card_info", + "poe_info", + "module_count_info", + "connected_device_info", + "device_interfaces_by_range_info", + "device_config_info", + "device_summary_info", + "device_polling_interval_info", + "device_stack_info", + "device_link_mismatch_info" + ] + }, + "output_file_info": { + "type": "dict", + "file_path": { + "type": "str" + }, + "file_format": { + "type": "str", + "default": "yaml", + "allowed_values": ["json", "yaml"] + }, + "file_mode": { + "type": "str", + "default": "w", + "allowed_values": ["w", "a"] + }, + "timestamp": { + "type": "bool", + "default": False + } + } + } + } + try: + valid_config, invalid_params = 
validate_list_of_dicts(self.config, config_spec) + + if invalid_params: + self.msg = "Network devices configuration validation failed with invalid Args: {0}".format( + invalid_params + ) + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + + self.validated_config = valid_config + + self.log("Network devices configuration validation completed successfully", "INFO") + self.log( + "Validated {0} network device configuration section(s) for workflow processing".format( + (valid_config) + ), + "DEBUG" + ) + return self + + except Exception as validation_exception: + self.msg = "Network devices configuration validation encountered an error: {0}".format( + str(validation_exception) + ) + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + + def get_want(self, config): + """ + Extracts and validates the desired network device information workflow state from playbook configuration. + + This method processes the playbook configuration to determine the desired state for network device + information retrieval operations. It performs comprehensive validation of all configuration parameters, + validates device identification criteria, information type requests, and file output settings to ensure + the configuration is properly structured and meets all operational requirements before proceeding + with device discovery and information retrieval workflows. + + Args: + config (dict): Network device information workflow configuration dictionary. + + Returns: + self: The current instance with updated attributes: + - self.want: Validated configuration dictionary ready for processing + - self.status: Validation status ("success" or "failed") + - self.msg: Status message describing validation results + """ + self.log("Extracting desired network devices information workflow state from playbook configuration", "DEBUG") + self.log("Processing configuration sections for comprehensive workflow validation", "DEBUG") + + want = {} + network_devices = config.get("network_devices") + + want["network_devices"] = config.get("network_devices") + + device_keys = [ + "site_hierarchy", "device_type", "device_role", + "device_family", "software_version", "os_type", + "device_identifier" + ] + allowed_return_values = { + "all", + "device_info", + "interface_info", + "interface_vlan_info", + "line_card_info", + "supervisor_card_info", + "poe_info", + "module_count_info", + "connected_device_info", + "device_interfaces_by_range_info", + "device_config_info", + "device_summary_info", + "device_polling_interval_info", + "device_stack_info", + "device_link_mismatch_info" + } + allowed_device_identifier_filters = {"ip_address", "hostname", "serial_number", "ip_address_range", "mac_address"} + allowed_field = { + "site_hierarchy", "device_type", "device_role", "device_family", "software_version", "os_type", + "device_identifier", "timeout", "retries", "interval", "requested_info", "output_file_info" + } + allowed_output_file_info_keys = {"file_path", "file_format", "file_mode", "timestamp"} + allowed_file_formats = {"json", "yaml"} + allowed_file_modes = {"a", "w"} + + for config in self.config: + if "network_devices" not in config: + self.msg = "'network_devices' key is missing in the config block" + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + + for idx, device in enumerate(config["network_devices"]): + self.log("Processing device entry {0}: {1}".format(idx + 1, device), "DEBUG") + for key in device: + if key not in 
allowed_field: + self.msg = "'{0}' is not a valid key in network device entry. Allowed keys are: {1}".format( + key, ", ".join(sorted(allowed_field)) + ) + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + + if not any(device.get(key) for key in device_keys): + self.log( + "Device index {0} missing required identification keys: {1}".format( + idx + 1, device_keys + ), + "ERROR" + ) + self.msg = ( + "Each network device must contain at least one of the following keys: {0}." + .format(", ".join(device_keys)) + ) + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + + for numeric in ("timeout", "retries", "interval"): + if numeric in device and device[numeric] < 0: + self.msg = "'{0}' must be a non-negative integer".format(numeric) + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + + valid_keys_found = set() + identifiers = device.get("device_identifier", []) + + if identifiers: + all_identifier_keys = set() + for identifier in identifiers: + self.log("Processing device_identifier: {0}".format(identifier), "DEBUG") + all_identifier_keys.update(identifier.keys()) + + for key in identifier: + self.log(key) + if key in allowed_device_identifier_filters: + valid_keys_found.add(key) + self.log(valid_keys_found) + else: + self.msg = ( + "Invalid or unrecognized key '{0}' found in device_identifier. " + "Allowed keys are: {1}".format( + key, ", ".join(sorted(allowed_device_identifier_filters)) + ) + ) + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + + if "ip_address" in all_identifier_keys and "ip_address_range" in all_identifier_keys: + self.msg = ( + "Both 'ip_address' and 'ip_address_range' are specified across device_identifier entries. " + "Please specify only one of them." + ) + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + + if not valid_keys_found: + self.msg = ( + "Each 'device_identifier' list must contain at least one valid key among: {0}." + .format(", ".join(allowed_device_identifier_filters)) + ) + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + + if "requested_info" in device and device["requested_info"] is not None: + self.log("Applying requested_info for device index {0}".format(idx + 1), "DEBUG") + return_value = device["requested_info"] + for value_name in return_value: + if value_name not in allowed_return_values: + self.log( + "Invalid requested_info '{0}' in device index {1}." + "Valid options: {2}".format(value_name, idx, allowed_return_values), "ERROR" + ) + self.msg = ( + "'{0}' is not a valid return value. Allowed values are: {1}" + .format(value_name, sorted(allowed_return_values)) + ) + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + + if "output_file_info" in device: + output_file_info = device["output_file_info"] + if output_file_info is None: + continue + + file_format = output_file_info.get("file_format", "yaml") + file_mode = output_file_info.get("file_mode", "w") + timestamp = output_file_info.get("timestamp", False) + + output_file_info["file_format"] = file_format + output_file_info["file_mode"] = file_mode + output_file_info["timestamp"] = timestamp + + for key in output_file_info: + if key not in allowed_output_file_info_keys: + self.msg = "'{0}' is not a valid key in 'output_file_info'. 
Allowed keys are: {1}".format( + key, sorted(allowed_output_file_info_keys) + ) + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + + if output_file_info["file_format"] not in allowed_file_formats: + self.msg = "'file_format' must be one of: {0}".format(", ".join(sorted(allowed_file_formats))) + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + + if output_file_info["file_mode"] not in allowed_file_modes: + self.msg = "'file_mode' must be one of: {0}".format(", ".join(sorted(allowed_file_modes))) + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + + self.want = want + self.log(want, "DEBUG") + self.log("Network devices information workflow desired state extraction completed successfully", "DEBUG") + return self + + def get_diff_gathered(self, config): + """ + Processes the device configuration and retrieves requested information for each network device. + + Args: + self (object): An instance of the class interacting with Cisco Catalyst Center APIs. + config (dict): A dictionary containing the playbook configuration. + + Returns: + self: The current instance with the 'msg' and 'total_response' attributes populated + based on the API responses for the requested device information. + + Description: + This method retrieves information of for a list of network devices + based on filters provided in the playbook. For each device in the + input, it performs the following: + + - Determines which categories of information are requested, including: + - device_info + - interface_info + - interface_vlan_info + - line_card_info + - supervisor_card_info + - poe_info + - module_count_info + - connected_device_info + - device_interfaces_by_range_info + - device_config_info + - device_summary_info + - device_polling_interval_info + - device_stack_info + - device_link_mismatch_info + """ + self.log("Starting device info retrieval for all device entries", "INFO") + + network_devices = config.get("network_devices", []) + combined_data = {} + + for device_cfg in network_devices: + self.log("Processing device configuration entry with Args: {0}".format(list(device_cfg.keys())), "DEBUG") + filtered_config = {} + for field_name, field_value in device_cfg.items(): + if field_name != "requested_info": + filtered_config[field_name] = field_value + + self.log("Filtered config (excluding requested_info): {0}".format(filtered_config), "DEBUG") + self.log("Extracted device identification Args: {0}".format(list(filtered_config.keys())), "DEBUG") + requested_info = device_cfg.get("requested_info", []) + + if not requested_info: + all_info_requested = True + self.log("No specific information types requested - retrieving all available information categories", "DEBUG") + else: + all_info_requested = "all" in requested_info + self.log("Specific information types requested: {0}".format(requested_info), "DEBUG") + + device_info = all_info_requested or "device_info" in requested_info + interface_info = all_info_requested or "interface_info" in requested_info + interface_vlan_info = all_info_requested or "interface_vlan_info" in requested_info + linecard_info = all_info_requested or "line_card_info" in requested_info + supervisor_card_info = all_info_requested or "supervisor_card_info" in requested_info + poe_info = all_info_requested or "poe_info" in requested_info + module_count_info = all_info_requested or "module_count_info" in requested_info + connected_device_info = all_info_requested or "connected_device_info" in 
requested_info + device_interfaces_by_range_info = all_info_requested or "device_interfaces_by_range_info" in requested_info + device_config_info = all_info_requested or "device_config_info" in requested_info + device_summary_info = all_info_requested or "device_summary_info" in requested_info + device_polling_interval_info = all_info_requested or "device_polling_interval_info" in requested_info + device_stack_info = all_info_requested or "device_stack_info" in requested_info + device_link_mismatch_info = all_info_requested or "device_link_mismatch_info" in requested_info + + self.log( + """ + Requested: + device_info: {0} + interface_info: {1} + interface_vlan_info: {2} + line_card_info: {3} + supervisor_card_info: {4} + poe_info: {5} + module_count_info: {6} + connected_device_info: {7} + device_interfaces_by_range_info: {8} + device_config_info: {9} + device_summary_info: {10} + device_polling_interval_info:{11} + device_stack_info: {12} + device_link_mismatch_info: {13} + """.format( + device_info, + interface_info, + interface_vlan_info, + linecard_info, + supervisor_card_info, + poe_info, + module_count_info, + connected_device_info, + device_interfaces_by_range_info, + device_config_info, + device_summary_info, + device_polling_interval_info, + device_stack_info, + device_link_mismatch_info + ), + "DEBUG" + ) + + device_ids = self.filter_network_devices(filtered_config) + self.log("Filtered network devices after applying all the provided filters: {0}".format(device_ids), "DEBUG") + + if not device_ids: + self.msg = "No network devices found for the given filters." + self.total_response.append(self.msg) + break + else: + self.total_response.append("The network devices filtered from the provided filters are: {0}".format(list(device_ids.keys()))) + + if device_info: + self.log("Retrieving device details for network devices: {0}".format(list(device_ids.keys())), "DEBUG") + device_info_result = self.get_device_info(device_ids) + self.total_response.append(device_info_result) + combined_data["device_info"] = device_info_result + + if interface_info: + self.log("Retrieving interface details for network devices: {0}".format(list(device_ids.keys())), "DEBUG") + interface_info_result = self.get_interface_info(device_ids) + self.total_response.append(interface_info_result) + combined_data["interface_info"] = interface_info_result + + if interface_vlan_info: + self.log("Retrieving VLAN details for network devices: {0}".format(list(device_ids.keys())), "DEBUG") + interface_vlan_info_result = self.get_interface_vlan_info(device_ids) + self.total_response.append(interface_vlan_info_result) + combined_data["interface_vlan_info"] = interface_vlan_info_result + + if linecard_info: + self.log("Retrieving linecard details for network devices: {0}".format(list(device_ids.keys())), "DEBUG") + linecard_info_result = self.get_linecard_info(device_ids) + self.total_response.append(linecard_info_result) + combined_data["linecard_info"] = linecard_info_result + + if supervisor_card_info: + self.log("Retrieving Supervisor card details for network devices: {0}".format(list(device_ids.keys())), "DEBUG") + supervisor_card_info_result = self.get_supervisor_card_info(device_ids) + self.total_response.append(supervisor_card_info_result) + combined_data["supervisor_card_info"] = supervisor_card_info_result + + if poe_info: + self.log("Retrieving PoE details for network devices: {0}".format(list(device_ids.keys())), "DEBUG") + poe_info_result = self.get_poe_info(device_ids) + 
self.total_response.append(poe_info_result) + combined_data["poe_info"] = poe_info_result + + if module_count_info: + self.log("Retrieving module count details for network devices: {0}".format(list(device_ids.keys())), "DEBUG") + module_count_info_result = self.get_module_count_info(device_ids) + self.total_response.append(module_count_info_result) + combined_data["module_count_info"] = module_count_info_result + + if connected_device_info: + self.log("Retrieving connected neighbor device information via interface for network devices: {0}".format(list(device_ids.keys())), "DEBUG") + connected_devices_result = self.get_connected_device_details_from_interfaces(device_ids) + self.total_response.append(connected_devices_result) + combined_data["connected_devices_info"] = connected_devices_result + + if device_interfaces_by_range_info: + self.log("Retrieving interface information for specified range for network devices: {0}".format(list(device_ids.keys())), "DEBUG") + device_interfaces_by_range_info_result = self.get_interfaces_by_specified_range(device_ids) + self.total_response.append(device_interfaces_by_range_info_result) + combined_data["device_interfaces_by_range_info"] = device_interfaces_by_range_info_result + + if device_config_info: + self.log("Retrieving device configuration information for network devices: {0}".format(list(device_ids.keys())), "DEBUG") + device_config_info_result = self.get_device_config_info(device_ids) + self.total_response.append(device_config_info_result) + combined_data["device_config_info"] = device_config_info_result + + if device_summary_info: + self.log("Retrieving device summary information for network devices: {0}".format(list(device_ids.keys())), "DEBUG") + device_summary_info_result = self.get_device_summary_info(device_ids) + self.total_response.append(device_summary_info_result) + combined_data["device_summary_info"] = device_summary_info_result + + if device_polling_interval_info: + self.log("Retrieving device polling interval information for network devices: {0}".format(list(device_ids.keys())), "DEBUG") + device_polling_interval_info_result = self.get_device_polling_interval_info(device_ids) + self.total_response.append(device_polling_interval_info_result) + combined_data["device_polling_interval_info"] = device_polling_interval_info_result + + if device_stack_info: + self.log("Retrieving device stack information for network devices: {0}".format(list(device_ids.keys())), "DEBUG") + device_stack_info_result = self.get_device_stack_info(device_ids) + self.total_response.append(device_stack_info_result) + combined_data["device_stack_info"] = device_stack_info_result + + if device_link_mismatch_info: + site_hierarchy = device_cfg.get("site_hierarchy") + if not site_hierarchy: + self.msg = "For 'device_link_mismatch_info', 'site_hierarchy' must be provided." + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + else: + site_exists, site_id = self.get_site_id(site_hierarchy) + self.log("Retrieving device link mismatch details for network devices: {0}".format(list(device_ids.keys())), "DEBUG") + device_link_mismatch_info_result = self.get_device_link_mismatch_info(site_id, device_ids) + self.total_response.append(device_link_mismatch_info_result) + combined_data["device_link_mismatch_info"] = device_link_mismatch_info_result + + if config.get("network_devices"): + output_file_info = config["network_devices"][0].get("output_file_info") + + if output_file_info: + self.log("Processing file output configuration for network device information export: {0}".format(output_file_info), "INFO") + self.write_device_info_to_file({"output_file_info": output_file_info}) + self.log("Network device information successfully written to output file", "INFO") + + if self.total_response: + self.log("Network device information retrieval workflow completed successfully with {0} response entries".format(len(network_devices)), "INFO") + self.msg = self.total_response + self.set_operation_result("success", False, self.msg, "INFO") + return self + + def get_device_id(self, filtered_config): + """ + Retrieves device UUIDs from Cisco Catalyst Center based on device identifier parameters. + + This method processes device identification criteria from the configuration and maps network + devices to their corresponding UUIDs. It supports multiple identification methods and only + considers devices that are managed and reachable in the Catalyst Center inventory. + + Logic Implementation: + The method implements two distinct logical operations based on the structure of device_identifiers: + + AND Logic (Single Entry with Multiple Keys): + - Triggered when: len(device_identifiers) == 1 AND len(device_identifiers[0].keys()) > 1 + - Behavior: Devices must match ALL specified criteria within the single entry + - Example: [{"ip_address": ["192.168.1.1"], "hostname": ["switch01"]}] + - Result: Returns only devices that have IP 192.168.1.1 AND hostname switch01 + - Implementation: Uses set intersection to find devices matching all criteria + + OR Logic (Multiple Entries): + - Triggered when: Multiple device_identifier entries are provided + - Behavior: Devices matching ANY of the entries are included + - Example: [{"ip_address": ["192.168.1.1"]}, {"hostname": ["switch02"]}] + - Result: Returns devices that have IP 192.168.1.1 OR hostname switch02 + - Implementation: Accumulates devices from each entry independently + + Args: + filtered_config (dict): Configuration dictionary containing device identification parameters. + + Returns: + dict or None: A dictionary mapping device IP addresses to their UUIDs for managed devices. + Returns None if no device_identifier section is found in configuration.
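+
+        Example return value (illustrative, placeholder values): a mapping of management IP to device UUID,
+        for instance {"192.168.1.10": "e62e6405-13e4-4f1b-ae1c-580a28a96a88"}.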
+ """ + self.log("Starting device UUID mapping retrieval from 'device_identifier' entries", "INFO") + + self.log( + "Processing filtered configuration with parameters: {0}".format( + self.pprint(filtered_config) + ), + "DEBUG" + ) + + if not isinstance(filtered_config, dict): + self.log( + "Invalid filtered_config parameter - expected dict, got: {0}".format( + type(filtered_config).__name__ + ), + "ERROR" + ) + self.msg = "filtered_config parameter must be a valid dictionary" + self.set_operation_result("failed", False, self.msg, "ERROR") + return None + + device_identifiers = filtered_config.get("device_identifier", []) + + if not isinstance(device_identifiers, list): + self.log( + "Invalid device_identifiers format - expected list, got: {0}".format( + type(device_identifiers).__name__ + ), + "ERROR" + ) + self.msg = "device_identifier must be a list of identification criteria" + self.set_operation_result("failed", False, self.msg, "ERROR") + return None + + if not device_identifiers: + self.msg = "No 'device_identifier' section found in configuration. Skipping device ID retrieval." + self.log(self.msg, "WARNING") + return None + + param_key_map = { + "ip_address": "managementIpAddress", + "serial_number": "serialNumber", + "hostname": "hostname", + "mac_address": "macAddress", + } + + ip_uuid_map = {} + timeout = filtered_config.get("timeout", 120) + retries = filtered_config.get("retries", 3) + interval = filtered_config.get("interval", 10) + + self.log( + "Using retry configuration - timeout: {0}s, retries: {1}, interval: {2}s".format( + timeout, retries, interval + ), + "DEBUG" + ) + + is_and_logic = ( + len(device_identifiers) == 1 and + len(device_identifiers[0].keys()) > 1 + ) + logic_type = "AND" if is_and_logic else "OR" + self.log("Detected device_identifier logic type: {0} for {1} identifier groups".format( + logic_type, len(device_identifiers)), "DEBUG") + + if is_and_logic: + identifier = device_identifiers[0] + self.log("Processing AND logic for identifiers: {0}".format(identifier), "DEBUG") + + combined_devices = None + for key_index, (key, values) in enumerate(identifier.items(), start=1): + self.log( + "Processing AND criteria {0}/{1} - key: {2}, values: {3}".format( + key_index, len(identifier), key, values + ), + "DEBUG" + ) + if not values: + self.log( + "Skipping empty values for key: {0}".format(key), + "DEBUG" + ) + continue + if not isinstance(values, list): + values = [values] + self.log( + "Converted single value to list for key {0}: {1}".format(key, values), + "DEBUG" + ) + + expanded_values = [] + + for value in values: + if key == "ip_address_range": + self.log( + "Expanding IP address range: {0}".format(value), + "DEBUG" + ) + try: + start_ip, end_ip = value.split("-") + start = ipaddress.IPv4Address(start_ip.strip()) + end = ipaddress.IPv4Address(end_ip.strip()) + range_ips = [ + str(ipaddress.IPv4Address(i)) + for i in range(int(start), int(end) + 1) + ] + expanded_values.extend(range_ips) + self.log( + "Expanded IP range '{0}' into {1} individual IP addresses".format( + value, len(range_ips) + ), + "DEBUG" + ) + except Exception as e: + self.log( + "Failed to expand IP range '{0}': {1}".format(value, str(e)), + "ERROR" + ) + continue + else: + expanded_values.append(value) + self.log("Added individual value '{0}' to expanded list (not an IP range)".format(value), "DEBUG") + + param_key = param_key_map.get(key) + matched_devices = [] + + missing_ips = [] + + for value_index, ip_or_value in enumerate(expanded_values, start=1): + self.log( + "Processing OR 
value {0}/{1} for key '{2}': {3}".format( + value_index, len(expanded_values), key, ip_or_value + ), + "DEBUG" + ) + params = {param_key_map.get(key, "managementIpAddress"): ip_or_value} + devices = self.execute_device_lookup_with_retry(params, key, ip_or_value, timeout, retries, interval) + + if devices: + matched_devices.extend(devices) + else: + missing_ips.append(ip_or_value) + self.log("Device not found in inventory for identifier '{0}' - adding to missing list".format(ip_or_value), "DEBUG") + + if missing_ips: + display_value = ", ".join(missing_ips) + self.msg = ( + "No devices found for the following identifiers {0}: {1}. " + "Device(s) may not be present in Catalyst Center inventory." + ).format(key, display_value) + self.set_operation_result("success", False, self.msg, "INFO") + if self.msg not in self.total_response: + self.total_response.append(self.msg) + + if combined_devices is None: + combined_devices = matched_devices + self.log( + "Initialized combined devices with {0} devices from first key: {1}".format( + len(matched_devices), key + ), + "DEBUG" + ) + previous_count = len(combined_devices) + combined_devices = [ + device for device in combined_devices + if any( + device.get("instanceUuid") == matched_device.get("instanceUuid") + for matched_device in matched_devices + ) + ] + self.log( + "Applied AND logic intersection - reduced from {0} to {1} devices".format( + previous_count, len(combined_devices) + ), + "DEBUG" + ) + + # Process final results for AND logic + if combined_devices: + self.log( + "AND logic completed successfully - found {0} devices matching all criteria".format( + len(combined_devices) + ), + "INFO" + ) + + for device in combined_devices: + uuid = device.get("instanceUuid") + ip = device.get("managementIpAddress") + if uuid and ip: + ip_uuid_map[ip] = uuid + self.log( + "Mapped AND logic device - IP: {0}, UUID: {1}".format(ip, uuid), + "DEBUG" + ) + else: + self.msg = ( + "No devices found matching all specified identifiers: {0}".format( + list(identifier.keys()) + ) + ) + self.log( + "AND logic completed - no devices matched all criteria", + "WARNING" + ) + + else: + # OR Logic: Multiple entries or single entry with one key + self.log( + "Processing OR logic for {0} identifier groups".format(len(device_identifiers)), + "INFO" + ) + for idx, identifier in enumerate(device_identifiers, start=1): + self.log( + "Processing OR logic group {0}/{1}: {2}".format( + idx, len(device_identifiers), identifier + ), + "DEBUG" + ) + + for key, values in identifier.items(): + if not values: + self.log( + "Skipping empty values for key: {0}".format(key), + "DEBUG" + ) + continue + if not isinstance(values, list): + values = [values] + self.log( + "Converted single value to list for key {0}: {1}".format(key, values), + "DEBUG" + ) + + expanded_values = [] + + for value in values: + if key == "ip_address_range": + self.log( + "Expanding IP address range: {0}".format(value), + "DEBUG" + ) + try: + start_ip, end_ip = value.split("-") + start = ipaddress.IPv4Address(start_ip.strip()) + end = ipaddress.IPv4Address(end_ip.strip()) + range_ips = [ + str(ipaddress.IPv4Address(i)) + for i in range(int(start), int(end) + 1) + ] + expanded_values.extend(range_ips) + self.log( + "Expanded IP range '{0}' into {1} individual IP addresses".format( + value, len(range_ips) + ), + "DEBUG" + ) + except Exception as e: + self.log( + "Failed to expand IP range '{0}': {1}".format(value, str(e)), + "ERROR" + ) + continue + else: + expanded_values.append(value) + self.log("Added individual 
value '{0}' to expanded list (not an IP range)".format(value), "DEBUG") + + missing_ips = [] + + for value_index, ip_or_value in enumerate(expanded_values, start=1): + self.log( + "Processing OR value {0}/{1} for key '{2}': {3}".format( + value_index, len(expanded_values), key, ip_or_value + ), + "DEBUG" + ) + params = {param_key_map.get(key, "managementIpAddress"): ip_or_value} + devices = self.execute_device_lookup_with_retry(params, key, ip_or_value, timeout, retries, interval) + + if devices: + for device in devices: + uuid = device.get("instanceUuid") + ip = device.get("managementIpAddress") + if uuid and ip: + ip_uuid_map[ip] = uuid + else: + missing_ips.append(ip_or_value) + self.log("Device not found in inventory for identifier '{0}' - adding to missing list".format(ip_or_value), "DEBUG") + + if missing_ips: + display_value = ", ".join(missing_ips) + self.msg = ( + "No devices found for the following {0}(s): {1}. " + "Device(s) may not be present in Catalyst Center inventory." + ).format(key, display_value) + self.set_operation_result("success", False, self.msg, "INFO") + if self.msg not in self.total_response: + self.total_response.append(self.msg) + + total_devices = len(ip_uuid_map) + self.log( + "Device UUID mapping completed successfully using {0} logic - mapped {1} unique devices".format( + logic_type, total_devices + ), + "INFO" + ) + + if total_devices > 0: + self.log( + "Successfully mapped devices: {0}".format(list(ip_uuid_map.keys())), + "DEBUG" + ) + else: + self.log( + "No devices found matching the specified criteria", + "WARNING" + ) + self.log("Device UUID mapping completed — mapped {0} managed devices.".format(total_devices), "INFO") + + return ip_uuid_map + + def execute_device_lookup_with_retry(self, params, key, value, timeout, retries, interval): + """ + Execute device lookup API call with comprehensive retry mechanism and timeout handling. 
+ + Parameters: + params (dict): API parameters for device lookup + key (str): Filter key being processed + value (str): Filter value being processed + timeout (int): Maximum timeout in seconds + retries (int): Maximum number of retry attempts + interval (int): Wait interval between retries + + Returns: + list: List of found devices, empty list if no devices found + """ + attempt = 0 + start_time = time.time() + + self.log( + "Starting device lookup with retry mechanism - key: {0}, value: {1}".format( + key, value + ), + "DEBUG" + ) + + while attempt < retries and (time.time() - start_time) < timeout: + elapsed_time = time.time() - start_time + self.log( + "Attempt {0}/{1} for {2}={3} - elapsed time: {4:.1f}s".format( + attempt + 1, retries, key, value, elapsed_time + ), + "DEBUG" + ) + + try: + self.log( + "Executing API call with parameters: {0}".format(params), + "DEBUG" + ) + + response = self.dnac._exec( + family="devices", + function="get_device_list", + params=params + ) + + self.log( + "Received API response for {0}={1}: {2}".format( + key, value, response + ), + "DEBUG" + ) + + devices = response.get("response", []) + + if devices: + self.log( + "Found {0} devices for {1}={2} on attempt {3}".format( + len(devices), key, value, attempt + 1 + ), + "DEBUG" + ) + return devices + else: + self.log( + "No devices found for {0}={1} on attempt {2}".format( + key, value, attempt + 1 + ), + "DEBUG" + ) + + except Exception as e: + self.log( + "API call failed for {0}={1} on attempt {2}: {3}".format( + key, value, attempt + 1, str(e) + ), + "WARNING" + ) + + # Prepare for next attempt + attempt += 1 + + if attempt < retries and (time.time() - start_time) < timeout: + self.log( + "Waiting {0}s before next attempt for {1}={2}".format( + interval, key, value + ), + "DEBUG" + ) + time.sleep(interval) + elif attempt >= retries: + self.log( + "Maximum retry attempts ({0}) reached for {1}={2}".format( + retries, key, value + ), + "WARNING" + ) + break + elif (time.time() - start_time) >= timeout: + self.log( + "Timeout ({0}s) reached for {1}={2}".format(timeout, key, value), + "WARNING" + ) + break + + total_elapsed = time.time() - start_time + self.log( + "Device lookup completed for {0}={1} - no devices found, attempts: {2}, elapsed: {3:.1f}s".format( + key, value, attempt, total_elapsed + ), + "DEBUG" + ) + + return [] + + def get_devices_from_site(self, site_name): + """ + Retrieves device UUIDs from a specified site hierarchy in Cisco Catalyst Center. + + This method performs comprehensive site hierarchy analysis and device discovery by processing + different site types (global, area, building, floor) and their relationships. It handles + both parent and child site structures, applies wildcard patterns for hierarchical site + discovery, and collects all network devices assigned to the specified site and its sub-sites. + + Args: + site_name (str): The hierarchical site name for device discovery. + Format: "Global/Region/Building/Floor" or any subset thereof. + Examples: + - "Global" (retrieves devices from entire hierarchy) + - "Global/USA/NewYork" (retrieves devices from NewYork area and sub-sites) + - "Global/USA/NewYork/Building1/Floor1" (retrieves devices from specific floor) + - "Global/Campus/Building-A" (retrieves building + floor devices) + + Returns: + list: A list of device UUIDs for all devices assigned to the specified site hierarchy. + Returns empty list if no devices found or site doesn't exist. 
+ """ + + self.log("Starting device retrieval from site: {0}".format(site_name), "INFO") + + if not site_name: + return [] + + # Determine site type + site_type = self.get_sites_type(site_name) + if not site_type: + self.log( + "Unable to determine site type for: '{0}'".format(site_name), + "WARNING" + ) + return [] + + self.log( + "Site type determined - site: '{0}', type: '{1}'".format(site_name, site_type), + "DEBUG" + ) + + if site_type == "building": + site_info = self.process_building_site(site_name) + + elif site_type in ["area", "global"]: + site_info = self.process_area_site(site_name) + + elif site_type == "floor": + site_info = self.process_floor_site(site_name) + + else: + self.log( + "Unknown site type '{0}' for site '{1}'".format(site_type, site_name), + "ERROR" + ) + return [] + return self.fetch_devices_for_sites(site_info) + + def process_building_site(self, site_name): + """ + Process building site hierarchy including parent site and child floors. + + Args: + site_name (str): Building site name to process + + Returns: + dict: Dictionary mapping site hierarchy names to site IDs + """ + self.log( + "Processing building hierarchy for site: '{0}'".format(site_name), + "DEBUG" + ) + + site_info = {} + + # Get parent building site data + self.log( + "Fetching parent building site data for: '{0}'".format(site_name), + "DEBUG" + ) + + parent_site_data = self.get_site(site_name) + + if parent_site_data and parent_site_data.get("response"): + self.log( + "Parent building site data found - processing {0} items".format( + len(parent_site_data.get('response', [])) + ), + "DEBUG" + ) + + for item in parent_site_data["response"]: + if "nameHierarchy" in item and "id" in item: + site_info[item["nameHierarchy"]] = item["id"] + self.log( + "Added parent site '{0}' with ID '{1}' to hierarchy".format( + item['nameHierarchy'], item['id'] + ), + "DEBUG" + ) + else: + self.log( + "No parent site data found for building: '{0}'".format(site_name), + "WARNING" + ) + + wildcard_site = site_name + "/.*" + self.log( + "Fetching child floor sites using wildcard pattern: '{0}'".format( + wildcard_site + ), + "DEBUG" + ) + child_site_data = self.get_site(wildcard_site) + + if child_site_data and child_site_data.get("response"): + for item in child_site_data["response"]: + if "nameHierarchy" in item and "id" in item: + site_info[item["nameHierarchy"]] = item["id"] + self.log( + "Added child floor site '{0}' with ID '{1}' to hierarchy".format( + item['nameHierarchy'], item['id'] + ), + "DEBUG" + ) + else: + self.log( + "No child floor sites found under building: '{0}'".format(site_name), + "DEBUG" + ) + + return site_info + + def process_area_site(self, site_name): + """ + Process area or global site hierarchy including all child sites. 
+ + Args: + site_name (str): Area or global site name to process + + Returns: + dict: Dictionary mapping site hierarchy names to site IDs + """ + self.log( + "Processing area/global hierarchy for site: '{0}'".format(site_name), + "DEBUG" + ) + + site_info = {} + + wildcard_site = site_name + "/.*" + child_data = self.get_site(wildcard_site) + + site_names = wildcard_site if child_data and child_data.get("response") else site_name + + site_data = self.get_site(site_names) + + for item in site_data.get("response", []): + if "nameHierarchy" in item and "id" in item: + site_info[item["nameHierarchy"]] = item["id"] + self.log( + "Added child site '{0}' with ID '{1}' to hierarchy".format( + item['nameHierarchy'], item['id'] + ), + "DEBUG" + ) + else: + self.log( + "No child sites found under area/global: '{0}' - using original site".format( + site_name + ), + "DEBUG" + ) + + return site_info + + def process_floor_site(self, site_name): + """ + Process floor site hierarchy (single site). + + Args: + site_name (str): Floor site name to process + + Returns: + dict: Dictionary mapping site hierarchy names to site IDs + """ + self.log( + "Processing floor hierarchy for site: '{0}'".format(site_name), + "DEBUG" + ) + + site_info = {} + + site_data = self.get_site(site_name) + + if site_data and site_data.get("response"): + self.log( + "Floor site data found - processing {0} items".format( + len(site_data.get('response', [])) + ), + "DEBUG" + ) + + for item in site_data["response"]: + if "nameHierarchy" in item and "id" in item: + site_info[item["nameHierarchy"]] = item["id"] + self.log( + "Added floor site '{0}' with ID '{1}' to hierarchy".format( + item['nameHierarchy'], item['id'] + ), + "DEBUG" + ) + else: + self.log( + "No site data found for floor: '{0}'".format(site_name), + "WARNING" + ) + + return site_info + + def fetch_devices_for_sites(self, site_info): + """ + Retrieve all devices from a specific site ID using pagination. 
+ + Args: + site_info (dict): Dictionary mapping site hierarchy names to site IDs + + Returns: + list: List of device IDs from the site + """ + self.log( + "Starting device retrieval from site '{0}'".format( + site_info + ), + "DEBUG" + ) + + device_id_list = [] + + for hierarchy, site_id in site_info.items(): + offset = 1 + limit = self.get_device_details_limit() + + self.log( + "Using pagination - limit: {0} devices per request".format(limit), + "DEBUG" + ) + + while True: + try: + self.log( + "Fetching devices from site '{0}' - offset: {1}, limit: {2}".format( + site_info, offset, limit + ), + "DEBUG" + ) + response = self.dnac._exec( + family="site_design", + function="get_site_assigned_network_devices", + params={"site_id": site_id, "offset": offset, "limit": limit}, + ) + + devices = response.get("response", []) + if not devices: + self.log( + "No more devices found for site '{0}' at offset {1}".format( + hierarchy, offset + ), + "DEBUG", + ) + break + + for device in devices: + device_id = device.get("deviceId") + device_id_list.append(device_id) + self.log( + "Retrieved device ID '{0}' from site '{1}'".format(device_id, hierarchy), + "DEBUG" + ) + + offset += limit + + except Exception as e: + self.log( + "Exception during device retrieval from site '{0}' (ID: {1}): {2}".format( + hierarchy, site_id, str(e) + ), + "ERROR" + ) + return None + self.log( + "Device retrieval completed for site '{0}' - total devices: {1}".format( + hierarchy, len(device_id_list) + ), + "DEBUG" + ) + + return device_id_list + + def filter_network_devices(self, filtered_config): + """ + Performs comprehensive network device filtering based on multiple criteria and site hierarchies. + + This method implements advanced device discovery and filtering capabilities by combining site-based + device identification, device identifier matching, and attribute-based filtering to create a + refined list of network devices that meet all specified criteria. + + Args: + filtered_config (dict): Comprehensive filtering configuration dictionary. + + Returns: + dict or None: Dictionary mapping device IP addresses to their UUIDs for devices matching all criteria. + Returns None if no devices match the filtering criteria or if critical errors occur. 
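+
+        Example (illustrative): a typical filtered_config shape. Every key is optional and the values
+        below are placeholders drawn from the module's input schema.
+            filtered_config = {
+                "site_hierarchy": "Global/USA/NewYork",
+                "device_role": "ACCESS",
+                "device_family": "Switches and Hubs",
+                "device_identifier": [{"ip_address": ["192.168.1.10"]}],
+                "timeout": 120,
+                "retries": 3,
+                "interval": 10
+            }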
+ """ + self.log("Filtering network devices based on provided Args", "INFO") + + limit = 500 + offset = 1 + + site_hierarchy = filtered_config.get("site_hierarchy") + device_type = filtered_config.get("device_type") + device_role = filtered_config.get("device_role") + device_family = filtered_config.get("device_family") + software_version = filtered_config.get("software_version") + os_type = filtered_config.get("os_type") + device_identifier = filtered_config.get("device_identifier") + + timeout = filtered_config.get("timeout", 120) + retries = filtered_config.get("retries", 3) + interval = filtered_config.get("interval", 10) + + self.log( + "Using filter configuration - timeout: {0}s, retries: {1}, interval: {2}s".format( + timeout, retries, interval + ), + "DEBUG" + ) + + self.log( + "Filter criteria - site_hierarchy: {0}, device_type: {1}, role: {2}, family: {3}".format( + site_hierarchy, device_type, device_role, device_family + ), + "DEBUG" + ) + + filtered_devices = {} + start_time = time.time() + attempt = 0 + elapsed_time = time.time() - start_time + + while attempt < retries and (elapsed_time < timeout): + try: + self.log( + "Starting device discovery phase - retrieving network devices with offset {0} and limit {1}".format( + offset, limit + ), + "DEBUG" + ) + self.log("Attempt {0} - Retrieving network devices with offset {1} and limit {2}".format( + attempt + 1, offset, limit + ), "DEBUG") + + all_devices = [] + + device_ids_in_site = [] + + # Phase 1: Site-based device discovery + if site_hierarchy: + self.log( + "Processing site hierarchy filter: {0}".format(site_hierarchy), + "INFO" + ) + device_ids_in_site = self.get_devices_from_site(site_hierarchy) + self.log( + "Site-based device discovery completed - found {0} devices for site '{1}'".format( + len(device_ids_in_site), site_hierarchy + ), + "INFO" + ) + self.log( + "Device IDs from site '{0}': {1}".format(site_hierarchy, device_ids_in_site), + "DEBUG" + ) + + # Phase 2: Device identifier-based discovery + if device_identifier: + self.log( + "Processing device identifier filter: {0}".format(device_identifier), + "INFO" + ) + ip_uuid_map = self.get_device_id(filtered_config) + if ip_uuid_map: + device_ids_from_identifiers = list(ip_uuid_map.values()) + self.log( + "Identifier-based device discovery completed - found {0} devices".format( + len(device_ids_from_identifiers) + ), + "INFO" + ) + + # Combine site and identifier filters if both are specified + if site_hierarchy: + device_ids = list(set(device_ids_from_identifiers) & set(device_ids_in_site)) + self.log( + "Applied intersection of site and identifier filters - result: {0} devices".format( + len(device_ids) + ), + "DEBUG" + ) + else: + device_ids = device_ids_from_identifiers + self.log( + "Using identifier-based filter results: {0} devices".format( + len(device_ids) + ), + "DEBUG" + ) + else: + self.log( + "No devices found matching device identifier criteria", + "WARNING" + ) + device_ids = [] + else: + device_ids = device_ids_in_site if site_hierarchy else [None] + self.log( + "Using site-based filter results or all devices: {0}".format( + len(device_ids) if device_ids != [None] else "all" + ), + "DEBUG" + ) + + self.log( + "Device discovery completed - processing {0} device IDs for attribute filtering".format( + len(device_ids) if device_ids != [None] else "all devices" + ), + "INFO" + ) + + # Phase 3: Apply attribute-based filters + for device_index, device_id in enumerate(device_ids): + self.log( + "Processing device {0}/{1} for attribute filtering - device_id: 
{2}".format( + device_index + 1, len(device_ids), device_id + ), + "DEBUG" + ) + params = {"offset": offset, "limit": limit} + if device_id: + params["id"] = device_id + + filters = { + "role": device_role, + "family": device_family, + "type": device_type, + "software_version": software_version, + "software_type": os_type + } + + applied_filters = [] + for key, value in filters.items(): + if value: + params[key] = value + applied_filters.append("{0}='{1}'".format(key, value)) + self.log( + "Applied {0} filter with value: '{1}'".format(key, value), + "DEBUG" + ) + + if applied_filters: + self.log( + "Executing device query with filters: {0}".format( + ", ".join(applied_filters) + ), + "DEBUG" + ) + else: + self.log( + "Executing device query without attribute filters", + "DEBUG" + ) + + self.log( + "API parameters for device query: {0}".format(params), + "DEBUG" + ) + + while True: + self.log( + "Executing API call - offset: {0}, limit: {1}".format( + params.get("offset"), params.get("limit") + ), + "DEBUG" + ) + response = self.dnac._exec( + family="devices", + function="get_device_list", + params=params + ) + + self.log("Received API response from 'get_network_devices': {0}".format(response), "DEBUG") + + devices = response.get("response", []) + + if devices: + self.log( + "Found {0} devices in current page".format(len(devices)), + "DEBUG" + ) + all_devices.extend(devices) + device_id = devices[0].get("instanceUuid") + self.log( + "Sample device from response - UUID: {0}".format(device_id), + "DEBUG" + ) + + if len(devices) < limit: + self.log( + "Reached end of results - received {0} devices (less than limit {1})".format( + len(devices), limit + ), + "DEBUG" + ) + break + + offset += limit + self.log( + "Continuing pagination - new offset: {0}".format(offset), + "DEBUG" + ) + params["offset"] = offset + + self.log("Total network devices retrieved: {0}".format(len(all_devices)), "INFO") + + # Phase 4: Build final filtered device mapping + devices_processed = 0 + for device in all_devices: + ip = device.get("managementIpAddress") + device_id = device.get("instanceUuid") + if ip and device_id: + filtered_devices[ip] = device_id + devices_processed += 1 + self.log( + "Device {0} included in final results - IP: {1}, UUID: {2}".format( + devices_processed, ip, device_id + ), + "DEBUG" + ) + else: + self.log( + "Skipping device with missing IP or UUID - IP: {0}, UUID: {1}".format( + ip, device_id + ), + "WARNING" + ) + + if filtered_devices: + self.log( + "Device filtering completed successfully on attempt {0} - found {1} matching devices".format( + attempt + 1, len(filtered_devices) + ), + "INFO" + ) + break + else: + if attempt < retries and (time.time() - start_time) < timeout: + self.log( + "No devices matched criteria on attempt {0}/{1} - retrying in {2} seconds".format( + attempt + 1, retries, interval + ), + "WARNING" + ) + time.sleep(interval) + attempt += 1 + else: + self.log( + "No devices matched filtering criteria after {0} attempts".format( + attempt + 1 + ), + "WARNING" + ) + break + + except Exception as e: + self.msg = "Error occurred while retrieving/filtering network devices: {0}".format(str(e)) + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + return None + + if not filtered_devices: + self.msg = ( + "No network devices matched the provided filter criteria after {0} attempts " + "and {1:.1f} seconds".format(attempt + 1, time.time() - start_time) + ) + self.set_operation_result("success", False, self.msg, "INFO") + self.log( + "Device 
filtering completed with no matching devices", + "WARNING" + ) + return None + + total_elapsed = time.time() - start_time + self.log( + "Network device filtering completed successfully - " + "found {0} devices in {1:.1f} seconds across {2} attempts".format( + len(filtered_devices), total_elapsed, attempt + 1 + ), + "INFO" + ) + + self.log( + "Final filtered device mapping: {0}".format( + dict(list(filtered_devices.items())[:5]) + ) + ("... and {0} more".format(len(filtered_devices) - 5) if len(filtered_devices) > 5 else ""), + "DEBUG" + ) + + return filtered_devices + + def get_device_info(self, ip_uuid_map): + """ + Fetch detailed information for a list of network devices from Cisco Catalyst Center. + + For each device ID, this method calls the 'get_device_list' API and aggregates the results. + Each entry in the returned list contains the device's management IP and its details. + + The retrieved device details include key fields like: + family, type, lastUpdateTime, macAddress, softwareVersion, serialNumber, managementIpAddress, + hostname, upTime, role, platformId, reachabilityStatus, description, instanceUuid, id + and more, providing a comprehensive overview of each device's configuration and status. + + Executes API calls for each device ID and aggregates the retrieved data into a structured list. + + Args: + ip_uuid_map (dict): A mapping of device IPs to their UUIDs. + + Returns: + list: A list with a single dictionary: + [ + { + "device_info": [ + { + "device_ip": , + "device_details": + }, + ] + } + ] + """ + self.log("Starting device info retrieval for device_ids: {0}".format(ip_uuid_map), "INFO") + device_info_list = [] + + for device_ip, device_id in ip_uuid_map.items(): + self.log("Processing device ID: {0} (IP: {1})".format(device_id, device_ip), "DEBUG") + self.log("Fetching device info for device_id: {0}, device_ip: {1}".format(device_id, device_ip), "DEBUG") + + try: + response = self.dnac._exec( + family="devices", + function="get_device_list", + params={'id': device_id} + ) + self.log( + "Received API response from 'get_device_info' for device {0} (IP: {1}): {2}".format( + device_id, device_ip, response), "DEBUG") + + device_response = response.get("response", []) + if device_response: + self.log("Device details found for device_id: {0}, device_ip: {1}".format(device_id, device_ip), "INFO") + device_info_list.append({ + "device_ip": device_ip, + "device_details": device_response + }) + else: + self.log("No device details found for device_id: {0}, device_ip: {1}".format(device_id, device_ip), "WARNING") + device_info_list.append({ + "device_ip": device_ip, + "device_details": [] + }) + + except Exception as e: + self.msg = "Exception occurred while getting device list for device_id {0}, device_ip {1}: {2}".format(device_id, device_ip, e) + device_info_list.append({ + "device_ip": device_ip, + "device_details": "Error: {0}".format(e) + }) + continue + + result = [{"device_info": device_info_list}] + + self.log("Completed device info retrieval. Total devices processed: {0}".format(len(device_info_list)), "INFO") + self.log("Device info result: {0}".format(result), "DEBUG") + return result + + def get_interface_info(self, ip_uuid_map): + """ + Fetch interface information on interfaces for specified devices from Cisco Catalyst Center. + + For each device ID, this method calls the 'get_device_list' API and aggregates the results. + Each entry in the returned list contains the device's management IP and its details. 
+ + Retrieves detailed interface data for each device ID provided. + + The retrieved interface details include key fields such as: + - adminStatus (e.g., "UP") + - duplex (e.g., "FullDuplex") + - ifIndex (e.g., "73") + - interfaceType (e.g., "Physical") + - macAddress (e.g., "0c:75:bd:42:db:c1") + - mtu (e.g., "9100") + - nativeVlanId (e.g., "1") + - portMode (e.g., "access") + - portName (e.g., "AppGigabitEthernet1/0/1") + - portType (e.g., "Ethernet Port") + - serialNo (e.g., "FJC2335S09F") + - speed (e.g., "1000000") + - status (e.g., "up") + - vlanId (e.g., "1") + - voiceVlan (e.g., "") + - description (e.g., "") + - instanceUuid + - instanceTenantId + + Args: + ip_uuid_map (dict): A mapping of device IPs to their UUIDs. + + Returns: + list: A list with a single dictionary: + [ + { + "interface_info": [ + { + "device_ip": , + "interface_details": + }, + ] + } + ] + """ + self.log("Fetching interface info for {0} devices: {1}".format(len(ip_uuid_map), list(ip_uuid_map.keys())), "INFO") + + interface_info_list = [] + + for device_ip, device_id in ip_uuid_map.items(): + self.log("Processing device ID: {0} (IP: {1})".format(device_id, device_ip), "DEBUG") + self.log("Fetching device interface info for device_id: {0}, device_ip: {1}".format(device_id, device_ip), "DEBUG") + + try: + response = self.dnac._exec( + family="devices", + function="get_interface_info_by_id", + params={'device_id': device_id} + ) + self.log( + "Received API response from 'get_interface_info' for device {0} (IP: {1}): {2}".format( + device_id, device_ip, response), "DEBUG") + + interface_data = response.get("response", []) + if interface_data: + self.log("Found {0} interface records for device IP: {1}".format(len(interface_data), device_ip), "DEBUG") + interface_info_list.append({ + "device_ip": device_ip, + "interface_details": interface_data + }) + else: + self.log("No interface details found for device IP: {0}".format(device_ip), "DEBUG") + interface_info_list.append({ + "device_ip": device_ip, + "interface_details": [] + }) + + except Exception as e: + self.msg = "Exception occurred while getting device interface info list for device_id {0}, device_ip {1}: {2}".format(device_id, device_ip, e) + interface_info_list.append({ + "device_ip": device_ip, + "interface_details": "Error: {0}".format(e) + }) + continue + + result = [{"interface_info": interface_info_list}] + + self.log("Completed Device Interface info retrieval. Total devices processed: {0}".format(len(interface_info_list)), "INFO") + self.log("Device Interface info result: {0}".format(result), "DEBUG") + return result + + def get_interface_vlan_info(self, ip_uuid_map): + """ + Fetch VLAN interface details for a list of devices from Cisco Catalyst Center. + + For each device ID, this method calls the 'get_device_list' API and aggregates the results. + Each entry in the returned list contains the device's management IP and its details. + + Retrieves VLAN interface configuration data for each device ID and aggregates the results + into a structured format. Each device's VLAN interface details include interface-specific + VLAN assignments, configurations, and related network information. 
+ + The retrieved VLAN interface details include key fields such as: + - interfaceName (e.g., "GigabitEthernet0/1") + - ipAddress (e.g., "192.168.10.25") + - mask (e.g., 24) + - networkAddress (e.g., "192.168.10.0") + - numberOfIPs (e.g., 254) + - prefix (e.g., "192.168.10.0/24") + - vlanNumber (e.g., 10) + - vlanType (e.g., "Data") + + Args: + ip_uuid_map (dict): A mapping of device IPs to their UUIDs. + + Returns: + list: A list with a single dictionary: + [ + { + "interface_vlan_info": [ + { + "device_ip": , + "interface_vlan_details": + }, + ] + } + ] + """ + self.log("Fetching VLAN interface data for {0} devices: {1}".format(len(ip_uuid_map), list(ip_uuid_map.keys())), "INFO") + + vlans_info_list = [] + + for device_ip, device_id in ip_uuid_map.items(): + self.log("Processing device ID: {0} (IP: {1})".format(device_id, device_ip), "DEBUG") + self.log("Fetching device interface vlans info for device_id: {0}, device_ip: {1}".format(device_id, device_ip), "DEBUG") + + try: + response = self.dnac._exec( + family="devices", + function="get_device_interface_vlans", + params={'id': device_id} + ) + self.log( + "Received API response from 'get_device_interface_vlans' for device {0} (IP: {1}): {2}".format( + device_id, device_ip, response), "DEBUG") + + vlan_data = response.get("response", []) + if vlan_data: + self.log("Found {0} VLAN records for device IP: {1}".format(len(vlan_data), device_ip), "DEBUG") + vlans_info_list.append({ + "device_ip": device_ip, + "interface_vlan_details": vlan_data + }) + else: + self.log("No VLAN interface data found for device IP: {0}".format(device_ip), "DEBUG") + vlans_info_list.append({ + "device_ip": device_ip, + "interface_vlan_details": [] + }) + + except Exception as e: + self.msg = "Exception occurred while getting VLAN interface data for device {0} (IP: {1}): {2}".format(device_id, device_ip, e) + vlans_info_list.append({ + "device_ip": device_ip, + "interface_vlan_details": "Error: {0}".format(e) + }) + continue + + result = [{"interface_vlan_info": vlans_info_list}] + + self.log("Completed Interface Vlan info retrieval. Total devices processed: {0}".format(len(vlans_info_list)), "INFO") + self.log("Interface Vlan info result: {0}".format(result), "DEBUG") + return result + + def get_linecard_info(self, ip_uuid_map): + """ + Fetch line card details for a list of devices from Cisco Catalyst Center. + + For each device ID, this method calls the 'get_device_list' API and aggregates the results. + Each entry in the returned list contains the device's management IP and its details. + + Queries and aggregates line card information for each provided device ID. + + The retrieved line card details include key fields such as: + - serialno (e.g., "SN123456789") + - partno (e.g., "PN987654321") + - switchno (e.g., "SW-001-A1") + - slotno (e.g., "Slot-04") + + Args: + ip_uuid_map (dict): A mapping of device IPs to their UUIDs. 
+ + Returns: + list: A list with a single dictionary: + [ + { + "line_card_info": [ + { + "device_ip": , + "linecard_details": + }, + ] + } + ] + """ + self.log("Fetching Line card data for {0} devices: {1}".format(len(ip_uuid_map), list(ip_uuid_map.keys())), "INFO") + + linecards_info_list = [] + + for device_ip, device_id in ip_uuid_map.items(): + self.log("Processing device ID: {0} (IP: {1})".format(device_id, device_ip), "DEBUG") + self.log("Fetching line card info for device_id: {0}, device_ip: {1}".format(device_id, device_ip), "DEBUG") + + try: + response = self.dnac._exec( + family="devices", + function="get_linecard_details", + params={'device_uuid': device_id} + ) + self.log( + "Received API response from 'get_linecard_details' for device {0} (IP: {1}): {2}".format( + device_id, device_ip, response), "DEBUG") + + linecard_data = response.get("response", []) + if linecard_data: + self.log("Found {0} line card records for device IP: {1}".format(len(linecard_data), device_ip), "DEBUG") + linecards_info_list.append({ + "device_ip": device_ip, + "linecard_details": linecard_data + }) + else: + self.log("No line card details found for device IP: {0}".format(device_ip), "DEBUG") + linecards_info_list.append({ + "device_ip": device_ip, + "linecard_details": [] + }) + + except Exception as e: + self.msg = "Exception occurred while getting line card info list for device_id {0}, device_ip {1}: {2}".format(device_id, device_ip, e) + linecards_info_list.append({ + "device_ip": device_ip, + "linecard_details": "Error: {0}".format(e) + }) + continue + + result = [{"line_card_info": linecards_info_list}] + + self.log("Completed Line Card info retrieval. Total devices processed: {0}".format(len(linecards_info_list)), "INFO") + self.log("Line Card info result: {0}".format(result), "DEBUG") + return result + + def get_supervisor_card_info(self, ip_uuid_map): + """ + Fetch supervisor card details for a list of devices from Cisco Catalyst Center. + + For each device ID, this method calls the 'get_device_list' API and aggregates the results. + Each entry in the returned list contains the device's management IP and its details. + + Retrieves detailed supervisor card information for each provided device ID. + + The retrieved supervisor card details include key fields such as: + - serialno (e.g., "SN1234567890") + - partno (e.g., "PN9876543210") + - switchno (e.g., "SW-01") + - slotno (e.g., "3") + + Args: + ip_uuid_map (dict): A mapping of device IPs to their UUIDs. 
+ + Returns: + list: A list with a single dictionary: + [ + { + "supervisor_card_info": [ + { + "device_ip": , + "supervisor_card_details": + }, + ] + } + ] + """ + self.log("Fetching supervisor card data for {0} devices: {1}".format(len(ip_uuid_map), list(ip_uuid_map.keys())), "INFO") + + supervisor_cards_info_list = [] + + for device_ip, device_id in ip_uuid_map.items(): + self.log("Processing device ID: {0} (IP: {1})".format(device_id, device_ip), "DEBUG") + self.log("Fetching supervisor card info for device_id: {0}, device_ip: {1}".format(device_id, device_ip), "DEBUG") + + try: + response = self.dnac._exec( + family="devices", + function="get_supervisor_card_detail", + params={'device_uuid': device_id} + ) + self.log( + "Received API response from 'get_supervisor_card_details' for device {0} (IP: {1}): {2}".format( + device_id, device_ip, response), "DEBUG") + + supervisor_cards = response.get("response", []) + if supervisor_cards: + self.log("Found {0} supervisor card records for device IP: {1}".format(len(supervisor_cards), device_ip), "DEBUG") + supervisor_cards_info_list.append({ + "device_ip": device_ip, + "supervisor_card_details": supervisor_cards + }) + else: + self.log("No supervisor card details found for device IP: {0}".format(device_ip), "DEBUG") + supervisor_cards_info_list.append({ + "device_ip": device_ip, + "supervisor_card_details": [] + }) + + except Exception as e: + self.msg = "Exception occurred while getting supervisor card info list for device_id {0}, device_ip {1}: {2}".format(device_id, device_ip, e) + supervisor_cards_info_list.append({ + "device_ip": device_ip, + "supervisor_card_details": "Error: {0}".format(e) + }) + continue + + result = [{"supervisor_card_info": supervisor_cards_info_list}] + + self.log("Completed Device Supervisor Card info retrieval. Total devices processed: {0}".format(len(supervisor_cards_info_list)), "INFO") + self.log("Device Supervisor Card info result: {0}".format(result), "DEBUG") + return result + + def get_poe_info(self, ip_uuid_map): + """ + Fetch Power over Ethernet (PoE) details for specified devices from Cisco Catalyst Center. + + For each device ID, this method calls the 'get_device_list' API and aggregates the results. + Each entry in the returned list contains the device's management IP and its details. + + Retrieves PoE information for each device ID provided. + + The retrieved PoE details include key fields such as: + - powerAllocated (e.g., "525") + - powerConsumed (e.g., "0") + - powerRemaining (e.g., "525") + + Args: + ip_uuid_map (dict): Mapping of device IPs to their UUIDs. 
+ + Returns: + list: A list with a single dictionary: + [ + { + "poe_info": [ + { + "device_ip": , + "poe_details": + }, + ] + } + ] + """ + self.log("Fetching PoE data for {0} devices: {1}".format(len(ip_uuid_map), list(ip_uuid_map.keys())), "INFO") + + poe_info_list = [] + + for device_ip, device_id in ip_uuid_map.items(): + self.log("Processing device ID: {0} (IP: {1})".format(device_id, device_ip), "DEBUG") + self.log("Fetching poe info for device_id: {0}, device_ip: {1}".format(device_id, device_ip), "DEBUG") + + try: + response = self.dnac._exec( + family="devices", + function="poe_details", + params={'device_uuid': device_id} + ) + self.log( + "Received API response from 'poe_details': {0}".format( + (response) + ), + "DEBUG", + ) + poe_data = response.get("response", []) + if poe_data: + self.log("Found {0} PoE records for device IP: {1}".format(len(poe_data), device_ip), "DEBUG") + poe_info_list.append({ + "device_ip": device_ip, + "poe_details": poe_data + }) + else: + self.log("No PoE details found for device IP: {0}".format(device_ip), "DEBUG") + poe_info_list.append({ + "device_ip": device_ip, + "poe_details": [] + }) + + except Exception as e: + self.msg = "Exception occurred while getting PoE Info list for device_id {0}, device_ip {1}: {2}".format(device_id, device_ip, e) + poe_info_list.append({ + "device_ip": device_ip, + "poe_details": "Error: {0}".format(e) + }) + continue + + result = [{"poe_info": poe_info_list}] + + self.log("Completed Device PoE info retrieval. Total devices processed: {0}".format(len(poe_info_list)), "INFO") + self.log("Device PoE info result: {0}".format(result), "DEBUG") + return result + + def get_module_count_info(self, ip_uuid_map): + """ + Fetch module count details for specified devices from Cisco Catalyst Center. + + For each device ID, this method calls the 'get_device_list' API and aggregates the results. + Each entry in the returned list contains the device's management IP and its details. + + Retrieves module count information for each device ID provided. + + The retrieved module count includes the key field: + - module_count_info (int): Number of modules in the device (e.g., 3) + + Args: + ip_uuid_map (dict): Mapping of device IPs to their UUIDs. 
+ + Returns: + list: A list with a single dictionary: + [ + { + "module_count_info": [ + { + "device_ip": , + "module_count_details": + }, + ] + } + ] + """ + + self.log( + "Processing module count data for {0} devices with IP-UUID mapping: {1}".format( + len(ip_uuid_map) if ip_uuid_map else 0, + list(ip_uuid_map.keys()) if ip_uuid_map else [] + ), + "DEBUG" + ) + + module_counts_info_list = [] + successful_retrievals = 0 + failed_retrievals = 0 + + for device_index, (device_ip, device_id) in enumerate(ip_uuid_map.items(), start=1): + self.log( + "Processing device {0}/{1} - IP: {2}, UUID: {3}".format( + device_index, len(ip_uuid_map), device_ip, device_id + ), + "DEBUG" + ) + self.log("Fetching module count info for device_id: {0}, device_ip: {1}".format(device_id, device_ip), "DEBUG") + + # Validate device IP and ID + if not device_ip or not device_id: + self.log( + "Skipping device with missing IP or UUID - IP: {0}, UUID: {1}".format( + device_ip, device_id + ), + "WARNING" + ) + module_counts_info_list.append({ + "device_ip": device_ip or "unknown", + "module_count_details": "Error: Missing device IP or UUID" + }) + failed_retrievals += 1 + continue + + self.log( + "Executing module count API call for device IP: {0}, UUID: {1}".format( + device_ip, device_id + ), + "DEBUG" + ) + + try: + response = self.dnac._exec( + family="devices", + function="get_module_count", + params={'device_id': device_id} + ) + self.log( + "Received API response from 'get_module_count' for device {0} (IP: {1}): {2}".format( + device_id, device_ip, response), "DEBUG") + + module_count_data = response.get("response", []) + + if module_count_data: + successful_retrievals += 1 + self.log("Found {0} module count records for device IP: {1}".format(module_count_data, device_ip), "DEBUG") + module_counts_info_list.append({ + "device_ip": device_ip, + "module_count_details": module_count_data + }) + else: + successful_retrievals += 1 + self.log("No module count details found for device IP: {0}".format(device_ip), "DEBUG") + module_counts_info_list.append({ + "device_ip": device_ip, + "module_count_details": [] + }) + + except Exception as e: + failed_retrievals += 1 + self.msg = "Exception occurred while getting module count info list for device_id {0}, device_ip {1}: {2}".format(device_id, device_ip, e) + module_counts_info_list.append({ + "device_ip": device_ip, + "module_count_details": "Error: {0}".format(e) + }) + continue + + result = [{"module_count_info": module_counts_info_list}] + + self.log( + "Module count information retrieval completed - " + "total devices: {0}, successful: {1}, failed: {2}".format( + len(ip_uuid_map), successful_retrievals, failed_retrievals + ), + "INFO" + ) + + self.log( + "Module count retrieval result summary: {0}".format( + {"total_devices": len(module_counts_info_list), "result_structure": "module_count_info"} + ), + "DEBUG" + ) + return result + + def get_interface_ids_per_device(self, ip_uuid_map): + """ + Retrieve interface identifiers for devices to enable interface-based operations and connectivity analysis. + + This method queries the Catalyst Center Device API to collect comprehensive interface inventory + information for each specified device. It retrieves interface UUIDs and metadata that + are essential for subsequent operations such as connected device discovery, interface health + monitoring, and network topology mapping within the network. + + Args: + ip_uuid_map (dict): Mapping of device IP addresses to their UUIDs. 
+
+        Returns:
+            dict: A dictionary mapping device IP addresses to sets of interface UUIDs:
+                {
+                    "192.168.1.1": {"interface-uuid-1", "interface-uuid-2", "interface-uuid-3"},
+                    "192.168.1.2": {"interface-uuid-4", "interface-uuid-5"},
+                }
+        """
+        self.log("Retrieving interface identifiers for network device interface inventory and management", "INFO")
+        self.log(
+            "Processing interface discovery for {0} network devices".format(
+                len(ip_uuid_map)
+            ),
+            "DEBUG"
+        )
+
+        device_interfaces_map = {}
+
+        # Statistics tracking
+        statistics = {
+            'devices_processed': 0,
+            'devices_with_interfaces': 0,
+            'devices_without_interfaces': 0,
+            'devices_with_errors': 0,
+            'interfaces_without_ids': 0,
+            'total_interfaces_discovered': 0
+        }
+
+        for index, (ip, device_id) in enumerate(ip_uuid_map.items()):
+            statistics['devices_processed'] += 1
+            self.log(
+                "Processing device {0}/{1} - IP: {2}, UUID: {3}".format(
+                    statistics['devices_processed'], len(ip_uuid_map), ip, device_id
+                ),
+                "DEBUG"
+            )
+
+            # Validate device IP and UUID
+            if not ip or not device_id:
+                self.log(
+                    "Skipping device with missing IP or UUID - IP: {0}, UUID: {1}".format(
+                        ip, device_id
+                    ),
+                    "WARNING"
+                )
+                statistics['devices_with_errors'] += 1
+                continue
+
+            try:
+                self.log("Fetching interfaces for device: {0}".format(ip), "DEBUG")
+
+                response = self.dnac._exec(
+                    family="devices",
+                    function="get_interface_info_by_id",
+                    params={"device_id": device_id}
+                )
+                self.log("Received API response for interface query on device {0}".format(ip), "DEBUG")
+                interface_response_data = response.get("response", [])
+                self.log(
+                    "Interface query completed for device {0} - found {1} interface records".format(
+                        ip,
+                        len(interface_response_data)
+                    ),
+                    "DEBUG"
+                )
+                self.log("Received API response for 'get_interface_info_by_id' for device {0}: {1}".format(ip, response), "DEBUG")
+
+                interface_ids = set()
+                interfaces_missing_ids = 0
+
+                for interface in interface_response_data:
+                    interface_id = interface.get("id")
+                    if interface_id:
+                        interface_ids.add(interface_id)
+                    else:
+                        interfaces_missing_ids += 1
+                        self.log(
+                            "Interface record missing UUID identifier for device {0} - skipping interface".format(
+                                ip
+                            ),
+                            "WARNING"
+                        )
+                device_interfaces_map[ip] = interface_ids
+                statistics['interfaces_without_ids'] += interfaces_missing_ids
+                statistics['total_interfaces_discovered'] += len(interface_ids)
+
+                if interface_ids:
+                    statistics['devices_with_interfaces'] += 1
+                    self.log(
+                        "Successfully mapped {0} interface identifiers for device {1}".format(
+                            len(interface_ids),
+                            ip
+                        ),
+                        "DEBUG"
+                    )
+                else:
+                    statistics['devices_without_interfaces'] += 1
+                    self.log(
+                        "No interface identifiers found for device {0} - "
+                        "device may have no configured interfaces".format(ip),
+                        "WARNING"
+                    )
+                if interfaces_missing_ids > 0:
+                    self.log(
+                        "Warning: {0} interface records for device {1} were missing "
+                        "UUID identifiers".format(
+                            interfaces_missing_ids,
+                            ip
+                        ),
+                        "WARNING"
+                    )
+
+            except Exception as e:
+                statistics['devices_with_errors'] += 1
+                self.msg = "Failed to retrieve interface information for device {0}: {1}".format(ip, str(e))
+                self.log(self.msg, "ERROR")
+
+        total_network_devices = len(ip_uuid_map)
+        successful_devices = len(device_interfaces_map)
+
+        self.log(
+            "Interface discovery statistics - "
+            "devices with interfaces: {0}, "
+            "devices without interfaces: {1}, "
+            "devices with errors: {2}".format(
+                statistics['devices_with_interfaces'],
+                statistics['devices_without_interfaces'],
+                statistics['devices_with_errors']
+            ),
+            "INFO"
+ ) + + if statistics['interfaces_without_ids'] > 0: + self.log( + "Warning: {0} interface records across all devices were missing " + "UUID identifiers".format(statistics['interfaces_without_ids']), + "WARNING" + ) + + self.log( + "Total interface identifiers discovered: {0} across {1} devices".format( + statistics['total_interfaces_discovered'], successful_devices + ), + "INFO" + ) + if statistics['devices_with_interfaces'] > 0: + self.log("Network devices with interface identifiers: {0}".format(statistics['devices_with_interfaces']), "INFO") + + if statistics['devices_without_interfaces'] > 0: + self.log("Network devices without interface identifiers: {0}".format(statistics['devices_without_interfaces']), "INFO") + + if statistics['devices_with_errors'] > 0: + self.log("Warning: {0} devices encountered errors during interface retrieval".format(statistics['devices_with_errors']), "WARNING") + + self.log("Total interface identifiers discovered across all network devices: {0}".format(statistics['total_interfaces_discovered']), "INFO") + + return device_interfaces_map + + def get_connected_device_details_from_interfaces(self, ip_uuid_map): + """ + Discover connected device topology for network devices through comprehensive interface-level analysis. + + This method performs extensive connected device discovery by querying each interface of specified + network devices to identify neighboring devices, endpoints, and network attachments. It processes + interface-level connectivity data to provide complete visibility into network device interconnections, + attached endpoints, and network topology relationships essential for network management + and troubleshooting operations. + + Args: + ip_uuid_map (dict): Mapping of device IP addresses to their UUIDs. + + Returns: + list: Structured connected device topology information in standardized format: + [ + { + "connected_device_info": [ + { + "device_ip": "192.168.1.1", + "connected_device_details": [connected_device_records] or "Error: " + }, + { + "device_ip": "192.168.1.2", + "connected_device_details": [connected_device_records] or "Error: " + } + ] + } + ] + """ + self.log("Discovering connected device topology for device interface inventory", "INFO") + self.log("Processing connected device discovery for {0} network devices".format(len(ip_uuid_map)), "DEBUG") + + connected_info_list = [] + + statistics = { + 'devices_processed': 0, + 'devices_with_connections': 0, + 'devices_without_connections': 0, + 'devices_with_errors': 0, + 'interfaces_processed': 0, + 'interfaces_with_connections': 0, + 'total_connections_discovered': 0 + } + + self.log( + "Phase 1: Retrieving interface inventories for network devices to enable discovery", + "INFO" + ) + device_interfaces_map = self.get_interface_ids_per_device(ip_uuid_map) + + if not device_interfaces_map: + self.log( + "No interface mappings available for network devices - " + "unable to perform connected device discovery", + "WARNING" + ) + return [{"connected_device_info": []}] + + self.log( + "Phase 1 completed: Retrieved interface mappings for {0} network devices".format( + len(device_interfaces_map) + ), + "INFO" + ) + + self.log( + "Phase 2: Processing connected device discovery across device interfaces", + "INFO" + ) + + for index, (device_ip, interface_ids) in enumerate(device_interfaces_map.items()): + statistics['devices_processed'] += 1 + ip_device_uuid_map = self.get_device_ids_from_device_ips([device_ip]) + device_uuid = ip_device_uuid_map[device_ip] + interfaces_with_connections = 0 + + 
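+            # Log per-device context, validate the resolved device UUID, then query each interface for connected neighbors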
self.log( + "Processing device {0}/{1} - IP: {2}, UUID: {3}, interfaces: {4}".format( + statistics['devices_processed'], len(device_interfaces_map), + device_ip, device_uuid, len(interface_ids) + ), + "DEBUG" + ) + + # Validate device mapping + if not device_uuid: + self.log( + "Skipping device {0} - missing UUID in ip_uuid_map".format(device_ip), + "WARNING" + ) + statistics['devices_with_errors'] += 1 + connected_info_list.append({ + "device_ip": device_ip, + "connected_device_details": "Error: Missing device UUID in mapping" + }) + continue + + if not interface_ids: + self.log( + "Device {0} has no interfaces available for connected device discovery".format( + device_ip + ), + "WARNING" + ) + statistics['devices_without_connections'] += 1 + connected_info_list.append({ + "device_ip": device_ip, + "connected_device_details": [] + }) + continue + + for interface_index, interface_id in enumerate(interface_ids, start=1): + statistics['interfaces_processed'] += 1 + + self.log( + "Processing interface {0}/{1} for device {2} - interface_id: {3}".format( + interface_index, len(interface_ids), device_ip, interface_id + ), + "DEBUG" + ) + try: + self.log( + "Executing connected device query for interface {0} on device {1}".format( + interface_id, device_ip + ), + "DEBUG" + ) + response = self.dnac._exec( + family="devices", + function="get_connected_device_detail", + params={ + "device_uuid": device_uuid, + "interface_uuid": interface_id + } + ) + self.log( + "Received connected device API response for device {0}, interface {1}: {2}".format( + device_ip, interface_id, response + ), + "DEBUG" + ) + interface_connected_data = response.get("response", {}) + + if interface_connected_data: + interfaces_with_connections += 1 + statistics['interfaces_with_connections'] += 1 + statistics['total_connections_discovered'] += 1 + statistics['devices_with_connections'] += 1 + self.log( + "Connected device found for device {0}, interface {1}: {2}".format( + device_ip, interface_id, interface_connected_data + ), + "DEBUG" + ) + connected_info_list.append({ + "device_ip": device_ip, + "connected_device_details": [interface_connected_data] + }) + else: + statistics['devices_without_connections'] += 1 + self.log( + "No connected device found for device {0}, interface {1}".format( + device_ip, interface_id + ), + "DEBUG" + ) + connected_info_list.append({ + "device_ip": device_ip, + "connected_device_details": [] + }) + + except Exception as e: + statistics['devices_with_errors'] += 1 + self.log( + "Exception during connected device query for device {0}, interface {1}: {2}".format( + device_ip, interface_id, str(e) + ), + "ERROR" + ) + connected_info_list.append({ + "device_ip": device_ip, + "connected_device_details": "Error: {0}".format(e) + }) + + result = [{"connected_device_info": connected_info_list}] + + self.log( + "Phase 2 completed: Connected device topology discovery finished successfully", + "INFO" + ) + + # Final statistics and comprehensive logging + self.log( + "Discovery statistics - devices processed: {0}, " + "devices with connections: {1}, devices without connections: {2}, " + "devices with errors: {3}".format( + statistics['devices_processed'], + statistics['devices_with_connections'], + statistics['devices_without_connections'], + statistics['devices_with_errors'] + ), + "INFO" + ) + self.log( + "Interface processing statistics - total interfaces: {0}, " + "interfaces with connections: {1}, total connections discovered: {2}".format( + statistics['interfaces_processed'], + 
statistics['interfaces_with_connections'], + statistics['total_connections_discovered'] + ), + "INFO" + ) + + if statistics['devices_with_errors'] > 0: + self.log( + "Warning: {0} devices encountered errors during connected device discovery".format( + statistics['devices_with_errors'] + ), + "WARNING" + ) + + self.log( + "Connected device topology discovery completed successfully - " + "processed {0} devices with {1} total interfaces, " + "discovered {2} total connections".format( + statistics['devices_processed'], + statistics['interfaces_processed'], + statistics['total_connections_discovered'] + ), + "INFO" + ) + + return result + + def get_interfaces_by_specified_range(self, ip_uuid_map): + """ + Fetch interfaces by specified range details for specified devices from Cisco Catalyst Center. + + Retrieves interface details for a list of device UUIDs using the + 'Get Device Interfaces by Specified Range' API with default values. + The API is called with a default range of start_index = 1 and + records_to_return = 500 for each device. + + The retrieved interface details include key fields such as: + - addresses (list): List of interface IP addresses (usually empty or detailed IPs) + - adminStatus (str): Administrative status (e.g., "UP") + - duplex (str): Duplex mode (e.g., "FullDuplex") + - ifIndex (str): Interface index identifier (e.g., "73") + - interfaceType (str): Type of interface (e.g., "Physical") + - lastOutgoingPacketTime (int): Timestamp of last outgoing packet (epoch ms) + - macAddress (str): MAC address of the interface (e.g., "0c:75:bd:42:db:c1") + - mtu (str): Maximum Transmission Unit size (e.g., "9100") + - nativeVlanId (str): Native VLAN ID (e.g., "1") + - pid (str): Platform ID (e.g., "C9300-48UXM") + - portMode (str): Port mode (e.g., "access") + - portName (str): Name of the port (e.g., "AppGigabitEthernet1/0/1") + - portType (str): Type of port (e.g., "Ethernet Port") + - serialNo (str): Serial number of the device (e.g., "FJC2335S09F") + - series (str): Device series (e.g., "Cisco Catalyst 9300 Series Switches") + - speed (str): Speed in kbps (e.g., "1000000") + - status (str): Operational status (e.g., "up") + - vlanId (str): VLAN ID assigned (e.g., "1") + - voiceVlan (str): Voice VLAN (usually empty) + - description (str): Interface description (usually empty) + - instanceUuid (str): Interface instance UUID + - instanceTenantId (str): Tenant ID for the instance + + Args: + ip_uuid_map (dict): Mapping of device IPs to their UUIDs. 
+
+        Returns:
+            list: A list with a single dictionary:
+            [
+                {
+                    "device_interfaces_by_range_info": [
+                        {
+                            "device_ip": <device_ip>,
+                            "interface_info": <interface records or error string>
+                        },
+                    ]
+                }
+            ]
+        """
+        self.log(
+            "Processing interface range retrieval for {0} network devices: {1}".format(
+                len(ip_uuid_map) if ip_uuid_map else 0,
+                list(ip_uuid_map.keys()) if ip_uuid_map else []
+            ),
+            "DEBUG"
+        )
+
+        interface_by_range_info_list = []
+        statistics = {
+            'devices_processed': 0,
+            'devices_with_interfaces': 0,
+            'devices_without_interfaces': 0,
+            'devices_with_errors': 0,
+            'total_interfaces_retrieved': 0,
+            'total_api_calls': 0
+        }
+
+        for device_ip, device_id in ip_uuid_map.items():
+            statistics['devices_processed'] += 1
+            self.log(
+                "Processing device {0}/{1} - IP: {2}, UUID: {3}".format(
+                    statistics['devices_processed'], len(ip_uuid_map), device_ip, device_id
+                ),
+                "DEBUG"
+            )
+
+            if not device_ip or not device_id:
+                self.log(
+                    "Skipping device with missing IP or UUID - IP: {0}, UUID: {1}".format(
+                        device_ip, device_id
+                    ),
+                    "WARNING"
+                )
+                statistics['devices_with_errors'] += 1
+                interface_by_range_info_list.append({
+                    "device_ip": device_ip or "unknown",
+                    "interface_info": "Error: Missing device IP or UUID"
+                })
+                continue
+            start_index = 1
+            records_to_return = 500
+            interface_data = []
+
+            self.log(
+                "Starting paginated interface retrieval for device {0} with "
+                "initial parameters - start_index: {1}, records_to_return: {2}".format(
+                    device_ip, start_index, records_to_return
+                ),
+                "DEBUG"
+            )
+
+            while True:
+                self.log(
+                    "Executing interface range API call for device {0} - "
+                    "requesting {1} records starting at index {2}".format(
+                        device_ip, records_to_return, start_index
+                    ),
+                    "DEBUG"
+                )
+                try:
+                    statistics['total_api_calls'] += 1
+                    response = self.dnac._exec(
+                        family="devices",
+                        function="get_device_interfaces_by_specified_range",
+                        params={
+                            "device_id": device_id,
+                            "start_index": start_index,
+                            "records_to_return": records_to_return
+                        }
+                    )
+
+                    self.log("Received API response from 'get_device_interfaces_by_specified_range' for device {0}: {1}".format(
+                        device_ip, response), "DEBUG"
+                    )
+
+                    if not response or 'response' not in response:
+                        self.log(
+                            "Invalid or empty API response received for device {0}".format(device_ip),
+                            "WARNING"
+                        )
+                        break
+
+                    data_chunk = response['response']
+                    if data_chunk:
+                        chunk_size = len(data_chunk)
+                        self.log(
+                            "Retrieved {0} interface records for device {1} at index {2}".format(
+                                chunk_size, device_ip, start_index
+                            ),
+                            "DEBUG"
+                        )
+                        interface_data.extend(data_chunk)
+                        statistics['total_interfaces_retrieved'] += chunk_size
+
+                        # Check if we've reached the end of available data
+                        if chunk_size < records_to_return:
+                            self.log(
+                                "Reached end of interface data for device {0} - "
+                                "received {1} records (less than requested {2})".format(
+                                    device_ip, chunk_size, records_to_return
+                                ),
+                                "DEBUG"
+                            )
+                            break
+
+                        # Update pagination parameters for next iteration
+                        start_index += records_to_return
+                        self.log(
+                            "Continuing pagination for device {0} - next start_index: {1}".format(
+                                device_ip, start_index
+                            ),
+                            "DEBUG"
+                        )
+                    else:
+                        self.log(
+                            "No interface data returned for device {0} at index {1}".format(
+                                device_ip, start_index
+                            ),
+                            "DEBUG"
+                        )
+                        break
+
+                except Exception as api_err:
+                    self.log(
+                        "Exception during interface range API call for device {0}: {1}".format(
+                            device_ip, str(api_err)
+                        ),
+                        "ERROR"
+                    )
+                    interface_data = "Error: {0}".format(str(api_err))
+                    statistics['devices_with_errors'] += 1
+                    break
+
+            if interface_data:
statistics['devices_with_interfaces'] += 1 + self.log( + "Successfully retrieved {0} total interfaces for device {1}".format( + len(interface_data), device_ip + ), + "INFO" + ) + interface_by_range_info_list.append({ + "device_ip": device_ip, + "interface_info": interface_data + }) + else: + statistics['devices_without_interfaces'] += 1 + self.log( + "No interfaces found for device {0}".format(device_ip), + "INFO" + ) + interface_by_range_info_list.append({ + "device_ip": device_ip, + "interface_info": [] + }) + + result = [{"device_interfaces_by_range_info": interface_by_range_info_list}] + + # Comprehensive logging of operation results + self.log( + "Interface range retrieval completed successfully - " + "devices processed: {0}, devices with interfaces: {1}, " + "devices without interfaces: {2}, devices with errors: {3}".format( + statistics['devices_processed'], + statistics['devices_with_interfaces'], + statistics['devices_without_interfaces'], + statistics['devices_with_errors'] + ), + "INFO" + ) + + self.log( + "Interface retrieval statistics - " + "total API calls: {0}, total interfaces retrieved: {1}".format( + statistics['total_api_calls'], + statistics['total_interfaces_retrieved'] + ), + "INFO" + ) + + if statistics['devices_with_errors'] > 0: + self.log( + "Warning: {0} devices encountered errors during interface range retrieval".format( + statistics['devices_with_errors'] + ), + "WARNING" + ) + + self.log( + "Interface range data retrieval operation completed with {0} total devices processed".format( + len(interface_by_range_info_list) + ), + "INFO" + ) + + return result + + def get_device_config_info(self, ip_uuid_map): + """ + Fetch configuration data for a list of devices from Cisco Catalyst Center. + + For each device ID, this method calls the 'get_device_list' API and aggregates the results. + Each entry in the returned list contains the device's management IP and its details. + + Retrieves the full configuration details for each specified device ID and aggregates the results. + + The configuration details include the device's running configuration, which may consist of + multiple lines of configuration commands. + + Parameters: + ip_uuid_map (dict): Mapping of device IPs to their UUIDs for which configuration details need to be fetched. 
+ + Returns: + list: A list with a single dictionary: + [ + { + "device_config_info": [ + { + "device_ip": , + "device_config_details": + }, + ] + } + ] + """ + self.log("Fetching Device config data for {0} devices: {1}".format(len(ip_uuid_map), list(ip_uuid_map.keys())), "INFO") + + device_config_list = [] + + for device_ip, device_id in ip_uuid_map.items(): + self.log("Processing device ID: {0} (IP: {1})".format(device_id, device_ip), "DEBUG") + self.log("Fetching device config info for device_id: {0}, device_ip: {1}".format(device_id, device_ip), "DEBUG") + + try: + response = self.dnac._exec( + family="devices", + function="get_device_config_by_id", + params={'network_device_id': device_id} + ) + self.log( + "Received API response from 'get_device_config' for device {0} (IP: {1}): {2}".format( + device_id, device_ip, response), "DEBUG") + + config_data = response.get("response", []) + if config_data: + self.log("Found {0} configuration lines for device IP: {1}".format(len(config_data), device_ip), "DEBUG") + device_config_list.append({ + "device_ip": device_ip, + "device_config_details": config_data + }) + else: + self.log("No device config card details found for device IP: {0}".format(device_ip), "DEBUG") + device_config_list.append({ + "device_ip": device_ip, + "device_config_details": [] + }) + + except Exception as e: + self.msg = "Exception occurred while getting device config for device_id {0}, device_ip {1}: {2}".format(device_id, device_ip, e) + device_config_list.append({ + "device_ip": device_ip, + "device_config_details": "Error: {0}".format(e) + }) + continue + + result = [{"device_config_info": device_config_list}] + + self.log("Completed Device Config info retrieval. Total devices processed: {0}".format(len(device_config_list)), "INFO") + self.log("Device Config info result: {0}".format(result), "DEBUG") + return result + + def get_device_summary_info(self, ip_uuid_map): + """ + Fetch summary information of devices for a list of devices from Cisco Catalyst Center. + + For each device ID, this method calls the 'get_device_list' API and aggregates the results. + Each entry in the returned list contains the device's management IP and its details. + + Retrieves key summary details for each device ID provided and aggregates the results. + + The retrieved device summary details include key fields such as: + - id (e.g., "e62e6405-13e4-4f1b-ae1c-580a28a96a88") + - role (e.g., "ACCESS") + - roleSource (e.g., "MANUAL") + + Parameters: + ip_uuid_map (dict): Mapping of device IPs to their UUIDs for which summary information needs to be retrieved. 
+ + Returns: + list: A list with a single dictionary: + [ + { + "device_summary_info": [ + { + "device_ip": , + "device_summary_details": + }, + ] + } + ] + """ + self.log("Fetching device summary data for {0} devices: {1}".format(len(ip_uuid_map), list(ip_uuid_map.keys())), "INFO") + + device_summary_info_list = [] + + for device_ip, device_id in ip_uuid_map.items(): + self.log("Processing device ID: {0} (IP: {1})".format(device_id, device_ip), "DEBUG") + self.log("Fetching device summary info for device_id: {0}, device_ip: {1}".format(device_id, device_ip), "DEBUG") + + try: + response = self.dnac._exec( + family="devices", + function="get_device_summary", + params={'id': device_id} + ) + self.log( + "Received API response from 'get_device_summary' for device {0} (IP: {1}): {2}".format( + device_id, device_ip, response), "DEBUG") + + summary_data = response.get("response", []) + self.log("Summary data: {0}".format(summary_data), "DEBUG") + if summary_data: + self.log("Found {0} summary records for device IP: {1}".format(len(summary_data), device_ip), "DEBUG") + device_summary_info_list.append({ + "device_ip": device_ip, + "device_summary_details": summary_data + }) + else: + self.log("No device summary details found for device IP: {0}".format(device_ip), "DEBUG") + device_summary_info_list.append({ + "device_ip": device_ip, + "device_summary_details": [] + }) + + except Exception as e: + self.msg = "Exception occurred while getting device summary list for device_id {0}, device_ip {1}: {2}".format(device_id, device_ip, e) + device_summary_info_list.append({ + "device_ip": device_ip, + "device_summary_details": "Error: {0}".format(e) + }) + continue + + result = [{"device_summary_info": device_summary_info_list}] + + self.log("Completed Device Summary info retrieval. Total devices processed: {0}".format(len(device_summary_info_list)), "INFO") + self.log("Device Summary info result: {0}".format(result), "DEBUG") + return result + + def get_device_polling_interval_info(self, ip_uuid_map): + """ + Fetch polling interval information for a list of devices from Cisco Catalyst Center. + + For each device ID, this method calls the 'get_device_list' API and aggregates the results. + Each entry in the returned list contains the device's management IP and its details. + + Retrieves the polling interval configuration for each specified device ID and compiles the results. + + The polling interval details include the time intervals at which the device is polled for updates, + which can be critical for monitoring and management tasks (e.g., 86400 seconds for daily polling). + + Parameters: + ip_uuid_map (dict): Mapping of device IPs to their UUIDs for which polling interval details need to be retrieved. 
+ + Returns: + list: A list with a single dictionary: + [ + { + "device_polling_interval_info": [ + { + "device_ip": , + "polling_interval_details": + }, + ] + } + ] + """ + self.log("Fetching polling interval data for {0} devices: {1}".format(len(ip_uuid_map), list(ip_uuid_map.keys())), "INFO") + + polling_intervals_info_list = [] + + for device_ip, device_id in ip_uuid_map.items(): + self.log("Processing device ID: {0} (IP: {1})".format(device_id, device_ip), "DEBUG") + self.log("Fetching polling intervals info for device_id: {0}, device_ip: {1}".format(device_id, device_ip), "DEBUG") + + try: + response = self.dnac._exec( + family="devices", + function="get_polling_interval_by_id", + params={'id': device_id} + ) + self.log( + "Received API response from 'get_polling_interval' for device {0} (IP: {1}): {2}".format( + device_id, device_ip, response), "DEBUG") + + intervals = response.get("response", []) + if intervals: + self.log("Found {0} polling interval records for device IP: {1}".format((intervals), device_ip), "DEBUG") + polling_intervals_info_list.append({ + "device_ip": device_ip, + "polling_interval_details": intervals + }) + else: + self.log("No polling interval details found for device IP: {0}".format(device_ip), "DEBUG") + polling_intervals_info_list.append({ + "device_ip": device_ip, + "polling_interval_details": [] + }) + + except Exception as e: + self.msg = "Exception occurred while getting polling interval info list for device_id {0}, device_ip {1}: {2}".format(device_id, device_ip, e) + polling_intervals_info_list.append({ + "device_ip": device_ip, + "polling_interval_details": "Error: {0}".format(e) + }) + continue + + result = [{"device_polling_interval_info": polling_intervals_info_list}] + + self.log("Completed Device Polling Interval info retrieval. Total devices processed: {0}".format(len(polling_intervals_info_list)), "INFO") + self.log("Device Polling Interval info result: {0}".format(result), "DEBUG") + return result + + def get_device_stack_info(self, ip_uuid_map): + """ + Fetch stack details for a list of devices from Cisco Catalyst Center. + + For each device ID, this method calls the 'get_device_list' API and aggregates the results. + Each entry in the returned list contains the device's management IP and its details. + + Retrieves stack member information for each given device ID and compiles the results. + + The stack member info includes key fields such as: + - stackSwitchInfo: list of dicts with fields including hwPriority, macAddress, role, softwareImage, + stackMemberNumber, state, switchPriority, serialNumber, platformId, entPhysicalIndex + - stackPortInfo: list of dicts with fields including isSynchOk, name, switchPort, neighborPort, + nrLinkOkChanges, stackCableLengthInfo, stackPortOperStatusInfo, linkActive, linkOk + - svlSwitchInfo: list of dicts with fields including macAddress, role, softwareImage, + stackMemberNumber, state, switchPriority, serialNumber, platformId, entPhysicalIndex + + Parameters: + ip_uuid_map (dict): Mapping of device IPs to their UUIDs for which stack details need to be retrieved. 
+ + Returns: + list: A list with a single dictionary: + [ + { + "device_stack_info": [ + { + "device_ip": , + "stack_details": + }, + ] + } + ] + """ + self.log("Fetching stack details for {0} devices: {1}".format(len(ip_uuid_map), list(ip_uuid_map.keys())), "INFO") + + stack_info_list = [] + + for device_ip, device_id in ip_uuid_map.items(): + self.log("Processing device ID: {0} (IP: {1})".format(device_id, device_ip), "DEBUG") + self.log("Fetching stack info for device_id: {0}, device_ip: {1}".format(device_id, device_ip), "DEBUG") + + try: + response = self.dnac._exec( + family="devices", + function="get_stack_details_for_device", + params={'device_id': device_id} + ) + self.log( + "Received API response from 'get_stack_details' for device {0} (IP: {1}): {2}".format( + device_id, device_ip, response), "DEBUG") + + stack_info = response.get("response", []) + if stack_info: + self.log("Found {0} stack records for device IP: {1}".format(len(stack_info), device_ip), "DEBUG") + stack_info_list.append({ + "device_ip": device_ip, + "stack_details": stack_info + }) + else: + self.log("No stack details found for device IP: {0}".format(device_ip), "DEBUG") + stack_info_list.append({ + "device_ip": device_ip, + "stack_details": [] + }) + + except Exception as e: + self.msg = "Exception occurred while getting device stack info list for device_id {0}, device_ip {1}: {2}".format(device_id, device_ip, e) + stack_info_list.append({ + "device_ip": device_ip, + "stack_details": "Error: {0}".format(e) + }) + continue + + result = [{"device_stack_info": stack_info_list}] + + self.log("Completed Stack info retrieval. Total devices processed: {0}".format(len(stack_info_list)), "INFO") + self.log("Stack info result: {0}".format(result), "DEBUG") + return result + + def get_device_link_mismatch_info(self, site_id, ip_uuid_map): + """ + Fetch Inventory Insight Device Link Mismatch data for a list of site IDs. + + Retrieves mismatch data for both 'vlan' and 'speed-duplex' categories for each site. + Aggregates all results and returns them in a structured list. 
+ + The retrieved device link mismatch data includes key fields such as: + - endPortAllowedVlanIds (str): Allowed VLAN IDs on the end port (e.g., "10,20,30") + - endPortNativeVlanId (str): Native VLAN ID on the end port (e.g., "10") + - startPortAllowedVlanIds (str): Allowed VLAN IDs on the start port (e.g., "10,20,30") + - startPortNativeVlanId (str): Native VLAN ID on the start port (e.g., "10") + - linkStatus (str): Current status of the link (e.g., "up") + - endDeviceHostName (str): Hostname of the device at the end port (e.g., "switch-nyc-01") + - endDeviceId (str): Unique ID of the device at the end port (e.g., "device-1001") + - endDeviceIpAddress (str): IP address of the device at the end port (e.g., "192.168.1.10") + - endPortAddress (str): Interface address of the end port (e.g., "GigabitEthernet1/0/24") + - endPortDuplex (str): Duplex setting of the end port (e.g., "full") + - endPortSpeed (str): Speed setting of the end port (e.g., "1000Mbps") + - startDeviceHostName (str): Hostname of the device at the start port (e.g., "router-dc-01") + - startDeviceId (str): Unique ID of the device at the start port (e.g., "device-2001") + - startDeviceIpAddress (str): IP address of the device at the start port (e.g., "192.168.1.1") + - startPortAddress (str): Interface address of the start port (e.g., "GigabitEthernet0/1") + - startPortDuplex (str): Duplex setting of the start port (e.g., "full") + - startPortSpeed (str): Speed setting of the start port (e.g., "1000Mbps") + - lastUpdated (str): ISO 8601 timestamp of last update (e.g., "2025-06-26T10:15:00Z") + - numUpdates (int): Number of updates recorded (e.g., 15) + - avgUpdateFrequency (float): Average frequency of updates (e.g., 4.0) + - type (str): Type of link (e.g., "ethernet-link") + - instanceUuid (str): Unique instance UUID + + Parameters: + site_ids (list): List of site IDs to fetch device link mismatch information. + ip_uuid_map (dict): Mapping of device IPs to their UUIDs for which link mismatch details need to be retrieved. 
+ + Returns: + list: A list containing a single dictionary with structure: + [ + { + "device_link_mismatch_info": [ + { + "device_ip": "", + "vlan": [ + { + "device_ip": "", + "link_mismatch_details": + } + ], + "speed-duplex": [ + { + "device_ip": "", + "link_mismatch_details": + } + ] + }, + ] + } + ] + """ + + self.log("Fetching device link mismatch data for {0} devices: {1}".format(len(ip_uuid_map), list(ip_uuid_map.keys())), "INFO") + + link_mismatch_info = [] + + for device_ip, device_id in ip_uuid_map.items(): + site_result = { + "device_ip": device_ip, + "vlan": [], + "speed-duplex": [] + } + + for category in ['vlan', 'speed-duplex']: + self.log("Processing device ID: {0} (IP: {1})".format(device_id, device_ip), "DEBUG") + self.log("Fetching device link mismatch info for device_id: {0}, device_ip: {1}".format(device_id, device_ip), "DEBUG") + + try: + response = self.dnac._exec( + family="devices", + function="inventory_insight_device_link_mismatch", + params={ + 'site_id': site_id, + 'category': category + } + ) + self.log( + "Received API response from 'inventory_insight_device_link_mismatch': {0}".format( + (response) + ), + "DEBUG", + ) + mismatch_data = response.get("response", []) + if mismatch_data: + self.log( + "Received API response for device {0}: {1}".format(device_ip, mismatch_data), + "DEBUG" + ) + if isinstance(mismatch_data, list): + site_result[category].append({ + "device_ip": device_ip, + "link_mismatch_details": mismatch_data + }) + + else: + self.log("No link mismatch found for device IP: {0}".format(device_ip), "DEBUG") + site_result[category].append({ + "device_ip": device_ip, + "link_mismatch_details": [] + }) + + if category == 'vlan': + self.log("VLAN Category Link Mismatch Response for site {0}: {1}".format(site_id, response), "INFO") + else: + self.log("Speed-Duplex Category Link Mismatch Response for site {0}: {1}".format(site_id, response), "INFO") + + except Exception as e: + self.msg = "Exception occurred while getting {0} link mismatch data for site {1}: {2}".format(category, site_id, e) + site_result[category].append({ + "device_ip": device_ip, + "link_mismatch_details": "Error: {0}".format(e) + }) + continue + + self.log(site_result["vlan"]) + self.log(site_result["speed-duplex"]) + link_mismatch_info.append(site_result) + + result = [{"device_link_mismatch_info": link_mismatch_info}] + + self.log("Completed Device Link Mismatch info retrieval. Total devices processed: {0}".format(len(link_mismatch_info)), "INFO") + self.log("Device Link Mismatch info result: {0}".format(result), "DEBUG") + + return result + + def write_device_info_to_file(self, filtered_config): + """ + Write collected network device information to a specified file with comprehensive format support and error handling. + + This method provides robust file output capabilities for network device data with support for multiple + formats (JSON/YAML), file modes (overwrite/append), automatic directory creation, timestamp insertion, + and comprehensive error handling with detailed logging for operational traceability. + + Parameters: + export_configuration (dict): Configuration dictionary containing file output specifications. 
+ Required structure: + { + "output_output_file_info": { + "file_path": str, # Absolute path without extension (required) + "file_format": str, # "json" or "yaml" (default: "yaml") + "file_mode": str, # "w" (overwrite) or "a" (append) (default: "w") + "timestamp": bool # Include download timestamp (default: False) + }, + "data": dict # Optional: specific data to write (uses self.total_response if not provided) + } + + Returns: + self: The current instance with updated internal state reflecting the file operation results. + + Raises: + Exception: Critical errors during file operations, directory creation, or data serialization + are logged but do not raise exceptions to maintain operational continuity. + """ + self.log("Starting Device Information File Export Operation", "INFO") + + output_file_info = filtered_config.get("output_file_info", {}) + self.log("File info received: {0}".format(output_file_info), "DEBUG") + + target_file_path = output_file_info.get("file_path") + output_file_format = output_file_info.get("file_format", "yaml").lower().strip() + file_write_mode = output_file_info.get("file_mode", "w").lower().strip() + include_timestamp_flag = output_file_info.get("timestamp", False) + + self.log("Extracted file parameters - Path: {0}, Format: {1}, Mode: {2}, Timestamp: {3}".format( + target_file_path, output_file_format, file_write_mode, include_timestamp_flag), "INFO") + + if not target_file_path: + self.log("No file_path specified in output_file_info", "ERROR") + return self + + if file_write_mode not in {"w", "a"}: + self.log("Invalid file_mode '{0}'. Use 'w' (overwrite) or 'a' (append).".format(file_write_mode), "ERROR") + return self + + full_path_with_ext = "{0}.{1}".format(target_file_path, output_file_format) + + try: + os.makedirs(os.path.dirname(full_path_with_ext), exist_ok=True) + except Exception as e: + self.log("Error creating directories for path: {0} — {1}".format(full_path_with_ext, e), "ERROR") + return self + + try: + if isinstance(self.total_response, list): + new_data = self.total_response[:] + else: + new_data = [self.total_response] + + if include_timestamp_flag: + timestamp_entry = {"Downloaded at": datetime.now().strftime("%Y-%m-%d %H:%M:%S")} + new_data_with_timestamp = [timestamp_entry] + new_data + else: + new_data_with_timestamp = new_data + + if file_write_mode == "a" and os.path.exists(full_path_with_ext): + try: + with open(full_path_with_ext, "r") as f: + if output_file_format == "json": + existing_data = json.load(f) + else: + existing_data = yaml.safe_load(f) + + if existing_data is None: + existing_data = [] + elif not isinstance(existing_data, list): + existing_data = [existing_data] + + except Exception: + self.log("Failed to read existing file. 
Starting fresh.", "WARNING") + existing_data = [] + + data_to_write = existing_data + new_data_with_timestamp + + else: + data_to_write = new_data_with_timestamp + + with open(full_path_with_ext, "w") as f: + if output_file_format == "json": + json.dump(data_to_write, f, indent=2) + else: + yaml.dump(data_to_write, f, default_flow_style=False) + + self.log("Successfully wrote device info to file: {0}".format(full_path_with_ext), "INFO") + + except Exception as e: + self.log("Failed to write device info to file {0}: {1}".format(full_path_with_ext, e), "ERROR") + + return self + + +def main(): + """ + main entry point for module execution + """ + element_spec = {'dnac_host': {'required': True, 'type': 'str'}, + 'dnac_port': {'type': 'str', 'default': '443'}, + 'dnac_username': {'type': 'str', 'default': 'admin', 'aliases': ['user']}, + 'dnac_password': {'type': 'str', 'no_log': True}, + 'dnac_verify': {'type': 'bool', 'default': True}, + 'dnac_version': {'type': 'str', 'default': '2.2.3.3'}, + 'dnac_debug': {'type': 'bool', 'default': False}, + 'dnac_log_level': {'type': 'str', 'default': 'WARNING'}, + "dnac_log_file_path": {"type": 'str', "default": 'dnac.log'}, + "dnac_log_append": {"type": 'bool', "default": True}, + 'dnac_log': {'type': 'bool', 'default': False}, + 'validate_response_schema': {'type': 'bool', 'default': True}, + 'config_verify': {'type': 'bool', "default": False}, + 'dnac_api_task_timeout': {'type': 'int', "default": 1200}, + 'dnac_task_poll_interval': {'type': 'int', "default": 2}, + 'config': {'required': True, 'type': 'list', 'elements': 'dict'}, + 'state': {'default': 'gathered', 'choices': ['gathered']} + } + + module = AnsibleModule(argument_spec=element_spec, supports_check_mode=False) + ccc_device_info = NetworkDevicesInfo(module) + state = ccc_device_info.params.get("state") + + current_version = ccc_device_info.get_ccc_version() + min_supported_version = "2.3.7.9" + + if ccc_device_info.compare_dnac_versions(current_version, min_supported_version) < 0: + ccc_device_info.status = "failed" + ccc_device_info.msg = ( + "The specified version '{0}' does not support the 'network device info workflow' feature. " + "Supported version(s) start from '{1}' onwards.".format(current_version, min_supported_version) + ) + ccc_device_info.log(ccc_device_info.msg, "ERROR") + ccc_device_info.check_return_status() + + if state not in ccc_device_info.supported_states: + ccc_device_info.status = "invalid" + ccc_device_info.msg = "State {0} is invalid".format(state) + ccc_device_info.check_return_status() + + ccc_device_info.validate_input().check_return_status() + + for config in ccc_device_info.validated_config: + ccc_device_info.reset_values() + ccc_device_info.get_want(config).check_return_status() + ccc_device_info.get_diff_state_apply[state](config).check_return_status() + + module.exit_json(**ccc_device_info.result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/network_profile_wireless_workflow_manager.py b/plugins/modules/network_profile_wireless_workflow_manager.py index 41ea0c1e8f..ff5ab06194 100644 --- a/plugins/modules/network_profile_wireless_workflow_manager.py +++ b/plugins/modules/network_profile_wireless_workflow_manager.py @@ -24,7 +24,7 @@ - This module interacts with Cisco Catalyst Center's to create profile name, SSID details, additional interface details destination port and protocol. 
-version_added: "6.31.0" +version_added: "6.37.0" extends_documentation_fragment: - cisco.dnac.workflow_manager_params author: @@ -151,6 +151,53 @@ type: list elements: str required: false + feature_template_designs: + description: | + List of feature template designs to be assigned or removed to/from the wireless network profile. + Feature templates provide advanced configuration capabilities for wireless infrastructure + including AAA settings, SSID configurations, CleanAir parameters, and RRM settings. + These templates enable standardized configuration deployment across wireless network profiles. + type: list + elements: dict + required: false + suboptions: + design_type: + description: | + The category or name of the feature template to be applied. + This defines the functional area of the configuration (For example, AAA, SSID, CleanAir). + Only one feature template category can be specified per entry in this list. + For support values: + - AAA_RADIUS_ATTRIBUTES_CONFIGURATION + - ADVANCED_SSID_CONFIGURATION + - CLEANAIR_CONFIGURATION + - DOT11AX_CONFIGURATION + - DOT11BE_STATUS_CONFIGURATION + - EVENT_DRIVEN_RRM_CONFIGURATION + - FLEX_CONFIGURATION + - MULTICAST_CONFIGURATION + - RRM_FRA_CONFIGURATION + - RRM_GENERAL_CONFIGURATION + type: str + required: false + feature_templates: + description: | + A list of specific design names or IDs to apply within the chosen feature template category. + These designs include various parameters and settings for wireless infrastructure configuration. + If "Default Advanced SSID Design" is included in this list, it is comprehensive for SSID configuration, + and no other template designs are typically needed for that specific SSID feature. + type: list + elements: str + required: true + applicability_ssids: + description: | + A list of SSIDs to which this feature template applies. + If "Default Advanced SSID Design" is selected for the 'feature_templates', this feature template + will automatically apply to all SSIDs, regardless of this list's content. + For example, ["SSID1", "SSID2"]. + type: list + elements: str + required: false + default: ["All"] additional_interfaces: description: | Specifies additional interfaces to be added to this wireless profile. 
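Taken together, the suboptions above form a simple list-of-dicts structure. A minimal sketch of one entry, using illustrative names only (the SSID and design names are not taken from a real deployment), shaped the way the module's argument spec expects it:

    # Hypothetical single entry for feature_template_designs. Per the module's validation,
    # ADVANCED_SSID_CONFIGURATION must be the only design_type in the list, and any SSID
    # referenced in applicability_ssids must also appear under ssid_details in the same
    # config block.
    feature_template_entry = {
        "design_type": "ADVANCED_SSID_CONFIGURATION",
        "feature_templates": ["Default Advanced SSID Design"],
        "applicability_ssids": ["Corporate_WiFi"],
    }
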
@@ -213,36 +260,197 @@ dnac_task_poll_interval: 1 state: merged config: - - profile_name: "Corporate_Wireless_Profile" + - profile_name: Corporate_Wireless_Profile site_names: - - "Global/Headquarters" - - "Global/BranchOffice" + - Global/Headquarters + - Global/BranchOffice ssid_details: - - ssid_name: "Corporate_WiFi" + - ssid_name: Corporate_WiFi enable_fabric: false - dot11be_profile_name: "Corporate_VLAN" - vlan_group_name: "Corporate_VLAN_Group" - - ssid_name: "Guest_WiFi" + dot11be_profile_name: Corporate_VLAN + vlan_group_name: Corporate_VLAN_Group + - ssid_name: Guest_WiFi enable_fabric: false - dot11be_profile_name: "Corporate_VLAN" - interface_name: "guest_network" + dot11be_profile_name: Corporate_VLAN + interface_name: guest_network local_to_vlan: 3002 ap_zones: - - ap_zone_name: "HQ_AP_Zone" - rf_profile_name: "HIGH" + - ap_zone_name: HQ_AP_Zone + rf_profile_name: HIGH ssids: - - "Corporate_WiFi" - - ap_zone_name: "Branch_AP_Zone" - rf_profile_name: "TYPICAL" + - Corporate_WiFi + - ap_zone_name: Branch_AP_Zone + rf_profile_name: TYPICAL ssids: - - "Guest_WiFi" + - Guest_WiFi additional_interfaces: - - interface_name: "Corp_Interface_1" + - interface_name: Corp_Interface_1 vlan_id: 100 - - interface_name: "Guest_Interface_1" + - interface_name: Guest_Interface_1 vlan_id: 3002 day_n_templates: - - "Wireless_Controller_Config" + - Wireless_Controller_Config + feature_template_designs: + - design_type: Advanced SSID Configuration + feature_templates: + - Default Advanced SSID Design + applicability_ssids: + - HQ_WiFi + - Branch_Secure + + - name: Create network wireless profile name only + cisco.dnac.network_profile_wireless_workflow_manager: + dnac_host: "{{ dnac_host }}" + dnac_username: "{{ dnac_username }}" + dnac_password: "{{ dnac_password }}" + dnac_verify: "{{ dnac_verify }}" + dnac_port: "{{ dnac_port }}" + dnac_version: "{{ dnac_version }}" + dnac_debug: "{{ dnac_debug }}" + dnac_log: true + dnac_log_level: DEBUG + config_verify: true + dnac_api_task_timeout: 1000 + dnac_task_poll_interval: 1 + state: merged + config: + - profile_name: Corporate_Wireless_Profile + + - name: Create network wireless profile assign to site + cisco.dnac.network_profile_wireless_workflow_manager: + dnac_host: "{{ dnac_host }}" + dnac_username: "{{ dnac_username }}" + dnac_password: "{{ dnac_password }}" + dnac_verify: "{{ dnac_verify }}" + dnac_port: "{{ dnac_port }}" + dnac_version: "{{ dnac_version }}" + dnac_debug: "{{ dnac_debug }}" + dnac_log: true + dnac_log_level: DEBUG + config_verify: true + dnac_api_task_timeout: 1000 + dnac_task_poll_interval: 1 + state: merged + config: + - profile_name: Corporate_Wireless_Profile + site_names: + - Global/USA/SAN JOSE/SJ_BLD20 + + - name: Create network wireless profile with feature template assign to site + cisco.dnac.network_profile_wireless_workflow_manager: + dnac_host: "{{ dnac_host }}" + dnac_username: "{{ dnac_username }}" + dnac_password: "{{ dnac_password }}" + dnac_verify: "{{ dnac_verify }}" + dnac_port: "{{ dnac_port }}" + dnac_version: "{{ dnac_version }}" + dnac_debug: "{{ dnac_debug }}" + dnac_log: true + dnac_log_level: DEBUG + config_verify: true + dnac_api_task_timeout: 1000 + dnac_task_poll_interval: 1 + state: merged + config: + - profile_name: Corporate_Wireless_Profile + site_names: + - Global/USA/SAN JOSE/SJ_BLD20/FLOOR3 + feature_template_designs: + - design_type: AAA_RADIUS_ATTRIBUTES_CONFIGURATION + feature_templates: + - Default AAA_Radius_Attributes_Configuration + + - name: Update network wireless profile with feature 
template + cisco.dnac.network_profile_wireless_workflow_manager: + dnac_host: "{{ dnac_host }}" + dnac_username: "{{ dnac_username }}" + dnac_password: "{{ dnac_password }}" + dnac_verify: "{{ dnac_verify }}" + dnac_port: "{{ dnac_port }}" + dnac_version: "{{ dnac_version }}" + dnac_debug: "{{ dnac_debug }}" + dnac_log: true + dnac_log_level: DEBUG + config_verify: true + dnac_api_task_timeout: 1000 + dnac_task_poll_interval: 1 + state: merged + config: + - profile_name: Corporate_Wireless_Profile + site_names: + - Global/USA/SAN JOSE/SJ_BLD20/FLOOR3 + feature_template_designs: + - design_type: AAA_RADIUS_ATTRIBUTES_CONFIGURATION + feature_templates: + - Default AAA_Radius_Attributes_Configuration + - design_type: CLEANAIR_CONFIGURATION + feature_templates: + - SAMPLE + - Default CleanAir 6GHz Design + + - name: Create network wireless profile with SSID details + cisco.dnac.network_profile_wireless_workflow_manager: + dnac_host: "{{ dnac_host }}" + dnac_username: "{{ dnac_username }}" + dnac_password: "{{ dnac_password }}" + dnac_verify: "{{ dnac_verify }}" + dnac_port: "{{ dnac_port }}" + dnac_version: "{{ dnac_version }}" + dnac_debug: "{{ dnac_debug }}" + dnac_log: true + dnac_log_level: DEBUG + config_verify: true + dnac_api_task_timeout: 1000 + dnac_task_poll_interval: 1 + state: merged + config: + - profile_name: Corporate_Wireless_Profile + ssid_details: + - ssid_name: Guest_WiFi + enable_fabric: false + dot11be_profile_name: Corporate_VLAN + interface_name: guest_network + local_to_vlan: 3002 + - ssid_name: ODC_WiFi + enable_fabric: false + dot11be_profile_name: Corporate_VLAN + interface_name: guest_network + local_to_vlan: 3001 + + - name: Update network wireless profile with additional SSID details + cisco.dnac.network_profile_wireless_workflow_manager: + dnac_host: "{{ dnac_host }}" + dnac_username: "{{ dnac_username }}" + dnac_password: "{{ dnac_password }}" + dnac_verify: "{{ dnac_verify }}" + dnac_port: "{{ dnac_port }}" + dnac_version: "{{ dnac_version }}" + dnac_debug: "{{ dnac_debug }}" + dnac_log: true + dnac_log_level: DEBUG + config_verify: true + dnac_api_task_timeout: 1000 + dnac_task_poll_interval: 1 + state: merged + config: + - profile_name: Corporate_Wireless_Profile + ssid_details: + - ssid_name: Guest_WiFi + enable_fabric: false + dot11be_profile_name: Corporate_VLAN + interface_name: guest_network + local_to_vlan: 3002 + - ssid_name: ODC_WiFi + enable_fabric: false + dot11be_profile_name: Corporate_VLAN + interface_name: guest_network + local_to_vlan: 3001 + - ssid_name: Corporate_WiFi + enable_fabric: false + dot11be_profile_name: Corporate_VLAN + interface_name: guest_network + local_to_vlan: 3003 + - name: Update wireless network profile cisco.dnac.network_profile_wireless_workflow_manager: dnac_host: "{{ dnac_host }}" @@ -259,27 +467,27 @@ dnac_task_poll_interval: 1 state: merged config: - - profile_name: "Corporate_Wireless_Profile" + - profile_name: Corporate_Wireless_Profile site_names: - - "Global/FrontOffice" + - Global/FrontOffice ssid_details: - - ssid_name: "Guest_WiFi" + - ssid_name: Guest_WiFi enable_fabric: false - dot11be_profile_name: "Corporate_VLAN" - interface_name: "guest_network" + dot11be_profile_name: Corporate_VLAN + interface_name: guest_network local_to_vlan: 3002 ap_zones: - - ap_zone_name: "Branch_AP_Zone" - rf_profile_name: "TYPICAL" + - ap_zone_name: Branch_AP_Zone + rf_profile_name: TYPICAL ssids: - - "Guest_WiFi" + - Guest_WiFi additional_interfaces: - - interface_name: "Guest_Interface_4" + - interface_name: Guest_Interface_4 
vlan_id: 2002 day_n_templates: - - "Wireless_Controller_Config" - - name: Delete wireless profile from Cisco Catalyst - Center. + - Wireless_Controller_Config + + - name: Delete wireless profile from Cisco Catalyst Center. cisco.dnac.network_profile_wireless_workflow_manager: dnac_host: "{{ dnac_host }}" dnac_username: "{{ dnac_username }}" @@ -295,49 +503,126 @@ dnac_task_poll_interval: 1 state: deleted config: - - profile_name: "Corporate_Wireless_Profile" + - profile_name: Corporate_Wireless_Profile """ RETURN = r""" -# Case 1: Successful creation/update of wireless profile -response_create: - description: A dictionary or list containing the response returned by the Cisco Catalyst Center Python SDK. - This response indicates that the wireless profile was either created or updated successfully. - returned: always +# Case 1: Successful wireless profile operations (create/update) +response_merged: + description: Response returned when wireless profile operations complete successfully. + Contains details about profile creation, updates, site assignments, and template associations. + returned: always when state=merged type: dict - sample: > - { - "msg": "Wireless Profile created/updated successfully for '[{'profile_name': 'Corporate_Wireless_Profile', - 'status': 'Network Profile [ff0003b4-adab-4de4-af0e-0cf07d6df07f] Successfully created'}]'.", - "response": [ - { - "profile_name": "Corporate_Wireless_Profile", - "status": "Network Profile [ff0003b4-adab-4de4-af0e-0cf07d6df07f] created Successfully" - } - ], - "status": "success" - } -# Case 2: Successfully deleted wireless profile -response_delete: - description: A dictionary or list containing the response returned by the Cisco Catalyst Center Python SDK. - This response indicates that the wireless profile was successfully deleted from the system. - returned: always + sample: + # Basic profile creation + profile_create_basic: + msg: "Wireless profile(s) created/updated and verified successfully" + response: + - profile_name: "Corporate_Wireless_Profile" + profile_status: "Network Profile [ff0003b4-adab-4de4-af0e-0cf07d6df07f] Successfully Created" + status: "success" + changed: true + + # Profile with site assignment + profile_create_with_sites: + msg: "Wireless profile(s) created/updated and verified successfully" + response: + - profile_name: "Corporate_Wireless_Profile" + profile_status: "Network Profile [9a1c37bd-52a9-436c-af8c-35e64f788abd] Successfully Created" + site_status: "Sites ['Global/USA/SAN JOSE/SJ_BLD20/FLOOR3', + 'Global/USA/SAN JOSE/SJ_BLD20/FLOOR1'] successfully associated + to network profile: Corporate_Wireless_Profile" + status: "success" + changed: true + + # Profile update with template assignment + profile_update_with_template_assignment: + msg: "Wireless profile(s) created/updated and verified successfully" + response: + - profile_name: "Corporate_Wireless_Profile" + profile_status: "Network Profile [bba6fd01-9d65-4bde-973a-a7ba6a9ad9b4] Successfully Updated" + template_status: "Templates successfully attached to network profile" + status: "success" + changed: true + +# Case 2: Successful wireless profile deletion +response_deleted: + description: Response returned when wireless profile deletion completes successfully. + Contains details about profile removal and site disassociation. 
+ returned: always when state=deleted type: dict - sample: > - { - "msg": "Wireless Profile deleted successfully for '[{'profile_name': 'Corporate_Wireless_Profile', - 'status': 'Network Profile [ff0003b4-adab-4de4-af0e-0cf07d6df07f] Successfully Deleted'}]'.", - "response": [ - { - "profile_name": "Corporate_Wireless_Profile", - "status": "Network Profile [ff0003b4-adab-4de4-af0e-0cf07d6df07f] Successfully Deleted" - } - ], - "status": "success" - } + sample: + msg: "Wireless profile(s) deleted and verified successfully" + response: + - profile_name: "Corporate_Wireless_Profile" + status: "Network Profile [ff0003b4-adab-4de4-af0e-0cf07d6df07f] Successfully Deleted" + sites_unassigned: "Sites successfully disassociated before deletion" + status: "success" + changed: true + +# Case 3: No changes required (idempotent) +response_no_changes: + description: Response when no changes are required as the desired state already exists. + returned: when configuration already matches desired state + type: dict + sample: + msg: "No changes required, profile(s) already exist and match desired configuration" + response: [] + status: "success" + changed: false + +# Case 4: Partial success with warnings +response_partial_success: + description: Response when some operations succeed but others encounter issues. + Contains details about successful operations and any warnings or failures. + returned: when some operations succeed but others fail + type: dict + sample: + msg: "Wireless profile(s) created/updated with warnings" + response: + - profile_name: "Corporate_Wireless_Profile" + profile_status: "Network Profile [ff0003b4-adab-4de4-af0e-0cf07d6df07f] Successfully Created" + warnings: + - "Some templates could not be attached due to permission issues" + - "Site assignment failed for 1 out of 3 sites" + status: "success" + changed: true + warnings: 2 + +# Case 5: Operation failure +response_failed: + description: Response when wireless profile operations fail. + Contains error details and information about what failed. + returned: when operations fail + type: dict + sample: + msg: "Failed to create/update wireless profile: API validation error" + response: + - profile_name: "Corporate_Wireless_Profile" + error: "Invalid SSID configuration: AP Zone SSID names does not exist." + failed_operation: "profile_creation" + status: "failed" + changed: false + +# Case 6: Verification failure +response_verification_failed: + description: Response when profile operations complete but verification fails. + Indicates the operation may have succeeded but the final state doesn't match expectations. 
+ returned: when config_verify=true and verification fails + type: dict + sample: + msg: "Profile operation completed but verification failed" + response: + - profile_name: "Corporate_Wireless_Profile" + operation_status: "Network Profile [ff0003b4-adab-4de4-af0e-0cf07d6df07f] Successfully Created" + verification_error: "Unable to verify the profile doesn't match expected state" + status: "failed" + changed: true + """ import re +import copy from ansible.module_utils.basic import AnsibleModule from ansible_collections.cisco.dnac.plugins.module_utils.dnac import ( validate_list_of_dicts, @@ -355,6 +640,7 @@ def __init__(self, module): super().__init__(module) self.supported_states = ["merged", "deleted"] self.created, self.deleted, self.not_processed = [], [], [] + self.remove_profile_data, self.already_removed = [], [] self.keymap = dict( profile_name="wirelessProfileName", @@ -371,6 +657,18 @@ def __init__(self, module): policy_profile_name="policyProfileName", ap_zone_name="apZoneName", ) + self.available_design_types = [ + "AAA_RADIUS_ATTRIBUTES_CONFIGURATION", + "ADVANCED_SSID_CONFIGURATION", + "CLEANAIR_CONFIGURATION", + "DOT11AX_CONFIGURATION", + "DOT11BE_STATUS_CONFIGURATION", + "EVENT_DRIVEN_RRM_CONFIGURATION", + "FLEX_CONFIGURATION", + "MULTICAST_CONFIGURATION", + "RRM_FRA_CONFIGURATION", + "RRM_GENERAL_CONFIGURATION", + ] def validate_input(self): """ @@ -430,6 +728,21 @@ def validate_input(self): "required": True, }, }, + "feature_template_designs": { + "type": "list", + "elements": "dict", + "design_type": {"type": "str", "required": False}, + "feature_templates": { + "type": "list", + "elements": "str", + "required": False + }, + "applicability_ssids": { + "type": "list", + "elements": "str", + "required": False + }, + }, } if not self.config: @@ -738,6 +1051,10 @@ def validate_ssid_info(self, ssid_list, config, errormsg): if ap_zones: self.validate_ap_zone(ap_zones, ssid_list, errormsg) + feature_template_designs = config.get("feature_template_designs") + if feature_template_designs: + self.validate_feature_templates(feature_template_designs, ssid_list, errormsg) + def validate_ap_zone(self, ap_zones, ssid_list, errormsg): """ Extends validation for AP zone values. @@ -800,6 +1117,290 @@ def validate_ap_zone(self, ap_zones, ssid_list, errormsg): ) errormsg.append(zone_msg) + def validate_feature_templates(self, feature_template_designs, ssid_list, errormsg): + """ + Validate feature templates provided in the playbook configuration. + + Parameters: + self (object): An instance of a class used for interacting with Cisco Catalyst Center. + feature_template_designs (list): List of dictionaries containing feature template details. + ssid_list (list): List of dictionaries containing SSID details. + errormsg (list): List to collect error messages in case of validation failures. + + Returns: + None: This function updates the errormsg list directly if any validation errors are found. 
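+
+        Example (illustrative; assumes Catalyst Center 3.1.3.0 or later, since older
+        versions are rejected by this check):
+            errormsg = []
+            self.validate_feature_templates(
+                [{"design_type": "CLEANAIR_CONFIGURATION",
+                  "feature_templates": ["Default CleanAir 6GHz Design"]}],
+                ssid_list=[],
+                errormsg=errormsg,
+            )
+            # A valid entry leaves errormsg empty; problems are appended to errormsg as
+            # strings rather than raised, so callers inspect the list afterwards.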
+ """ + self.log("Validating feature template configurations for wireless network profile template assignment", "DEBUG") + self.log("Processing {0} feature templates for validation against wireless profile requirements".format( + len(feature_template_designs) if isinstance(feature_template_designs, list) else 0), "DEBUG") + + if not isinstance(feature_template_designs, list): + errormsg.append("feature_template_designs: Expected a list, but got a non-list value.") + return None + + if len(feature_template_designs) > 500: + errormsg.append( + "feature_template_designs: List contains more than 500 entries, which exceeds the allowed limit." + ) + return None + + if feature_template_designs \ + and self.compare_dnac_versions(self.get_ccc_version(), "3.1.3.0") < 0: + errormsg.append( + "The specified version '{0}' does not support for feature template." + "Supported version(s) start from '3.1.3.0' onwards.".format( + self.get_ccc_version()) + ) + return None + + self.log("Feature template basic validation passed - proceeding with detailed template configuration validation", "DEBUG") + + # Track validation statistics for operational visibility + templates_processed = 0 + templates_with_errors = 0 + advanced_ssid_templates_found = 0 + default_design_templates_found = 0 + for feature_template_design in feature_template_designs: + templates_processed += 1 + template_has_errors = False + + self.log("Validating feature template design configuration {0}/{1}".format( + templates_processed, len(feature_template_designs)), "DEBUG") + + # Validate design type configuration + design_type = feature_template_design.get("design_type") + if design_type: + validate_str( + design_type, + dict(type="str"), + "design_type", + errormsg, + ) + + # Special case validation for Advanced SSID Configuration + if design_type == "ADVANCED_SSID_CONFIGURATION": + advanced_ssid_templates_found += 1 + if len(feature_template_designs) > 1: + errormsg.append( + "design_type: 'ADVANCED_SSID_CONFIGURATION' is a special case and should be the only design type in feature_templates." + + "Please remove other design types if 'ADVANCED_SSID_CONFIGURATION' is used." + ) + template_has_errors = True + self.log("Advanced SSID Configuration validation failed - cannot be combined with other design types", "ERROR") + + # Validate design type against supported categories + if design_type not in self.available_design_types: + errormsg.append( + "design_type: Invalid design type '{0}' in playbook. " + "Available design types are: {1}".format( + design_type, self.available_design_types + ) + ) + template_has_errors = True + self.log("Design type validation failed for '{0}' - not in supported design types".format(design_type), "ERROR") + else: + errormsg.append("design_type: Design type is missing in feature template configuration.") + template_has_errors = True + + feature_templates = feature_template_design.get("feature_templates", []) + if not feature_templates: + errormsg.append( + "feature_templates: 'feature_templates' is missing in feature_template_design." + ) + template_has_errors = True + elif not isinstance(feature_templates, list): + errormsg.append( + "feature_templates: Expected a list for 'feature_templates', but got a non-list value." + ) + template_has_errors = True + else: + # Validate each template design entry + for design in feature_templates: + if not isinstance(design, str): + errormsg.append( + "feature_templates: Expected a string for each item in 'feature_templates', but got a non-string value." 
+ ) + template_has_errors = True + elif "Default Advanced SSID Design" in feature_templates and len(feature_templates) > 1: + default_design_templates_found += 1 + if len(feature_templates) > 1: + errormsg.append( + "feature_templates: 'Default Advanced SSID Design' is a special case and should be the only " + + "template design in feature_template_designs. " + + "Please remove other template designs if 'Default Advanced SSID Design' is used." + ) + template_has_errors = True + self.log("Default Advanced SSID Design validation failed - cannot be combined with other designs", "ERROR") + + applicability_ssids = feature_template_design.get("applicability_ssids", []) + if applicability_ssids: + self.log("Validating SSID applicability for {0} SSIDs".format( + len(applicability_ssids)), "DEBUG") + if "Default Advanced SSID Design" not in feature_templates: + errormsg.append( + "applicability_ssids: 'applicability_ssids' should only be used with 'Default Advanced SSID Design' template design." + ) + template_has_errors = True + + if len(applicability_ssids) > 16: + errormsg.append( + "applicability_ssids: List contains more than 16 entries, which exceeds the allowed limit." + ) + template_has_errors = True + + for feature_ssid in applicability_ssids: + if not isinstance(feature_ssid, str): + errormsg.append( + "applicability_ssids: Expected a string for each item in 'applicability_ssids', but got a non-string value." + ) + template_has_errors = True + else: + validate_str(feature_ssid, + dict(type="str", length_max=32), + "applicability_ssids", errormsg) + + # Cross-reference SSID with ssid_details + if not self.value_exists(ssid_list, "ssid_name", feature_ssid): + errormsg.append( + "applicability_ssids: SSID '{0}' does not exist in ssid_details.".format( + feature_ssid + ) + ) + template_has_errors = True + self.log("SSID applicability validation failed - SSID '{0}' not found in ssid_details".format( + feature_ssid), "ERROR") + + if template_has_errors: + templates_with_errors += 1 + + self.log("Checking for duplicate template designs across feature template configurations", "DEBUG") + + duplicates, matches = self.find_duplicates_in_feature_templates(feature_template_designs) + if duplicates or matches: + errormsg.append( + "feature_templates: Duplicate feature_template '{0} {1}' found in playbook.".format( + str(duplicates), str(matches) + ) + ) + self.log("Duplicate feature_template validation failed - found duplicates: {0} {1}".format( + str(duplicates), str(matches)), "ERROR") + + if templates_with_errors > 0: + self.log("Feature template validation completed with errors - {0}/{1} templates failed validation".format( + templates_with_errors, templates_processed), "WARNING") + else: + self.log("Feature template validation completed successfully - all {0} templates passed validation".format( + templates_processed), "INFO") + + if advanced_ssid_templates_found > 0: + self.log("Advanced SSID Configuration templates found: {0}".format( + advanced_ssid_templates_found), "INFO") + + if default_design_templates_found > 0: + self.log("Default Advanced SSID Design templates found: {0}".format( + default_design_templates_found), "INFO") + + def find_duplicates_in_feature_templates(self, feature_template_designs): + """ + Checks for duplicate entries within each 'feature_templates' list in the provided feature templates, + and identifies dictionaries with identical 'feature_templates' lists. 
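+        For example (illustrative): given two entries whose 'feature_templates' are
+        ["A", "A"] and ["B"], the first entry is returned in the duplicates list and the
+        match list is empty; when no duplicates or identical lists are found, the method
+        returns (None, None) rather than empty lists.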
+ + Args: + feature_template_designs (list of dict): A list where each dictionary contains at least the key + 'feature_templates', which is expected to be a list of template identifiers. + + Returns: + tuple: + - List[dict]: Dictionaries from feature_template_designs that contain duplicate entries within + their 'feature_templates' list. + - List[Tuple[int, int]]: Pairs of indices from feature_template_designs where + the 'feature_templates' lists are identical. + + Notes: + - A 'duplicate' within a 'feature_templates' means the same template identifier + appears more than once in the list. + - 'Matching' means two different dictionaries have exactly the same + 'feature_templates' list (order matters). + """ + self.log("Analyzing feature template configurations for duplicate feature_templates and identical feature_templates lists", "DEBUG") + self.log("Processing {0} feature templates for duplicate detection analysis".format( + len(feature_template_designs)), "DEBUG") + + duplicates_found = [] + matching_indices = [] + combine_designs = [] + + templates_processed = 0 + intra_template_duplicates = 0 + inter_template_duplicates = 0 + identical_lists_found = 0 + + # Track seen template design lists for identical list detection + seen_template_designs = {} + global_template_designs = [] + + # Process each feature template for duplicate detection + for template_index, feature_template_design in enumerate(feature_template_designs): + templates_processed += 1 + template_design_list = feature_template_design.get('feature_templates', []) + + self.log("Analyzing feature template {0}/{1} with {2} feature templates".format( + template_index + 1, len(feature_template_designs), len(template_design_list)), "DEBUG") + + # Check for intra-template duplicates (within same feature_templates list) + if len(template_design_list) != len(set(template_design_list)): + intra_template_duplicates += 1 + duplicates_found.append(feature_template_design) + self.log("Intra-template duplicate detected in feature_templates at index {0}: {1}".format( + template_index, template_design_list), "DEBUG") + + # Check for identical feature_templates lists across feature templates + template_design_tuple = tuple(template_design_list) + if template_design_tuple in seen_template_designs: + identical_lists_found += 1 + matching_indices.append((seen_template_designs[template_design_tuple], template_index)) + self.log("Identical feature_templates lists found between indices {0} and {1}: {2}".format( + seen_template_designs[template_design_tuple], template_index, template_design_list), "DEBUG") + else: + seen_template_designs[template_design_tuple] = template_index + + # Check for inter-template duplicates (same design across different templates) + for feature_template in template_design_list: + if feature_template in global_template_designs: + inter_template_duplicates += 1 + if feature_template_design not in duplicates_found: + duplicates_found.append(feature_template_design) + self.log("Inter-template duplicate design '{0}' found in feature template at index {1}".format( + feature_template, template_index), "DEBUG") + else: + global_template_designs.append(feature_template) + + total_duplicates = len(duplicates_found) + total_matches = len(matching_indices) + + if total_duplicates > 0 or total_matches > 0: + self.log("Duplicate detection completed - found {0} templates with duplicates and {1} identical template lists".format( + total_duplicates, total_matches), "WARNING") + + if intra_template_duplicates > 0: + 
self.log("Intra-template duplicates found in {0} feature templates".format( + intra_template_duplicates), "WARNING") + + if inter_template_duplicates > 0: + self.log("Inter-template duplicate designs detected: {0} occurrences".format( + inter_template_duplicates), "WARNING") + + if identical_lists_found > 0: + self.log("Identical template design lists found: {0} matches".format( + identical_lists_found), "WARNING") + + return duplicates_found, matching_indices + + self.log("Duplicate detection completed successfully - no duplicates or identical lists found in {0} feature templates".format( + templates_processed), "INFO") + + return None, None + def get_want(self, config): """ Retrieve wireless network profile or delete profile from playbook configuration. @@ -985,6 +1586,12 @@ def get_have(self, config): self.log("Fetching additional interface information.", "DEBUG") self.get_additional_interface_info(additional_interfaces, profile_info) + feature_template_designs = config.get("feature_template_designs") + if feature_template_designs \ + and self.compare_dnac_versions(self.get_ccc_version(), "3.1.3.0") >= 0: + self.log("Fetching feature template information.", "DEBUG") + self.get_feature_template_info(feature_template_designs, profile_info) + onboarding_templates = config.get("onboarding_templates") day_n_templates = config.get("day_n_templates") profile_id = profile_info.get("profile_info", {}).get("id") @@ -1104,6 +1711,7 @@ def get_have(self, config): "additional_interfaces", "onboarding_templates", "day_n_templates", + "feature_template_designs", ] ): self.log( @@ -1316,46 +1924,200 @@ def additional_interface_check_or_create(self, interface, vlan_id): self.log(msg, "ERROR") self.fail_and_exit(msg) - def compare_config_data(self, input_config, have_info): + def get_feature_template_info(self, feature_template_designs, profile_info): """ - This function used to compare the playbook input with the have data and - return the status and unmatch value + Retrieve feature template configuration details for wireless network profile management. + + This method queries the Catalyst Center wireless API to collect comprehensive feature + template information including template designs, device types, and SSID applicability + for specified feature template configurations. It processes template mappings to retrieve + design identifiers and SSID associations essential for wireless network profile + feature template assignment and configuration management. Parameters: self (object): An instance of a class used for interacting with Cisco Catalyst Center. - input_config (dict): A dict containing playbook config of wireless profile. - have_prof_info (dict): A string contain the profile response from have function + feature_template_designs (list): List of dictionaries containing feature template configurations. + Format: [{"design_type": "AAA_RADIUS_ATTRIBUTES_CONFIGURATION", + "feature_templates": ["design1", "design2"], + "applicability_ssids": ["SSID1", "SSID2"]}] + Each dictionary contains design type, template designs, + and optional SSID applicability. + profile_info (dict): Dictionary to store collected feature template information. + Updated with "feature_template_designs" key containing template details + for wireless profile configuration processing. Returns: - matched (bool): Update True or False if input match with the have data - dict or None: A dict contain unmatched kay value pair + None: This method updates the profile_info dictionary directly with feature template + details. 
Returns None if no templates found or if errors occur during processing. + + Note: + Feature template information is essential for wireless profile configuration + and defines how specific wireless features are applied to network profiles + and associated SSIDs within the Catalyst Center wireless infrastructure. """ - self.log( - "Compare the input config: {0} with have: {1}".format( - self.pprint(input_config), self.pprint(have_info) - ), - "INFO", - ) - unmatched_keys = [] - have_prof_info = have_info.get("profile_info") - ssid_list = input_config.get("ssid_details", []) - have_ssid_details = have_prof_info.get("ssidDetails", []) - ap_zones_list = input_config.get("ap_zones", []) - have_ap_zones = have_prof_info.get("ssidDetails", []) - additional_interfaces = input_config.get("additional_interfaces", []) - have_additional_interfaces = have_prof_info.get("additionalInterfaces", []) + self.log("Retrieving feature template configuration details for wireless network profile management", "DEBUG") + self.log("Processing {0} feature template configurations for template design collection".format( + len(feature_template_designs)), "DEBUG") - if ssid_list: - if not have_ssid_details: - self.log("No SSID details found in the existing profile.", "DEBUG") - unmatched_keys.append(ssid_list) - else: - if ssid_list: - for each_ssid in ssid_list: - for have_ssid in have_ssid_details: - if each_ssid.get("ssid_name") == have_ssid.get("ssidName"): - ssid_match, unmatched_values = ( - self.compare_each_config_with_have( + if not feature_template_designs: + self.log("No feature template designs provided for template information retrieval - returning without processing", "DEBUG") + return None + + all_template_details = [] + templates_processed = 0 + designs_collected = 0 + templates_with_errors = 0 + + try: + for feature_template_design in feature_template_designs: + templates_processed += 1 + + design_type = feature_template_design.get("design_type") + feature_templates = feature_template_design.get("feature_templates", []) + + self.log("Processing feature template {0}/{1} with design type '{2}' and {3} feature templates".format( + templates_processed, len(feature_template_designs), design_type, len(feature_templates)), "DEBUG") + + if not design_type: + self.log("Design type missing in feature template configuration - skipping template", "WARNING") + continue + + if not feature_templates or not isinstance(feature_templates, list): + self.log("Feature templates missing or invalid in feature template configuration - skipping template", "WARNING") + continue + + payload_template = {"type": design_type} + + # Process each template design within the feature template + for feature_template in feature_templates: + payload_template["design_name"] = feature_template + + self.log("Querying feature template design '{0}' for design type '{1}'".format( + feature_template, design_type), "DEBUG") + + try: + design_response = self.execute_get_request( + "wireless", "get_feature_template_summary", payload_template + ) + + self.log("Feature template design query completed for '{0}'".format( + feature_template), "DEBUG") + + # Validate and process template design response + if design_response and isinstance(design_response.get("response"), list): + response_data = design_response.get("response", []) + + if response_data and len(response_data) > 0: + instances = response_data[0].get("instances", []) + + if instances and len(instances) > 0: + design_id = instances[0].get("id") + + if design_id: + designs_collected += 1 + 
template_detail = { + "design_id": design_id, + "design_name": feature_template, + "design_type": design_type + } + + # Add SSID applicability if specified + applicability_ssids = feature_template_design.get("applicability_ssids") + if applicability_ssids: + template_detail["ssids"] = applicability_ssids + self.log("Added SSID applicability for feature templates '{0}': {1}".format( + feature_template, applicability_ssids), "DEBUG") + + all_template_details.append(template_detail) + self.log("Feature template design '{0}' collected successfully with ID '{1}'".format( + feature_template, design_id), "DEBUG") + else: + self.log("No design ID found in template response for '{0}'".format( + feature_template), "WARNING") + else: + self.log("No instances found in template response for '{0}'".format( + feature_template), "WARNING") + else: + self.log("Empty response data received for feature template '{0}'".format( + feature_template), "WARNING") + else: + self.log("Invalid or empty response received for feature template '{0}'".format( + feature_template), "WARNING") + + except Exception as design_exception: + templates_with_errors += 1 + self.log("Failed to retrieve feature template design '{0}': {1}".format( + feature_template, str(design_exception)), "ERROR") + + # Update profile_info with collected template details + if all_template_details: + profile_info["feature_template_designs"] = all_template_details + self.log("Feature template information collection completed - collected {0} template designs from {1} feature templates".format( + designs_collected, templates_processed), "INFO") + + if templates_with_errors > 0: + self.log("Warning: {0} template designs encountered errors during collection".format( + templates_with_errors), "WARNING") + + return self + + self.log("No feature template designs found for the provided feature template configurations", "DEBUG") + return None + + except Exception as api_exception: + error_message = "Failed to retrieve feature template information: {0}".format(str(api_exception)) + self.log(error_message, "ERROR") + return None + + def compare_config_data(self, input_config, have_info): + """ + This function used to compare the playbook input with the have data and + return the status and unmatch value + + Parameters: + self (object): An instance of a class used for interacting with Cisco Catalyst Center. + input_config (dict): A dict containing playbook config of wireless profile. 
+ have_prof_info (dict): A string contain the profile response from have function + + Returns: + matched (bool): Update True or False if input match with the have data + dict or None: A dict contain unmatched kay value pair + """ + self.log( + "Compare the input config: {0} with have: {1}".format( + self.pprint(input_config), self.pprint(have_info) + ), + "INFO", + ) + unmatched_keys = [] + have_prof_info = have_info.get("profile_info") + ssid_list = input_config.get("ssid_details", []) + have_ssid_details = have_prof_info.get("ssidDetails", []) + ap_zones_list = input_config.get("ap_zones", []) + feature_template_designs = have_info.get("feature_template_designs", []) + + have_ap_zones = have_prof_info.get("apZones", []) + additional_interfaces = input_config.get("additional_interfaces", []) + have_additional_interfaces = have_prof_info.get("additionalInterfaces", []) + have_feature_templates = have_prof_info.get("featureTemplates", []) + + if ssid_list: + if not have_ssid_details: + self.log("No SSID details found in the existing profile.", "DEBUG") + unmatched_keys.append(ssid_list) + else: + if ssid_list: + for each_ssid in ssid_list: + self.log("Comparing Input SSID configurations for {0}".format( + each_ssid.get("ssid_name")), "INFO") + input_ssid_exist_state = False + for have_ssid in have_ssid_details: + if each_ssid.get("ssid_name") == have_ssid.get("ssidName"): + input_ssid_exist_state = True + self.log("Matching SSID found: {0}. Comparing configurations...".format( + each_ssid.get("ssid_name")), "INFO") + ssid_match, unmatched_values = ( + self.compare_each_config_with_have( each_ssid, have_ssid, "ssid_details" ) ) @@ -1368,38 +2130,125 @@ def compare_config_data(self, input_config, have_info): "WARNING", ) - if ap_zones_list: - for ap_zone in ap_zones_list: - for have_zone in have_ap_zones: - if ap_zone.get("ap_zone_name") == have_zone.get( - "apZoneName" - ): - zone_match, unmatched_values = ( - self.compare_each_config_with_have( - ap_zone, have_zone, "ap_zones" - ) - ) - if not zone_match: - self.log( - "AP Zone mismatch found: {0}".format( - unmatched_values - ), - "WARNING", - ) - unmatched_keys.append(unmatched_values) - - if additional_interfaces: - for each_interface in additional_interfaces: - interface_name = each_interface.get("interface_name") - if interface_name not in have_additional_interfaces: - unmatched_keys.append(unmatched_values) + if not input_ssid_exist_state: + unmatched_keys.append(each_ssid) self.log( - "Additional interface '{0}' not found in existing config.".format( - interface_name + "SSID '{0}' not found in existing profile.".format( + each_ssid.get("ssid_name") ), "WARNING", ) + if ap_zones_list: + if not have_ap_zones: + self.log("No AP Zone details found in the existing profile.", "DEBUG") + unmatched_keys.append(ap_zones_list) + else: + self.log("Comparing AP Zone configurations with existing profile AP Zones", "INFO") + for ap_zone in ap_zones_list: + self.log("Comparing Input AP Zone configuration for {0}".format( + ap_zone.get("ap_zone_name")), "INFO") + input_ap_zone_exist_state = False + for have_zone in have_ap_zones: + if ap_zone.get("ap_zone_name") == have_zone.get( + "apZoneName" + ): + input_ap_zone_exist_state = True + self.log("Matching AP Zone found: {0}. 
Comparing configurations...".format( + ap_zone.get("ap_zone_name")), "INFO") + zone_match, unmatched_values = ( + self.compare_each_config_with_have( + ap_zone, have_zone, "ap_zones" + ) + ) + if not zone_match: + self.log( + "AP Zone mismatch found: {0}".format( + unmatched_values + ), + "WARNING", + ) + unmatched_keys.append(unmatched_values) + + if not input_ap_zone_exist_state: + ap_zone_name = ap_zone.get("ap_zone_name", "Unknown") if ap_zone else "Unknown" + unmatched_keys.append(ap_zone) + self.log( + "AP Zone '{0}' not found in existing profile configuration.".format( + ap_zone_name + ), + "WARNING", + ) + + if additional_interfaces: + if not have_additional_interfaces: + self.log("No Additional interface details found in the existing profile.", "DEBUG") + unmatched_keys.append(additional_interfaces) + else: + self.log("Validating additional interface configurations against existing profile interfaces", "INFO") + for each_interface in additional_interfaces: + interface_name = each_interface.get("interface_name") + if interface_name not in have_additional_interfaces: + unmatched_keys.append(interface_name) + self.log( + "Additional interface '{0}' not found in existing config.".format( + interface_name + ), + "WARNING", + ) + + if feature_template_designs \ + and self.compare_dnac_versions(self.get_ccc_version(), "3.1.3.0") >= 0: + if not have_feature_templates: + self.log("No Feature template details found in the existing profile.", "DEBUG") + unmatched_keys.append(feature_template_designs) + else: + self.log("Validating feature template configurations against existing profile template assignments", "DEBUG") + self.log("Processing {0} feature template designs for configuration comparison with existing assignments".format( + len(feature_template_designs)), "DEBUG") + + feature_templates_processed = 0 + feature_templates_with_mismatches = 0 + for feature_template_design in feature_template_designs: + feature_templates_processed += 1 + template_design_name = feature_template_design.get("design_name") + template_design_id = feature_template_design.get("design_id") + template_ssids = feature_template_design.get("ssids") + + self.log("Validating feature template {0}/{1} with design '{2}'".format( + feature_templates_processed, len(feature_template_designs), template_design_name), "DEBUG") + + # Validate template design ID exists in current profile assignments + if template_design_id and not self.value_exists(have_feature_templates, "id", template_design_id): + feature_templates_with_mismatches += 1 + unmatched_keys.append( + "Feature template designs with feature template '{0}' not found.".format(template_design_name) + ) + self.log( + "Feature template design mismatch detected - feature template " + "'{0}' (ID: {1}) not found in existing profile assignments".format( + template_design_name, template_design_id), "WARNING") + + # Validate SSID applicability exists in current profile assignments + if template_ssids and not self.value_exists(have_feature_templates, "ssids", template_ssids): + feature_templates_with_mismatches += 1 + unmatched_keys.append( + "Feature template with applicability_ssids '{0}' not found.".format(template_ssids) + ) + self.log( + "Feature template SSID applicability number of mismatch " + "detected '{0}'- SSIDs '{1}' not found in existing profile template assignments".format( + len(unmatched_keys), template_ssids), "WARNING") + + # Log comprehensive feature template validation summary + if feature_templates_with_mismatches > 0: + self.log("Feature template 
validation completed with mismatches" + " - {0}/{1} templates have configuration differences".format( + feature_templates_with_mismatches, feature_templates_processed), "WARNING") + else: + self.log("Feature template validation completed successfully - all {0} templates match existing profile assignments".format( + feature_templates_processed), "DEBUG") + if unmatched_keys: self.log( "Unmatched SSID Details: {0}".format(str(unmatched_keys)), "WARNING" @@ -1625,9 +2474,12 @@ def parse_input_data_for_payload(self, wireless_data, payload_data): Returns: No return, parse the input data and load the parsed data to the payload_data """ + self.log( + "Parsing input data for payload: {0}".format(self.pprint(wireless_data)), + "DEBUG", + ) exclude_keys = [ "site_names", - "feature_templates", "onboarding_templates", "day_n_templates", "provision_group", @@ -1704,9 +2556,36 @@ def parse_input_data_for_payload(self, wireless_data, payload_data): payload_data["additionalInterfaces"].append( interface.get("interface_name") ) + + elif ( + key == "feature_template_designs" + and isinstance(value, list) + and self.compare_dnac_versions(self.get_ccc_version(), "3.1.3.0") >= 0 + ): + payload_data["featureTemplates"] = [] + feature_template_designs = wireless_data[key] + if feature_template_designs: + have_feature = self.have.get("wireless_profile").get("feature_template_designs", []) + for template in have_feature: + mapped_template = {} + if template.get("design_id"): + mapped_template["id"] = template.get("design_id") + + if template.get("ssids"): + mapped_template["ssids"] = template.get("ssids") + + if mapped_template: + payload_data["featureTemplates"].append( + mapped_template + ) + else: payload_data[mapped_key] = value + self.log( + "Parsed payload data: {0}".format(self.pprint(payload_data)), "INFO" + ) + except Exception as e: msg = "An error occurred during Parsing for payload: {0}".format(str(e)) self.log(msg, "ERROR") @@ -1833,10 +2712,8 @@ def compare_each_config_with_have(self, input_data, have_data, type_of): elif ssid_key in [ "wlan_profile_name", - "interface_name", - "enable_fabric", - "anchor_group_name", "policy_profile_name", + "enable_fabric", ]: if input_data[ssid_key] != have_data.get(self.keymap[ssid_key]): un_match_data[ssid_key] = input_data[ssid_key] @@ -1850,10 +2727,27 @@ def compare_each_config_with_have(self, input_data, have_data, type_of): "DEBUG", ) - elif ssid_key == "local_to_vlan": - input_vlan = int(input_data[ssid_key]) + elif ssid_key in [ + "interface_name", + "anchor_group_name", + ] and not input_data.get("enable_fabric"): + self.log(f"Comparing the '{ssid_key}' while 'Enable Fabric' is False", "DEBUG") + if input_data[ssid_key] != have_data.get(self.keymap[ssid_key]): + un_match_data[ssid_key] = input_data[ssid_key] + self.log( + "{0} mismatch for SSID '{1}'. 
Expected: {2}, Found: {3}".format( + ssid_key, + input_data.get("ssid_name"), + input_data[ssid_key], + have_data.get(self.keymap[ssid_key]), + ), + "DEBUG", + ) + + elif ssid_key == "local_to_vlan" and not input_data.get("enable_fabric"): + input_vlan = int(input_data.get(ssid_key, 0)) have_vlan = int( - have_data.get("flexConnect", {}).get(self.keymap[ssid_key]) + have_data.get("flexConnect", {}).get(self.keymap[ssid_key], 0) ) if input_vlan != have_vlan: un_match_data[ssid_key] = input_data[ssid_key] @@ -1875,6 +2769,17 @@ def compare_each_config_with_have(self, input_data, have_data, type_of): ), "DEBUG", ) + + have_zone_value = have_data.get(zone_key) + if zone_value != have_zone_value: + self.log( + "SSID list mismatch in AP Zone. Expected: {0}, Found: {1}".format( + zone_value, have_zone_value + ), + "DEBUG", + ) + un_match_data[zone_key] = zone_value + elif zone_key in ["ap_zone_name", "rf_profile_name"]: if input_data[zone_key] != have_data.get(self.keymap[zone_key]): un_match_data[zone_key] = zone_value @@ -2063,6 +2968,7 @@ def get_diff_merged(self, config): ssid_details = config.get("ssid_details") ap_zones = config.get("ap_zones") additional_interfaces = config.get("additional_interfaces") + feature_template_designs = config.get("feature_template_designs") profile_unmatch_stat = self.have["wireless_profile"].get("profile_compare_stat") template_unmatch_stat = self.have["wireless_profile"].get( @@ -2078,6 +2984,10 @@ def get_diff_merged(self, config): "DEBUG", ) + if feature_template_designs \ + and self.compare_dnac_versions(self.get_ccc_version(), "3.1.3.0") < 0: + del config["feature_template_designs"] + for profile in self.have["wireless_profile_list"]: if profile.get("name") == config.get("profile_name"): self.log("Found existing profile: {0}".format(profile), "DEBUG") @@ -2135,7 +3045,7 @@ def get_diff_merged(self, config): elif ( profile_id and not profile_unmatch_stat - and (ssid_details or ap_zones or additional_interfaces) + and (ssid_details or ap_zones or additional_interfaces or feature_template_designs) ): self.log( "Starting update for existing wireless profile '{0}' (ID: {1}) with new configuration.".format( @@ -2297,134 +3207,1012 @@ def verify_diff_merged(self, config): return self - def get_diff_deleted(self, each_profile): + def remove_network_profile_data(self, each_profile, each_have_profile): """ - Delete Network profile based on the given profile ID - Network configurations in Cisco Catalyst Center based on the playbook details + Remove the network profile data from Cisco Catalyst Center based on the playbook details. Parameters: each_profile (dict): The profile details to be deleted from Cisco Catalyst Center. + each_have_profile (dict): Contain existing details of the profile Returns: - self - The current object with deleted status and return response with task details. + dict: A dictionary containing the status of the removable data from profile or None. """ - if not self.value_exists( - self.have["wireless_profile_list"], "name", each_profile["profile_name"] - ): - self.msg = "No changes required, profile(s) are already deleted" - self.log(self.msg, "INFO") - self.set_operation_result( - "success", False, self.msg, "INFO" - ).check_return_status() - return self + self.log( + "Starting comprehensive network profile data removal for wireless profile management", + "INFO" + ) - each_have = self.have.get("wireless_profile") - have_profile_name = each_have.get("profile_info") - if not have_profile_name: - self.msg = "No changes were made. 
The specified profile(s) either do not exist or have already been deleted." - self.log(self.msg, "INFO") - self.set_operation_result( - "success", False, self.msg, "INFO" - ).check_return_status() - else: - have_profile_name = each_have.get("profile_info", {}).get( - "wirelessProfileName" - ) + profile_name = each_profile.get("profile_name") + self.log( + "Processing profile data removal for profile '{0}' with configuration: {1}".format( + profile_name, self.pprint(each_profile) + ), + "DEBUG" + ) + + # Input validation + if not isinstance(each_profile, dict) or not isinstance(each_have_profile, dict): + self.log("Invalid parameters provided for profile data removal", "ERROR") + return None + + if not profile_name: + self.log("Profile name missing in removal configuration", "ERROR") + return None - if have_profile_name != each_profile.get("profile_name"): - self.msg = "Profile name not matching : {0}".format( - each_profile.get("profile_name") + removable_data = copy.deepcopy(each_have_profile.get("profile_info", {})) + have_profile_id = each_have_profile.get("profile_info", {}).get("id") + have_profile_name = each_have_profile.get("profile_info", {}).get("wirelessProfileName") + + if not have_profile_id or not have_profile_name: + self.log( + "Missing essential profile information - ID: {0}, Name: {1}".format( + have_profile_id, have_profile_name + ), + "ERROR" ) - self.log(self.msg, "ERROR") - self.fail_and_exit(self.msg) + return None - have_profile_id = each_have.get("profile_info", {}).get("id") - sites = each_have.get("previous_sites") + # Statistics tracking for removal operations + remove_required = { + "ssid_status": False, + "additional_interfaces_status": False, + "ap_zones_status": False, + "feature_template_designs_status": False, + "day_n_templates_status": False, + "site_remove_status": False, + } - if sites: - unassign_site = [] - for each_site in sites: - unassign_response = self.unassign_site_to_network_profile( - each_profile["profile_name"], - have_profile_id, - each_site.get("id"), - each_site.get("id"), - ) - unassign_site.append(unassign_response) + # Execute removal operations using helper functions + if each_profile.get("ssid_details"): + remove_required["ssid_status"] = self._remove_ssid_details( + each_profile, removable_data, have_profile_name + ) - if len(unassign_site) == len(sites): - self.log("Sites unassigned successfully {0}".format(sites), "INFO") + if each_profile.get("additional_interfaces"): + remove_required["additional_interfaces_status"] = self._remove_additional_interfaces( + each_profile, removable_data, have_profile_name + ) - task_details = None - if have_profile_id: - task_details = self.delete_network_profiles( - each_profile.get("profile_name"), have_profile_id + if each_profile.get("ap_zones"): + remove_required["ap_zones_status"] = self._remove_ap_zones( + each_profile, removable_data, have_profile_name ) - if not task_details: - self.not_processed.append(each_profile) - self.msg = "Unable to delete profile: '{0}'.".format( - str(self.not_processed) + if each_profile.get("feature_template_designs"): + remove_required["feature_template_designs_status"] = self._remove_feature_template_designs( + each_profile, removable_data, have_profile_name ) - self.log(self.msg, "INFO") - self.fail_and_exit(self.msg) - profile_response = dict( - profile_name=each_profile["profile_name"], status=task_details["progress"] + unassign_templates = [] + if each_profile.get("day_n_templates"): + unassign_templates = self._remove_day_n_templates( + each_profile, 
each_have_profile, have_profile_id + ) + remove_required["day_n_templates_status"] = len(unassign_templates) > 0 + + unassign_sites = [] + if each_profile.get("site_names"): + unassign_sites = self._remove_site_names( + each_profile, each_have_profile, have_profile_name, have_profile_id + ) + remove_required["site_remove_status"] = len(unassign_sites) > 0 + + # Profile update processing + profile_update_required = ( + remove_required["ssid_status"] or + remove_required["additional_interfaces_status"] or + remove_required["ap_zones_status"] or + remove_required["feature_template_designs_status"] ) - self.deleted.append(profile_response) - self.msg = "Wireless Profile deleted successfully for '{0}'.".format( - str(self.deleted) + + if profile_update_required: + self.log( + "Profile update required - applying removable data changes to profile '{0}'".format( + have_profile_name + ), + "INFO" + ) + + update_response = self.create_update_wireless_profile(removable_data, have_profile_id) + + if update_response: + self.log( + "Successfully applied profile data removal changes to profile '{0}'".format( + have_profile_name + ), + "INFO" + ) + return remove_required + else: + self.msg = ( + "Failed to apply profile data removal changes to profile: '{0}'.".format( + each_profile["profile_name"] + ) + ) + self.log(self.msg, "ERROR") + self.fail_and_exit(self.msg) + + # Final comprehensive logging + total_operations = sum(1 for status in remove_required.values() if status) + + self.log( + "Network profile data removal completed for profile '{0}' - " + "processed {1} component types, templates unassigned: {2}, sites unassigned: {3}".format( + profile_name, total_operations, len(unassign_templates), len(unassign_sites) + ), + "INFO" ) - self.log(self.msg, "INFO") - self.set_operation_result( - "success", True, self.msg, "INFO", each_profile - ).check_return_status() - return self + return remove_required - def verify_diff_deleted(self, config): + def _remove_ssid_details(self, each_profile, removable_data, have_profile_name): """ - Verify the deletion status of wireless network profile in Cisco Catalyst Center. - Args: - - self (object): An instance of a class used for interacting with Cisco Catalyst Center. - - config (dict): The configuration details to be verified. + Remove SSID details from the wireless network profile during deletion operations. - Return: - - self (object): An instance of a class used for interacting with Cisco Catalyst Center. + Parameters: + each_profile (dict): Profile configuration containing SSIDs to remove + removable_data (dict): Current profile data to modify + have_profile_name (str): Name of existing profile for logging - Description: - This method checks the deletion status of a configuration in Cisco Catalyst Center. - It validates whether the specified profile exists in the Cisco Catalyst Center. 
+ Returns: + bool: True if SSIDs were removed, False otherwise """ - if self.get_wireless_profile(config.get("profile_name")): - msg = "Unable to delete below wireless profile '{0}'.".format( - config.get("profile_name") - ) - self.log(msg, "INFO") - self.set_operation_result( - "failed", False, msg, "INFO", config.get("profile_name") - ).check_return_status() + self.log( + "Starting SSID details removal process for wireless network profile", + "DEBUG" + ) - msg = "Wireless profile deleted and verified successfully" - self.log(msg, "INFO") - self.set_operation_result( - "success", True, msg, "INFO", msg - ).check_return_status() + ssid_details = each_profile.get("ssid_details") + if not ssid_details: + self.log("No SSID details specified for removal - skipping SSID processing", "DEBUG") + return False - return self + ssids_removed = 0 + total_ssids = len(ssid_details) - def final_response_message(self, state): + self.log( + "Processing SSID removal for {0} SSID configurations from profile '{1}'".format( + total_ssids, have_profile_name + ), + "INFO" + ) + + for ssid in ssid_details: + ssid_name = ssid.get("ssid_name") + + if not ssid_name: + self.log( + "Skipping SSID entry with missing name in removal configuration", + "WARNING" + ) + continue + + if self.value_exists(removable_data.get("ssidDetails", []), "ssidName", ssid_name): + self.log( + "Removing SSID '{0}' from profile '{1}' during deletion process".format( + ssid_name, have_profile_name + ), + "INFO" + ) + removable_data["ssidDetails"] = [ + have_ssid for have_ssid in removable_data.get("ssidDetails", []) + if have_ssid.get("ssidName") != ssid_name + ] + ssids_removed += 1 + else: + self.log( + "SSID '{0}' not found in current profile configuration - skipping removal".format( + ssid_name + ), + "WARNING" + ) + + self.log( + "SSID removal completed - removed {0}/{1} SSID configurations from profile".format( + ssids_removed, total_ssids + ), + "INFO" + ) + + return ssids_removed > 0 + + def _remove_additional_interfaces(self, each_profile, removable_data, have_profile_name): """ - To show the final message with Wireless profile response + Remove additional interface configurations from the wireless network profile. Parameters: - configs (list of dict) - Playbook config contains Wireless profile - playbook information. 
+ each_profile (dict): Profile configuration containing interfaces to remove + removable_data (dict): Current profile data to modify + have_profile_name (str): Name of existing profile for logging Returns: - self - Return response as verified created/updated/deleted - Wireless profile messages + bool: True if interfaces were removed, False otherwise + """ + + self.log( + "Starting additional interfaces removal process for wireless network profile", + "DEBUG" + ) + + additional_interfaces = each_profile.get("additional_interfaces") + if not additional_interfaces: + self.log( + "No additional interfaces specified for removal - skipping interface processing", + "DEBUG" + ) + return False + + interfaces_removed = 0 + total_interfaces = len(additional_interfaces) + + self.log( + "Processing interface removal for {0} additional interfaces from profile '{1}'".format( + total_interfaces, have_profile_name + ), + "INFO" + ) + + for interface in additional_interfaces: + interface_name = interface.get("interface_name") + + if not interface_name: + self.log( + "Skipping interface entry with missing name in removal configuration", + "WARNING" + ) + continue + + if interface_name in removable_data.get("additionalInterfaces", []): + self.log( + "Removing additional interface '{0}' from profile '{1}' during deletion".format( + interface_name, have_profile_name + ), + "INFO" + ) + removable_data["additionalInterfaces"].remove(interface_name) + interfaces_removed += 1 + else: + self.log( + "Additional interface '{0}' not found in profile - skipping removal".format( + interface_name + ), + "WARNING" + ) + + self.log( + "Interface removal completed - removed {0}/{1} additional interfaces from profile".format( + interfaces_removed, total_interfaces + ), + "INFO" + ) + + return interfaces_removed > 0 + + def _remove_ap_zones(self, each_profile, removable_data, have_profile_name): + """ + Remove AP zone configurations from the wireless network profile. 
+ + Parameters: + each_profile (dict): Profile configuration containing AP zones to remove + removable_data (dict): Current profile data to modify + have_profile_name (str): Name of existing profile for logging + + Returns: + bool: True if AP zones were removed, False otherwise + """ + + self.log( + "Starting AP zones removal process for wireless network profile", + "DEBUG" + ) + + ap_zones = each_profile.get("ap_zones") + if not ap_zones: + self.log("No AP zones specified for removal - skipping AP zone processing", "DEBUG") + return False + + zones_removed = 0 + total_zones = len(ap_zones) + + self.log( + "Processing AP zone removal for {0} zones from profile '{1}'".format( + total_zones, have_profile_name + ), + "INFO" + ) + + for ap_zone in ap_zones: + ap_zone_name = ap_zone.get("ap_zone_name") + + if not ap_zone_name: + self.log( + "Skipping AP zone entry with missing name in removal configuration", + "WARNING" + ) + continue + + if self.value_exists(removable_data.get("apZones", []), "apZoneName", ap_zone_name): + self.log( + "Removing AP zone '{0}' from profile '{1}' during deletion process".format( + ap_zone_name, have_profile_name + ), + "INFO" + ) + removable_data["apZones"] = [ + have_apzone for have_apzone in removable_data.get("apZones", []) + if have_apzone.get("apZoneName") != ap_zone_name + ] + zones_removed += 1 + else: + self.log( + "AP zone '{0}' not found in current profile configuration - skipping removal".format( + ap_zone_name + ), + "WARNING" + ) + + self.log( + "AP zone removal completed - removed {0}/{1} AP zones from profile".format( + zones_removed, total_zones + ), + "INFO" + ) + + return zones_removed > 0 + + def _remove_feature_template_designs(self, each_profile, removable_data, have_profile_name): + """ + Remove feature template design configurations from the wireless network profile. 
+ + Parameters: + each_profile (dict): Profile configuration containing feature templates to remove + removable_data (dict): Current profile data to modify + have_profile_name (str): Name of existing profile for logging + + Returns: + bool: True if feature templates were removed, False otherwise + """ + + self.log( + "Starting feature template designs removal process for wireless network profile", + "DEBUG" + ) + + feature_template_designs = each_profile.get("feature_template_designs") + if not feature_template_designs: + self.log( + "No feature template designs specified for removal - skipping template processing", + "DEBUG" + ) + return False + + templates_removed = 0 + total_templates = len(feature_template_designs) + + self.log( + "Processing feature template removal for {0} template designs from profile '{1}'".format( + total_templates, have_profile_name + ), + "INFO" + ) + + for feature_template in feature_template_designs: + feature_template_names = feature_template.get("feature_templates") + + if not feature_template_names: + self.log( + "Skipping feature template entry with missing template names", + "WARNING" + ) + continue + + for each_feature_template in feature_template_names: + if self.value_exists( + removable_data.get("featureTemplates", []), + "designName", + each_feature_template, + ): + self.log( + "Removing feature template '{0}' from profile '{1}' during deletion".format( + each_feature_template, have_profile_name + ), + "INFO" + ) + removable_data["featureTemplates"] = [ + have_feature_template + for have_feature_template in removable_data.get("featureTemplates", []) + if have_feature_template.get("designName") != each_feature_template + ] + templates_removed += 1 + else: + self.log( + "Feature template '{0}' not found in profile - skipping removal".format( + each_feature_template + ), + "WARNING" + ) + + self.log( + "Feature template removal completed - removed {0} template designs from profile".format( + templates_removed + ), + "INFO" + ) + + return templates_removed > 0 + + def _remove_day_n_templates(self, each_profile, each_have_profile, have_profile_id): + """ + Remove Day-N template assignments from the wireless network profile. 
+ + Parameters: + each_profile (dict): Profile configuration containing Day-N templates to remove + each_have_profile (dict): Current profile state information + have_profile_id (str): Profile ID for template unassignment + + Returns: + list: Results of template unassignment operations + """ + + self.log( + "Starting Day-N templates removal process for wireless network profile", + "DEBUG" + ) + + day_n_templates = each_profile.get("day_n_templates") + if not day_n_templates: + self.log("No Day-N templates specified for removal - skipping template processing", "DEBUG") + return [] + + unassign_templates = [] + templates_removed = 0 + profile_name = each_profile.get("profile_name") + + self.log( + "Processing Day-N template removal for {0} templates from profile '{1}'".format( + len(day_n_templates), profile_name + ), + "INFO" + ) + + for day_n_template in day_n_templates: + if not self.value_exists( + each_have_profile.get("day_n_templates", {}), + "template_name", + day_n_template, + ): + self.log( + "Day-N template '{0}' not found in current profile assignments - skipping".format( + day_n_template + ), + "WARNING" + ) + continue + + for have_day_n_template in each_have_profile.get("day_n_templates", {}): + template_name = have_day_n_template.get("template_name") + template_id = have_day_n_template.get("template_id") + + if template_name == day_n_template: + self.log( + "Unassigning Day-N template '{0}' (ID: {1}) from profile '{2}'".format( + template_name, template_id, profile_name + ), + "INFO" + ) + + result = self.detach_networkprofile_cli_template( + profile_name, have_profile_id, template_name, template_id + ) + unassign_templates.append(result) + templates_removed += 1 + + self.log( + "Successfully unassigned Day-N template '{0}' from profile '{1}'".format( + template_name, profile_name + ), + "INFO" + ) + + self.log( + "Day-N template removal completed - removed {0} template assignments from profile".format( + templates_removed + ), + "INFO" + ) + + return unassign_templates + + def _remove_site_names(self, each_profile, each_have_profile, have_profile_name, have_profile_id): + """ + Remove site name assignments from the wireless network profile. 
+ + Parameters: + each_profile (dict): Profile configuration containing site names to remove + each_have_profile (dict): Current profile state information + have_profile_name (str): Name of existing profile for logging + have_profile_id (str): Profile ID for site unassignment + + Returns: + list: Results of site unassignment operations + """ + + self.log( + "Starting site names removal process for wireless network profile", + "DEBUG" + ) + + site_names = each_profile.get("site_names") + if not site_names: + self.log("No site names specified for removal - skipping site processing", "DEBUG") + return [] + + unassign_sites = [] + sites_removed = 0 + + self.log( + "Processing site removal for {0} sites from profile '{1}'".format( + len(site_names), have_profile_name + ), + "INFO" + ) + + for site_name in site_names: + if not self.value_exists( + each_have_profile.get("site_response", {}), + "site_names", + site_name, + ): + self.log( + "Site '{0}' not found in current profile assignments - skipping removal".format( + site_name + ), + "WARNING" + ) + continue + + for have_site in each_have_profile.get("site_response", {}): + have_site_name = have_site.get("site_names") + have_site_id = have_site.get("site_id") + + if have_site_name == site_name: + self.log( + "Unassigning site '{0}' from profile '{1}' during removal process".format( + site_name, have_profile_name + ), + "INFO" + ) + + unassign_response = self.unassign_site_to_network_profile( + have_profile_name, have_profile_id, have_site_name, have_site_id + ) + unassign_sites.append(unassign_response) + sites_removed += 1 + + self.log( + "Successfully unassigned site '{0}' from profile '{1}'".format( + have_site_name, have_profile_name + ), + "INFO" + ) + + self.log( + "Site removal completed - removed {0} site assignments from profile".format( + sites_removed + ), + "INFO" + ) + + return unassign_sites + + def get_diff_deleted(self, each_profile): + """ + Delete Network profile based on the given profile ID + Network configurations in Cisco Catalyst Center based on the playbook details + + Parameters: + each_profile (dict): The profile details to be deleted from Cisco Catalyst Center. + + Returns: + self - The current object with deleted status and return response with task details. 
+ """ + self.log( + "Starting comprehensive wireless network profile deletion process for profile management", + "INFO" + ) + + profile_name = each_profile.get("profile_name") + self.log( + "Processing profile deletion request for profile configuration: {0}".format( + self.pprint(each_profile) + ), + "DEBUG" + ) + + if not isinstance(each_profile, dict): + self.log( + "Invalid each_profile parameter - expected dict, got: {0}".format( + type(each_profile).__name__ + ), + "ERROR" + ) + self.msg = "Invalid profile configuration provided for deletion" + self.set_operation_result("failed", False, self.msg, "ERROR") + return self + + if not profile_name: + self.log( + "Profile name missing in deletion configuration - cannot proceed with deletion", + "ERROR" + ) + self.msg = "Profile name is required for deletion operations" + self.set_operation_result("failed", False, self.msg, "ERROR") + return self + + # Phase 1: Validate profile existence + if not self.value_exists( + self.have["wireless_profile_list"], "name", profile_name + ): + self.msg = "No changes required, profile(s) are already deleted" + self.log(self.msg, "INFO") + self.set_operation_result( + "success", False, self.msg, "INFO" + ).check_return_status() + return self + + each_have = self.have.get("wireless_profile") + have_profile_info = each_have.get("profile_info") + + if not have_profile_info: + self.msg = ( + "No changes were made. The specified profile(s) either do not exist " + "or have already been deleted." + ) + self.log(self.msg, "INFO") + self.set_operation_result( + "success", False, self.msg, "INFO" + ).check_return_status() + return self + + have_profile_name = have_profile_info.get("wirelessProfileName") + have_profile_id = have_profile_info.get("id") + + if have_profile_name != profile_name: + self.msg = "Profile name not matching: {0}".format(profile_name) + self.log(self.msg, "ERROR") + self.fail_and_exit(self.msg) + + if not have_profile_id: + self.log( + "Profile ID missing for profile '{0}' - cannot proceed with deletion".format( + profile_name + ), + "ERROR" + ) + self.msg = "Profile ID not found for deletion operations" + self.fail_and_exit(self.msg) + + self.log( + "Profile validation completed - proceeding with deletion for profile '{0}' " + "with ID '{1}'".format(have_profile_name, have_profile_id), + "INFO" + ) + + have_profile_id = each_have.get("profile_info", {}).get("id") + + # Determine deletion type based on profile components + profile_components_specified = any(each_profile.get(key) for key in [ + "site_names", "ssid_details", "day_n_templates", + "additional_interfaces", "ap_zones", "feature_template_designs" + ]) + + if not profile_components_specified: + self.log( + "No specific components specified - proceeding with complete profile deletion " + "for profile '{0}'".format(have_profile_name), + "INFO" + ) + + # Phase 2: Complete Profile Deletion + sites = each_have.get("previous_sites") + + if sites: + self.log( + "Phase 2a: Unassigning {0} sites before complete profile deletion".format( + len(sites) + ), + "INFO" + ) + + unassign_site = [] + sites_unassigned = 0 + + for each_site in sites: + site_id = each_site.get("id") + site_name = each_site.get("name", "Unknown") + + self.log( + "Unassigning site '{0}' (ID: {1}) from profile '{2}' before deletion".format( + site_name, site_id, have_profile_name + ), + "INFO" + ) + unassign_response = self.unassign_site_to_network_profile( + have_profile_name, + have_profile_id, + site_id, + site_id, + ) + + if unassign_response: + sites_unassigned += 1 + 
unassign_site.append(unassign_response) + self.log( + "Successfully unassigned site '{0}' from profile '{1}'".format( + site_name, have_profile_name + ), + "INFO" + ) + + self.log( + "Site unassignment completed - unassigned {0}/{1} sites from profile".format( + sites_unassigned, len(sites) + ), + "INFO" + ) + else: + self.log( + "No sites associated with profile '{0}' - skipping site unassignment".format( + have_profile_name + ), + "INFO" + ) + + # Phase 2b: Delete the complete profile + self.log( + "Phase 2b: Executing complete profile deletion for profile '{0}' " + "with ID '{1}'".format(have_profile_name, have_profile_id), + "INFO" + ) + + task_details = None + if have_profile_id: + task_details = self.delete_network_profiles( + profile_name, have_profile_id + ) + + if not task_details: + self.not_processed.append(each_profile) + self.msg = "Unable to delete profile: '{0}'.".format( + str(self.not_processed) + ) + self.log(self.msg, "ERROR") + self.fail_and_exit(self.msg) + + profile_response = dict( + profile_name=profile_name, + status=task_details["progress"] + ) + + self.deleted.append(profile_response) + self.msg = "Wireless Profile deleted successfully for '{0}'.".format( + str(self.deleted) + ) + + self.log(self.msg, "INFO") + self.set_operation_result( + "success", True, self.msg, "INFO", each_profile + ).check_return_status() + + else: + # Phase 3: Selective Component Removal + self.log( + "Specific profile components specified - proceeding with selective " + "component removal for profile '{0}'".format(have_profile_name), + "INFO" + ) + + remove_status = self.remove_network_profile_data(each_profile, each_have) or {} + + self.log( + "Profile component removal status: {0}".format( + self.pprint(remove_status) + ), + "DEBUG" + ) + + # Validate removal operation results + removal_occurred = any(remove_status.get(key, False) for key in [ + "site_remove_status", "day_n_templates_status", "ssid_status", + "ap_zones_status", "feature_template_designs_status", + "additional_interfaces_status" + ]) + + if not remove_status or not removal_occurred: + self.msg = ( + "Profile data already removed or not exist to remove data from " + "profile: '{0}'.".format(have_profile_name) + ) + self.log(self.msg, "DEBUG") + self.already_removed.append(have_profile_name) + self.set_operation_result( + "success", False, self.msg, "INFO", have_profile_name + ).check_return_status() + return self + + # Build comprehensive removal success message + self.msg = "Wireless Profile data removed successfully for '{0}'.".format( + profile_name + ) + + response_status = {} + # Process site removal status + if remove_status.get("site_remove_status"): + sites = each_profile.get("site_names", []) + sites_message = "Sites '{0}' unassigned successfully.".format( + "', '".join(sites) + ) + self.msg += " " + sites_message + response_status["site_remove_status"] = sites_message + + # Process Day N template removal status + if remove_status.get("day_n_templates_status"): + templates = each_profile.get("day_n_templates", []) + templates_message = "Day N templates '{0}' unassigned successfully.".format( + "', '".join(templates) + ) + self.msg += " " + templates_message + response_status["day_n_templates_status"] = templates_message + + # Process SSID removal status + if remove_status.get("ssid_status"): + ssids = each_profile.get("ssid_details", []) + ssid_names = [ssid.get("ssid_name") for ssid in ssids if ssid.get("ssid_name")] + + if ssid_names: + ssids_message = "SSIDs '{0}' removed successfully.".format( + "', 
'".join(ssid_names) + ) + self.msg += " " + ssids_message + response_status["ssid_status"] = ssids_message + + # Process additional interfaces removal status + if remove_status.get("additional_interfaces_status"): + additional_interfaces = each_profile.get("additional_interfaces", []) + interface_names = [ + interface.get("interface_name") + for interface in additional_interfaces + if interface.get("interface_name") + ] + + if interface_names: + interfaces_message = "Additional Interfaces '{0}' removed successfully.".format( + "', '".join(interface_names) + ) + self.msg += " " + interfaces_message + response_status["additional_interfaces_status"] = interfaces_message + + # Process AP zones removal status + if remove_status.get("ap_zones_status"): + ap_zones = each_profile.get("ap_zones", []) + zone_names = [ + zone.get("ap_zone_name") + for zone in ap_zones + if zone.get("ap_zone_name") + ] + + if zone_names: + zones_message = "AP Zones '{0}' removed successfully.".format( + "', '".join(zone_names) + ) + self.msg += " " + zones_message + response_status["ap_zones_status"] = zones_message + + # Process feature template designs removal status + if remove_status.get("feature_template_designs_status"): + feature_template_designs = each_profile.get("feature_template_designs", []) + template_names = [] + for design in feature_template_designs: + template_names.extend(design.get("feature_templates", [])) + + if template_names: + feature_templates_message = ( + "Feature Template Designs '{0}' removed successfully.".format( + "', '".join(template_names) + ) + ) + self.msg += " " + feature_templates_message + response_status["feature_template_designs_status"] = feature_templates_message + + self.remove_profile_data.append({profile_name: response_status}) + + self.log(self.msg, "INFO") + self.set_operation_result( + "success", True, self.msg, "INFO", remove_status + ).check_return_status() + + # Comprehensive deletion operation logging + total_components_processed = sum([ + 1 if each_profile.get("site_names") else 0, + 1 if each_profile.get("ssid_details") else 0, + 1 if each_profile.get("day_n_templates") else 0, + 1 if each_profile.get("additional_interfaces") else 0, + 1 if each_profile.get("ap_zones") else 0, + 1 if each_profile.get("feature_template_designs") else 0 + ]) + + deletion_type = "Complete profile deletion" if not profile_components_specified else "Selective component removal" + + self.log( + "Wireless network profile deletion process completed for profile '{0}' - " + "operation type: {1}, components processed: {2}".format( + profile_name, deletion_type, total_components_processed + ), + "INFO" + ) + + return self + + def verify_diff_deleted(self, config): + """ + Verify the deletion status of wireless network profile in Cisco Catalyst Center. + Args: + - self (object): An instance of a class used for interacting with Cisco Catalyst Center. + - config (dict): The configuration details to be verified. + + Return: + - self (object): An instance of a class used for interacting with Cisco Catalyst Center. + + Description: + This method checks the deletion status of a configuration in Cisco Catalyst Center. + It validates whether the specified profile exists in the Cisco Catalyst Center. 
+ """ + if self.remove_profile_data: + msg = "Wireless profile data removed successfully for: {0}".format( + self.remove_profile_data + ) + self.log(msg, "INFO") + self.set_operation_result( + "success", True, msg, "INFO", self.remove_profile_data + ).check_return_status() + return self + + if self.already_removed: + self.log(self.msg, "INFO") + self.set_operation_result( + "success", False, self.msg, "INFO", self.already_removed + ).check_return_status() + return self + + if self.get_wireless_profile(config.get("profile_name")): + msg = "Unable to delete below wireless profile '{0}'.".format( + config.get("profile_name") + ) + self.log(msg, "INFO") + self.set_operation_result( + "failed", False, msg, "INFO", config.get("profile_name") + ).check_return_status() + + msg = "Wireless profile deleted and verified successfully" + self.log(msg, "INFO") + self.set_operation_result( + "success", True, msg, "INFO", msg + ).check_return_status() + + return self + + def final_response_message(self, state): + """ + To show the final message with Wireless profile response + + Parameters: + configs (list of dict) - Playbook config contains Wireless profile + playbook information. + + Returns: + self - Return response as verified created/updated/deleted + Wireless profile messages """ if state == "merged": if self.created: @@ -2481,6 +4269,20 @@ def final_response_message(self, state): self.set_operation_result( "failed", False, self.msg, "ERROR", self.not_processed ).check_return_status() + elif self.remove_profile_data: + self.msg = "Wireless profile data removed successfully for: {0}".format( + self.remove_profile_data + ) + self.log(self.msg, "INFO") + self.set_operation_result( + "success", True, self.msg, "INFO", self.remove_profile_data + ).check_return_status() + elif self.already_removed: + self.log(self.msg, "INFO") + self.set_operation_result( + "success", False, self.msg, "INFO", self.already_removed + ).check_return_status() + return self else: self.msg = "Wireless profile(s) already deleted for: {0}".format( self.config diff --git a/plugins/modules/network_settings_workflow_manager.py b/plugins/modules/network_settings_workflow_manager.py index 85c7363839..3435e60b08 100644 --- a/plugins/modules/network_settings_workflow_manager.py +++ b/plugins/modules/network_settings_workflow_manager.py @@ -1259,7 +1259,7 @@ def get_global_pool_params(self, pool_info): current_version = self.get_ccc_version() self.log("Current Cisco Catalyst Center Version: {}".format(current_version), "DEBUG") - if self.compare_dnac_versions(current_version, "2.3.7.6") <= 0: + if self.compare_dnac_versions(current_version, "2.3.7.9") < 0: self.log("Using get_global_pool_params_v1 based on version check", "DEBUG") return self.get_global_pool_params_v1(pool_info) @@ -1269,7 +1269,7 @@ def get_global_pool_params(self, pool_info): def get_global_pool_params_v1(self, pool_info): """ Process Global Pool parameters from playbook data for Global Pool configuration - in Cisco Catalyst Center version <= 2.3.7.6. + in Cisco Catalyst Center version < 2.3.7.9. Parameters: pool_info (dict) - Playbook data containing information about the global pool @@ -1310,7 +1310,7 @@ def get_global_pool_params_v1(self, pool_info): def get_global_pool_params_v2(self, pool_info): """ Process Global Pool parameters from playbook data for Global Pool configuration in - Cisco Catalyst Center version > 2.3.7.6. + Cisco Catalyst Center version <= 2.3.7.9. 
Parameters: pool_info (dict) - Playbook data containing information about the global pool @@ -1840,7 +1840,7 @@ def get_network_params(self, site_name, site_id): Returns: network_details: Processed Network data in a format suitable for configuration according to cisco catalyst center version. """ - if self.compare_dnac_versions(self.get_ccc_version(), "2.3.7.6") <= 0: + if self.compare_dnac_versions(self.get_ccc_version(), "2.3.7.9") < 0: self.log( "Using get_network_params_v1 based on version check", "DEBUG", @@ -1851,7 +1851,7 @@ def get_network_params(self, site_name, site_id): def get_network_params_v1(self, site_name, site_id): """ - Process Network parameters for Cisco Catalyst Center version <= 2.3.5.3. + Process Network parameters for Cisco Catalyst Center version < 2.3.7.9. Parameters: site_name (str) - The Site name @@ -2150,7 +2150,7 @@ def get_network_params_v1(self, site_name, site_id): def get_network_params_v2(self, site_name, site_id): """ - Process Network parameters for Cisco Catalyst Center version >= 2.3.7.6. + Process Network parameters for Cisco Catalyst Center version >= 2.3.7.9. Parameters: site_name (str) - The Site name @@ -2332,7 +2332,7 @@ def get_reserved_ip_subpool(self, site_name, site_id): start_time = time.time() while True: try: - if self.compare_dnac_versions(self.get_ccc_version(), "2.3.7.6") <= 0: + if self.compare_dnac_versions(self.get_ccc_version(), "2.3.7.9") < 0: response = self.dnac._exec( family="network_settings", function="get_reserve_ip_subpool", @@ -2420,7 +2420,7 @@ def global_pool_exists(self, name): global_pool_details = None response = None current_version = self.get_ccc_version() - is_old_version = self.compare_dnac_versions(current_version, "2.3.7.6") <= 0 + is_old_version = self.compare_dnac_versions(current_version, "2.3.7.9") < 0 page_limit = 25 if is_old_version else 500 while True: try: @@ -2568,7 +2568,7 @@ def reserve_pool_exists(self, name, site_name): if name == "": reserve_pool_details = self.all_reserved_pool_details.get(site_id) else: - if self.compare_dnac_versions(self.get_ccc_version(), "2.3.7.6") <= 0: + if self.compare_dnac_versions(self.get_ccc_version(), "2.3.7.9") < 0: reserve_pool_details = get_dict_result( self.all_reserved_pool_details.get(site_id), "groupName", name) else: @@ -3026,12 +3026,12 @@ def get_global_pool_cidr(self, global_pool_cidr, global_pool_name): global_pool_name (str): Name of the global pool to search for in Catalyst Center. Returns: - str: The global pool CIDR (for versions <= 2.3.7.6) or the global pool ID (for later versions). + str: The global pool CIDR (for versions < 2.3.7.9) or the global pool ID (for later versions). In case of error, the method sets internal status/message and returns from `check_return_status()`. """ self.log(f"Starting retrieval of global pool. 
CIDR: {global_pool_cidr}, Name: {global_pool_name}", "INFO") current_version = self.get_ccc_version() - is_old_version = self.compare_dnac_versions(current_version, "2.3.7.6") <= 0 + is_old_version = self.compare_dnac_versions(current_version, "2.3.7.9") < 0 page_limit = 25 if is_old_version else 500 # Direct return for older versions when CIDR is provided @@ -3084,7 +3084,7 @@ def get_global_pool_cidr(self, global_pool_cidr, global_pool_name): self.status = "failed" return self.check_return_status() - # Old Version (<= 2.3.7.6) + # Old Version (< 2.3.7.9) if is_old_version: self.log(f"Looking for global pool '{global_pool_name}' in older version format.", "DEBUG") global_pool_details = get_dict_result(all_global_pool_details, "ipPoolName", global_pool_name) @@ -3165,7 +3165,7 @@ def get_global_pool_cidr(self, global_pool_cidr, global_pool_name): def get_want_global_pool_v1(self, global_ippool): """ - Get all the Global Pool information from playbook for Catalyst Center version <= 2.3.7.6. + Get all the Global Pool information from playbook for Catalyst Center version < 2.3.7.9. Set the status and the msg before returning from the API Check the return value of the API with check_return_status() @@ -3255,7 +3255,7 @@ def get_want_global_pool_v1(self, global_ippool): def get_want_global_pool_v2(self, global_ippool): """ - Get all the Global Pool information from playbook for Catalyst Center version > 2.3.7.6. + Get all the Global Pool information from playbook for Catalyst Center version >= 2.3.7.9. Set the status and the msg before returning from the API Check the return value of the API with check_return_status() @@ -3361,7 +3361,7 @@ def get_want_global_pool_v2(self, global_ippool): def get_want_reserve_pool_v1(self, reserve_pool): """ - Get all the Reserved Pool information from playbook for Catalyst Center version <= 2.3.7.6. + Get all the Reserved Pool information from playbook for Catalyst Center version < 2.3.7.9. Set the status and the msg before returning from the API Check the return value of the API with check_return_status() @@ -3564,7 +3564,7 @@ def get_prefix_length_from_total_hosts(self, total_hosts, ip_version="IPv4"): # Calculate host bits (add 2 for network + broadcast in IPv4) adjustment = 2 if ip_version == "IPv4" else 0 - host_bits = math.ceil(math.log2(total_hosts + adjustment)) + host_bits = math.ceil(math.log2(total_hosts)) prefix_length = max_bits - host_bits self.log( @@ -3575,7 +3575,7 @@ def get_prefix_length_from_total_hosts(self, total_hosts, ip_version="IPv4"): def get_want_reserve_pool_v2(self, reserve_pool): """ - Get all the Reserved Pool information from playbook for Catalyst Center version > 2.3.7.6. + Get all the Reserved Pool information from playbook for Catalyst Center version >= 2.3.7.9. 
Set the status and the msg before returning from the API Check the return value of the API with check_return_status() @@ -3829,7 +3829,7 @@ def get_want_network(self, network_management_details): want_network_settings = want_network.get("settings") self.log("Current state (have): {0}".format(self.have), "DEBUG") have_network_details = self.have.get("network")[network_management_index].get("net_details").get("settings") - if self.compare_dnac_versions(self.get_ccc_version(), "2.3.7.6") <= 0: + if self.compare_dnac_versions(self.get_ccc_version(), "2.3.7.9") < 0: if item.get("dhcp_server") is not None: want_network_settings.update({ "dhcpServer": item.get("dhcp_server") @@ -4828,11 +4828,11 @@ def get_want(self, config): self.log("Processing global pool details from the playbook.", "INFO") global_ippool = config.get("global_pool_details", {}).get("settings", {}).get("ip_pool") if global_ippool: - if self.compare_dnac_versions(ccc_version, "2.3.7.6") <= 0: - self.log("Using Global Pool handling method: V1 (legacy) for Catalyst Center version <= 2.3.7.6.", "DEBUG") + if self.compare_dnac_versions(ccc_version, "2.3.7.9") < 0: + self.log("Using Global Pool handling method: V1 (legacy) for Catalyst Center version < 2.3.7.9.", "DEBUG") self.get_want_global_pool_v1(global_ippool).check_return_status() else: - self.log("Using Global Pool handling method: V2 (latest) for Catalyst Center version > 2.3.7.6.", "DEBUG") + self.log("Using Global Pool handling method: V2 (latest) for Catalyst Center version >= 2.3.7.9.", "DEBUG") self.get_want_global_pool_v2(global_ippool).check_return_status() else: self.log("No valid global pool details found in the playbook.", "WARNING") @@ -4840,12 +4840,12 @@ def get_want(self, config): if config.get("reserve_pool_details"): self.log("Processing reserve pool details from the playbook.", "INFO") reserve_pool = config.get("reserve_pool_details") - if self.compare_dnac_versions(ccc_version, "2.3.7.6") <= 0: - self.log("Detected Catalyst Center version <= 2.3.7.6: {}".format(ccc_version), "DEBUG") + if self.compare_dnac_versions(ccc_version, "2.3.7.9") < 0: + self.log("Detected Catalyst Center version < 2.3.7.9: {}".format(ccc_version), "DEBUG") self.log("Using Reserve Pool handling method: V1 (legacy)", "DEBUG") self.get_want_reserve_pool_v1(reserve_pool).check_return_status() else: - self.log("Detected Catalyst Center version > 2.3.7.6: {}".format(ccc_version), "DEBUG") + self.log("Detected Catalyst Center version >= 2.3.7.9: {}".format(ccc_version), "DEBUG") self.log("Using Reserve Pool handling method: V2 (latest)", "DEBUG") self.get_want_reserve_pool_v2(reserve_pool).check_return_status() else: @@ -4891,7 +4891,7 @@ def update_global_pool(self, global_pool): def update_global_pool_v1(self, global_pool): """ - Update/Create Global Pool in Cisco Catalyst Center with fields provided in playbook for Catalyst Center version <= 2.3.7.6. + Update/Create Global Pool in Cisco Catalyst Center with fields provided in playbook for Catalyst Center version < 2.3.7.9. Parameters: global_pool (list of dict) - Global Pool playbook details @@ -5088,7 +5088,7 @@ def update_global_pool_v1(self, global_pool): def update_global_pool_v2(self, global_pool): """ - Update/Create Global Pool in Cisco Catalyst Center with fields provided in playbook for Catalyst Center version > 2.3.7.6. + Update/Create Global Pool in Cisco Catalyst Center with fields provided in playbook for Catalyst Center version >= 2.3.7.9. 
Parameters: global_pool (list of dict) - Global Pool playbook details @@ -5096,7 +5096,7 @@ def update_global_pool_v2(self, global_pool): Returns: self: The current object after completing the global pool operations. """ - self.log("Starting global pool update/create process for Catalyst Center version > 2.3.7.6.", "INFO") + self.log("Starting global pool update/create process for Catalyst Center version >= 2.3.7.9.", "INFO") create_global_pool = [] update_global_pool = [] @@ -5236,14 +5236,14 @@ def update_reserve_pool(self, reserve_pool): Dispatcher function that routes to the appropriate reserve pool update method based on Catalyst Center version (v1 or v2). """ - if self.compare_dnac_versions(self.get_ccc_version(), "2.3.7.6") <= 0: + if self.compare_dnac_versions(self.get_ccc_version(), "2.3.7.9") < 0: return self.update_reserve_pool_v1(reserve_pool) else: return self.update_reserve_pool_v2(reserve_pool) def update_reserve_pool_v1(self, reserve_pool): """ - Update or Create a Reserve Pool in Cisco Catalyst Center based on the provided configuration for Catalyst Center version <= 2.3.7.6. + Update or Create a Reserve Pool in Cisco Catalyst Center based on the provided configuration for Catalyst Center version < 2.3.7.9. This method checks if a reserve pool with the specified name exists in Cisco Catalyst Center. If it exists and requires an update, it updates the pool. If not, it creates a new pool. @@ -5394,7 +5394,7 @@ def update_reserve_pool_v1(self, reserve_pool): def update_reserve_pool_v2(self, reserve_pool): """ - Update or Create a Reserve Pool in Cisco Catalyst Center based on the provided configuration for Catalyst Center version > 2.3.7.6. + Update or Create a Reserve Pool in Cisco Catalyst Center based on the provided configuration for Catalyst Center version >= 2.3.7.9. This method checks if a reserve pool with the specified name exists in Cisco Catalyst Center. If it exists and requires an update, it updates the pool. If not, it creates a new pool. @@ -5404,7 +5404,7 @@ def update_reserve_pool_v2(self, reserve_pool): Returns: self - The current object with Global Pool, Reserved Pool, Network Servers information. 
""" - self.log("Starting the reserved pool update/create process for Catalyst Cebter version > 2.3.7.6.", "INFO") + self.log("Starting the reserved pool update/create process for Catalyst Center version >= 2.3.7.9.", "INFO") reserve_pool_index = -1 for item in reserve_pool: @@ -5833,7 +5833,7 @@ def update_aaa_settings_for_site( param = {"id": site_id, "aaaClient": client_and_endpoint_aaa} if network_aaa == {} and client_and_endpoint_aaa == {}: - payload = {"settings": {"aaaNetwork": {}, "aaaClient": {}}} + payload = {"aaaNetwork": {}, "aaaClient": {}} param = {"id": site_id, "payload": payload} try: @@ -5889,8 +5889,8 @@ def update_network(self, network_management): # Check update is required or not skip_update = False - # Only apply extra checks for versions > 2.3.7.6 - if self.compare_dnac_versions(self.get_ccc_version(), "2.3.7.6") > 0: + # Only apply extra checks for versions > 2.3.7.9 + if self.compare_dnac_versions(self.get_ccc_version(), "2.3.7.9") >= 0: empty_settings = [ network_aaa, client_and_endpoint_aaa, @@ -5961,7 +5961,7 @@ def update_network(self, network_management): "Network parameters for 'update_network_v2': {0}".format(net_params), "DEBUG", ) - if self.compare_dnac_versions(self.get_ccc_version(), "2.3.7.6") <= 0: + if self.compare_dnac_versions(self.get_ccc_version(), "2.3.7.9") < 0: if "client_and_endpoint_aaa" in net_params["settings"]: net_params["settings"]["clientAndEndpoint_aaa"] = net_params[ "settings" @@ -6695,6 +6695,16 @@ def verify_diff_merged(self, config): self.status = "failed" return self + want_network_aaa = self.want.get("wantNetwork")[network_management_index].get("settings", {}).get("network_aaa", {}) + have_net_details = self.have.get("network")[network_management_index].get("net_details") + have_aaa_primary_ip = have_net_details.get("settings", {}).get("network_aaa", {}).get("primaryServerIp", "") + + # RESET CASE (both empty) + if want_network_aaa == {} and have_aaa_primary_ip not in ("", None): + self.msg = "Network AAA Primary IP update not applied on Cisco Catalyst Center" + self.status = "failed" + return self + self.log( "Successfully validated the network functions '{0}'.".format( item.get("site_name") diff --git a/plugins/modules/pnp_workflow_manager.py b/plugins/modules/pnp_workflow_manager.py index 804fc0a021..0289bdcc20 100644 --- a/plugins/modules/pnp_workflow_manager.py +++ b/plugins/modules/pnp_workflow_manager.py @@ -92,6 +92,19 @@ flag. type: bool required: false + authorize: + description: | + - Set the authorization flag for PnP devices to enable provisioning after claiming. + - When set to true, devices in "Pending Authorization" state will be automatically authorized. + - This flag moves devices from "Pending Authorization" to "Authorized" state, allowing them to proceed with the provisioning workflow. + - Authorization is performed after successful device import (bulk operations) or device addition (single device operations). + - If not specified, devices will remain in their current authorization state and may require manual authorization. + - This parameter only applies to devices that support the authorization workflow in their PnP process. + - Authorization is skipped for devices that are not in "Pending Authorization" state. + - Supported from Cisco Catalyst Center release version 2.3.7.9 onwards. + type: bool + required: false + default: false site_name: description: Name of the site for which the device will be claimed. 
@@ -213,14 +226,17 @@ sites.Sites.get_site, software_image_management_swim.SoftwareImageManagementSwim.get_software_image_details, configuration_templates.ConfigurationTemplates.gets_the_templates_available + - Paths used are post /dna/intent/api/v1/onboarding/pnp-device post /dna/intent/api/v1/onboarding/pnp-device/site-claim post /dna/intent/api/v1/onboarding/pnp-device/{id} get /dna/intent/api/v1/onboarding/pnp-device/count - get /dna/intent/api/v1/onboarding/pnp-device put - /onboarding/pnp-device/${id} get /dna/intent/api/v1/site + get /dna/intent/api/v1/onboarding/pnp-device + put /onboarding/pnp-device/${id} get /dna/intent/api/v1/site get /dna/intent/api/v1/image/importation get /dna/intent/api/v1/template-programmer/template + post /api/v1/onboarding/pnp-device/authorize + """ EXAMPLES = r""" --- @@ -269,6 +285,7 @@ hostname: New_WLC state: Unclaimed pid: C9800-CL-K9 + authorize: true site_name: Global/USA/San Francisco/BGL_18 template_name: Ansible_PNP_WLC template_params: @@ -554,24 +571,37 @@ def get_site_details(self): response = None try: - response = self.get_site(self.want.get("site_name")) - if response: + site_name = self.want.get("site_name") + response = self.get_site(site_name) + self.log("Response from get_site for the site '{0}': {1}".format( + site_name, self.pprint(response)), "DEBUG") + + if not response: + self.msg = "No site details found for site name: '{0}'.".format( + site_name + ) + self.log(self.msg, "CRITICAL") + self.set_operation_result( + "failed", False, self.msg, "ERROR" + ).check_return_status() + return self + + self.log( + "Received site details for '{0}': {1}".format( + site_name, str(response) + ), + "DEBUG", + ) + site = response.get("response") + if len(site) == 1: + site_id = site[0].get("id") + site_exists = True self.log( - "Received site details for '{0}': {1}".format( - self.want.get("site_name"), str(response) + "Site Name: {1}, Site ID: {0}".format( + site_id, self.want.get("site_name") ), - "DEBUG", + "INFO", ) - site = response.get("response") - if len(site) == 1: - site_id = site[0].get("id") - site_exists = True - self.log( - "Site Name: {1}, Site ID: {0}".format( - site_id, self.want.get("site_name") - ), - "INFO", - ) return (site_exists, site_id) except Exception: @@ -666,6 +696,10 @@ def get_pnp_params(self, params): param["serialNumber"] = param.pop("serial_number") if "is_sudi_required" in param: param["isSudiRequired"] = param.pop("is_sudi_required") + + if "authorize" in param: + param["authorize"] = param.pop("authorize") + device_dict["deviceInfo"] = param device_info_list.append(device_dict) @@ -830,6 +864,69 @@ def get_reset_params(self): ) return reset_params + def authorize_device(self, device_id): + """ + Sets the authorization flag for a device on Cisco Catalyst Center. + + Parameters: + device_id (str): The ID of the device to authorize. + + Returns: + dict: The API response if the authorization is successful. + None: If the authorization fails or an unexpected response is received. + + Description: + This function authorizes a PnP device by setting the authorization flag, which moves the device + from "Pending Authorization" state to "Authorized" state. This is required for devices to be + provisioned after being claimed to a site. The function is supported from Cisco Catalyst Center + release version 2.3.7.9 onwards and handles both successful and failed authorization scenarios. 
+ """ + self.log("Initiating device authorization process for device ID: '{0}'".format( + device_id), "DEBUG") + + if not device_id: + self.msg = "No device ID provided for authorization." + self.log(self.msg, "ERROR") + return None + + authorize_payload = { + "deviceIdList": [device_id] + } + try: + authorize_response = self.dnac_apply['exec']( + family="device_onboarding_pnp", + function="authorize_device", + params=authorize_payload, + op_modifies=True + ) + self.log( + "Received API response from 'authorize_device' for device ID '{0}': {1}".format( + device_id, + self.pprint(authorize_response) + ), + "DEBUG", + ) + + if authorize_response and isinstance(authorize_response, dict): + self.log("Device authorization completed successfully for device ID: '{0}'".format( + device_id), "INFO") + return authorize_response + + self.log( + "Received unexpected response format from 'authorize_device' API for device ID '{0}' - expected dict, got: {1}".format( + device_id, type(authorize_response).__name__ + ), + "ERROR" + ) + + except Exception as e: + self.msg = "Exception occurred while executing 'authorize_device' for device ID: '{0}' - {1}".format( + device_id, str(e) + ) + self.log(self.msg, "ERROR") + + return None + def bulk_devices_import(self, add_devices): """ Add Multiple devices to the Cisco Catalyst Center. @@ -911,13 +1008,38 @@ def bulk_devices_import(self, add_devices): "failed", False, self.msg, "ERROR", bulk_params ).check_return_status() - self.result["msg"] = "{0} device(s) imported successfully".format( - len(bulk_params.get("successList")) - ) - self.log(self.result["msg"], "INFO") - self.result["response"] = bulk_params - self.result["diff"] = self.validated_config - self.result["changed"] = True + self.result['msg'] = "{0} device(s) imported successfully".format( + len(bulk_params.get("successList"))) + self.log(self.result['msg'], "INFO") + self.result['response'] = bulk_params + self.result['diff'] = self.validated_config + self.result['changed'] = True + + # Check for authorization support and process if applicable + current_version = self.get_ccc_version() + if self.compare_dnac_versions(current_version, "2.3.7.9") >= 0: + self.log("Cisco Catalyst Center version {0} supports device authorization. Checking for authorization requirements.".format( + current_version), "DEBUG") + + authorize_status, serial_number_list = self.bulk_authorize_devices(add_devices) + + if authorize_status: + auth_count = len(serial_number_list) + auth_msg = " {0} device(s) authorized successfully".format(auth_count) + self.result['msg'] += auth_msg + self.log("Device authorization completed successfully: {0} devices authorized".format( + auth_count), "INFO") + else: + if serial_number_list: + auth_msg = " Unable to authorize the device(s): {0}".format(serial_number_list) + self.log("Device authorization failed for devices: {0}".format( + serial_number_list), "WARNING") + else: + self.log("No devices required authorization or authorization was skipped", "INFO") + else: + self.log("Cisco Catalyst Center version {0} does not support device authorization feature (requires 2.3.7.9+)".format( + current_version), "INFO") + return self except Exception as e: @@ -934,6 +1056,142 @@ def bulk_devices_import(self, add_devices): ).check_return_status() return self + def bulk_authorize_devices(self, processed_devices): + """ + Authorizes multiple devices after bulk import is completed based on authorization flag. 
+ + Parameters: + processed_devices (list): A list of dictionaries containing bulk device information. + + Returns: + tuple: + bool: True if all devices are successfully authorized, False otherwise. + list: A list of serial numbers of the authorized or unauthorized devices. + + Description: + This function processes device authorization for devices that have the 'authorize' flag set to True + in the configuration. It checks each device's state and attempts authorization only for devices + in "Pending Authorization" state. The function is supported from Cisco Catalyst Center release + version 2.3.7.9 onwards and provides comprehensive status reporting for bulk authorization operations. + """ + self.log("Initiating bulk device authorization process for {0} devices".format( + len(processed_devices)), "DEBUG") + + if not processed_devices: + self.log("No devices provided for bulk authorization - skipping process", "INFO") + return True, [] + + authorized_devices = [] + unauthorized_devices = [] + devices_requiring_auth = [] + + # First, identify devices that need authorization based on config + for device in processed_devices: + device_info = device.get("deviceInfo", {}) + serial_number = device_info.get("serialNumber") + + if not serial_number: + self.log("Device missing serial number - skipping authorization check: {0}".format(device), "WARNING") + continue + + self.log("Checking authorization requirements for device: '{0}'".format(serial_number), "DEBUG") + + # Check if this device has authorize flag set in config + authorization_required = False + for each_config in self.config: + input_device_info = each_config.get("device_info", []) + for each_info in input_device_info: + if (each_info.get("serialNumber") == serial_number and + each_info.get("authorize") is True): + authorization_required = True + self.log("Device '{0}' requires authorization based on config".format( + serial_number), "DEBUG") + break + if authorization_required: + break + + if authorization_required: + devices_requiring_auth.append(serial_number) + else: + self.log("Device '{0}' does not require authorization (authorize flag not set)".format(serial_number), "DEBUG") + + if not devices_requiring_auth: + self.log("No devices require authorization based on configuration", "INFO") + return True, [] + + self.log("Found {0} device(s) requiring authorization: {1}".format( + len(devices_requiring_auth), devices_requiring_auth), "INFO") + + # Process authorization for devices that require it + for serial_number in devices_requiring_auth: + self.log("Processing authorization for device: '{0}'".format(serial_number), "DEBUG") + + device_response = self.get_device_list_pnp(serial_number) + if not device_response or not isinstance(device_response, dict): + self.log("Unable to retrieve device details for serial number: '{0}' - skipping authorization".format( + serial_number), "WARNING") + unauthorized_devices.append(serial_number) + continue + + device_info = device_response.get("deviceInfo", {}) + current_state = device_info.get("state") + device_id = device_response.get("id") + + self.log("Device '{0}' current state: '{1}'".format(serial_number, current_state), "DEBUG") + + if current_state != "Pending Authorization": + self.log("Device '{0}' is not in 'Pending Authorization' state (current: '{1}') - skipping authorization".format( + serial_number, current_state), "INFO") + unauthorized_devices.append(serial_number) + continue + + if not device_id: + self.log("Device '{0}' missing device ID - cannot 
authorize".format(serial_number), "ERROR") + unauthorized_devices.append(serial_number) + continue + + # Attempt device authorization + self.log("Attempting to authorize device '{0}' with ID '{1}'".format(serial_number, device_id), "INFO") + authorize_response = self.authorize_device(device_id) + + self.log("Authorization response for device '{0}': {1}".format( + serial_number, self.pprint(authorize_response)), "DEBUG") + + if authorize_response and isinstance(authorize_response, dict): + self.log("Device '{0}' authorized successfully".format(serial_number), "INFO") + authorized_devices.append(serial_number) + else: + error_msg = str(authorize_response) if authorize_response else "No response received" + self.log("Failed to authorize device '{0}': {1}".format(serial_number, error_msg), "ERROR") + unauthorized_devices.append(serial_number) + + # Generate final status summary + total_auth_required = len(devices_requiring_auth) + auth_success_count = len(authorized_devices) + auth_failed_count = len(unauthorized_devices) + + self.log("Bulk authorization completed - Required: {0}, Successful: {1}, Failed: {2}".format( + total_auth_required, auth_success_count, auth_failed_count), "INFO") + + if authorized_devices: + self.log("Successfully authorized devices: {0}".format(authorized_devices), "INFO") + + if unauthorized_devices: + self.log("Failed to authorize devices: {0}".format(unauthorized_devices), "WARNING") + + # Return success status and appropriate device list + if authorized_devices and not unauthorized_devices: + self.log("All devices requiring authorization were successfully authorized", "INFO") + return True, authorized_devices + + if unauthorized_devices: + self.log("Some devices failed authorization or were not eligible", "WARNING") + return False, unauthorized_devices + + # This should not be reached, but included for completeness + self.log("No authorization operations were performed", "INFO") + return True, [] + def compare_config_with_device_info(self, input_config, device_info): """ Compare the input config with the device info. @@ -948,10 +1206,14 @@ def compare_config_with_device_info(self, input_config, device_info): bool: True if all input config values match the device info, False otherwise. int: The number of keys with mismatched values. """ + self.log("Starting comparison between input config and device info.", "INFO") + self.log("Input Config: {0}, Device Info: {1}".format( + self.pprint(input_config), self.pprint(device_info)), "INFO") unmatch_count = 0 for key, value in input_config.items(): device_value = device_info.get(key) - if value != device_value: + + if value != device_value and key == "hostname": self.log( "Mismatch found for key '{0}': expected '{1}', got '{2}'".format( key, value, device_value @@ -1019,6 +1281,10 @@ def update_device_info(self, input_config, device_info, device_id): ) if update_response and isinstance(update_response, dict): + self.msg = "Successfully updated device configuration for device ID {0}. ".format( + device_id + ) + self.log(self.msg, "INFO") return update_response self.log( @@ -1372,6 +1638,7 @@ class instance for further use. # Check the device already added and claimed for idempotent or import devices if self.want.get("pnp_params"): devices_exists, devices_not_exist, reset_devices = [], [], [] + device_updated_list = [] site = self.want.get("site_name") template_name = self.want.get("template_name") image_name = self.want.get("image_params", {}).get("image_name") @@ -1387,6 +1654,8 @@ class instance for further use. 
for each_device in pnp_devices: serial_number = each_device.get("deviceInfo", {}).get("serialNumber") + authorize_flag = each_device.get("deviceInfo", {}).get("authorize") + if not serial_number: self.log( "Skipping device entry due to missing serial number: {0}".format( @@ -1406,9 +1675,9 @@ class instance for further use. if device_response and isinstance(device_response, dict): device_info = device_response.get("deviceInfo", {}) - existing_device_info = each_device.get("deviceInfo") + input_device_info = each_device.get("deviceInfo") match_stat, un_match = self.compare_config_with_device_info( - device_info, existing_device_info + input_device_info, device_info ) claim_stat = device_info.get("state") @@ -1424,11 +1693,41 @@ class instance for further use. "Updating device info for serial: '{0}' as it's not provisioned or config doesn't match.".format( serial_number ), - "DEBUG", + "DEBUG" ) - self.update_device_info( - existing_device_info, device_info, device_response.get("id") + device_update_response = self.update_device_info( + input_device_info, device_info, device_response.get("id") ) + if device_update_response: + device_updated_list.append(serial_number) + self.log( + "Device '{0}' updated successfully.".format(serial_number), + "INFO", + ) + + current_version = self.get_ccc_version() + if authorize_flag and self.compare_dnac_versions(current_version, "2.3.7.9") >= 0 \ + and claim_stat == "Pending Authorization": + self.log("Initiating device authorization process for device '{0}' - Version: {1}, State: {2}".format( + serial_number, current_version, claim_stat), "INFO") + + device_id = device_response.get("id") + if not device_id: + self.log("Device ID not found for device '{0}' - cannot proceed with authorization".format( + serial_number), "ERROR") + else: + authorize_response = self.authorize_device(device_id) + self.log("Authorization API response for device '{0}': {1}".format( + serial_number, self.pprint(authorize_response)), "DEBUG") + + if authorize_response and isinstance(authorize_response, dict): + self.log("Device '{0}' authorized successfully and moved from 'Pending Authorization' state".format( + serial_number), "INFO") + else: + error_msg = str(authorize_response) if authorize_response else "No response received" + self.log("Failed to authorize device '{0}': {1}".format( + serial_number, error_msg), "ERROR") + else: self.log( "Device '{0}' already provisioned with matching config. No update needed.".format( @@ -1500,6 +1799,11 @@ class instance for further use. ) changed = True self.log(self.msg, "INFO") + + if device_updated_list: + changed = True + self.msg += " and Device information updated successfully." + self.set_operation_result( "success", changed, self.msg, "INFO", devices_exists ).check_return_status() @@ -1544,6 +1848,48 @@ class instance for further use. 
if self.have["deviceInfo"]: self.result["msg"] = "Only Device Added Successfully" + self.log("Device successfully added to PnP database", "INFO") + + # Check if device requires authorization based on state and version compatibility + device_state = self.have["deviceInfo"].get("state") + current_version = self.get_ccc_version() + device_id = dev_add_response.get("id") + serial_number = self.want.get("serial_number") + + self.log("Device '{0}' current state: '{1}', Catalyst Center version: '{2}'".format( + serial_number, device_state, current_version), "DEBUG") + + # Check authorization requirements + if (device_state == "Pending Authorization" and + self.compare_dnac_versions(current_version, "2.3.7.9") >= 0): + + self.log("Device '{0}' is in 'Pending Authorization' state and version supports authorization - proceeding with authorization".format( + serial_number), "INFO") + + if not device_id: + self.log("Device ID not found for device '{0}' - cannot proceed with authorization".format( + serial_number), "ERROR") + self.result["msg"] += ". Unable to authorize Device '{0}' - missing device ID.".format( + serial_number) + else: + self.log("Initiating authorization process for device '{0}' with ID '{1}'".format( + serial_number, device_id), "DEBUG") + + authorize_response = self.authorize_device(device_id) + self.log("Authorization API response for device '{0}': {1}".format( + serial_number, self.pprint(authorize_response)), "DEBUG") + + if authorize_response and isinstance(authorize_response, dict): + self.log("Device '{0}' authorization completed successfully".format( + serial_number), "INFO") + self.result["msg"] += ". Device '{0}' authorized successfully.".format( + serial_number) + else: + error_msg = str(authorize_response) if authorize_response else "No response received" + self.log("Failed to authorize device '{0}': {1}".format(serial_number, error_msg), "ERROR") + self.result["msg"] += ". Unable to authorize Device '{0}' - {1}.".format( + serial_number, error_msg) + self.log(self.result["msg"], "INFO") self.result["response"] = dev_add_response self.result["diff"] = self.validated_config @@ -1571,6 +1917,45 @@ class instance for further use. claim_params = self.get_claim_params() claim_params["deviceId"] = dev_add_response.get("id") + # Check if device requires authorization based on state and version compatibility + device_state = self.have["deviceInfo"].get("state") + current_version = self.get_ccc_version() + device_id = dev_add_response.get("id") + serial_number = self.want.get("serial_number") + + self.log("Device addition completed - checking authorization requirements for device '{0}'".format( + serial_number), "DEBUG") + self.log("Device '{0}' current state: '{1}', Catalyst Center version: '{2}'".format( + serial_number, device_state, current_version), "DEBUG") + + # Process device authorization if conditions are met + if (device_state == "Pending Authorization" and + self.compare_dnac_versions(current_version, "2.3.7.9") >= 0): + + self.log("Device '{0}' is in 'Pending Authorization' state and version supports authorization - initiating authorization process".format( + serial_number), "INFO") + + if not device_id: + self.log("Device ID not found for device '{0}' - cannot proceed with authorization".format( + serial_number), "ERROR") + self.result["msg"] += ". 
Unable to authorize Device '{0}' - missing device ID.".format(serial_number) + else: + self.log("Attempting device authorization for device '{0}' with ID '{1}'".format( + serial_number, device_id), "DEBUG") + + authorize_response = self.authorize_device(device_id) + self.log("Authorization API response for device '{0}': {1}".format( + serial_number, self.pprint(authorize_response)), "DEBUG") + + if authorize_response and isinstance(authorize_response, dict): + self.log("Device '{0}' authorization completed successfully and moved from 'Pending Authorization' state".format( + serial_number), "INFO") + self.result["msg"] += ". Device '{0}' authorized successfully.".format(serial_number) + else: + error_msg = str(authorize_response) if authorize_response else "No response received" + self.log("Failed to authorize device '{0}': {1}".format(serial_number, error_msg), "ERROR") + self.result["msg"] += ". Unable to authorize Device '{0}' - {1}.".format(serial_number, error_msg) + claim_response = self.claim_device_site(claim_params) self.log( "Response from API 'claim a device to a site' for a single claiming: {0}".format( @@ -1622,27 +2007,57 @@ class instance for further use. "DEBUG", ) - pnp_state = dev_details_response.get("deviceInfo").get("state") + pnp_state = dev_details_response.get("deviceInfo", {}).get("state") self.log("PnP state of the device: {0}".format(pnp_state), "INFO") + device_info = self.want.get("pnp_params")[0].get("deviceInfo") + match_stat, un_match = self.compare_config_with_device_info( + device_info, dev_details_response + ) + + update_response = {} + if not match_stat: + self.log( + "Updating device info for serial: '{0}' as config doesn't match.".format( + self.want.get("serial_number") + ), + "DEBUG" + ) + update_response = self.update_device_info( + device_info, + dev_details_response, + self.have["device_id"], + ) + if update_response: + self.log( + "Device '{0}' updated successfully.".format( + self.want.get("serial_number") + ), + "INFO", + ) + if not self.want["site_name"]: self.result["response"] = self.have.get("device_found") self.result["msg"] = "Device is already added" self.log(self.result["msg"], "WARNING") + if update_response.get("deviceInfo"): + self.result["changed"] = True + self.result["msg"] += " and Device '{0}' updated successfully.".format( + serial_number + ) return self - update_response = self.update_device_info( - self.want.get("pnp_params")[0].get("deviceInfo"), - dev_details_response, - self.have["device_id"], - ) - if pnp_state == "Error": reset_response = self.reset_error_device(self.have["device_id"]) if reset_response: self.msg = "Device reset done Successfully" self.log(self.msg, "INFO") self.result["diff"] = self.validated_config + + if update_response.get("deviceInfo"): + self.result["msg"] += " and Device '{0}' updated successfully.".format( + serial_number) + self.set_operation_result( "success", True, self.msg, "INFO", reset_response ).check_return_status() @@ -1658,6 +2073,9 @@ class instance for further use. self.log(self.result["msg"], "WARNING") if update_response.get("deviceInfo"): self.result["changed"] = True + self.result["msg"] += " and Device '{0}' updated successfully.".format( + serial_number + ) return self claim_params = self.get_claim_params() @@ -1679,6 +2097,10 @@ class instance for further use. 
self.result["response"] = claim_response self.result["diff"] = self.validated_config self.result["changed"] = True + if update_response.get("deviceInfo"): + self.result["msg"] += " and Device '{0}' updated successfully.".format( + serial_number + ) return self diff --git a/plugins/modules/provision_workflow_manager.py b/plugins/modules/provision_workflow_manager.py index d9998a82bd..4708bef2c0 100644 --- a/plugins/modules/provision_workflow_manager.py +++ b/plugins/modules/provision_workflow_manager.py @@ -49,7 +49,7 @@ type: str required: true provisioning: - description: + description: | - Specifies whether the user intends to perform site assignment only or full provisioning for a wired device. @@ -62,7 +62,7 @@ required: false default: true force_provisioning: - description: + description: | - Determines whether to force reprovisioning of a device. - A device cannot be re-provisioned to a different @@ -85,7 +85,7 @@ type: str required: true managed_ap_locations: - description: + description: | - Specifies the site locations allocated for Access Points (APs). - Renamed to 'primary_managed_ap_locations' @@ -102,7 +102,7 @@ type: list elements: str primary_managed_ap_locations: - description: + description: | - Specifies the site locations assigned to primary managed Access Points (APs). - Introduced as the updated name for 'managed_ap_locations' @@ -117,7 +117,7 @@ type: list elements: str secondary_managed_ap_locations: - description: + description: | - Specifies the site locations assigned to secondary managed Access Points (APs). - Introduced in Cisco Catalyst version 2.3.7.6 @@ -129,7 +129,7 @@ type: list elements: str dynamic_interfaces: - description: + description: | - A list of dynamic interfaces on the wireless controller. - Each entry represents an interface with @@ -161,7 +161,7 @@ Aggregation Group) identifier. type: str skip_ap_provision: - description: + description: | - If set to 'true', Access Point (AP) provisioning will be skipped during the workflow. - Use this option when AP provisioning is @@ -171,7 +171,7 @@ type: bool default: false rolling_ap_upgrade: - description: + description: | - Configuration options for performing a rolling upgrade of Access Points (APs) in phases. - Allows control over the gradual rebooting @@ -181,7 +181,7 @@ type: dict suboptions: enable_rolling_ap_upgrade: - description: + description: | - Enable or disable the rolling AP upgrade feature. - If set to 'true', APs will be upgraded @@ -192,7 +192,7 @@ type: bool default: false ap_reboot_percentage: - description: + description: | - The percentage of APs to reboot simultaneously during an upgrade. - Supported in Cisco Catalyst version @@ -200,6 +200,87 @@ - Must be either 5, 15 or 25 representing the proportion of APs to reboot at once. type: int + ap_authorization_list_name: + description: | + - The name of the Access Point (AP) authorization list to be used during WLC provisioning. + - This authorization list defines the security policies and access control rules that govern which APs can join the wireless network. + - The authorization list must exist in Cisco Catalyst Center before provisioning + and should contain the MAC addresses or certificate-based authentication rules + for APs. + - Used in conjunction with 'authorize_mesh_and_non_mesh_aps' for comprehensive AP management during wireless controller provisioning. + - If not specified, the default authorization behavior of the WLC will be applied. 
+ type: str + required: false + authorize_mesh_and_non_mesh_aps: + description: | + - A flag that indicates whether to authorize both mesh and non-mesh Access Points (APs) during the WLC provisioning process. + - When set to true, all AP types (mesh and non-mesh) will be automatically authorized to join the wireless network. + - When set to false, only specifically configured APs matching the authorization criteria will be authorized. + - Mesh APs create wireless backhaul connections to extend network coverage, while non-mesh APs connect directly to the wired infrastructure. + - This setting works in conjunction with 'ap_authorization_list_name' for complete AP authorization workflow. + - Supported from Cisco Catalyst Center release version 2.3.7.6 onwards. + type: bool + feature_template: + description: | + - A dictionary containing feature template configuration for advanced wireless device provisioning. + - Feature templates provide standardized, reusable configuration patterns that ensure consistent deployment across multiple wireless controllers. + - Templates enable centralized configuration management, reduce manual errors, and enforce organizational policies. + - The specified template must exist in Cisco Catalyst Center before it can be applied during provisioning. + - Feature templates can include WLAN configurations, security policies, QoS settings, and other wireless controller parameters. + - Supported from Cisco Catalyst Center release version 3.1.3.0 onwards for wireless controller provisioning. + type: dict + required: false + suboptions: + design_name: + description: | + - The name of the feature template design to be applied during wireless controller provisioning. + - This template name must match exactly with the template name defined in Cisco Catalyst Center. + - The template defines standardized configuration parameters, policies, and settings to be applied to the wireless controller. + - Template names are case-sensitive and should follow organizational naming conventions. + type: str + required: true + additional_identifiers: + description: | + - A list of additional context-specific identifiers that provide customization parameters for the feature template. + - These identifiers enable site-specific and WLAN-specific customization of the template during deployment. + - Each identifier contains key-value pairs that help adapt the template for specific deployment scenarios and locations. + - Multiple identifiers can be specified to support complex deployment requirements with different WLAN profiles and site contexts. + type: list + elements: dict + required: false + suboptions: + wlan_profile_name: + description: | + - The WLAN profile name to be associated with the feature template during wireless controller provisioning. + - This profile defines wireless network parameters including SSID, security settings, VLAN assignments, and QoS policies. + - The WLAN profile must exist in Cisco Catalyst Center and be properly configured before template application. + - Multiple WLAN profiles can be referenced by specifying multiple additional identifier entries. + type: str + required: false + site_name_hierarchy: + description: | + - The site name hierarchy where the feature template should be applied during wireless controller provisioning. + - Defines the specific site context for template deployment within the organizational hierarchy. + - Must follow the format 'Global/Area/Building/Floor' as configured in Cisco Catalyst Center site topology. 
+ - The site hierarchy must exist in Cisco Catalyst Center before template application. + - Used to apply site-specific configurations and policies defined in the feature template. + type: str + required: false + excluded_attributes: + description: | + - A list of specific template attributes to be excluded from the feature template application during wireless controller provisioning. + - Use this to selectively apply only certain parts of a template while excluding others that may not be applicable to the specific deployment. + - Attribute names must match the exact attribute names defined in the feature template configuration. + - This provides fine-grained control over which template configurations are applied, allowing for customized deployments. + - Useful for scenarios where most of the template is applicable but specific settings need to be omitted or handled separately. + type: list + elements: str + required: false + choices: ['["guest_ssid_settings", "bandwidth_limits"]', + '["dhcp_pool_configuration"]', + '["radius_server_config", "certificate_settings"]', + '["qos_policies", "traffic_shaping"]', + '["mesh_configuration", "ap_group_settings"]'] application_telemetry: description: | - A list of settings for enabling or disabling application telemetry on a group of network devices. @@ -309,6 +390,9 @@ rolling_ap_upgrade: enable_rolling_ap_upgrade: false ap_reboot_percentage: 5 + ap_authorization_list_name: "AP-Auth-List" + authorize_mesh_and_non_mesh_aps: true + - name: Provision a wired device to a site cisco.dnac.provision_workflow_manager: dnac_host: "{{dnac_host}}" @@ -457,6 +541,33 @@ - application_telemetry: - device_ips: ["204.1.1.2", "204.192.6.200"] telemetry: disable + +- name: Provision a wireless device to a site with feature template + cisco.dnac.provision_workflow_manager: + dnac_host: "{{ dnac_host }}" + dnac_username: "{{ dnac_username }}" + dnac_password: "{{ dnac_password }}" + dnac_verify: "{{ dnac_verify }}" + dnac_port: "{{ dnac_port }}" + dnac_version: "{{ dnac_version }}" + dnac_debug: "{{ dnac_debug }}" + dnac_log: true + dnac_log_level: DEBUG + config_verify: false + dnac_api_task_timeout: 1000 + dnac_task_poll_interval: 1 + state: merged + config: + - site_name_hierarchy: Global/USA/SAN JOSE/BLD23 + management_ip_address: 204.192.4.2 + primary_managed_ap_locations: + - Global/USA/SAN JOSE/BLD23/FLOOR1_LEVEL2 + feature_template: + - design_name: newtest + additional_identifiers: + wlan_profile_name: ARUBA_SSID_profile + site_name_hierarchy: Global/USA/SAN JOSE/BLD23 + excluded_attributes: ["guest_ssid_settings", "bandwidth_limits"] """ RETURN = r""" # Case_1: Successful creation/updation/deletion of provision @@ -520,6 +631,7 @@ def __init__(self, module): self.re_provision_wireless_device = [] self.enable_application_telemetry = [] self.disable_application_telemetry = [] + self.assigned_device_to_site = [] def validate_input(self, state=None): """ @@ -571,6 +683,8 @@ def validate_input(self, state=None): }, "skip_ap_provision": {"type": "bool", "required": False}, "rolling_ap_upgrade": {"type": "dict", "required": False}, + "ap_authorization_list_name": {"type": "str", "required": False}, + "authorize_mesh_and_non_mesh_aps": {"type": "bool", "required": False, "default": False}, "provisioning": {"type": "bool", "required": False, "default": True}, "force_provisioning": {"type": "bool", "required": False, "default": False}, "clean_config": {"type": "bool", "required": False, "default": False}, @@ -588,6 +702,20 @@ def validate_input(self, state=None): }, 
}, }, + "feature_template": { + "type": "list", + "elements": "dict", + "options": { + "design_name": {"type": "str", "required": True}, + "attributes": {"type": "dict", "required": True}, + "additional_identifiers": {"type": "dict", "required": False}, + "excluded_attributes": { + "type": "list", + "elements": "str", + "required": False, + }, + }, + } } if state == "merged": @@ -1402,6 +1530,10 @@ def get_wireless_params(self): if self.validated_config.get("rolling_ap_upgrade"): rolling_ap_upgrade = self.validated_config["rolling_ap_upgrade"] wireless_params[0]["rolling_ap_upgrade"] = rolling_ap_upgrade + if self.validated_config.get("ap_authorization_list_name"): + wireless_params[0]["ap_authorization_list_name"] = self.validated_config.get("ap_authorization_list_name") + if self.validated_config.get("authorize_mesh_and_non_mesh_aps") is not None: + wireless_params[0]["authorize_mesh_and_non_mesh_aps"] = self.validated_config.get("authorize_mesh_and_non_mesh_aps") response = self.dnac_apply["exec"]( family="devices", @@ -1423,8 +1555,201 @@ def get_wireless_params(self): ), "INFO", ) + + if self.validated_config.get("feature_template"): + self.log("Processing feature template configuration for wireless device provisioning", "DEBUG") + feature_templates = self.validated_config.get("feature_template") + if not isinstance(feature_templates, list): + self.msg = "Feature template configuration must be a list. Received: {0}".format(type(feature_templates).__name__) + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + + if not feature_templates: + self.log("Empty feature template list provided", "WARNING") + return self + + wireless_params[0]["feature_template"] = [] + self.log("Processing feature template(s)", "INFO") + + for template_index, template in enumerate(feature_templates): + self.log("Processing feature template {0}".format(template_index + 1), "DEBUG") + design_name = template.get("design_name") + + if not design_name: + self.msg = "Feature template 'design_name' is required but not provided for template at index {0}".format(template_index) + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + self.log("Processing feature template with design name: '{0}' at index {1}".format(design_name, template_index), "DEBUG") + + attributes = template.get("attributes", []) + cleaned_attributes = [] + + if attributes: + self.log("Processing template attributes for template '{0}'".format(design_name), "DEBUG") + + if isinstance(attributes, dict): + for key, value in attributes.items(): + if value is not None: + cleaned_attributes.append({ + "name": key, + "value": value + }) + self.log("Added template attribute for '{0}': '{1}' = '{2}'".format(design_name, key, value), "DEBUG") + elif isinstance(attributes, list): + self.log("Attributes provided as list for template '{0}', using directly".format(design_name), "DEBUG") + cleaned_attributes = attributes + else: + self.log("Invalid 'attributes' format for template '{0}'. 
Expected dict or list, got: {1}".format( + design_name, type(attributes).__name__), "WARNING") + else: + self.log("No attributes provided for feature template '{0}'".format(design_name), "DEBUG") + + excluded_attributes = template.get("excluded_attributes", []) + if excluded_attributes: + self.log("Processing {0} excluded attributes for template '{1}': {2}".format( + len(excluded_attributes), design_name, excluded_attributes), "DEBUG") + if not isinstance(excluded_attributes, list): + self.log("Invalid 'excluded_attributes' format for template '{0}'. Expected list, got: {1}".format( + design_name, type(excluded_attributes).__name__), "WARNING") + excluded_attributes = [] + else: + self.log("No excluded attributes specified for feature template '{0}'".format(design_name), "DEBUG") + + additional_identifiers = template.get("additional_identifiers", {}) + + if additional_identifiers: + self.log("Processing additional identifiers for template '{0}'".format( + design_name), "DEBUG") + for idx, identifier in enumerate(additional_identifiers): + if isinstance(identifier, dict): + wlan_profile = identifier.get("wlan_profile_name") + site_hierarchy = identifier.get("site_name_hierarchy") + if wlan_profile: + self.log("Template '{0}' - Additional identifier {1}: WLAN profile = '{2}'".format( + design_name, idx + 1, wlan_profile), "DEBUG") + if site_hierarchy: + self.log("Template '{0}' - Additional identifier {1}: Site hierarchy = '{2}'".format( + design_name, idx + 1, site_hierarchy), "DEBUG") + else: + self.log("Invalid additional identifier format for template '{0}' at index {1}. Expected dict, got: {2}".format( + design_name, idx, type(identifier).__name__), "WARNING") + + else: + self.log("No additional identifiers provided for feature template '{0}'".format(design_name), "DEBUG") + + if excluded_attributes: + self.log("Processing excluded attributes for template '{0}': {1}".format( + design_name, excluded_attributes), "DEBUG") + if not isinstance(excluded_attributes, list): + self.log("Invalid 'excluded_attributes' format for template '{0}'. Expected list, got: {1}".format( + design_name, type(excluded_attributes).__name__), "WARNING") + excluded_attributes = [] + else: + self.log("No excluded attributes specified for feature template '{0}'".format(design_name), "DEBUG") + + ft_entry = { + "design_name": design_name, + } + if cleaned_attributes: + ft_entry["attributes"] = cleaned_attributes + self.log("Added cleaned attributes to feature template '{0}' entry".format( + design_name), "DEBUG") + + if additional_identifiers: + ft_entry["additional_identifiers"] = additional_identifiers + self.log("Added additional identifiers to feature template '{0}' entry".format(design_name), "DEBUG") + + if excluded_attributes: + ft_entry["excluded_attributes"] = excluded_attributes + self.log("Added excluded attributes to feature template '{0}' entry".format( + design_name), "DEBUG") + + wireless_params[0]["feature_template"].append(ft_entry) + self.log("Successfully configured feature template '{0}' for wireless device provisioning".format(design_name), "INFO") + + self.log( + "Parameters collected for the provisioning of wireless device: {0}".format(wireless_params), + "INFO", + ) return wireless_params + def resolve_template_id(self, design_name): + """ + Retrieves the feature template ID for a given design name. + + Args: + design_name (str): Name of the feature template design to match. 
+ + Description: + This function queries Cisco Catalyst Center to resolve a feature template design name + to its corresponding template ID. It searches through template groups and instances, + filtering out system templates to find user-defined templates. + + Returns: + str or None: The featureTemplateId if found, else None. + """ + self.log("Initiating feature template ID resolution for design name: '{0}'".format(design_name), "DEBUG") + + if not design_name: + self.log("Design name is empty or None - cannot resolve template ID", "ERROR") + return None + + if not isinstance(design_name, str): + self.log("Design name must be a string, received: {0}".format(type(design_name).__name__), "ERROR") + return None + + self.log("Querying Cisco Catalyst Center for feature template with design name: '{0}'".format(design_name), "INFO") + + try: + ft_response = self.dnac_apply["exec"]( + family="wireless", + function="get_feature_template_summary", + params={'designName': design_name} + ) + + self.log("Received feature template API response from 'get_feature_template_summary': {0}".format(str(ft_response)), "DEBUG") + + template_groups = ft_response.get("response", []) + if not template_groups: + self.log("No template groups found in API response", "WARNING") + return None + + self.log("Processing {0} template group(s) for design name: '{1}'".format(len(template_groups), design_name), "DEBUG") + + for group_index, template_group in enumerate(template_groups): + self.log("Processing template group {0} of {1}".format(group_index + 1, len(template_groups)), "DEBUG") + + instances = template_group.get("instances", []) + if not instances: + self.log("No instances found in template group {0}".format(group_index + 1), "DEBUG") + continue + + self.log("Found {0} template instance(s) in group {1}".format(len(instances), group_index + 1), "DEBUG") + + for instance_index, instance in enumerate(instances): + instance_design_name = instance.get("designName") + instance_id = instance.get("id") + is_system_template = instance.get("systemTemplate", False) + + self.log("Evaluating template instance {0}: design_name='{1}', id='{2}', system_template={3}".format( + instance_index + 1, instance_design_name, instance_id, is_system_template), "DEBUG") + + if instance_design_name == design_name and not is_system_template: + self.log("Successfully resolved feature template ID: '{0}' for design name: '{1}'".format(instance_id, design_name), "INFO") + return instance_id + + if instance_design_name == design_name and is_system_template: + self.log("Found matching design name '{0}' but it's a system template - skipping".format(design_name), "DEBUG") + + if instance_design_name != design_name: + self.log("Design name mismatch: expected '{0}', found '{1}' - skipping".format(design_name, instance_design_name), "DEBUG") + + self.log("Feature template with design name '{0}' not found after searching all template groups and instances".format(design_name), "WARNING") + return None + + except Exception as e: + msg = "Exception occurred while resolving feature template ID for design name '{0}': {1}".format(design_name, str(e)) + self.log(msg, "ERROR") + return None + def get_want(self, config): """ Get all provision related informantion from the playbook @@ -1469,7 +1794,6 @@ def get_want(self, config): "DEBUG", ) self.want["application_telemetry"] = application_telemetry - return self else: self.msg = "Application telemetry is available only in version {0} or higher. 
Current version: {1}".format( @@ -1721,7 +2045,7 @@ def get_diff_merged(self): self.device_ip ) ) - self.set_operation_result("success", False, self.msg, "INFO") + self.already_provisioned_wireless_device.append(self.device_ip) return self self.log("Starting wireless device provisioning...", "INFO") @@ -1780,6 +2104,12 @@ def application_telemetry(self, telemetry_config): application_telemetry_details = telemetry_config.get("application_telemetry", []) + if not application_telemetry_details: + self.msg = "No application telemetry configuration entries found in telemetry config." + self.log(self.msg, "WARNING") + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + return self + enable_payload = [] disable_ids = [] @@ -1792,6 +2122,12 @@ def application_telemetry(self, telemetry_config): self.log("Received telemetry configuration: {0}".format(telemetry_config), "DEBUG") application_telemetry_details = telemetry_config.get("application_telemetry", []) + if not application_telemetry_details: + self.msg = "No application telemetry configuration entries found in telemetry config." + self.log(self.msg, "WARNING") + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + return self + self.log("Processing {0} telemetry configuration entries".format(len(application_telemetry_details)), "INFO") for detail in application_telemetry_details: @@ -1836,11 +2172,12 @@ def application_telemetry(self, telemetry_config): ] if (device_type and device_type in unsupported_devices) or \ - (device_family and device_family.lower() not in ["routers", "wireless lan controllers", "switches and hubs"]): + (device_family and device_family.lower() not in ["routers", "wireless lan controllers", "switches and hubs", "wireless controller"]): self.msg = ("No telemetry-applicable interfaces/WLANs found. 
" "device : {0} Telemetry not supported for device type: {1}, family: {2}".format(ip, device_type, device_family)) self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() return self + device_type = self.get_dev_type() device_id = self.get_device_id_for_app_telemetry() @@ -2064,6 +2401,7 @@ def provision_bulk_wired_device(self): self.provisioned_device, self.already_provisioned_devices, ) = ([], [], []) + success_msg, provision_needed, reprovision_needed = [], [], [] self.log("Starting bulk wired device provisioning process.", "INFO") @@ -2127,12 +2465,13 @@ def provision_bulk_wired_device(self): device_ip, site_name ) ) + self.assigned_device_to_site.append(device_ip) continue if status == "success": if not to_force_provisioning: - self.already_provisioned_devices.append(device_ip) + self.already_provisioned_wired_device.append(device_ip) success_msg.append( "Wired Device '{0}' is already provisioned.".format(device_ip) ) @@ -3048,6 +3387,7 @@ def provision_wireless_device(self): "INFO", ) prov_params = self.want.get("prov_params")[0] + self.log("Provisioning parameters: {0}".format(prov_params), "DEBUG") payload = {"device_id": prov_params.get("device_id"), "interfaces": []} self.log("Processing interfaces if they exist", "INFO") @@ -3083,26 +3423,98 @@ def provision_wireless_device(self): self.log("'skip_ap_provision' is not specified", "DEBUG") self.log("Processing rolling AP upgrade settings", "INFO") + allowed_ap_reboot_percentages = {5, 10, 25} + if "rolling_ap_upgrade" in prov_params: - self.log( - "Found 'rolling_ap_upgrade' in provisioning parameters", "DEBUG" - ) - rolling_ap_upgrade = {} - for k, v in prov_params["rolling_ap_upgrade"].items(): - if v is not None: - rolling_ap_upgrade[k] = v + self.log("Found 'rolling_ap_upgrade' in provisioning parameters", "DEBUG") + + rolling_upgrade_config = {} + rolling_upgrade_data = prov_params["rolling_ap_upgrade"] + + if "ap_reboot_percentage" in rolling_upgrade_data: + reboot_percentage_value = rolling_upgrade_data["ap_reboot_percentage"] + + if reboot_percentage_value is None or not str(reboot_percentage_value).isdigit(): + self.msg = ( + "Error: Invalid percentage value '{0}'. Must be an integer. " + "Supported values are 5, 10, and 25.".format(reboot_percentage_value) + ) + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + + reboot_percentage_value = int(reboot_percentage_value) + if reboot_percentage_value not in allowed_ap_reboot_percentages: + self.msg = ( + "Error: Invalid percentage value '{0}'. 
" + "Supported values are 5, 10, and 25.".format(reboot_percentage_value) + ) + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + + rolling_upgrade_config["ap_reboot_percentage"] = reboot_percentage_value + self.log( + "Processed 'ap_reboot_percentage': {0}".format(reboot_percentage_value), + "DEBUG", + ) + + # Process remaining keys in 'rolling_ap¿_upgrade' + for key, value in rolling_upgrade_data.items(): + if key == "ap_reboot_percentage": + self.log("Skipping already processed key 'ap_reboot_percentage'", "DEBUG") + continue + + if value is not None: + rolling_upgrade_config[key] = value self.log( - "Processed 'rolling_ap_upgrade': {0}".format( - rolling_ap_upgrade - ), + "Processed 'rolling_ap_upgrade' key '{0}': {1}".format(key, value), "DEBUG", ) else: self.log( - "No 'rolling_ap_upgrade' found in provisioning parameters", + "No '{0}' found in rolling_ap_upgrade, skipping".format(key), "DEBUG", ) - payload["rollingApUpgrade"] = rolling_ap_upgrade + + payload["rollingApUpgrade"] = rolling_upgrade_config + + # Process AP authorization list configuration if provided + if "ap_authorization_list_name" in prov_params: + ap_auth_list = prov_params.get("ap_authorization_list_name") + self.log("Adding AP authorization list name to payload: '{0}'".format(ap_auth_list), "DEBUG") + payload["apAuthorizationListName"] = ap_auth_list + else: + self.log("No AP authorization list name provided in provisioning parameters", "DEBUG") + + # Process mesh and non-mesh AP authorization configuration if provided + if "authorize_mesh_and_non_mesh_aps" in prov_params: + authorize_aps = prov_params.get("authorize_mesh_and_non_mesh_aps") + self.log("Adding mesh and non-mesh AP authorization flag to payload: '{0}'".format(authorize_aps), "DEBUG") + payload["authorizeMeshAndNonMeshAPs"] = authorize_aps + else: + self.log("No mesh and non-mesh AP authorization flag provided in provisioning parameters", "DEBUG") + + current_version = self.get_ccc_version() + if self.compare_dnac_versions(current_version, "3.1.3.0") >= 0: + self.log("Cisco Catalyst Center version '{0}' supports feature template functionality (>= 3.1.3.0)".format(current_version), "INFO") + self.log(prov_params) + if "feature_template" in prov_params: + self.log("Processing feature template configuration from provisioning parameters", "INFO") + + feature_templates = prov_params.get("feature_template", []) + self.log(feature_templates) + if not feature_templates: + self.log("Empty feature template list found in provisioning parameters", "WARNING") + else: + self.log("Found {0} feature template(s) to process".format(len(feature_templates)), "DEBUG") + payload = self.process_feature_template_configuration(feature_templates, payload) + + else: + self.log("No feature template configuration found in provisioning parameters", "DEBUG") + + import json + + self.log( + "Final constructed payload:\n{0}".format(json.dumps(payload, indent=2)), + "INFO", + ) try: response = self.dnac_apply["exec"]( @@ -3160,6 +3572,224 @@ def provision_wireless_device(self): self.result["response"] = self.msg self.check_return_status() + def process_feature_template_configuration(self, feature_templates, payload): + """ + Processes feature template configuration for wireless device provisioning payload construction. + Args: + self (object): An instance of a class used for interacting with Cisco Catalyst Center. + feature_templates (list): List of feature template configurations to process. 
+ payload (dict): The wireless provisioning payload to be updated with feature template data. + Returns: + dict: Updated payload containing feature template configuration. + Description: + This function validates and processes feature template configurations for wireless device + provisioning. It performs comprehensive validation of required fields including design name, + additional identifiers (WLAN profile and site hierarchy), and excluded attributes. The function + resolves template and site identifiers using Cisco Catalyst Center APIs, constructs the + appropriate payload structure for the provisioning API, and ensures all mandatory fields + are present and properly formatted before adding the template configuration to the payload. + """ + self.log("Processing feature template configuration with {0} templates".format( + len(feature_templates) if feature_templates else 0), "DEBUG") + self.log("Input feature_templates: {0}".format(self.pprint(feature_templates)), "DEBUG") + self.log("Input payload structure: {0}".format(self.pprint(payload)), "DEBUG") + + if not feature_templates: + self.log("No feature templates provided; returning original payload unchanged", "DEBUG") + return payload + + self.initialize_feature_template_payload_structure(payload) + + processing_stats = {"processed": 0, "skipped": 0, "errors": 0} + + for template_index, feature_template in enumerate(feature_templates): + self.log("Processing feature template #{0}: {1}".format( + template_index + 1, self.pprint(feature_template)), "DEBUG") + + if not isinstance(feature_template, dict): + message = "Feature template entry #{0} must be a dictionary. Skipping invalid entry.".format( + template_index + 1) + self.log(message, "WARNING") + processing_stats["skipped"] += 1 + continue + + try: + template_entry = self.process_individual_feature_template( + template_index, feature_template) + + if template_entry: + payload["featureTemplatesOverridenAttributes"]["editFeatureTemplates"].append( + template_entry) + processing_stats["processed"] += 1 + self.log("Successfully added feature template entry for templateId '{0}'".format( + template_entry.get("featureTemplateId")), "INFO") + else: + processing_stats["skipped"] += 1 + + except Exception as exception: + processing_stats["errors"] += 1 + error_message = "Failed to process feature template #{0}: {1}".format( + template_index + 1, str(exception)) + self.log(error_message, "ERROR") + + if hasattr(self, "set_operation_result"): + self.set_operation_result("failed", False, error_message, "ERROR").check_return_status() + return payload + + self.log("Feature template processing completed - Processed: {0}, Skipped: {1}, Errors: {2}".format( + processing_stats["processed"], processing_stats["skipped"], processing_stats["errors"]), "INFO") + + self.log("Final payload with feature templates: {0}".format( + self.pprint(payload["featureTemplatesOverridenAttributes"])), "DEBUG") + + return payload + + def initialize_feature_template_payload_structure(self, payload): + """ + Initializes the feature template payload structure if not already present. + Args: + payload (dict): The wireless provisioning payload to be updated. + Returns: + None: The function modifies the payload in place. 
+ """ + self.log("Initializing feature template payload structure", "DEBUG") + + if "featureTemplatesOverridenAttributes" not in payload: + payload["featureTemplatesOverridenAttributes"] = {"editFeatureTemplates": []} + self.log("Created new featureTemplatesOverridenAttributes structure", "DEBUG") + return + + feature_template_attributes = payload["featureTemplatesOverridenAttributes"] + if ( + "editFeatureTemplates" not in feature_template_attributes + or not isinstance(feature_template_attributes["editFeatureTemplates"], list) + ): + feature_template_attributes["editFeatureTemplates"] = [] + self.log("Initialized editFeatureTemplates as empty list", "DEBUG") + + return + + def process_individual_feature_template(self, template_index, feature_template): + """ + Processes a single feature template entry and returns the formatted template entry. + Args: + template_index (int): Index of the template being processed + feature_template (dict): Individual feature template configuration + Returns: + dict or None: Formatted template entry for API payload, or None if skipped + """ + self.log("Processing individual feature template at index {0}".format(template_index), "DEBUG") + + normalized_params = self.normalize_feature_template_input(feature_template) + feature_template_id = normalized_params["feature_template_id"] + design_name = normalized_params["design_name"] + + if not feature_template_id and not design_name: + message = "Feature template #{0} missing both 'featureTemplateId' and 'design_name'. Skipping entry.".format( + template_index + 1) + self.log(message, "WARNING") + return None + + # Resolve template ID if only design name provided + if not feature_template_id and design_name: + self.log("Resolving feature template ID for design name '{0}'".format(design_name), "DEBUG") + try: + feature_template_id = self.resolve_template_id(design_name) + if not feature_template_id: + message = "Failed to resolve template ID for design '{0}'. Skipping entry.".format(design_name) + self.log(message, "WARNING") + return None + + self.log("Resolved template ID '{0}' for design '{1}'".format( + feature_template_id, design_name), "DEBUG") + + except Exception as exception: + error_message = "Exception resolving template ID for design '{0}': {1}".format( + design_name, str(exception)) + self.log(error_message, "ERROR") + raise + + template_entry = { + "featureTemplateId": feature_template_id, + "attributes": normalized_params["attributes"] if normalized_params["attributes"] else {} + } + + # Only include additionalIdentifiers if user actually provided something + if normalized_params["additional_identifiers"]: + template_entry["additionalIdentifiers"] = normalized_params["additional_identifiers"] + + # Include excludedAttributes if provided + if normalized_params["excluded_attributes"]: + template_entry["excludedAttributes"] = normalized_params["excluded_attributes"] + + self.log("Built template entry: {0}".format(self.pprint(template_entry)), "DEBUG") + return template_entry + + def normalize_feature_template_input(self, feature_template): + """ + Normalizes feature template input to handle both camelCase and snake_case formats. 
+ Args: + feature_template (dict): Raw feature template input + Returns: + dict: Normalized parameter dictionary + """ + self.log("Normalizing feature template input parameters", "DEBUG") + + # Extract template identifiers with fallbacks + feature_template_id = ( + feature_template.get("featureTemplateId") + or feature_template.get("feature_template_id") + ) + + design_name = ( + feature_template.get("design_name") + or feature_template.get("designName") + or feature_template.get("designname") + ) + + # Extract configuration parameters + attributes = feature_template.get("attributes") or feature_template.get("attrs") or {} + if attributes is None: + attributes = {} + + excluded_attributes = ( + feature_template.get("excludedAttributes") + or feature_template.get("excluded_attributes") + or [] + ) + if excluded_attributes is None: + excluded_attributes = [] + + # Process additional identifiers + additional_identifiers_input = ( + feature_template.get("additionalIdentifiers") + or feature_template.get("additional_identifiers") + or {} + ) + + # Collect top-level identifier keys if not in nested structure + if not additional_identifiers_input: + additional_identifiers_input = {} + identifier_keys = [ + "wlan_profile_name", "wlanProfileName", + "site_name_hierarchy", "siteHierarchy", + "siteUuid", "site_uuid" + ] + for key in identifier_keys: + if key in feature_template: + additional_identifiers_input[key] = feature_template[key] + + normalized_result = { + "feature_template_id": feature_template_id, + "design_name": design_name, + "attributes": attributes, + "excluded_attributes": excluded_attributes, + "additional_identifiers": additional_identifiers_input + } + + self.log("Normalized parameters: {0}".format(self.pprint(normalized_result)), "DEBUG") + return normalized_result + def get_diff_deleted(self): """ Delete from provision database @@ -3181,10 +3811,11 @@ def get_diff_deleted(self): self.set_operation_result("success", False, self.msg, "INFO") return self - if device_type != "wired": - self.result["msg"] = "APIs are not supported for the device" - self.log(self.result["msg"], "CRITICAL") - return self + if self.compare_dnac_versions(self.get_ccc_version(), "2.3.7.6") <= 0: + if device_type != "wired": + self.result["msg"] = "APIs are not supported for the device" + self.log(self.result["msg"], "CRITICAL") + return self device_id = self.get_device_id() provision_id, status = self.get_device_provision_status(device_id) @@ -3194,7 +3825,7 @@ def get_diff_deleted(self): "Device associated with the passed IP address is not provisioned" ) self.log(self.result["msg"], "CRITICAL") - self.result["response"] = self.want["prov_params"] + self.result["response"] = self.result["msg"] return self if self.compare_dnac_versions(self.get_ccc_version(), "2.3.5.3") <= 0: @@ -3585,6 +4216,12 @@ def update_device_provisioning_messages(self): ) result_msg_list_changed.append(msg) + if self.assigned_device_to_site: + msg = "Device(s) '{0}' assigned to site successfully.".format( + "', '".join(self.assigned_device_to_site) + ) + result_msg_list_changed.append(msg) + if self.device_deleted: msg = "Device(s) '{0}' deleted successfully.".format( "', '".join(self.device_deleted) @@ -3615,7 +4252,14 @@ def update_device_provisioning_messages(self): self.result["changed"] = True self.msg = " ".join(result_msg_list_changed) else: - self.msg = "No device provisioning actions were performed." 
+ input = self.validated_config + ips = [item["management_ip_address"] for item in input] + ip_list_str = ", ".join(ips) + + self.msg = "No provisioning operations were executed for these IPs: {0}".format(ip_list_str) + self.set_operation_result( + "success", False, self.msg, "INFO" + ) self.result["msg"] = self.msg self.result["response"] = self.msg diff --git a/plugins/modules/reports_workflow_manager.py b/plugins/modules/reports_workflow_manager.py new file mode 100644 index 0000000000..b413055e82 --- /dev/null +++ b/plugins/modules/reports_workflow_manager.py @@ -0,0 +1,4580 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2025, Cisco Systems +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +"""Ansible module to manage Report configurations in Cisco Catalyst Center.""" +from __future__ import absolute_import, division, print_function +__metaclass__ = type +__author__ = ["Megha Kandari, Madhan Sankaranarayanan"] + +DOCUMENTATION = r""" +--- +module: reports_workflow_manager +short_description: Resource module for managing Reports in Cisco Catalyst Center. +description: + - This module manages Report configurations in Cisco Catalyst Center. + - It allows you to create and schedule customized reports across wired and + wireless network entities. + - Supports configuration of report name, scheduling, entity selection, + filters, field groups, and output format options. + - Enables scheduling with immediate, later, or recurring execution patterns. + - Provides delivery methods including local download, email notification, + and webhook integration. + - Reports help monitor network and client health, device behavior, + compliance status, and utilization trends. + - Applicable from Cisco Catalyst Center version 2.3.7.9 and later. +version_added: '6.41.0' +extends_documentation_fragment: + - cisco.dnac.workflow_manager_params +author: + - Megha Kandari (@kandarimegha) + - Madhan Sankaranarayanan (@madhansansel) +options: + config_verify: + description: + - Set to C(True) to enable configuration verification on Cisco + Catalyst Center after applying the playbook config. + - This will ensure that the system validates the configuration state + after the change is applied. + type: bool + default: false + state: + description: + - Specifies the desired state for the configuration. + - If C(merged), the module will create or schedule new reports. + - If C(deleted), the module will remove existing scheduled reports. + type: str + choices: [merged, deleted] + default: merged + config: + description: + - A list of configuration settings for generating reports in Cisco + Catalyst Center. + - Each configuration defines report metadata, scheduling, delivery + options, view selections, format, and applicable filters. + - Supports creating, scheduling, and downloading customized network + reports across various data categories. + type: list + elements: dict + required: true + suboptions: + generate_report: + description: + - List of report configurations to be created or scheduled. + - Each entry represents a single report with its complete + configuration. + - Reports are processed sequentially, not in parallel, + which ensures data consistency. + type: dict + required: true + suboptions: + name: + description: + - The name of the report to be generated. + - Must be unique within the Catalyst Center instance. + - If not provided, it will be automatically generated using + the format " - - ". 
+ - Example auto-generated name "Network - DeviceView - Jul 20 + 2025 08:26 PM". + type: str + required: false + new_report: + description: + - Specifies whether to create a new report when a report with the same name already exists. + - If set to C(True) and a report with the same name is found, + a new report is created with a unique timestamp suffix appended to its name. + type: bool + default: true + view_group_name: + description: + - The name of the view group as defined in Catalyst Center. For example, C(Inventory) + - Used to identify the viewGroupId via API calls. + - Determines the category of data included in the report. + type: str + required: true + choices: + - Compliance + - Executive Summary + - Inventory + - SWIM + - Access Point + - Long Term + - Network Devices + - Group Pair Communication Analytics + - Telemetry + - Group Communication Summary + - EoX + - Rogue and aWIPS + - Licensing + - AI Endpoint Analytics + - Audit Log + - Configuration Archive + - Client + - Security Advisories + tags: + description: + - Optional list of tags to filter reports. + - Tags help categorize and organize reports for easier management. + type: list + elements: str + required: false + view_group_version: + description: + - The version of the view group to be used for the report. + - Defaults to C(2.0.0) if not specified. + schedule: + description: + - Defines when the report should be executed (immediately, later, or + on a recurring basis). + - Controls the timing and frequency of report generation. + type: dict + required: true + suboptions: + schedule_type: + description: + - The scheduling type for the report execution. + - C(SCHEDULE_NOW) executes immediately, C(SCHEDULE_LATER) executes + at a specific time, C(SCHEDULE_RECURRENCE) executes repeatedly. + choices: + - SCHEDULE_NOW + - SCHEDULE_LATER + - SCHEDULE_RECURRENCE + type: str + required: true + date_time: + description: + - Scheduled time for report execution. + - Required if schedule_type is C(SCHEDULE_LATER) or + C(SCHEDULE_RECURRENCE). + - Must be in 'YYYY-MM-DD HH:MM AM/PM' format. + - Example "2025-09-02 07:30 PM". + - Only future dates are allowed. + type: str + required: false + time_zone: + description: + - Time zone identifier for the schedule. + - Uses standard time zone identifiers like C(Asia/Calcutta), + C(America/New_York), etc. For a complete list of supported time zones, + please refer to the time_zone field in the Inventory Workflow Manager documentation + https://galaxy.ansible.com/ui/repo/published/cisco/dnac/content/module/inventory_workflow_manager. + type: str + required: true + recurrence: + description: + - Recurrence settings for scheduled reports. + - Required only when schedule_type is C(SCHEDULE_RECURRENCE). + - Defines the pattern and frequency of recurring executions. + type: dict + required: false + suboptions: + recurrence_type: + description: + - Type of recurrence pattern. + - C(WEEKLY) for daily execution via weekly pattern with all + 7 days. + - C(MONTHLY) for monthly execution on specific day or last day. + choices: + - WEEKLY + - MONTHLY + type: str + required: true + days: + description: + - List of days for weekly recurrence. + - Required for C(WEEKLY) recurrence_type. + - Can specify individual days or use C(DAILY) for all seven days. + - Must include all 7 days for daily execution or DAILY. + ["MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"] or ["DAILY"]. 
+ type: list + elements: str + required: false + last_day_of_month: + description: + - Whether to run on the last day of the month. + - Only applicable for C(MONTHLY) recurrence_type. + - When true, ignores day_of_month setting. + type: bool + required: false + day_of_month: + description: + - Specific day of the month to run the report. + - Only applicable for C(MONTHLY) recurrence_type when + last_day_of_month is false. + - Must be an integer between 1 and 31. + type: int + required: false + time: + description: + - Epoch time in milliseconds for scheduled execution. + - Automatically generated from date_time during processing. + - Used internally by the API for recurring schedules. + type: int + required: false + start_date: + description: + - Epoch start date in milliseconds for recurring schedules. + - Automatically generated from date_time during processing. + - Used internally by the API to determine recurrence start point. + type: int + required: false + deliveries: + description: + - Specifies how the generated report should be delivered. + - Must be a list containing exactly one delivery configuration. + - Supports three delivery methods DOWNLOAD, NOTIFICATION (email), + and WEBHOOK. + type: dict + required: true + suboptions: + delivery_type: + description: + - Delivery type for the report. + - C(DOWNLOAD) saves report to local file system. + - C(NOTIFICATION) sends report via email notification. + - C(WEBHOOK) triggers a configured webhook endpoint. + choices: + - DOWNLOAD + - NOTIFICATION + - WEBHOOK + type: str + required: true + file_path: + description: + - Local file system path where the report should be downloaded. + - Required only when delivery_type is C(DOWNLOAD). + - Must be a valid directory path where the user has write + permissions. + type: str + required: false + notification_endpoints: + description: + - Required when delivery_type is C(NOTIFICATION). + - Must be a list containing exactly one email endpoint + configuration. + - Specifies email recipients and notification preferences. + type: list + elements: dict + required: false + suboptions: + email_addresses: + description: + - List of email recipients for the notification. + - Required when delivery_type is C(NOTIFICATION). + - Each email address must be in valid email format. + type: list + elements: str + required: false + email_attach: + description: + - Whether the report should be attached in the notification email. + type: bool + required: false + default: false + notify: + description: + - List of report execution statuses that will trigger + a notification. + - If not specified, notifications are sent for all statuses. + - C(IN_QUEUE) notifies when report is queued for execution. + - C(IN_PROGRESS) notifies when report execution starts. + - C(COMPLETED) notifies when report execution finishes. + choices: + - C(IN_QUEUE) + - C(IN_PROGRESS) + - C(COMPLETED) + type: list + elements: str + required: false + webhook_name: + description: + - The name of the webhook to be triggered for the report. + - Required when delivery_type is C(WEBHOOK). + - Must reference an existing webhook configured in Catalyst + Center. + - The webhook will be called when the report is generated. + type: str + required: false + view: + description: + - Contains view details such as view selection, field groups, filters, + and output format for the report. + - Defines what data to include and how to present it in the final report. 
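Before the view suboptions that follow, a hedged fragment showing how the schedule, recurrence, and deliveries blocks documented above combine. The deliveries description mentions a list while the declared type is a dict, so this sketch follows the declared dict form; the email address and time zone are placeholders, and the required view and format blocks are omitted here and shown in the full example at the end of this documentation block:

generate_report:
  name: Daily Client Health
  view_group_name: Client
  schedule:
    schedule_type: SCHEDULE_RECURRENCE
    date_time: "2025-12-01 07:30 PM"
    time_zone: Asia/Calcutta
    recurrence:
      recurrence_type: WEEKLY
      days: ["DAILY"]                          # all seven days, i.e. daily execution
  deliveries:
    delivery_type: NOTIFICATION
    notification_endpoints:
      - email_addresses: ["netops@example.com"]
        email_attach: true
        notify: ["COMPLETED"]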
+ type: dict + required: true + suboptions: + view_name: + description: + - The view name from which C(viewId) is derived via API calls. + - Must match exactly with available views in the specified view group. + - Determines the specific data subset and available fields for + the report. + type: str + required: true + choices: + - Network Device Compliance # viewName in viewGroup Compliance + - Network Device Availability # viewName in viewGroup Network Devices + - Channel Change Count # viewName in viewGroup Network Devices + - Transmit Power Change Count # viewName in viewGroup Network Devices + - VLAN # viewName in viewGroup Network Devices + - Port Capacity # viewName in viewGroup Network Devices + - Energy Management # viewName in viewGroup Network Devices + - PoE # viewName in viewGroup Network Devices + - Device CPU and Memory Utilization # viewName in viewGroup Network Devices + - Network Interface Utilization # viewName in viewGroup Network Devices + - Executive Summary # viewName in viewGroup Executive Summary + - All Data # viewName in viewGroup Inventory + - Port Reclaim View # viewName in viewGroup Inventory + - All Data Version 2.0 # viewName in viewGroup Inventory + - All Data # viewName in viewGroup SWIM + - All Data Version 2.0 # viewName in viewGroup SWIM + - AP # viewName in viewGroup Access Point + - AP Radio # viewName in viewGroup Access Point + - AP - Usage and Client Breakdown # viewName in viewGroup Access Point + - Worst Interferers # viewName in viewGroup Access Point + - AP RRM Events # viewName in viewGroup Access Point + - AP Performance Report # viewName in viewGroup Long Term + - Long Term AP Detail # viewName in viewGroup Long Term + - Long Term AP Radio # viewName in viewGroup Long Term + - Long Term AP Usage and Client Breakdown # viewName in viewGroup Long Term + - Long Term Client Detail # viewName in viewGroup Long Term + - Long Term Client Session # viewName in viewGroup Long Term + - Long Term Network Device Availability # viewName in viewGroup Long Term + - Security Group to Security Group # viewName in viewGroup Group Pair Communication Analytics + - Security Group to ISE Endpoint Profile Group # viewName in viewGroup Group Pair Communication Analytics + - Security Group to Host Group # viewName in viewGroup Group Pair Communication Analytics + - ISE Endpoint Profile Group to Security Group # viewName in viewGroup Group Pair Communication Analytics + - ISE Endpoint Profile Group to ISE Endpoint Profile Group # viewName in viewGroup Group Pair Communication Analytics + - ISE Endpoint Profile Group to Host Group # viewName in viewGroup Group Pair Communication Analytics + - Host Group to Security Group # viewName in viewGroup Group Pair Communication Analytics + - Host Group to ISE Endpoint Profile Group # viewName in viewGroup Group Pair Communication Analytics + - Host Group to Host Group # viewName in viewGroup Group Pair Communication Analytics + - Device Lifecycle Information # viewName in viewGroup Telemetry + - Security Group to Security Groups # viewName in viewGroup Group Communication Summary + - Security Group to ISE Endpoint Profile Groups # viewName in viewGroup Group Communication Summary + - Security Group to Host Groups # viewName in viewGroup Group Communication Summary + - ISE Endpoint Profile Group to Security Groups # viewName in viewGroup Group Communication Summary + - ISE Endpoint Profile Group to ISE Endpoint Profile Groups # viewName in viewGroup Group Communication Summary + - ISE Endpoint Profile Group to Host Groups # 
viewName in viewGroup Group Communication Summary + - Host Group to Security Groups # viewName in viewGroup Group Communication Summary + - Host Group to ISE Endpoint Profile Group # viewName in viewGroup Group Communication Summary + - Host Group to Host Group # viewName in viewGroup Group Communication Summary + - EoX Data # viewName in viewGroup EoX + - Threat Detail # viewName in viewGroup Rogue and aWIPS + - New Threat # viewName in viewGroup Rogue and aWIPS + - Rogue Additional Detail # viewName in viewGroup Rogue and aWIPS + - Non Compliant Devices # viewName in viewGroup Licensing + - Non Compliance Summary # viewName in viewGroup Licensing + - AireOS Controllers Licenses # viewName in viewGroup Licensing + - License Usage Upload Details # viewName in viewGroup Licensing + - License Historical Usage # viewName in viewGroup Licensing + - Endpoint Profiling # viewName in viewGroup AI Endpoint Analytics + - Audit Log # viewName in viewGroup Audit Log + - Configuration Archive # viewName in viewGroup Configuration Archive + - Client # viewName in viewGroup Client + - Client Summary # viewName in viewGroup Client + - Top N Summary # viewName in viewGroup Client + - Client Detail # viewName in viewGroup Client + - Client Trend # viewName in viewGroup Client + - Client Session # viewName in viewGroup Client + - Busiest Client # viewName in viewGroup Client + - Unique Clients and Users Summary # viewName in viewGroup Client + - Security Advisories Data # viewName in viewGroup Security Advisories + field_groups: + description: + - Groups of fields to include in the report, as defined in the + selected view. + - Can be empty list to include all available fields for the view. + - Field group availability depends on the selected view_name. + type: list + elements: dict + required: true + suboptions: + field_group_name: + description: + - The internal name of the field group as defined in the view metadata. + - Must match exactly with the available field_groups for the selected view. + type: str + required: true + field_group_display_name: + description: + - The display name shown in the UI for the field group. + - Optional but recommended for readability. + type: str + required: false + fields: + description: + - List of specific fields to include within the field group. + - Can be empty list to include all fields in the group. + - Field availability depends on the selected field group. + type: list + elements: dict + required: true + suboptions: + name: + description: + - Field identifier as defined in the view metadata. + - Must match exactly with available fields in the group. + type: str + required: true + display_name: + description: + - Optional UI-friendly display label for the field. + - Used only for readability; API uses `name`. + type: str + required: false + format: + description: + - Specifies the output format of the report. + - Determines how the report data will be structured and presented. + type: dict + required: true + suboptions: + format_type: + description: + - Type of format to be used for the report output. + - C(CSV) for comma-separated values + - C(PDF) for document format + - C(JSON) for structured data + - C(TDE) for Tableau data extract. + choices: + - CSV + - PDF + - JSON + - TDE + type: str + required: true + filters: + description: + - Filters to be applied to narrow down the report data. + - Optional parameter to refine report content based on specific + criteria. + - Filter availability depends on the selected view_name. 
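The filter suboptions documented next accept two value shapes; a hedged sketch, using names and values borrowed from the playbook examples later in this file:

```python
# Illustrative filter entries only; availability of a filter depends on the selected view.
multi_select_filter = {
    "name": "DeviceFamily",
    "filter_type": "MULTI_SELECT",
    "value": [{"value": "SWITCHES", "display_value": "Switches"}],  # list of value/display_value pairs
}

time_range_filter = {
    "name": "Time Range",
    "filter_type": "TIME_RANGE",
    "value": {  # for TIME_RANGE the value is a single dict, not a list
        "time_range_option": "CUSTOM",
        "start_date_time": "2025-10-09 07:30 PM",
        "end_date_time": "2025-10-31 11:59 PM",
        "time_zone": "Asia/Calcutta",
    },
}
```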
+ type: list + elements: dict + required: false + suboptions: + name: + description: + - Name of the filter as defined in the view metadata. + - Common filters include Location, Time Range, Device Type, etc. + type: str + required: true + display_name: + description: + - Human-readable name of the filter shown in the UI. + type: str + required: false + filter_type: + description: + - Type of the filter determining how values are selected. + - C(MULTI_SELECT) allows multiple discrete values. + - C(MULTI_SELECT_TREE) allows hierarchical multi-selection. + - C(SINGLE_SELECT_ARRAY) allows single value from array. + - C(TIME_RANGE) allows date/time range specification. + choices: + - MULTI_SELECT + - MULTI_SELECT_TREE + - SINGLE_SELECT_ARRAY + - TIME_RANGE + type: str + required: true + value: + description: + - Value(s) to apply in the filter based on filter_type. + - For C(TIME_RANGE), this is a dict with time_range_option, + start_date_time, end_date_time, and time_zone. + - For other types, this is a list of dicts with C(value) and + C(display_value) keys. + - Location filters are automatically resolved to site hierarchy IDs. + type: list + elements: dict + required: true + suboptions: + value: + description: + - API-compatible internal value (e.g., DeviceFamily = SWITCHES) + type: str + required: true + display_value: + description: + - Human-readable value (e.g., "Switches" or "Global/India") + type: str + required: true + +requirements: + - dnacentersdk >= 2.8.6 + - python >= 3.9 +notes: + - SDK Methods used are + reports.Reports.get_all_view_groups + reports.Reports.get_views_for_a_given_view_group + reports.Reports.get_view_details_for_a_given_view_group_and_view + reports.Reports.create_or_schedule_a_report + reports.Reports.delete_a_scheduled_report + reports.Reports.download_report_content + reports.Reports.get_execution_id_by_report_id + - Paths used are + GET /dna/intent/api/v1/data/view-groups + GET /dna/intent/api/v1/data/view-groups/{viewGroupId} + GET /dna/intent/api/v1/data/view-groups/{viewGroupId}/views/{viewId} + POST /dna/intent/api/v1/data/reports + DELETE /dna/intent/api/v1/data/reports/{reportId} + GET /dna/intent/api/v1/data/reports/{reportId}/executions/{executionId} +""" +""" +Mapping of View Names to Mandatory Filters and Available Filters: + +View Name Mandatory Filters Available Filters +--------- ----------------- ----------------- +Network Device Compliance Location Location, Device Type, Collection Status +Network Device Availability Location Location, Device Type, Time Range +Channel Change Count Location Location, Device Type, Time Range +Transmit Power Change Count Location Location, Device Type, Time Range +VLAN Location Location, Device Type +Port Capacity Location Location, Device Type +Energy Management Location Location, Device Type +PoE Location Location, Device Type +Device CPU and Memory Utilization Location Location, Device Type, Time Range +Network Interface Utilization Location Location, Device Type, Interface Type, Time Range +Executive Summary Location Location, Device Type, Time Range +All Data (inventory) N/A Location, Device Type, Software Version +Port Reclaim View Location Location, Device Type +All Data Version 2.0 Location Location, Device Type, Software Version +All Data (swim) N/A Device Type, Image Name, Software Version +All Data Version 2.0 N/A Device Type, Image Name, Software Version +AP Location Location, AP Name, Model, Controller +AP Radio Location Location, AP Name, Radio Band, Controller +AP - Usage and Client Breakdown 
Location, AP Name Location, AP Name, Controller, Time Range +Worst Interferers Location Location, AP Name, Controller, Time Range +AP RRM Events Location Location, AP Name, Controller, Time Range +AP Performance Report Location Location, AP Name, Controller, Time Range +Long Term AP Detail Location Location, AP Name, Controller, Time Range +Long Term AP Radio Location Location, AP Name, Radio Band, Time Range +Long Term AP Usage and Client Breakdown Location, AP Name Location, AP Name, Time Range +Long Term Client Detail Location, Time Range Location, Client MAC, User Name, Time Range +Long Term Client Session Location, Time Range Location, Client MAC, Session ID, Time Range +Long Term Network Device Availability Location Location, Device Type, Time Range +Security Group to Security Group Source/Destination SGT SGT, VN, Time Range +Security Group to ISE Endpoint Profile Group SGT, Endpoint Profile SGT, Endpoint Profile, VN, Time Range +Security Group to Host Group SGT, Host Group SGT, Host Group, VN, Time Range +ISE Endpoint Profile Group to Security Group Endpoint Profile, SGT Endpoint Profile, SGT, VN, Time Range +ISE Endpoint Profile Group to + ISE Endpoint Profile Group Endpoint Profile Endpoint Profile, VN, Time Range +ISE Endpoint Profile Group to Host Group Endpoint Profile, Host Group Endpoint Profile, Host Group, VN, Time Range +Host Group to Security Group Host Group, SGT Host Group, SGT, VN, Time Range +Host Group to ISE Endpoint Profile Group Host Group, Endpoint Profile Host Group, Endpoint Profile, VN, Time Range +Host Group to Host Group Host Group Host Group, VN, Time Range +Device Lifecycle Information Location Location, Device Type, Hardware Info +Security Group to Security Groups SGT SGT, VN, Time Range +Security Group to ISE Endpoint Profile Groups SGT, Endpoint Profile SGT, Endpoint Profile, VN, Time Range +Security Group to Host Groups SGT, Host Group SGT, Host Group, VN, Time Range +ISE Endpoint Profile Group to Security Groups Endpoint Profile, SGT Endpoint Profile, SGT, VN, Time Range +ISE Endpoint Profile Group to + ISE Endpoint Profile Groups Endpoint Profile Endpoint Profile, VN, Time Range +ISE Endpoint Profile Group to Host Groups Endpoint Profile, Host Group Endpoint Profile, Host Group, VN, Time Range +Host Group to Security Groups Host Group, SGT Host Group, SGT, VN, Time Range +Host Group to ISE Endpoint Profile Group Host Group, Endpoint Profile Host Group, Endpoint Profile, VN, Time Range +Host Group to Host Group Host Group Host Group, VN, Time Range +EoX Data N/A Device Type, EoX Type, Bulletin ID +Threat Detail Location Location, Threat Type, Severity, Time Range +New Threat Location Location, Threat Type, Severity, Time Range +Rogue Additional Detail Location Location, Threat Type, MAC Address, Time Range +Non Compliant Devices Location Location, Device Type, License Type +Non Compliance Summary Location Location, License Type, Compliance Status +AireOS Controllers Licenses N/A Controller Name, License Type, Status +License Usage Upload Details N/A Upload Date, License Type, Status +License Historical Usage N/A License Type, Time Range, Usage Type +Endpoint Profiling Location Location, Device Type, Profile Name, Time Range +Audit Log N/A Time Range +Configuration Archive Device, Time Range Device Name, Location, Archive Status, Time Range +Client Location Location, Client MAC, Device Type +Client Summary Location Location, Device Type, Connection Status +Top N Summary Location Location, Metric Type, Time Range +Client Detail Location Location, Client 
MAC, User Name +Client Trend Location, Time Range Location, Client MAC, Metric Type, Time Range +Client Session Location, Time Range Location, Client MAC, Session ID, Time Range +Busiest Client N/A Location, Client MAC, Traffic Type +Unique Clients and Users Summary Location, Client MAC Location, Client MAC, Time Range, Device Type +Security Advisories Data N/A Device Type, Software Version, Image Name, Time Range +""" + +"""Filter types for each filter category in Cisco Catalyst Center Reports: + +Filter Name: Location + Filter Type: MULTI_SELECT_TREE + Description: Hierarchical selection of network locations/sites + +Filter Name: Device Type + Filter Type: MULTI_SELECT + Description: Selection of device categories (Switch, Router, AP, etc.) + +Filter Name: Time Range + Filter Type: TIME_RANGE + Description: Date/time range specification for historical data + +Filter Name: Collection Status + Filter Type: MULTI_SELECT + Description: Device collection status (Collected, Not Collected, etc.) + +Filter Name: Software Version + Filter Type: MULTI_SELECT + Description: Device software/firmware versions + +Filter Name: Interface Type + Filter Type: MULTI_SELECT + Description: Network interface categories (Ethernet, Wireless, etc.) + +Filter Name: Image Name + Filter Type: MULTI_SELECT + Description: Software image names for SWIM reports + +Filter Name: AP Name + Filter Type: MULTI_SELECT + Description: Access Point device names + +Filter Name: Model + Filter Type: MULTI_SELECT + Description: Device hardware model numbers + +Filter Name: Controller + Filter Type: MULTI_SELECT + Description: Wireless controller names + +Filter Name: Radio Band + Filter Type: MULTI_SELECT + Description: Wireless radio frequency bands (2.4GHz, 5GHz, 6GHz) + +Filter Name: SSID + Filter Type: MULTI_SELECT + Description: Wireless network SSID names + +Filter Name: SGT (Security Group Tag) + Filter Type: MULTI_SELECT + Description: Cisco TrustSec security group tags + +Filter Name: Endpoint Profile + Filter Type: MULTI_SELECT + Description: ISE endpoint profile groups + +Filter Name: Host Group + Filter Type: MULTI_SELECT + Description: Host group classifications + +Filter Name: VN (Virtual Network) + Filter Type: MULTI_SELECT + Description: Virtual network identifiers + +Filter Name: Hardware Info + Filter Type: MULTI_SELECT + Description: Device hardware information categories + +Filter Name: EoX Type + Filter Type: MULTI_SELECT + Description: End of Life/Support announcement types + +Filter Name: Bulletin ID + Filter Type: SINGLE_SELECT_ARRAY + Description: Security bulletin identifiers + +Filter Name: Threat Type + Filter Type: MULTI_SELECT + Description: Security threat categories + +Filter Name: Severity + Filter Type: MULTI_SELECT + Description: Threat/alert severity levels + +Filter Name: MAC Address + Filter Type: MULTI_SELECT + Description: Device MAC addresses + +Filter Name: License Type + Filter Type: MULTI_SELECT + Description: Software license categories + +Filter Name: Compliance Status + Filter Type: MULTI_SELECT + Description: License compliance states + +Filter Name: Status + Filter Type: MULTI_SELECT + Description: General status indicators + +Filter Name: Upload Date + Filter Type: TIME_RANGE + Description: File upload date ranges + +Filter Name: Usage Type + Filter Type: MULTI_SELECT + Description: License usage categories + +Filter Name: Profile Name + Filter Type: MULTI_SELECT + Description: AI Endpoint Analytics profile names + +Filter Name: User Name + Filter Type: MULTI_SELECT + Description: 
User account names + +Filter Name: Event Category + Filter Type: MULTI_SELECT + Description: Audit log event categories + +Filter Name: Object Type + Filter Type: MULTI_SELECT + Description: Audit log object types + +Filter Name: Device Name + Filter Type: MULTI_SELECT + Description: Network device names + +Filter Name: Archive Status + Filter Type: MULTI_SELECT + Description: Configuration archive status + +Filter Name: Client MAC + Filter Type: MULTI_SELECT + Description: Client device MAC addresses + +Filter Name: Connection Status + Filter Type: MULTI_SELECT + Description: Client connection states + +Filter Name: Metric Type + Filter Type: MULTI_SELECT + Description: Performance metric categories + +Filter Name: Session ID + Filter Type: MULTI_SELECT + Description: Client session identifiers + +Filter Name: Traffic Type + Filter Type: MULTI_SELECT + Description: Network traffic categories + +Note: +- MULTI_SELECT: Allows selection of multiple discrete values +- MULTI_SELECT_TREE: Allows hierarchical multi-selection (like site locations) +- SINGLE_SELECT_ARRAY: Allows single value selection from an array +- TIME_RANGE: Allows date/time range specification with start_date_time, end_date_time, and time_zone +""" + +REPORT_TYPES_AND_FORMATS = r''' +Report Types with View Names and Eligible Format Types: + +COMPLIANCE REPORTS: +- View Name: "Network Device Compliance" +- View Group: "Compliance" +- Available Formats: CSV, PDF, JSON + +EXECUTIVE SUMMARY REPORTS: +- View Name: "Executive Summary" +- View Group: "Executive Summary" +- Available Formats: PDF + +INVENTORY REPORTS: +- View Name: "All Data" +- View Group: "Inventory" +- Available Formats: PDF, CSV, TDE + +- View Name: "Port Reclaim View" +- View Group: "Inventory" +- Available Formats: CSV, JSON, TDE + +- View Name: "All Data Version 2.0" +- View Group: "Inventory" +- Available Formats: CSV, PDF, TDE + +SWIM REPORTS: +- View Name: "All Data" +- View Group: "SWIM" +- Available Formats: CSV, PDF, TDE + +- View Name: "All Data Version 2.0" +- View Group: "SWIM" +- Available Formats: CSV, JSON, TDE + +ACCESS POINT REPORTS: +- View Name: "AP" +- View Group: "Access Point" +- Available Formats: CSV, JSON, TDE + +- View Name: "AP Radio" +- View Group: "Access Point" +- Available Formats: CSV, JSON, TDE + +- View Name: "AP - Usage and Client Breakdown" +- View Group: "Access Point" +- Available Formats: CSV, PDF, JSON, TDE + +- View Name: "Worst Interferers" +- View Group: "Access Point" +- Available Formats: CSV, JSON, TDE + +- View Name: "AP RRM Events" +- View Group: "Access Point" +- Available Formats: CSV, JSON, TDE + +NETWORK DEVICE REPORTS: +- View Name: "Network Device Availability" +- View Group: "Network Devices" +- Available Formats: CSV, JSON, TDE + +- View Name: "Channel Change Count" +- View Group: "Network Devices" +- Available Formats: CSV, JSON, TDE + +- View Name: "Transmit Power Change Count" +- View Group: "Network Devices" +- Available Formats: CSV, JSON, TDE + +- View Name: "VLAN" +- View Group: "Network Devices" +- Available Formats: CSV, TDE + +- View Name: "Port Capacity" +- View Group: "Network Devices" +- Available Formats: CSV, TDE + +- View Name: "Energy Management" +- View Group: "Network Devices" +- Available Formats: CSV, JSON, TDE + +- View Name: "PoE" +- View Group: "Network Devices" +- Available Formats: CSV, JSON, TDE + +- View Name: "Device CPU and Memory Utilization" +- View Group: "Network Devices" +- Available Formats: CSV, JSON, TDE + +- View Name: "Network Interface Utilization" +- View Group: 
"Network Devices" +- Available Formats: CSV, JSON, TDE + +LONG TERM REPORTS: +- View Name: "AP Performance Report" +- View Group: "Long Term" +- Available Formats: CSV, JSON, TDE + +- View Name: "Long Term AP Detail" +- View Group: "Long Term" +- Available Formats: CSV, JSON, TDE + +- View Name: "Long Term AP Radio" +- View Group: "Long Term" +- Available Formats: CSV, JSON, TDE + +- View Name: "Long Term AP Usage and Client Breakdown" +- View Group: "Long Term" +- Available Formats: CSV, PDF, JSON, TDE + +- View Name: "Long Term Client Detail" +- View Group: "Long Term" +- Available Formats: CSV, JSON, TDE + +- View Name: "Long Term Client Session" +- View Group: "Long Term" +- Available Formats: CSV, JSON, TDE + +- View Name: "Long Term Network Device Availability" +- View Group: "Long Term" +- Available Formats: CSV, JSON, TDE + +GROUP PAIR COMMUNICATION ANALYTICS REPORTS: +- View Name: "Security Group to Security Group" +- View Group: "Group Pair Communication Analytics" +- Available Formats: CSV + +- View Name: "Security Group to ISE Endpoint Profile Group" +- View Group: "Group Pair Communication Analytics" +- Available Formats: CSV + +- View Name: "Security Group to Host Group" +- View Group: "Group Pair Communication Analytics" +- Available Formats: CSV + +- View Name: "ISE Endpoint Profile Group to Security Group" +- View Group: "Group Pair Communication Analytics" +- Available Formats: CSV + +- View Name: "ISE Endpoint Profile Group to ISE Endpoint Profile Group" +- View Group: "Group Pair Communication Analytics" +- Available Formats: CSV + +- View Name: "ISE Endpoint Profile Group to Host Group" +- View Group: "Group Pair Communication Analytics" +- Available Formats: CSV + +- View Name: "Host Group to Security Group" +- View Group: "Group Pair Communication Analytics" +- Available Formats: CSV + +- View Name: "Host Group to ISE Endpoint Profile Group" +- View Group: "Group Pair Communication Analytics" +- Available Formats: CSV + +- View Name: "Host Group to Host Group" +- View Group: "Group Pair Communication Analytics" +- Available Formats: CSV + +TELEMETRY REPORTS: +- View Name: "Device Lifecycle Information" +- View Group: "Telemetry" +- Available Formats: JSON + +GROUP COMMUNICATION SUMMARY REPORTS: +- View Name: "Security Group to Security Groups" +- View Group: "Group Communication Summary" +- Available Formats: CSV + +- View Name: "Security Group to ISE Endpoint Profile Groups" +- View Group: "Group Communication Summary" +- Available Formats: CSV + +- View Name: "Security Group to Host Groups" +- View Group: "Group Communication Summary" +- Available Formats: CSV + +- View Name: "ISE Endpoint Profile Group to Security Groups" +- View Group: "Group Communication Summary" +- Available Formats: CSV + +- View Name: "ISE Endpoint Profile Group to ISE Endpoint Profile Groups" +- View Group: "Group Communication Summary" +- Available Formats: CSV + +- View Name: "ISE Endpoint Profile Group to Host Groups" +- View Group: "Group Communication Summary" +- Available Formats: CSV + +- View Name: "Host Group to Security Groups" +- View Group: "Group Communication Summary" +- Available Formats: CSV + +- View Name: "Host Group to ISE Endpoint Profile Group" +- View Group: "Group Communication Summary" +- Available Formats: CSV + +- View Name: "Host Group to Host Group" +- View Group: "Group Communication Summary" +- Available Formats: CSV + +EOX REPORTS: +- View Name: "EoX Data" +- View Group: "EoX" +- Available Formats: CSV, PDF, TDE + +ROGUE AND aWIPS REPORTS: +- View Name: 
"Threat Detail" +- View Group: "Rogue and aWIPS" +- Available Formats: CSV, JSON, TDE + +- View Name: "New Threat" +- View Group: "Rogue and aWIPS" +- Available Formats: CSV, JSON, TDE + +- View Name: "Rogue Additional Detail" +- View Group: "Rogue and aWIPS" +- Available Formats: CSV, JSON, TDE + +LICENSING REPORTS: +- View Name: "Non Compliant Devices" +- View Group: "Licensing" +- Available Formats: CSV, PDF + +- View Name: "Non Compliance Summary" +- View Group: "Licensing" +- Available Formats: CSV, PDF + +- View Name: "AireOS Controllers Licenses" +- View Group: "Licensing" +- Available Formats: CSV, PDF + +- View Name: "License Usage Upload Details" +- View Group: "Licensing" +- Available Formats: CSV, PDF + +- View Name: "License Historical Usage" +- View Group: "Licensing" +- Available Formats: CSV + +AI ENDPOINT ANALYTICS REPORTS: +- View Name: "Endpoint Profiling" +- View Group: "AI Endpoint Analytics" +- Available Formats: CSV + +AUDIT LOG REPORTS: +- View Name: "Audit Log" +- View Group: "Audit Log" +- Available Formats: CSV, JSON + +CONFIGURATION ARCHIVE REPORTS: +- View Name: "Configuration Archive" +- View Group: "Configuration Archive" +- Available Formats: CSV, PDF, JSON + +CLIENT REPORTS: +- View Name: "Client" +- View Group: "Client" +- Available Formats: CSV, PDF, JSON, TDE + +- View Name: "Client Summary" +- View Group: "Client" +- Available Formats: PDF + +- View Name: "Top N Summary" +- View Group: "Client" +- Available Formats: PDF + +- View Name: "Client Detail" +- View Group: "Client" +- Available Formats: CSV, JSON, TDE + +- View Name: "Client Trend" +- View Group: "Client" +- Available Formats: PDF + +- View Name: "Client Session" +- View Group: "Client" +- Available Formats: CSV, JSON, TDE + +- View Name: "Busiest Client" +- View Group: "Client" +- Available Formats: CSV, JSON, TDE + +- View Name: "Unique Clients and Users Summary" +- View Group: "Client" +- Available Formats: PDF + +SECURITY ADVISORIES REPORTS: +- View Name: "Security Advisories Data" +- View Group: "Security Advisories" +- Available Formats: CSV, PDF, TDE + +FORMAT TYPE DESCRIPTIONS: +- CSV: Comma-Separated Values format, suitable for spreadsheets and data analysis +- PDF: Portable Document Format, ideal for sharing and printing reports +- JSON: JavaScript Object Notation, useful for structured data exchange and integration +- TDE: Tableau Data Extract, optimized for use with Tableau software for data visualization + +Note: The available format types are retrieved through the following API endpoints: +- GET /dna/intent/api/v1/data/view-groups (to get all view groups) +- GET /dna/intent/api/v1/data/view-groups/{viewGroupId} (to get views for a view group) +- GET /dna/intent/api/v1/data/view-groups/{viewGroupId}/views/{viewId} (to get view details including format options) +''' + +EXAMPLES = r''' +- name: Create/Schedule a Sample Inventory Report and Download It + cisco.dnac.reports_workflow_manager: + dnac_host: "{{ dnac_host }}" + dnac_port: "{{ dnac_port }}" + dnac_username: "{{ dnac_username }}" + dnac_password: "{{ dnac_password }}" + dnac_verify: "{{ dnac_verify }}" + dnac_version: "{{ dnac_version }}" + dnac_debug: "{{ dnac_debug }}" + dnac_log_level: DEBUG + dnac_log: true + state: merged + config_verify: true + config: + - generate_report: + - name: "Sample Inventory report" + data_category: "Inventory" + view_group_version: "2.0.0" + view: + view_name: "All Data" + format: + format_type: "CSV" + field_groups: + - field_group_name: "inventoryAllData" + field_group_display_name: "All 
Data" + fields: + - name: "type" + display_name: "Device Type" + - name: "hostname" + display_name: "Device Name" + - name: "ipAddress" + display_name: "IP Address" + filters: + - name: "Location" + filter_type: "MULTI_SELECT_TREE" + value: [] + - name: "DeviceFamily" + filter_type: "MULTI_SELECT" + value: [] + - name: "DeviceType" + filter_type: "MULTI_SELECT" + value: [] + - name: "SoftwareVersion" + filter_type: "MULTI_SELECT" + value: [] + schedule: + schedule_type: "SCHEDULE_NOW" + time_zone: "Asia/Calcutta" + deliveries: + - delivery_type: "DOWNLOAD" + file_path: "/Users/xyz/Desktop" + +- name: Create/Schedule a compliance report with immediate execution + cisco.dnac.reports_workflow_manager: + dnac_host: "{{ dnac_host }}" + dnac_port: "{{ dnac_port }}" + dnac_username: "{{ dnac_username }}" + dnac_password: "{{ dnac_password }}" + dnac_verify: "{{ dnac_verify }}" + dnac_version: "{{ dnac_version }}" + dnac_debug: "{{ dnac_debug }}" + dnac_log_level: DEBUG + dnac_log: true + state: merged + config_verify: true + config: + - generate_report: + - name: "compliance_report" + view_group_name: "Compliance" + deliveries: + - delivery_type: "DOWNLOAD" + file_path: "/Users/xyz/Desktop" + schedule: + schedule_type: "SCHEDULE_NOW" + time_zone: "Asia/Calcutta" + view: + view_name: "Network Device Compliance" + field_groups: + - field_group_name: "Compliance" + field_group_display_name: "Compliance" + fields: + - name: "deviceName" + display_name: "Device Name" + format: + format_type: "CSV" + filters: [] + tags: ["network", "compliance"] + +- name: Create/Schedule an access point report with location filter + cisco.dnac.reports_workflow_manager: + dnac_host: "{{ dnac_host }}" + dnac_port: "{{ dnac_port }}" + dnac_username: "{{ dnac_username }}" + dnac_password: "{{ dnac_password }}" + dnac_verify: "{{ dnac_verify }}" + dnac_version: "{{ dnac_version }}" + dnac_debug: "{{ dnac_debug }}" + dnac_log_level: DEBUG + dnac_log: true + state: merged + config_verify: true + config: + - generate_report: + - name: "Access_point_report1" + view_group_name: "Access Point" + deliveries: + - delivery_type: "DOWNLOAD" + file_path: "/Users/xyz/Desktop" + schedule: + schedule_type: "SCHEDULE_NOW" + time_zone: "Asia/Calcutta" + view: + view_name: "AP" + field_groups: [] + format: + format_type: "JSON" + filters: + - name: "Location" + display_name: "Location" + filter_type: "MULTI_SELECT_TREE" + value: + - value: "Global/India" + display_value: "Routers" + +- name: Schedule a report for later execution + cisco.dnac.reports_workflow_manager: + dnac_host: "{{ dnac_host }}" + dnac_port: "{{ dnac_port }}" + dnac_username: "{{ dnac_username }}" + dnac_password: "{{ dnac_password }}" + dnac_verify: "{{ dnac_verify }}" + dnac_version: "{{ dnac_version }}" + dnac_debug: "{{ dnac_debug }}" + dnac_log: true + state: merged + config_verify: true + config: + - generate_report: + - name: "scheduled_inventory_report" + view_group_name: "Inventory" + tags: ["inventory", "scheduled"] + deliveries: + - delivery_type: "NOTIFICATION" + notification_endpoints: + - email_addresses: + - "admin@company.com" + - "reports@company.com" + email_attach: true + notify: ["COMPLETED"] + schedule: + schedule_type: "SCHEDULE_LATER" + date_time: "2025-12-25 09:00 AM" + time_zone: "America/New_York" + view: + view_name: "All Data" + field_groups: [] + format: + format_type: "PDF" + filters: [] + +- name: Create recurring weekly report with webhook delivery + cisco.dnac.reports_workflow_manager: + dnac_host: "{{ dnac_host }}" + dnac_port: "{{ 
dnac_port }}" + dnac_username: "{{ dnac_username }}" + dnac_password: "{{ dnac_password }}" + dnac_verify: "{{ dnac_verify }}" + dnac_version: "{{ dnac_version }}" + dnac_debug: "{{ dnac_debug }}" + dnac_log: true + state: merged + config_verify: true + config: + - generate_report: + - name: "weekly_device_report" + view_group_name: "Network Devices" + tags: ["weekly", "devices"] + deliveries: + - delivery_type: "WEBHOOK" + webhook_name: "report_webhook" + schedule: + schedule_type: "SCHEDULE_RECURRENCE" + date_time: "2025-09-15 08:00 AM" + time_zone: "UTC" + recurrence: + recurrence_type: "WEEKLY" + days: ["MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", + "FRIDAY", "SATURDAY", "SUNDAY"] + view: + view_name: "Network Device Availability" + field_groups: + - field_group_name: "deviceInfo" + field_group_display_name: "Device Information" + fields: + - name: "hostname" + display_name: "host name" + format: + format_type: "CSV" + filters: + - name: "Location" + filter_type: "MULTI_SELECT_TREE" + value: + - value: "Global/US/California" + +- name: Create monthly report with time range filter + cisco.dnac.reports_workflow_manager: + dnac_host: "{{ dnac_host }}" + dnac_port: "{{ dnac_port }}" + dnac_username: "{{ dnac_username }}" + dnac_password: "{{ dnac_password }}" + dnac_verify: "{{ dnac_verify }}" + dnac_version: "{{ dnac_version }}" + dnac_debug: "{{ dnac_debug }}" + dnac_log: true + state: merged + config_verify: true + config: + - generate_report: + - name: "monthly_client_report" + view_group_name: "Client" + tags: ["monthly", "clients"] + deliveries: + - delivery_type: "DOWNLOAD" + file_path: "/home/reports/monthly" + schedule: + schedule_type: "SCHEDULE_RECURRENCE" + date_time: "2025-09-01 06:00 AM" + time_zone: "Asia/Calcutta" + recurrence: + recurrence_type: "MONTHLY" + last_day_of_month: true + view: + view_name: "Client Detail" + field_groups: [] + format: + format_type: "JSON" + filters: + - name: "Time Range" + filter_type: "TIME_RANGE" + value: + time_range_option: "LAST_30_DAYS" + +- name: Create monthly report with time range CUSTOM filter + cisco.dnac.reports_workflow_manager: + dnac_host: "{{ dnac_host }}" + dnac_port: "{{ dnac_port }}" + dnac_username: "{{ dnac_username }}" + dnac_password: "{{ dnac_password }}" + dnac_verify: "{{ dnac_verify }}" + dnac_version: "{{ dnac_version }}" + dnac_debug: "{{ dnac_debug }}" + dnac_log: true + state: merged + config_verify: true + config: + - generate_report: + - name: "monthly_client_report" + new_report: false + view_group_name: "Client" + tags: ["monthly", "clients"] + deliveries: + - delivery_type: "DOWNLOAD" + file_path: "/home/reports/monthly" + schedule: + schedule_type: "SCHEDULE_RECURRENCE" + date_time: "2025-09-01 06:00 AM" + time_zone: "Asia/Calcutta" + recurrence: + recurrence_type: "MONTHLY" + last_day_of_month: true + view: + view_name: "Client Detail" + field_groups: [] + format: + format_type: "JSON" + filters: + - name: "Time Range" + filter_type: "TIME_RANGE" + value: + time_range_option: "CUSTOM" + start_date_time: "2025-10-09 07:30 PM" + end_date_time: "2025-10-31 11:59 PM" + time_zone: "Asia/Calcutta" + +- name: Delete a report from Catalyst Center + cisco.dnac.reports_workflow_manager: + dnac_host: "{{ dnac_host }}" + dnac_port: "{{ dnac_port }}" + dnac_username: "{{ dnac_username }}" + dnac_password: "{{ dnac_password }}" + dnac_verify: "{{ dnac_verify }}" + dnac_version: "{{ dnac_version }}" + dnac_debug: "{{ dnac_debug }}" + dnac_log: true + state: deleted + config_verify: true + config: + - generate_report: + 
- name: "compliance_report" # The name of the report to be deleted is required + view_group_name: "Compliance" # Required for identification + view: + view_name: "Network Device Compliance" # Required for identification +''' + +RETURN = r""" +# Case 1: Successful Report Creation/Scheduling +response_create_or_schedule_a_report: + description: Response returned after successfully creating or scheduling a + report in Cisco Catalyst Center. + returned: when state is merged and report creation succeeds + type: dict + sample: { + "response": [ + { + "create_report": { + "response": { + "reportId": "1234567890abcdef12345678", + "viewGroupId": "network-device-compliance", + "viewsId": "compliance-view-id" + }, + "msg": "Successfully created or scheduled report 'compliance_report'." + } + } + ], + } + +# Case 2: Successful Report Deletion +response_delete_a_scheduled_report: + description: Response returned after successfully deleting a scheduled report + from Cisco Catalyst Center. + returned: when state is deleted and report deletion succeeds + type: dict + sample: { + "response": [ + { + "delete_report": { + "response": {}, + "msg": "Report 'compliance_report' has been successfully deleted." + } + } + ], + } + +# Case 3: Successful Report Download +response_download_report_content: + description: Response returned after successfully downloading report content + to the specified local file path. + returned: when delivery_type is DOWNLOAD and download succeeds + type: dict + sample: { + "response": [ + { + "download_report": { + "response": { + "reportId": "1234567890abcdef12345678", + "reportName": "compliance_report", + "filePath": "/Users/xyz/Desktop" + }, + "msg": "Successfully downloaded report 'compliance_report' to + '/Users/xyz/Desktop'." + } + } + ], + } + +# Case 4: Report Already Exists +response_existing_report: + description: Response returned when a report with the same name already + exists in Cisco Catalyst Center. + returned: when state is merged and report already exists + type: dict + sample: { + "response": [ + { + "create_report": { + "response": { + "report_id": "existing1234567890abcdef", + "view_group_id": "network-device-compliance", + "view_id": "compliance-view-id" + }, + "msg": "Report 'compliance_report' already exists." + } + } + ], + "changed": false, + "msg": "No changes required - report already exists." + } + +# Case 5: Report Not Found for Deletion +response_report_not_found: + description: Response returned when attempting to delete a report that does + not exist in Cisco Catalyst Center. + returned: when state is deleted and report does not exist + type: dict + sample: { + "response": [ + { + "delete_report": { + "response": {}, + "msg": "Report 'nonexistent_report' does not exist." + } + } + ], + "changed": false, + "msg": "No changes required - report does not exist." + } + +# Case 6: Verification Success +response_verification_success: + description: Response returned after successful verification of report + operations when config_verify is enabled. + returned: when config_verify is true and verification succeeds + type: dict + sample: { + "response": [ + { + "create_report": { + "response": { + "reportId": "1234567890abcdef12345678", + "viewGroupId": "network-device-compliance", + "viewsId": "compliance-view-id" + }, + "msg": "Successfully created or scheduled report 'compliance_report'.", + "Validation": "Success" + } + } + ], + "changed": true, + "msg": "Report operations completed and verified successfully." 
+ } + +# Case 7: Multiple Reports Processing +response_multiple_reports: + description: Response returned when processing multiple reports in a single + playbook execution. + returned: when config contains multiple report configurations + type: dict + sample: { + "response": [ + { + "create_report": { + "response": { + "reportId": "report1-id", + "viewGroupId": "compliance", + "viewsId": "compliance-view" + }, + "msg": "Successfully created or scheduled report 'compliance_report'." + } + }, + { + "download_report": { + "response": { + "reportId": "report1-id", + "reportName": "compliance_report", + "filePath": "/Users/xyz/Desktop" + }, + "msg": "Successfully downloaded report 'compliance_report' to + '/Users/xyz/Desktop'." + } + }, + { + "create_report": { + "response": { + "reportId": "report2-id", + "viewGroupId": "inventory", + "viewsId": "inventory-view" + }, + "msg": "Successfully created or scheduled report 'inventory_report'." + } + } + ], + "changed": true, + "msg": "Multiple report operations completed successfully." + } + +# Case 8: Error Response +response_error: + description: Response returned when an error occurs during report operations. + returned: when an error occurs during execution + type: dict + sample: { + "response": [], + "changed": false, + "failed": true, + "msg": "Failed to create report: Invalid view_group_name 'InvalidGroup'." + } +""" + +from datetime import datetime +import time +import os +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.cisco.dnac.plugins.module_utils.dnac import ( + DnacBase +) +from ansible_collections.cisco.dnac.plugins.module_utils.validation import ( + validate_list_of_dicts +) +import json +import re + +# common approach when a module relies on optional dependencies that are not available during the validation process. +try: + import pytz + + HAS_PYZIPPER = True +except ImportError: + HAS_PYZIPPER = False + pyzipper = None + + +class Reports(DnacBase): + """Class containing member attributes for Report Workflow Manager module""" + + def __init__(self, module): + super().__init__(module) + self.supported_states = ["merged", "deleted"] + self.state = self.params.get("state") + self.result["response"] = [] + + def validate_input(self): + """ + Validate the fields provided in the playbook. + Checks the configuration provided in the playbook against a predefined specification + to ensure it adheres to the expected structure and data types. + + Parameters: + self: The instance of the class containing the 'config' attribute to be validated. + + Returns: + self - The current object with Global Pool, Reserved Pool, Network Servers information. 
+ """ + self.log("Starting playbook configuration validation for reports workflow", "INFO") + + config_spec = { + "generate_report": { + "type": "list", + "elements": "dict", + "required": True, + # fields for each generate_report item + "name": {"type": "str", "required": False}, + "new_report": {"type": "bool", "required": False, "default": True}, + "view_group_name": { + "type": "str", + "required": False, + "choices": [ + "Compliance", "Executive Summary", "Inventory", "SWIM", + "Access Point", "Long Term", "Network Devices", + "Group Pair Communication Analytics", "Telemetry", + "Group Communication Summary", "EoX", "Rogue and aWIPS", + "Licensing", "AI Endpoint Analytics", "Audit Log", + "Configuration Archive", "Client", "Security Advisories" + ] + }, + "tags": {"type": "list", "elements": "str", "default": []}, + "view_group_version": {"type": "str", "required": False, "default": "2.0.0"}, + + "schedule": { + "type": "dict", + "required": False, + "schedule_type": { + "type": "str", + "element": "str", + "required": True, + "choices": ["SCHEDULE_NOW", "SCHEDULE_LATER", "SCHEDULE_RECURRENCE"], + }, + "date_time": {"type": "str", "required": False}, + "time_zone": {"type": "str", "required": True}, + "recurrence": { + "type": "dict", + "recurrence_type": { + "type": "str", + "required": False, + # choose appropriate recurrence values for your system + "choices": ["WEEKLY", "MONTHLY"], + }, + "days": {"type": "list", "elements": "str", "required": False}, + "last_day_of_month": {"type": "bool", "required": False}, + "day_of_month": {"type": "int", "required": False}, + }, + "time": {"type": "int", "required": False}, + "start_date": {"type": "int", "required": False}, + }, + + "deliveries": { + "type": "list", + "elements": "dict", + "required": False, + "delivery_type": { + "type": "str", + "required": True, + "choices": ["DOWNLOAD", "NOTIFICATION", "WEBHOOK"], + }, + "file_path": {"type": "str", "required": False}, + "notification_endpoints": { + "type": "list", + "elements": "dict", + "required": False, + "email_addresses": {"type": "list", "elements": "str", "required": False}, + }, + "email_attach": {"type": "bool", "required": False, "default": False}, + "notify": { + "type": "list", + "elements": "str", + "required": False, + "choices": [["IN_QUEUE"], + ["IN_PROGRESS"], + ["COMPLETED"], + ["IN_QUEUE", "IN_PROGRESS"], + ["IN_PROGRESS", "IN_QUEUE"], + ["IN_QUEUE", "COMPLETED"], + ["COMPLETED", "IN_QUEUE"], + ["IN_PROGRESS", "COMPLETED"], + ["COMPLETED", "IN_PROGRESS"], + ["IN_QUEUE", "IN_PROGRESS", "COMPLETED"], + ["IN_QUEUE", "COMPLETED", "IN_PROGRESS"], + ["IN_PROGRESS", "IN_QUEUE", "COMPLETED"], + ["IN_PROGRESS", "COMPLETED", "IN_QUEUE"], + ["COMPLETED", "IN_QUEUE", "IN_PROGRESS"], + ["COMPLETED", "IN_PROGRESS", "IN_QUEUE"]], + }, + "webhook_name": {"type": "str", "required": False}, + }, + + "view": { + "type": "dict", + "required": False, + "view_name": {"type": "str", "required": True}, + "field_groups": { + "type": "list", + "elements": "dict", + "required": False, + "field_group_name": {"type": "str", "required": False}, + "field_group_display_name": {"type": "str", "required": False}, + "fields": { + "type": "list", + "elements": "dict", + "required": False, + "name": {"type": "str", "required": False}, + "display_name": {"type": "str", "required": False}, + }, + }, + "format": { + "type": "dict", + "required": False, + "format_type": { + "type": "str", + "required": True, + "choices": ["CSV", "PDF", "JSON", "TDE"] + }, + }, + "filters": { + "type": "list", + "elements": 
"dict", + "name": {"type": "str", "required": False}, + "display_name": {"type": "str", "required": False}, + "filter_type": { + "type": "str", + "required": False, + "choices": ["MULTI_SELECT", "MULTI_SELECT_TREE", "SINGLE_SELECT_ARRAY", "TIME_RANGE"], + }, + "value": { + "type": "raw", + "required": False + }, + }, + }, + } + } + + if not self.config: + self.msg = "Configuration is not available in the playbook for validation" + self.set_operation_result("failed", False, self.msg, "ERROR") + return self + + self.log("Validating configuration structure against specification", "DEBUG") + + valid_config, invalid_params = validate_list_of_dicts( + self.config, config_spec + ) + + if invalid_params: + self.msg = "Invalid parameters in playbook: {0}".format(invalid_params) + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + + if not valid_config: + self.log("Configuration validation failed. No valid config found: {0}".format(valid_config)) + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + + self.log("Configuration validated successfully: {0}".format(self.pprint(valid_config)), "INFO") + self.validated_config = valid_config + return self + + def input_data_validation(self, config): + """ + Validate and transform input data provided in the playbook configuration. + + This method performs comprehensive validation and transformation of report configuration + data, including schedule validation, delivery validation, filter processing, and + location resolution for multi-select tree filters. + + Parameters: + self (object): An instance of a class used for interacting with Cisco Catalyst Center. + config (dict): The configuration dictionary containing report generation details + including generate_report list with schedule, deliveries, view, and filter data + + Returns: + self: The current instance of the class with updated attribute. + + Description: + - Removes null values from configuration recursively + - Validates required fields for report generation + - Transforms schedule configuration based on schedule type + - Validates and transforms delivery configurations + - Processes location filters to resolve site hierarchy IDs + - Converts date/time strings to epoch format for API compatibility + - Logs all major validation steps and decision points for traceability + """ + + self.log( + "Starting input data validation for report configuration with {0} entries".format( + len(config.get("generate_report", [])) + ), + "INFO" + ) + # Clean entry in place (remove null fields at all levels) + self.log("Removing null values from configuration data", "DEBUG") + cleaned_entry = self.remove_nulls(config) + config.clear() + config.update(cleaned_entry) + self.log("Cleaned input data: {0}".format(self.pprint(config)), "DEBUG") + generate_report = config.get("generate_report", []) + if not generate_report: + self.msg = "The 'generate_report' field is missing or empty in the configuration." 
+ self.set_operation_result("failed", False, self.msg, "ERROR") + self.log("Configuration validation failed - no generate_report entries found", "ERROR") + return self + + self.log("Validating {0} report entries for required fields and structure".format( + len(generate_report)), "DEBUG") + + for entry_index, entry in enumerate(generate_report): + self.log("Processing report entry {0}: {1}".format( + entry_index + 1, entry.get("name", "unnamed")), "DEBUG") + + if not isinstance(entry, dict): + self.msg = "Each entry in 'generate_report' must be a dictionary." + self.set_operation_result("failed", False, self.msg, "ERROR") + return self + + # Validate required fields + required_fields = ["view_group_name", "view", "schedule", "deliveries"] + for field in required_fields: + if field not in entry: + self.msg = "Missing required field '{0}' in 'generate_report' entry.".format(field) + self.set_operation_result("failed", False, self.msg, "ERROR") + return self + + # Generate name if missing + if not entry.get("name"): + timestamp = datetime.now().strftime("%b %d %Y %I:%M %p") + entry["name"] = "{0} - {1} - {2}".format( + entry.get("data_category", "Report"), + entry.get("view", {}).get("view_name", "View"), + timestamp + ) + self.log("Generated report name: {0}".format(entry["name"]), "DEBUG") + + # Validate deliveries + deliveries = entry.get("deliveries", {}) + if deliveries: + self.log("Validating delivery configuration for report: {0}".format( + entry.get("name")), "DEBUG") + if not self.validate_deliveries(deliveries): + return self + + # Set default values + entry.setdefault("tags", []) + entry.setdefault("view_group_version", "2.0.0") + entry.get("view").setdefault("filters", []) + entry.get("view").setdefault("field_groups", []) + entry.get("view").setdefault("format", {"format_type": "CSV"}) + + # Validate and transform schedule configuration + if not self._validate_schedule_configuration(entry): + return self + + # Validate and transform view configuration + if not self._validate_view_configuration(entry): + return self + + self.log("Completed input data validation for all report entries successfully", "INFO") + return self + + def _validate_schedule_configuration(self, entry): + """ + Validate and transform schedule configuration for a report entry. + + Parameters: + entry (dict): The report entry containing schedule configuration. + + Returns: + bool: True if validation succeeds, False if validation fails. + """ + self.log("Validating schedule configuration for report: {0}".format( + entry.get("name")), "DEBUG") + + schedule = entry.get("schedule", {}) + # Validate timezone + time_zone = schedule.get("time_zone") + if not time_zone: + self.msg = "Missing required schedule field: 'time_zone'" + self.set_operation_result("failed", False, self.msg, "ERROR") + return False + + if time_zone not in pytz.all_timezones: + self.msg = f"Invalid time_zone '{time_zone}'.\ + Please provide a valid timezone as per the IANA timezone database (e.g., 'Asia/Calcutta')." + self.set_operation_result("failed", False, self.msg, "ERROR") + return False + + # Transform schedule_type to type + if "schedule" in entry and "schedule_type" in entry["schedule"]: + entry["schedule"]["type"] = entry["schedule"].pop("schedule_type") + + schedule_type = entry.get("schedule", {}).get("type") + valid_schedule_types = ["SCHEDULE_NOW", "SCHEDULE_LATER", "SCHEDULE_RECURRENCE"] + + if not schedule_type: + self.msg = "Missing required field 'schedule.type' in 'generate_report' entry." 
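The timezone check above is a plain membership test against the IANA zone list shipped with pytz (the import is guarded in a try/except near the top of the file); a minimal standalone illustration:

```python
import pytz

def is_valid_timezone(tz_name):
    # Same membership test used by the schedule validation above.
    return tz_name in pytz.all_timezones

print(is_valid_timezone("Asia/Calcutta"))    # True
print(is_valid_timezone("Asia/Not_A_City"))  # False
```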
+ self.set_operation_result("failed", False, self.msg, "ERROR") + return False + + if schedule_type not in valid_schedule_types: + self.msg = "Invalid schedule type '{0}'. Must be one of {1}.".format( + schedule_type, valid_schedule_types) + self.set_operation_result("failed", False, self.msg, "ERROR") + return False + + # Handle SCHEDULE_LATER validation + if schedule_type == "SCHEDULE_LATER": + return self._validate_schedule_later(entry) + + # Handle SCHEDULE_RECURRENCE validation + if schedule_type == "SCHEDULE_RECURRENCE": + return self._validate_schedule_recurrence(entry) + + self.log("Schedule configuration validated successfully for type: {0}".format( + schedule_type), "DEBUG") + return True + + def _validate_schedule_later(self, entry): + """ + Validate and process the 'SCHEDULE_LATER' schedule entry. + + This function checks if the provided schedule entry contains a valid + `date_time` field under `schedule`. If missing, it logs and sets the + operation result as failed. If present, it attempts to convert the + `date_time` string into epoch milliseconds. + + Expected `date_time` format: "YYYY-MM-DD HH:MM AM/PM" + + Parameters: + entry (dict): The schedule entry containing 'schedule.date_time'. + + Returns: + bool: + - True if 'date_time' is valid and successfully converted. + - False if 'date_time' is missing or invalid. + + Description: + - Checks for the presence of the required `schedule.date_time` field. + - Converts the `date_time` string into epoch milliseconds. + - Updates the `schedule.date_time` field with the converted epoch value. + - Sets operation result to failed if `date_time` is missing or invalid. + - Logs all validation and transformation steps for traceability. + """ + date_time = entry.get("schedule", {}).get("date_time") + if not date_time: + self.msg = "Missing required field 'schedule.date_time' for 'SCHEDULE_LATER'." + self.set_operation_result("failed", False, self.msg, "ERROR") + return False + + epoch_time = self.convert_to_epoch(date_time) + if epoch_time is None: + self.msg = "Invalid date_time format. Expected 'YYYY-MM-DD HH:MM AM/PM'." + self.set_operation_result("failed", False, self.msg, "ERROR") + return False + + # Additional Check: Ensure the scheduled time is not in the past + current_epoch = int(time.time() * 1000) # current time in milliseconds + if epoch_time <= current_epoch: + self.msg = ( + f"Invalid schedule: The provided date_time '{date_time}' is in the past. " + "Please provide a future date and time for 'SCHEDULE_LATER' and 'SCHEDULE_RECURRENCE'." + ) + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + return False + + entry["schedule"]["date_time"] = epoch_time + self.log("Converted date_time to epoch for SCHEDULE_LATER: {0}".format( + epoch_time), "DEBUG") + return True + + def _validate_schedule_recurrence(self, entry): + """ + Validate and transform recurrence-based schedule configuration. + + This method validates and transforms the input data provided in the playbook + configuration for schedules of type `SCHEDULE_RECURRENCE`. It ensures all required + fields are present, converts the provided date/time string into epoch format, + and restructures recurrence details to match Catalyst Center API requirements. + + Parameters: + self (object): An instance of a class used for interacting with Cisco Catalyst Center. + entry (dict): The configuration dictionary containing schedule and recurrence details + including date_time, time_zone, and recurrence rules. 
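A self-contained sketch of the date handling behind the SCHEDULE_LATER path above: convert_to_epoch (defined further down) parses the 'YYYY-MM-DD HH:MM AM/PM' string with time.strptime/time.mktime, so the value is interpreted in the local timezone of the Ansible controller, and the result must lie in the future:

```python
import time

def to_epoch_ms(date_str):
    # Mirrors convert_to_epoch below: local-time parse of "YYYY-MM-DD HH:MM AM/PM".
    try:
        parsed = time.strptime(date_str, "%Y-%m-%d %I:%M %p")
        return int(time.mktime(parsed) * 1000)
    except ValueError:
        return None

epoch_ms = to_epoch_ms("2025-12-25 09:00 AM")
now_ms = int(time.time() * 1000)
schedulable = epoch_ms is not None and epoch_ms > now_ms  # SCHEDULE_LATER requires a future time
```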
+ + Returns: + bool: + - True if validation and transformation are successful. + - False if required fields are missing or if date_time format is invalid. + + Description: + - Extracts and validates recurrence configuration from the playbook entry. + - Converts `date_time` from string ("YYYY-MM-DD HH:MM AM/PM") to epoch milliseconds. + - Replaces `date_time` with `time` and `start_date` in the schedule. + - Renames `recurrence_type` to `type` for API compatibility. + - Ensures required fields (`time_zone`, `time`, `start_date`, `recurrence`, `type`) are present. + - Performs additional recurrence pattern validation via `_validate_recurrence_pattern`. + - Logs validation steps and error messages for traceability. + """ + + schedule = entry.get("schedule", {}) + recurrence = schedule.get("recurrence", {}) + + # Transform recurrence_type to type + if "recurrence_type" in recurrence: + recurrence["type"] = recurrence.pop("recurrence_type") + + # Validate required fields + date_time = schedule.get("date_time") + if not date_time: + self.msg = "Missing required schedule field: 'date_time'" + self.set_operation_result("failed", False, self.msg, "ERROR") + return False + + # Convert date_time to epoch and set time/start_date + epoch_time = self.convert_to_epoch(date_time) + if epoch_time is None: + self.msg = "Invalid date_time format for SCHEDULE_RECURRENCE." + self.set_operation_result("failed", False, self.msg, "ERROR") + return False + + # Additional Check: Ensure the scheduled time is not in the past + current_epoch = int(time.time() * 1000) # current time in milliseconds + if epoch_time <= current_epoch: + self.msg = ( + f"Invalid schedule: The provided date_time '{date_time}' is in the past. " + "Please provide a future date and time for 'SCHEDULE_LATER' and 'SCHEDULE_RECURRENCE'." + ) + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + return False + + schedule.pop("date_time") + schedule["time"] = epoch_time + schedule["start_date"] = epoch_time + + # Validate required fields after transformation + required_fields = ["time_zone", "time", "start_date", "recurrence", "type"] + for field in required_fields: + if field not in schedule: + self.msg = "Missing required schedule field: '{0}'".format(field) + self.set_operation_result("failed", False, self.msg, "ERROR") + return False + + return self._validate_recurrence_pattern(recurrence) + + def _validate_recurrence_pattern(self, recurrence): + """ + Validate recurrence pattern configuration for scheduled reports. + + This method verifies that the recurrence configuration provided in the + playbook is valid and supported by the module. It checks the recurrence + type (e.g., WEEKLY, MONTHLY) and delegates to the appropriate validation + function based on the type. + + Parameters: + recurrence (dict): The recurrence configuration dictionary containing + the recurrence type and associated scheduling details. + + Returns: + bool: + - True if the recurrence pattern is valid according to the type-specific rules. + - False if the recurrence type is unsupported or validation fails. + + Description: + - Extracts the recurrence type from the provided dictionary. + - If recurrence type is "WEEKLY", validates using `_validate_weekly_recurrence`. + - If recurrence type is "MONTHLY", validates using `_validate_monthly_recurrence`. + - If recurrence type is not supported, logs an error, sets the operation + result as failed, and returns False. + - Provides detailed error messages for unsupported recurrence types. 
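To summarize the SCHEDULE_RECURRENCE transformation performed above, a hedged before/after sketch (to_epoch_ms is the helper sketched earlier; field names follow the code above):

```python
schedule = {
    "type": "SCHEDULE_RECURRENCE",
    "date_time": "2025-09-15 08:00 AM",
    "time_zone": "UTC",
    "recurrence": {"recurrence_type": "WEEKLY", "days": ["MONDAY"]},
}

# date_time is replaced by epoch-millisecond "time" and "start_date",
# and recurrence_type is renamed to "type" for the API payload.
epoch_ms = to_epoch_ms(schedule.pop("date_time"))
schedule["time"] = epoch_ms
schedule["start_date"] = epoch_ms
schedule["recurrence"]["type"] = schedule["recurrence"].pop("recurrence_type")
```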
+ """ + recurrence_type = recurrence.get("type") + + if recurrence_type == "WEEKLY": + return self._validate_weekly_recurrence(recurrence) + elif recurrence_type == "MONTHLY": + return self._validate_monthly_recurrence(recurrence) + else: + self.msg = "Recurrence type '{0}' is not supported in this module.".format( + recurrence_type) + self.set_operation_result("failed", False, self.msg, "ERROR") + return False + + def _validate_weekly_recurrence(self, recurrence): + """ + Validate weekly recurrence configuration for scheduled reports. + + This method ensures that the recurrence configuration for a weekly schedule + includes the required `days` field, which specifies the days of the week + (e.g., MONDAY, TUESDAY) when the report should run. + + Parameters: + recurrence (dict): The recurrence configuration dictionary containing + scheduling details for a weekly recurrence pattern. + + Returns: + bool: + - True if the `days` field exists and is valid. + - False if the `days` field is missing or invalid. + + Description: + - Extracts the `days` key from the recurrence dictionary. + - Validates that the `days` field is present. + - If missing, logs an error and sets the operation result to failed. + - Used as a sub-validation method for `_validate_recurrence_pattern`. + """ + recurrence_days = recurrence.get("days", []) + if "days" not in recurrence: + self.msg = "Missing required schedule field: 'recurrence_days'" + self.set_operation_result("failed", False, self.msg, "ERROR") + return False + + expected_days = {"MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", + "FRIDAY", "SATURDAY", "SUNDAY"} + + # Normalize input (uppercase for consistency) + recurrence_days = [d.upper() for d in recurrence_days] + + # If DAILY is provided, expand it to all days + if "DAILY" in recurrence_days: + recurrence["days"] = list(expected_days) + else: + # Validate input + if not set(recurrence_days).issubset(expected_days): + self.msg = "Invalid recurrence days. Must be DAILY or any of: MONDAY–SUNDAY." + self.set_operation_result("failed", False, self.msg, "ERROR") + return False + + self.log("Weekly recurrence validated with all 7 days", "DEBUG") + return True + + def _validate_monthly_recurrence(self, recurrence): + """ + Validate monthly recurrence configuration for scheduled reports. + + This method ensures that the recurrence configuration for a monthly + schedule is correctly defined based on either `last_day_of_month` or + `day_of_month`. + + Parameters: + recurrence (dict): The recurrence configuration dictionary containing + scheduling details for a monthly recurrence pattern. + + Returns: + bool: + - True if the monthly recurrence is valid. + - False if required fields are missing or invalid. + + Description: + - Checks whether the recurrence specifies `last_day_of_month` or `day_of_month`. + - If `last_day_of_month` is False, validates that `day_of_month` is an integer + between 1 and 31. + - If `last_day_of_month` is True, removes `day_of_month` (if present) since + it becomes redundant. + - Logs debug information for traceability. + - Sets the operation result to failed if validation fails. + """ + last_day_of_month = recurrence.get("last_day_of_month", False) + day_of_month = recurrence.get("day_of_month") + + if not last_day_of_month: + if not isinstance(day_of_month, int) or not (1 <= day_of_month <= 31): + self.msg = ( + "For MONTHLY recurrence, 'dayOfMonth' must be an integer between 1 and 31 " + "when 'lastDayOfMonth' is false." 
+                )
+                self.set_operation_result("failed", False, self.msg, "ERROR")
+                return False
+        else:
+            if "day_of_month" in recurrence:
+                self.log("'day_of_month' ignored because 'last_day_of_month' is true.", "DEBUG")
+                recurrence.pop("day_of_month")
+
+        self.log("Monthly recurrence validated successfully", "DEBUG")
+        return True
+
+    def convert_to_epoch(self, date_str):
+        """
+        Convert a date string in the format 'YYYY-MM-DD HH:MM AM/PM' to epoch time in milliseconds.
+
+        Parameters:
+            date_str (str): Date and time string to be converted.
+                Expected format: "YYYY-MM-DD HH:MM AM/PM"
+                (e.g., "2025-09-02 07:30 PM").
+
+        Returns:
+            int | None: Epoch time in milliseconds if conversion succeeds,
+                otherwise None if the input string is invalid or cannot be parsed.
+
+        """
+        try:
+            time_struct = time.strptime(date_str, "%Y-%m-%d %I:%M %p")
+            return int(time.mktime(time_struct) * 1000)
+        except ValueError as e:
+            self.log(f"Exception occurred while converting date string to epoch time: {e}", "ERROR")
+            return None
+
+    def validate_deliveries(self, deliveries):
+        """
+        Validate deliveries field according to rules:
+        1. Must be a list with exactly one object.
+        2. Type can be DOWNLOAD, NOTIFICATION (Email), or WEBHOOK.
+        3. Enforce field-specific requirements for each type.
+
+        Parameters:
+            deliveries (list): User-provided delivery configuration.
+                Expected format varies by delivery type.
+
+        Returns:
+            bool: True if the input passes validation and normalization.
+                False if the input is invalid, with error messages set
+                in self.msg and logged via self.set_operation_result.
+
+        Description:
+            - Validates delivery configuration structure and count
+            - Transforms delivery_type to type for API compatibility
+            - Validates type-specific requirements for each delivery method
+            - Normalizes NOTIFICATION delivery format for API calls
+            - Logs all major validation steps and decision points for traceability
+        """
+        self.log(
+            "Starting delivery configuration validation for {0} delivery entries".format(len(deliveries) if isinstance(deliveries, list) else "invalid"),
+            "INFO"
+        )
+        # 1. Check it's a list with exactly one object
+        if not isinstance(deliveries, list) or len(deliveries) != 1:
+            self.msg = (
+                "'deliveries' must be a list containing exactly one delivery type object."
+            )
+            self.set_operation_result("failed", False, self.msg, "ERROR")
+            return False
+
+        delivery = deliveries[0]
+        if not isinstance(delivery, dict):
+            self.msg = "Each delivery entry must be a dictionary."
+            self.set_operation_result("failed", False, self.msg, "ERROR")
+            return False
+
+        delivery["type"] = delivery.pop("delivery_type", None)
+        delivery_type = delivery.get("type")
+        self.log("Validating delivery type: {0}".format(delivery_type), "DEBUG")
+        if delivery_type not in ["DOWNLOAD", "NOTIFICATION", "WEBHOOK"]:
+            self.msg = (
+                f"Invalid delivery type '{delivery_type}'. Allowed types are: "
+                "DOWNLOAD, NOTIFICATION (Email), WEBHOOK."
+            )
+            self.set_operation_result("failed", False, self.msg, "ERROR")
+            return False
+
+        # 2. 
Type-specific validations + if delivery_type == "DOWNLOAD": + self.log("Processing DOWNLOAD delivery type - no additional validation required", "DEBUG") + # No extra validation needed; default case + pass + + elif delivery_type == "NOTIFICATION": + self.log("Processing NOTIFICATION delivery type with email validation", "DEBUG") + # Must have notification_endpoints with EMAIL type + endpoints = delivery.get("notification_endpoints", []) + if not isinstance(endpoints, list) or len(endpoints) != 1: + self.msg = "'notification_endpoints' must be a list containing exactly one endpoint." + self.set_operation_result("failed", False, self.msg, "ERROR") + return False + + endpoint = endpoints[0] + # Default type to EMAIL if not provided + endpoint_type = endpoint.get("type", "EMAIL") + if endpoint_type != "EMAIL": + self.msg = "'notification_endpoints[0].type' must be 'EMAIL'." + self.set_operation_result("failed", False, self.msg, "ERROR") + return False + + email_addresses = endpoint.get("email_addresses", []) + if not isinstance(email_addresses, list) or not all(isinstance(e, str) for e in email_addresses): + self.msg = "'email_addresses' must be a list of valid email strings." + self.set_operation_result("failed", False, self.msg, "ERROR") + return False + + self.log("Validated {0} email addresses for notification".format(len(email_addresses)), "DEBUG") + + # Map to API format + api_endpoint = { + "type": "EMAIL", + "emailAddresses": email_addresses + } + + # Optional email_attach + email_attach = delivery.get("email_attach", False) + if not isinstance(email_attach, bool): + self.msg = "'email_attach' must be a boolean value." + self.set_operation_result("failed", False, self.msg, "ERROR") + return False + + # Optional notify array + notify_values = ["IN_QUEUE", "IN_PROGRESS", "COMPLETED"] + notify = delivery.get("notify", []) + if notify and (not isinstance(notify, list) or not all(n in notify_values for n in notify)): + self.msg = f"'notify' must be a list containing only: {notify_values}." + self.set_operation_result("failed", False, self.msg, "ERROR") + return False + + # Final normalized structure + normalized_delivery = { + "type": "NOTIFICATION", + "notificationEndpoints": [api_endpoint], + "emailAttach": email_attach, + "notify": notify + } + + # Replace original delivery with normalized one + delivery.clear() + delivery.update(normalized_delivery) + self.log("Successfully normalized NOTIFICATION delivery configuration", "DEBUG") + + elif delivery_type == "WEBHOOK": + self.log("Processing WEBHOOK delivery type with webhook name validation", "DEBUG") + webhook_name = delivery.get("webhook_name") + if not webhook_name or not isinstance(webhook_name, str): + self.msg = "'webhook_name' is required for WEBHOOK delivery type." + self.set_operation_result("failed", False, self.msg, "ERROR") + return False + self.log("Validated webhook name: {0}".format(webhook_name), "DEBUG") + + self.log( + "Completed delivery configuration validation successfully for type: {0}".format(delivery_type), + "INFO" + ) + return True + + def _validate_view_configuration(self, entry): + """Validate and transform the view configuration including filters. + + This method ensures that the `view` section of a report configuration + is valid, properly structured, and transformed where necessary. It + validates the existence of the view, checks the filters list, and + processes specific filters such as Location filters. 
+ + Parameters: + entry (dict): The report configuration entry containing the view + definition with optional filters. + + Returns: + bool: + - True if the view configuration and its filters are valid. + - False if validation fails due to invalid structure or data. + + Description: + - Ensures `view` is a dictionary, otherwise fails validation. + - Ensures `filters`, if present, is a list of dictionaries. + - Transforms the key `filter_type` into `type` for consistency. + - Processes Location filters by delegating to `_process_location_filter`. + - Logs all significant validation steps and outcomes for traceability. + - Updates the operation result with error messages if validation fails. + """ + view = entry.get("view", {}) + if not isinstance(view, dict): + self.msg = "'view' must be a dictionary." + self.set_operation_result("failed", False, self.msg, "ERROR") + return False + + filters = view.get("filters", []) + if not filters: + return True + + if not isinstance(filters, list): + self.msg = "'filters' must be a list." + self.set_operation_result("failed", False, self.msg, "ERROR") + return False + + self.log("Processing {0} filter(s) for view configuration".format( + len(filters)), "DEBUG") + + for filter_index, filter_entry in enumerate(filters): + if not isinstance(filter_entry, dict): + self.msg = "Each filter entry must be a dictionary." + self.set_operation_result("failed", False, self.msg, "ERROR") + return False + + # Transform filter_type to type + if "filter_type" in filter_entry: + filter_entry["type"] = filter_entry.pop("filter_type") + + # Process location filters + if filter_entry.get("name") == "Location": + if not self._process_location_filter(filter_entry, filter_index): + return False + + # Process time range filters + if filter_entry.get("name") == "Time Range": + if not self._process_time_range_filter(filter_entry, filter_index): + return False + + self.log("View configuration validation completed successfully", "DEBUG") + return True + + def _process_time_range_filter(self, filter_entry, filter_index): + """Validate and process the 'Time Range' filter by converting date strings to epoch milliseconds. + + This method: + - Validates the presence and format of `start_date_time`, `end_date_time`, and `time_zone`. + - Converts readable date strings (e.g., "2025-10-09 07:30 PM") to epoch milliseconds. + - Updates the filter value to match the format expected by the DNAC API. + + Parameters: + filter_entry (dict): Filter configuration containing 'value' with date/time fields. + filter_index (int): Index of the filter being processed (for logging context). + + Returns: + bool: True if successful; False if validation or conversion fails. + """ + self.log( + f"Processing time range filter {filter_index + 1} with filter entry: {self.pprint(filter_entry)}", + "DEBUG" + ) + + filter_value = filter_entry.get("value") + if not filter_value: + self.log("No time range provided, please provide a valid time range.", "DEBUG") + self.msg = "No time range provided in 'Time Range' filter." + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + return False + + # Expecting a single dict, not a list + item = filter_value[0] if isinstance(filter_value, list) else filter_value + time_range_option = item.get("time_range_option") + if not time_range_option: + self.msg = "Missing required field 'time_range_option' in 'Time Range' filter." 
+ self.set_operation_result("failed", False, self.msg, "ERROR") + return False + + if time_range_option in ["LAST_7_DAYS", "LAST_24_HOURS", "LAST_3_HOURS"]: + updated_value = { + "timeRangeOption": item.get("time_range_option", "Custom"), + "displayValue": filter_entry.get("display_value", filter_entry["name"]), + } + filter_entry["value"] = updated_value + self.log(f"Time range option '{time_range_option}' does not require further processing.", "DEBUG") + return True # No further processing needed for these options + + required_fields = ["start_date_time", "end_date_time", "time_zone"] + for field in required_fields: + if field not in item or not item[field]: + self.msg = f"Missing required field '{field}' in 'Time Range' filter." + self.set_operation_result("failed", False, self.msg, "ERROR") + return False + + # Validate timezone + time_zone = item["time_zone"] + if time_zone not in pytz.all_timezones: + self.msg = ( + f"Invalid time_zone '{time_zone}'. " + "Please use a valid IANA timezone (e.g., 'Asia/Calcutta')." + ) + self.set_operation_result("failed", False, self.msg, "ERROR") + return False + + # Convert dates to epoch + start_str, end_str = item["start_date_time"], item["end_date_time"] + self.log(f"Converting time range: start={start_str}, end={end_str}", "DEBUG") + + start_epoch = self.convert_to_epoch(start_str) + end_epoch = self.convert_to_epoch(end_str) + + if start_epoch is None or end_epoch is None: + self.msg = ( + "Invalid date format in 'Time Range' filter. " + "Expected 'YYYY-MM-DD HH:MM AM/PM'." + ) + self.set_operation_result("failed", False, self.msg, "ERROR") + return False + + # Prepare final structure + display_value = f"{start_str} to {end_str}" + updated_value = { + "timeRangeOption": item.get("time_range_option", "Custom"), + "startDateTime": start_epoch, + "endDateTime": end_epoch, + "timeZone": time_zone, + "displayValue": display_value, + } + + filter_entry["value"] = updated_value + filter_entry["display_value"] = filter_entry.get("display_value", filter_entry["name"]) + + self.log( + f"Successfully processed time range filter: start={start_epoch}, end={end_epoch}, zone={time_zone}", + "DEBUG" + ) + return True + + def _process_location_filter(self, filter_entry, filter_index): + """Process and validate the 'Location' filter by resolving site hierarchy IDs. + + This method validates the structure of the 'Location' filter, ensures + its values are properly formatted, and replaces each location string + with the corresponding site hierarchy ID retrieved from the site + database. If validation or resolution fails, the operation result is + marked as failed. + + Parameters: + filter_entry (dict): The filter configuration dictionary that + must contain a 'value' list of locations. + filter_index (int): The index of the filter being processed, + used for logging purposes. + + Returns: + bool: + - True if the location filter is valid and successfully + resolved to site hierarchy IDs. + - False if validation fails or site resolution is unsuccessful. + + Description: + - Ensures `value` exists in the filter; initializes empty list if missing. + - Validates that `value` is a list of dictionaries, each containing + a `value` key (location string). + - Uses `display_value` if provided, otherwise defaults to the location string. + - Calls `get_site()` to resolve each location to its corresponding + site hierarchy ID. + - Replaces the original filter `value` with a list of resolved + site hierarchy IDs and display values. 
+ - Logs detailed debug information at each step for traceability. + - Updates the operation result with clear error messages when validation fails. + """ + self.log("Processing location filter {0} with filter entry as {1}".format(filter_index + 1, self.pprint(filter_entry)), "DEBUG") + + filter_value = filter_entry.get("value") + self.log("Current location filter value: {0}".format(filter_value), "DEBUG") + if not filter_entry.get("display_value"): + filter_entry["display_value"] = filter_entry["name"] + + if not filter_value: + self.log("No locations provided in filter; initializing empty list", "DEBUG") + filter_entry["value"] = [] + return True + + updated_values = [] + for item_index, item in enumerate(filter_value): + if not isinstance(item, dict) or "value" not in item: + self.msg = "Each item in 'Location' filter value must contain 'value'." + self.log(self.msg, "ERROR") + self.set_operation_result("failed", False, self.msg, "ERROR") + return False + + display_value = item.get("display_value", item["value"]) + + # Resolve site hierarchy ID + self.log("Resolving site hierarchy for location: {0}".format( + item["value"]), "DEBUG") + + site_exist, site_id = self.get_site_id(item["value"]) + if not site_exist: + self.msg = "Failed to retrieve site information for location as site doesn't exist: {0}".format( + item["value"]) + self.log(self.msg, "ERROR") + self.set_operation_result("failed", False, self.msg, "ERROR") + return False + + updated_values.append({ + "value": site_id, + "display_value": display_value + }) + + self.log("Resolved location '{0}' to site ID: {1}".format( + item["value"], site_id), "DEBUG") + + filter_entry["value"] = updated_values + self.log("Successfully processed location filter with {0} locations".format( + len(updated_values)), "DEBUG") + return True + + def get_webhook_destination_in_ccc(self, name): + """ + Retrieve details of Rest Webhook destinations present in Cisco Catalyst Center. + + This method searches for a specific webhook destination by name using pagination + to handle large numbers of webhook destinations efficiently. + Parameters: + self (object): An instance of a class used for interacting with Cisco Catalyst Center. + name (str): The name of the syslog destination to retrieve details for. + Returns: + dict: A dictionary containing details of Rest Webhook destination present in Cisco Catalyst Center, + or None if no Rest Webhook destinations are found. + Description: + This function retrieves the details of Rest Webhook destinations present in Cisco Catalyst Center + using the 'event_management' API endpoint with the 'get_webhook_destination' function. + If an error occurs during the retrieval process, it logs the error message and raises an Exception. 
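+
+        Example:
+            A matching destination entry (hypothetical values) as returned by the
+            API and selected by its 'name' field:
+
+            {"name": "My-Webhook", "webhookId": "1a2b3c4d-0000-1111-2222-333344445555"}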
+ """ + + self.log( + "Starting webhook destination retrieval for name='{0}'".format(name), + "INFO" + ) + try: + offset = 0 + limit = 10 + max_retries = 10 # Prevent infinite loops + retry_count = 0 + + while retry_count < max_retries: + self.log( + "Fetching webhook destinations with offset={0}, limit={1}, attempt={2}".format( + offset * limit, limit, retry_count + 1 + ), + "DEBUG" + ) + try: + response = self.dnac._exec( + family="event_management", + function="get_webhook_destination", + params={"offset": offset * limit, "limit": limit}, + ) + offset = offset + 1 + self.log( + "Received API response from 'get_webhook_destination': {0}".format( + str(response) + ), + "DEBUG", + ) + response = response.get("statusMessage", []) + + if not response: + self.log( + "There is no Rest Webhook destination present in Cisco Catalyst Center", + "INFO", + ) + return response + + for destination in response: + if destination.get("name") == name: + self.log( + "Webhook Destination '{0}' present in Cisco Catalyst Center".format( + name + ), + "INFO", + ) + return destination + + self.log( + "Webhook Destination '{0}' not found in Cisco Catalyst Center. Retrying after 1 second...".format(name), + "WARNING", + ) + offset += 1 + retry_count += 1 + + time.sleep(1) + except Exception as e: + expected_exception_msgs = [ + "Expecting value: line 1 column 1", + "not iterable", + "has no attribute", + ] + for msg in expected_exception_msgs: + if msg in str(e): + self.log( + "An exception occurred while checking for the Webhook destination with the name '{0}'. " + "It was not found in Cisco Catalyst Center.".format( + name + ), + "WARNING", + ) + return None + self.log( + "Webhook destination '{0}' not found after checking all available destinations".format(name), + "WARNING" + ) + self.log( + "Completed webhook destination retrieval for name='{0}' - not found after exhaustive search".format(name), + "INFO" + ) + return None + + except Exception as e: + self.status = "failed" + self.msg = "Error while getting the details of Webhook destination(s) present in Cisco Catalyst Center: {0}".format( + str(e) + ) + self.log(self.msg, "ERROR") + self.check_return_status() + + def get_want(self, config): + """ + This method processes the playbook configuration to extract and validate report + generation requirements, storing them in the instance's 'want' attribute for + further processing during state comparison and execution. + + Parameters: + self (object): An instance of a class used for interacting with Cisco Catalyst Center. + config (dict): The configuration dictionary containing image import and other details. + + Returns: + self: The current instance of the class with updated 'want' attributes. + + Description: + - Extracts generate_report configuration from playbook input + - Validates presence of required report generation configuration + - Stores desired configuration state for comparison with current state + - Logs all major decision points and validation steps for traceability + - Provides foundation for state-based configuration management + """ + self.log("Retrieving 'want' attributes from configuration: {0}".format(self.pprint(config)), "DEBUG") + + want = {"generate_report": config.get("generate_report", [])} + if not want["generate_report"]: + self.msg = "The 'generate_report' field is missing or empty in the configuration." 
+ self.set_operation_result("failed", False, self.msg, "ERROR") + return self + + self.want = want + self.log("Desired State (want): {0}".format(self.pprint(self.want)), "INFO") + return self + + def get_all_view_groups(self, view_group_name): + """ + Retrieve all view groups from Cisco Catalyst Center and find matching view group. + + This method retrieves all available view groups from Cisco Catalyst Center and + searches for a specific view group by name to extract its ID and data category. + + + Parameters: + self (object): An instance of a class used for interacting with Cisco Catalyst Center. + view_group_name (str): The name of the view group to retrieve. + + Returns: + tuple[str, str] | object: + - (view_group_id, data_category): When a matching view group is found. + - self: If no view group is found or an error occurs, with error details + logged and `self.msg` populated. + + Description: + - Retrieves all view groups using the reports API + - Searches through view groups to find exact name match + - Extracts view group ID and data category for matched view group + - Logs all major decision points and API interactions for traceability + - Returns structured data for further report configuration processing + """ + self.log("Retrieving all view groups for view_group_name: {0}".format(self.pprint(view_group_name)), "DEBUG") + try: + response = self.dnac._exec( + family="reports", + function="get_all_view_groups", + ) + self.log("Response from get_all_view_groups: {0}".format(self.pprint(response)), "DEBUG") + if not response: + self.msg = "Failed to retrieve view groups from Cisco Catalyst Center." + self.set_operation_result("failed", False, self.msg, "ERROR") + return self + + self.log( + "Processing {0} view groups to find match for '{1}'".format( + len(response), view_group_name + ), + "DEBUG" + ) + + view_group_id = None + data_category = None + for view_group_detail in response: + if view_group_detail.get("name") == view_group_name: + self.log("Found data_category '{0}' in view groups.".format(view_group_name), "DEBUG") + view_group_id = view_group_detail.get("viewGroupId") + data_category = view_group_detail.get("category") + self.log("View group ID and data_category for view_group_name '{0}': {1}, {2}" + .format(view_group_name, view_group_id, data_category), "DEBUG") + break + + if not view_group_id: + self.msg = "No view group found for view_group_name '{0}'.".format(view_group_name) + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + return self + + self.log( + "Successfully retrieved view group details for '{0}' - ID: {1}, category: {2}".format( + view_group_name, view_group_id, data_category + ), + "INFO" + ) + self.log( + "Completed view groups retrieval and search for view_group_name='{0}'".format( + view_group_name + ), + "INFO" + ) + return view_group_id, data_category + except Exception as e: + self.msg = "An error occurred while retrieving all view groups: {0}".format(str(e)) + self.set_operation_result("failed", False, self.msg, "ERROR") + return self + + def get_views_for_a_given_view_group(self, view_group_id, view_name): + """ + Retrieve all views for a given view group from Cisco Catalyst Center and find matching view. + + This method retrieves all available views for a specific view group and searches for a + particular view by name to extract its ID for report configuration. + + Parameters: + self (object): An instance of a class used for interacting with Cisco Catalyst Center. 
+ view_group_id (str): The ID of the view group for which to retrieve views. + view_name (str): The name of the view to retrieve. If not provided, all views will be returned. + + Returns: + str | object: + - If a matching view is found: returns the view ID (str). + - If no matching view is found or an error occurs: returns `self` with the operation + result set to "failed". + + Description: + - Retrieves views for a specific view group using the reports API + - Searches through views to find exact name match + - Extracts view ID for matched view name + - Logs all major decision points and API interactions for traceability + - Returns view ID for further report configuration processing + """ + self.log( + "Starting view retrieval for view_group_id='{0}', view_name='{1}'".format( + view_group_id, view_name + ), + "INFO" + ) + try: + self.log("Fetching views from Cisco Catalyst Center for view group ID: {0}".format( + view_group_id), "DEBUG") + response = self.dnac._exec( + family="reports", + function="get_views_for_a_given_view_group", + params={"view_group_id": view_group_id}, + ) + self.log("Response from get_views_for_a_given_view_group: {0}".format(self.pprint(response)), "DEBUG") + if not response: + self.msg = "Failed to retrieve views for view group ID '{0}'.".format(view_group_id) + self.set_operation_result("failed", False, self.msg, "ERROR") + return self + + all_views_detail = response.get("views") + self.log("All views detail for view group ID '{0}': {1}".format(view_group_id, self.pprint(all_views_detail)), "DEBUG") + if not all_views_detail: + self.msg = "No views found for view group ID '{0}'.".format(view_group_id) + self.set_operation_result("failed", False, self.msg, "ERROR") + return self + + # Match the desired view by name + if view_name: + views_detail = None + for view in all_views_detail: + if view.get("viewName") == view_name: + views_detail = view + self.log("Found matching view '{0}' with ID='{1}' in view group '{2}'".format( + view_name, views_detail.get("viewId"), view_group_id + ), + "DEBUG" + ) + break + if not views_detail: + self.msg = "No views found with name '{0}' in view group ID '{1}'.".format(view_name, view_group_id) + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + return self + + view_id = views_detail.get("viewId") + if not view_id: + self.msg = "No views found with name '{0}' in view group ID '{1}'.".format( + view_name, view_group_id + ) + self.log( + "View search failed - '{0}' not found in view group ID '{1}'".format( + view_name, view_group_id + ), + "ERROR" + ) + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + return self + + self.log( + "Successfully retrieved view ID '{0}' for view_name '{1}' in view group '{2}'".format( + view_id, view_name, view_group_id + ), + "INFO" + ) + self.log( + "Completed view retrieval for view_group_id='{0}', view_name='{1}'".format( + view_group_id, view_name + ), + "INFO" + ) + return view_id + except Exception as e: + self.msg = "An error occurred while retrieving views for view group ID '{0}': {1}".format(view_group_id, str(e)) + self.set_operation_result("failed", False, self.msg, "ERROR") + return self + + def fetch_view_details(self, view_group_id, view_id): + """ + Fetch view details for a given view group and view ID from Cisco Catalyst Center. + + This method retrieves comprehensive view metadata including field groups, filters, + format options, and other configuration details for a specific view within a view group. 
+ + Parameters: + self (object): An instance of a class used for interacting with Cisco Catalyst Center. + view_group_id (str): The ID of the view group. + view_id (str): The ID of the view. + + Returns: + self: The current instance of the class with updated 'view_details' attribute. + + Description: + - Retrieves detailed view metadata using the reports API + - Stores view configuration including field groups, filters, and format options + - Validates API response structure and content + - Logs all major decision points and API interactions for traceability + - Provides view metadata for report configuration validation and processing + """ + self.log("Fetching view details for view group ID: {0}, view ID: {1}".format(view_group_id, view_id), "DEBUG") + try: + response = self.dnac._exec( + family="reports", + function="get_view_details_for_a_given_view_group_and_view", + params={"view_group_id": view_group_id, "view_id": view_id}, + ) + self.log("Response from get_view_details_for_a_given_view_group_and_view: {0}".format(self.pprint(response)), "DEBUG") + if not response: + self.msg = "Failed to fetch view details for view group ID '{0}' and view ID '{1}'.".format(view_group_id, view_id) + self.set_operation_result("failed", False, self.msg, "ERROR") + return self + + # Validate response structure + self.log("Validating response structure and extracting view metadata", "DEBUG") + + # Log key view details for debugging + view_name = response.get("name", "unknown") + field_groups_count = len(response.get("fieldGroups", [])) + filters_count = len(response.get("filters", [])) + format_info = response.get("format", {}) + + self.log( + "View details retrieved - name: '{0}', field_groups: {1}, filters: {2}, format: {3}".format( + view_name, field_groups_count, filters_count, format_info.get("name", "unknown") + ), + "DEBUG" + ) + + # Store view details for further processing + self.view_details = response + + self.log( + "Successfully stored view details for view_group_id='{0}', view_id='{1}'".format( + view_group_id, view_id + ), + "INFO" + ) + self.log( + "Completed view details retrieval for view_group_id='{0}', view_id='{1}'".format( + view_group_id, view_id + ), + "INFO" + ) + + except Exception as e: + self.msg = "An error occurred while fetching view details: {0}".format(str(e)) + self.set_operation_result("failed", False, self.msg, "ERROR") + + return self + + def get_have(self, config): + """ + Retrieve and store the current state of reports from Cisco Catalyst Center. + + This method processes report configurations to determine their current state in + Catalyst Center, including existence verification, webhook validation, and + metadata retrieval for comparison with desired state. + + Parameters: + self (object): An instance of a class used for interacting with Cisco Catalyst Center. + config (dict): The configuration dictionary containing report details. + + Returns: + self: The current instance of the class with updated 'have' attributes. 
+ + Description: + - Validates webhook destinations for WEBHOOK delivery types + - Resolves view group names to IDs and data categories + - Maps view names to view IDs within view groups + - Checks for existing scheduled reports by name + - Fetches detailed view metadata for non-deleted states + - Logs all major decision points and API interactions for traceability + """ + self.log("Retrieving 'have' attributes from configuration: {0}".format(self.pprint(config)), "DEBUG") + generate_report = config.get("generate_report", []) + + if not generate_report: + self.msg = "The 'generate_report' field is missing or empty in the configuration." + self.set_operation_result("failed", False, self.msg, "ERROR") + self.log("Current state retrieval failed - no generate_report entries found", "ERROR") + return self + + for entry_index, report_entry in enumerate(generate_report): + report_name = report_entry.get("name", "unnamed") + self.log( + "Processing current state for report entry {0}: '{1}'".format(entry_index + 1, report_name), + "DEBUG" + ) + + # Validate webhook destinations for WEBHOOK delivery type + if not self._validate_webhook_destinations(report_entry): + return self + + # Resolve view group information + if not self._resolve_view_group_details(report_entry): + return self + + # Check for existing scheduled reports + if not self._check_existing_scheduled_reports(report_entry): + return self + + # Fetch view details for non-deleted states + if self.state != "deleted": + self.log("Fetching detailed view metadata for report configuration validation", "DEBUG") + for report_entry in generate_report: + view_group_id = report_entry.get("view_group_id") + view_id = report_entry.get("view", {}).get("view_id") + if view_group_id and view_id: + self.fetch_view_details(view_group_id, view_id) + + # Store current state + have = {"generate_report": generate_report} + self.have = have + self.msg = "Successfully retrieved the details from the Cisco Catalyst Center" + + self.log("Current State (have): {0}".format(str(self.pprint(self.have))), "INFO") + self.log( + "Completed current state retrieval from Catalyst Center successfully", + "INFO" + ) + return self + + def _validate_webhook_destinations(self, report_entry): + """ + Validate webhook destinations for WEBHOOK delivery type. + + Parameters: + report_entry (dict): The report entry to validate. + + Returns: + bool: True if validation succeeds, False if validation fails. + """ + deliveries = report_entry.get("deliveries", []) + if not deliveries: + return True + + for delivery in deliveries: + if delivery.get("type") == "WEBHOOK" and self.state != "deleted": + webhook_name = delivery.get("webhook_name") + if not webhook_name: + self.msg = "webhook_name is required for WEBHOOK delivery type." 
+ self.set_operation_result("failed", False, self.msg, "ERROR") + return False + + self.log("Validating webhook destination: {0}".format(webhook_name), "DEBUG") + + webhook_destinations = self.get_webhook_destination_in_ccc(webhook_name) + if not webhook_destinations: + self.msg = "No Webhook destination found in Cisco Catalyst Center for '{0}'.".format( + webhook_name + ) + self.set_operation_result("failed", False, self.msg, "ERROR") + return False + + webhook_id = webhook_destinations.get("webhookId") + delivery["webhook_id"] = webhook_id + + self.log("Successfully validated webhook destination '{0}' with ID: {1}".format( + webhook_name, webhook_id), "DEBUG") + + return True + + def _resolve_view_group_details(self, report_entry): + """ + Resolve view group name to ID and data category. + + Parameters: + report_entry (dict): The report entry to process. + + Returns: + bool: True if resolution succeeds, False if resolution fails. + """ + view_group_name = report_entry.get("view_group_name") + if not view_group_name: + self.msg = "Mandatory parameter 'view_group_name' not found in report entry." + self.set_operation_result("failed", False, self.msg, "ERROR") + return False + + self.log("Resolving view group details for: {0}".format(view_group_name), "DEBUG") + + view_group_id, data_category = self.get_all_view_groups(view_group_name) + if not view_group_id: + return False + + report_entry["view_group_id"] = view_group_id + report_entry["data_category"] = data_category + + self.log("Resolved view group '{0}' to ID: {1}, category: {2}".format( + view_group_name, view_group_id, data_category), "DEBUG") + + # Resolve view ID within the view group + view_name = report_entry.get("view", {}).get("view_name") + if not view_name: + self.msg = "view_name is required in view configuration." + self.set_operation_result("failed", False, self.msg, "ERROR") + return False + + view_id = self.get_views_for_a_given_view_group(view_group_id, view_name) + if not view_id: + return False + + report_entry["view"]["view_id"] = view_id + + self.log("Resolved view '{0}' to ID: {1} in view group '{2}'".format( + view_name, view_id, view_group_name), "DEBUG") + + return True + + def _check_existing_scheduled_reports(self, report_entry): + """ + Check for existing scheduled reports by name. + + Parameters: + report_entry (dict): The report entry to check. + + Returns: + bool: True if check succeeds, False if check fails. + """ + view_group_id = report_entry.get("view_group_id") + view_id = report_entry.get("view", {}).get("view_id") + report_name = report_entry.get("name") + + if not report_name: + self.msg = "The 'name' field is mandatory in the 'generate_report' configuration." 
+ self.set_operation_result("failed", False, self.msg, "ERROR") + return False + + self.log("Checking for existing scheduled reports for: {0}".format(report_name), "DEBUG") + + try: + response = self.dnac._exec( + family="reports", + function="get_list_of_scheduled_reports", + params={"viewGroupId": view_group_id, "viewId": view_id} + ) + self.log("Response from get_list_of_scheduled_reports: {0}".format( + self.pprint(response)), "DEBUG") + + except Exception as e: + error_str = str(e) + if "status_code: 404" in error_str or "\"status\":404" in error_str: + self.log("No existing reports found (404 response) for report: {0}".format( + report_name), "DEBUG") + report_entry["exists"] = False + return True + else: + self.msg = "An error occurred while checking for existing reports: {0}".format(str(e)) + self.set_operation_result("failed", False, self.msg, "ERROR") + return False + + if not response: + self.log("No scheduled reports found for view group/view combination", "DEBUG") + report_entry["exists"] = False + return True + + # Search for report by name + get_list_of_scheduled_reports = response or [] + report_found = False + + for report in get_list_of_scheduled_reports: + if report.get("name") == report_name: + self.log("Found existing report '{0}' with ID: {1}".format( + report_name, report.get("reportId")), "DEBUG") + + report_entry["report_id"] = report.get("reportId") + report_entry["view_group_id"] = report.get("viewGroupId") + report_entry["view"]["view_id"] = report.get("view", {}).get("viewId") + report_entry["exists"] = True + report_found = True + break + + if not report_found: + self.log("Report '{0}' does not exist in current state".format(report_name), "DEBUG") + report_entry["exists"] = False + + return True + + def create_n_schedule_reports(self, generate_report): + """ + Create or schedule reports using two-phase parallel processing approach. + + This method processes report configurations in two distinct phases: + Phase 1: Trigger all report creation/scheduling operations in parallel + Phase 2: Handle downloads only for reports requiring immediate download + + Parameters: + self (object): An instance of a class used for interacting with Cisco Catalyst Center. + generate_report (list): A list of report configurations to be created or scheduled. + + Returns: + self: The current instance of the class with updated 'result' attribute. + + Description: + - Phase 1: Creates or schedules all reports via non-blocking API calls + - Handles existing report detection without triggering downloads + - Phase 2: Processes downloads only for DOWNLOAD delivery with SCHEDULE_NOW + - Provides significant performance improvement for multiple report scenarios + - Maintains proper error handling and validation throughout both phases + - Logs phase separation and batch operation progress for traceability + """ + self.log( + "Starting parallel report creation workflow with {0} reports using two-phase approach".format( + len(generate_report) if generate_report else 0 + ), + "DEBUG" + ) + if not generate_report: + self.msg = "The 'generate_report' field is missing or empty in the configuration." 
+ self.set_operation_result("failed", False, self.msg, "ERROR") + return self + + # Phase 1: Create/Schedule all reports (parallel processing) + created_entries = [] # list of tuples: (report_entry, report_id) + pending_downloads = [] # same shape for both newly created and existing entries that require download + + self.log( + "Phase 1: Starting parallel report creation for {0} reports".format( + len(generate_report) + ), + "INFO" + ) + try: + for report_index, report_entry in enumerate(generate_report): + report_name = report_entry.get("name", "unnamed") + self.log( + "Processing report {0}/{1}: '{2}' (phase 1 - trigger)".format( + report_index + 1, len(generate_report), report_name + ), + "DEBUG" + ) + + # Validate required fields + if not self._validate_report_entry_fields(report_entry): + self.log( + "Phase 1: Field validation failed for report '{0}' - terminating workflow".format( + report_name + ), + "ERROR" + ) + return self + + self.log( + "Phase 1: Field validation successful for report '{0}'".format(report_name), + "DEBUG" + ) + + # Handle existing reports (do NOT trigger download here) + if report_entry.get("exists") and report_entry.get("new_report") is False: + self.log( + "Phase 1: Processing existing report '{0}' without immediate download".format( + report_name + ), + "INFO" + ) + # Build same result structure as _handle_existing_report but DO NOT download yet + report_id = report_entry.get("report_id") + result = { + "response": { + "report_id": report_id, + "view_group_id": report_entry.get("view_group_id"), + "view_id": report_entry.get("view", {}).get("view_id"), + }, + "msg": "Report '{0}' already exists.".format(report_name), + } + self.result["response"].append({"create_report": result}) + self.log( + "Phase 1: Existing report '{0}' added to results with ID: {1}".format( + report_name, report_id + ), + "DEBUG" + ) + # If download requested and immediate, schedule download in phase 2 + if (self._is_download_requested(report_entry) and + self._should_download_immediately(report_entry)): + pending_downloads.append((report_entry, report_id)) + self.log( + "Phase 1: Existing report '{0}' scheduled for Phase 2 download".format( + report_name + ), + "INFO" + ) + else: + self.log( + "Phase 1: No immediate download required for existing report '{0}'".format( + report_name + ), + "DEBUG" + ) + + continue + + # Create new report (non-blocking API call) + self.log( + "Phase 1: Creating new report '{0}' via API call".format(report_name), + "INFO" + ) + report_id = self._create_new_report(report_entry) + if not report_id: + # _create_new_report already set error msg and status + self.log( + "Phase 1: Report creation failed for '{0}' - error already logged by creation method".format( + report_name + ), + "ERROR" + ) + self.log( + "Phase 1: Terminating workflow due to report creation failure", + "ERROR" + ) + return self + + self.log( + "Phase 1: Successfully created report '{0}' with ID: {1}".format( + report_name, report_id + ), + "INFO" + ) + + # Collect for potential download in phase 2 + created_entries.append((report_entry, report_id)) + self.log( + "Phase 1: Report '{0}' added to created entries for Phase 2 processing".format( + report_name + ), + "DEBUG" + ) + + self.log( + "Phase 1 completed successfully: {0} reports created, {1} existing reports processed, {2} pending downloads".format( + len(created_entries), len(generate_report) - len(created_entries), len(pending_downloads) + ), + "INFO" + ) + + # Phase 2: perform downloads only for those needing immediate download + 
all_download_candidates = created_entries + pending_downloads + self.log( + "Phase 2: Starting download processing for {0} total candidates".format( + len(all_download_candidates) + ), + "INFO" + ) + + if not all_download_candidates: + self.log("Phase 2: No download candidates found - skipping download phase", "DEBUG") + + download_count = 0 + # Combine created entries and pending existing reports + for candidate_index, (entry, report_id) in enumerate(all_download_candidates): + report_name = entry.get("name", "unnamed") + self.log( + "Phase 2: Processing download candidate {0}/{1}: '{2}' (report_id={3})".format( + candidate_index + 1, len(all_download_candidates), report_name, report_id + ), + "DEBUG" + ) + try: + if not self._should_download_immediately(entry): + self.log( + "Phase 2: No immediate download required for report '{0}' - skipping".format( + report_name + ), + "DEBUG" + ) + + continue + + self.log( + "Phase 2: Immediate download required for report '{0}' - initiating download".format( + report_name + ), + "DEBUG" + ) + success = self._download_report_if_needed(entry, report_id) + if not success: + # _download_report_if_needed sets error and result already + self.log( + "Phase 2: Download failed for report '{0}' - error already logged by download method".format( + report_name + ), + "ERROR" + ) + self.log( + "Phase 2: Terminating workflow due to download failure", + "ERROR" + ) + return self + download_count += 1 + self.log( + "Phase 2: Download completed successfully for report '{0}'".format( + report_name + ), + "INFO" + ) + + except Exception as e: + self.msg = "Exception during post-create download handling for report '{0}': {1}".format(entry.get("name"), str(e)) + self.set_operation_result("failed", False, self.msg, "ERROR") + self.log("Exception during phase 2 downloads: {0}".format(str(e)), "ERROR") + return self + + self.log( + "Phase 2 completed: {0} downloads processed successfully".format( + download_count + ), + "INFO" + ) + + self.log( + "Completed report creation and scheduling workflow successfully for {0} reports".format( + len(generate_report) + ), + "INFO" + ) + + except Exception as e: + self.msg = "An error occurred while creating or scheduling reports: {0}".format(str(e)) + self.set_operation_result("failed", False, self.msg, "ERROR") + self.log( + "Exception during report creation workflow: {0}".format(str(e)), + "ERROR" + ) + return self + + return self + + def _validate_report_entry_fields(self, report_entry): + """ + Validate required fields for a report entry. + + Parameters: + report_entry (dict): The report entry to validate. + + Returns: + bool: True if validation succeeds, False if validation fails. + """ + required_fields = { + "name": "The 'name' field is mandatory in the 'generate_report' configuration.", + "view_group_id": "The 'view_group_id' field is mandatory in the 'generate_report' configuration.", + } + + for field, error_msg in required_fields.items(): + if not report_entry.get(field): + self.msg = error_msg + self.set_operation_result("failed", False, self.msg, "ERROR") + return False + + if not report_entry.get("view", {}).get("view_id"): + self.msg = "The 'view_id' field is mandatory in the 'view' configuration." + self.set_operation_result("failed", False, self.msg, "ERROR") + return False + + return True + + def _create_new_report(self, report_entry): + """ + Create a new report via API call. + + Parameters: + report_entry (dict): The report entry to create. + + Returns: + bool: True if creation succeeds, False if creation fails. 
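+
+        Example:
+            If a report named 'Compliance_Report' (hypothetical name) already exists
+            and 'new_report' is true, a timestamp suffix is appended before creation,
+            producing a unique name such as 'Compliance_Report_20250902T193000'.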
+ """ + report_name = report_entry.get("name") + self.log("Creating new report: '{0}'".format(report_name), "DEBUG") + if report_entry.get("exists") and report_entry.get("new_report", True) is True: + # Append timestamp to make the name unique + timestamp_suffix = datetime.now().strftime("%Y%m%dT%H%M%S") + new_report_name = f"{report_name}_{timestamp_suffix}" + self.log( + f"Report with name '{report_name}' already exists. " + f"Updating name to '{new_report_name}' to ensure uniqueness.", + "DEBUG" + ) + report_entry["name"] = new_report_name + report_name = report_entry.get("name") + + # Prepare API payload + report_payload = self._prepare_report_payload(report_entry) + if not report_payload: + return False + + try: + self.log("Sending report creation request to Catalyst Center API with payload: {0}".format(self.pprint(report_payload)), "DEBUG") + response = self.dnac._exec( + family="reports", + function="create_or_schedule_a_report", + params=report_payload + ) + self.log( + "Received response from create_or_schedule_a_report: {0}".format( + self.pprint(response) + ), + "DEBUG" + ) + + if not response: + self.msg = "Failed to create or schedule report '{0}'.".format(report_name) + self.set_operation_result("failed", False, self.msg, "ERROR") + return False + + # Process successful response + return self._process_creation_response(report_entry, response) + + except Exception as e: + self.msg = "API call failed for report '{0}': {1}".format(report_name, str(e)) + self.set_operation_result("failed", False, self.msg, "ERROR") + return False + + def _handle_existing_report(self, report_entry): + """ + Handle processing for existing reports. + + Parameters: + report_entry (dict): The report entry for an existing report. + + Returns: + bool: True if handling succeeds, False if handling fails. + """ + report_name = report_entry.get("name") + report_id = report_entry.get("report_id") + + self.log( + "Report '{0}' with ID '{1}' already exists - skipping creation".format( + report_name, report_id + ), + "DEBUG" + ) + + result = { + "response": { + "report_id": report_id, + "view_group_id": report_entry.get("view_group_id"), + "view_id": report_entry.get("view", {}).get("view_id"), + }, + "msg": "Report '{0}' already exists.".format(report_name), + } + self.result["response"].append({"create_report": result}) + + # Handle download for existing reports if requested + if self._is_download_requested(report_entry) and self._should_download_immediately(report_entry): + self.log( + "Download requested for existing report '{0}' - proceeding to download".format( + report_name + ), + "DEBUG" + ) + return self._download_report_if_needed(report_entry, report_id) + + return True + + def _prepare_report_payload(self, report_entry): + """ + Prepare API payload for report creation. + + Parameters: + report_entry (dict): The report entry to transform. + + Returns: + dict: API-compatible payload or None if preparation fails. 
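+
+        Example:
+            Illustrative transformations applied here (values are hypothetical):
+            'schedule.time_zone' ends up as 'timeZoneId', 'view.view_name' becomes
+            'view.name', and a plain Location filter value such as "Global" is
+            expanded to {"value": "Global", "displayValue": "Global"}.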
+ """ + try: + # Convert to camelCase for API compatibility + report_payload = self.convert_keys_to_camel_case(report_entry) + + # Transform specific fields for API requirements + if "schedule" in report_payload and "timeZone" in report_payload["schedule"]: + report_payload["schedule"]["timeZoneId"] = report_payload["schedule"].pop("timeZone") + + if "view" in report_payload and "format" in report_payload["view"]: + format_dict = report_payload["view"]["format"] + if "name" not in format_dict: + format_dict["name"] = format_dict.get("formatType", "CSV") + + view_data = report_payload["view"] + if "viewName" in view_data: + view_data["name"] = view_data.pop("viewName") + + if "view" in report_payload and "filters" in report_payload["view"]: + fixed_filters = [] + for flt in report_payload["view"]["filters"]: + + # ensure camelCase fields exist + flt["displayName"] = flt.get("displayName", flt.get("name")) + flt["type"] = flt.get("type", flt.get("filterType")) + + # Normalize value entries + new_values = [] + raw_values = flt.get("value", []) + + # TIME_RANGE uses dict, not list → keep as is + if isinstance(raw_values, dict): + new_values = raw_values + else: + for v in raw_values: + if isinstance(v, dict): + new_values.append({ + "value": v.get("value"), + "displayValue": v.get("displayValue", v.get("value")) + }) + else: + # simple value like "Global" + new_values.append({ + "value": v, + "displayValue": v + }) + + flt["value"] = new_values + + fixed_filters.append(flt) + report_payload["view"]["filters"] = fixed_filters + + self.log( + "Prepared API payload for report '{0}'".format(report_entry.get("name")), + "DEBUG" + ) + return report_payload + + except Exception as e: + self.msg = "Failed to prepare payload for report '{0}': {1}".format( + report_entry.get("name"), str(e) + ) + self.set_operation_result("failed", False, self.msg, "ERROR") + return None + + def _process_creation_response(self, report_entry, response): + """ + Process successful report creation response. + + Parameters: + report_entry (dict): The original report entry. + response (dict): The API response from report creation. + Returns: + report_id (str): The ID of the created report. + """ + report_name = report_entry.get("name") + report_id = response.get("reportId") + + result = { + "response": { + "reportId": report_id, + "viewGroupId": response.get("viewGroupId"), + "viewsId": response.get("view", {}).get("viewId"), + }, + "msg": "Successfully created or scheduled report '{0}'.".format(report_name) + } + + # Append to overall result list + self.result["response"].append({"create_report": result}) + self.log("Successfully created report '{0}' with ID: {1}".format( + report_name, report_id), "INFO") + + # mark success/change + self.status = "success" + self.result["changed"] = True + + # Note: do NOT trigger download here. Return report_id so caller can decide to download later. + return report_id + + def _is_download_requested(self, report_entry): + """Check if download is requested for the report.""" + return any( + d.get("type", "").upper() == "DOWNLOAD" + for d in report_entry.get("deliveries", []) + ) + + def _should_download_immediately(self, report_entry): + """Check if report should be downloaded immediately.""" + return ( + self._is_download_requested(report_entry) and + report_entry.get("schedule", {}).get("type") == "SCHEDULE_NOW" + ) + + def _download_report_if_needed(self, report_entry, report_id): + """ + Download report if needed and handle any errors. 
+
+        Parameters:
+            report_entry (dict): The report entry.
+            report_id (str): The report ID.
+
+        Returns:
+            bool: True if download succeeds or is not needed, False if download fails.
+        """
+        try:
+            self.report_download(report_entry, report_id)
+            return True
+        except Exception as e:
+            self.msg = "Failed to download report '{0}': {1}".format(
+                report_entry.get("name"), str(e)
+            )
+            self.set_operation_result("failed", False, self.msg, "ERROR")
+            return False
+
+    def get_diff_merged(self, config):
+        """
+        Generate and apply configuration differences for merged state operations.
+
+        This method processes the configuration to identify differences between desired
+        and current states, then applies the necessary changes to create or schedule
+        reports in Cisco Catalyst Center for the merged state.
+
+        Parameters:
+            self (object): An instance of a class used for interacting with Cisco Catalyst Center.
+            config (dict): The configuration dictionary containing report details.
+
+        Returns:
+            self: The current instance of the class with updated 'diff' attributes.
+
+        Description:
+            - Validates presence of report generation configuration
+            - Identifies differences between desired and current states
+            - Creates or schedules new reports as needed
+            - Updates existing reports if configuration changes are detected
+            - Logs all major decision points and processing steps for traceability
+            - Ensures idempotent behavior for merged state operations
+        """
+        self.log(
+            "Starting merged state difference generation and application for {0} report entries".format(
+                len(config.get("generate_report", []))
+            ),
+            "INFO"
+        )
+        generate_report = config.get("generate_report", [])
+        if not generate_report:
+            self.msg = "The 'generate_report' field is missing or empty in the configuration."
+            self.set_operation_result("failed", False, self.msg, "ERROR")
+            return self
+
+        self.log(
+            "Processing {0} report configurations for merged state operations".format(
+                len(generate_report)
+            ),
+            "DEBUG"
+        )
+
+        # Log summary of reports to be processed
+        for report_index, report_entry in enumerate(generate_report):
+            report_name = report_entry.get("name", "unnamed")
+            exists = report_entry.get("exists", False)
+            action = "update/verify" if exists else "create"
+
+            self.log(
+                "Report {0}/{1}: '{2}' - action: {3}".format(
+                    report_index + 1, len(generate_report), report_name, action
+                ),
+                "DEBUG"
+            )
+
+        # Delegate to report creation and scheduling method
+        self.log("Delegating to report creation and scheduling workflow", "DEBUG")
+        self.create_n_schedule_reports(generate_report).check_return_status()
+
+        self.log(
+            "Completed merged state difference generation and application successfully",
+            "INFO"
+        )
+        return self
+
+    def get_execution_id_for_report(self, report_id):
+        """
+        Retrieve the execution ID for a given report ID from Cisco Catalyst Center,
+        retrying until the execution status is 'SUCCESS' or timeout is reached.
+
+        Parameters:
+            report_id (str): The ID of the report for which to retrieve the execution ID.
+
+        Returns:
+            str: The execution ID associated with the specified report ID if successful.
+            None: If no successful execution is found within the timeout period.
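+
+        Example:
+            With the defaults used by this method (poll interval 5 seconds, timeout
+            100 seconds, both overridable via 'dnac_task_poll_interval' and
+            'dnac_api_task_timeout'), the execution list is re-queried roughly every
+            5 seconds until a 'SUCCESS' execution appears or the timeout elapses.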
+ """ + self.log( + "Fetching execution ID for report ID: {0}".format(report_id), + "INFO", + ) + + start_time = time.time() + retry_interval = int(self.payload.get("dnac_task_poll_interval", 5)) + timeout = int(self.payload.get("dnac_api_task_timeout", 100)) + + while True: + try: + response = self.dnac._exec( + family="reports", + function="get_all_execution_details_for_a_given_report", + params={"report_id": report_id}, + ) + self.log( + "Response from get_execution_id_for_report: {0}".format( + self.pprint(response) + ), + "DEBUG", + ) + + executions = response.get("executions", []) if response else [] + if not executions: + self.log( + "No executions found yet for report ID '{0}'.".format(report_id), + "WARNING", + ) + else: + # Iterate through executions to check status + for execution in executions: + execution_id = execution.get("executionId") + status = execution.get("processStatus") + + self.log( + "Execution ID: {0}, Status: {1}".format(execution_id, status), + "DEBUG", + ) + + if status and status.upper() == "SUCCESS": + self.log( + "Found successful execution for report ID '{0}': {1}".format( + report_id, execution_id + ), + "INFO", + ) + return execution_id + + except Exception as e: + self.log( + "Error while fetching execution ID for report ID {0}: {1}".format( + report_id, str(e) + ), + "ERROR", + ) + + # Timeout check + if time.time() - start_time >= timeout: + self.log( + "Timeout reached while waiting for successful execution of report ID: {0}".format( + report_id + ), + "ERROR", + ) + return None + + # Sleep before retrying + self.log( + "Waiting {0} seconds before retrying execution status for report ID: {1}".format( + retry_interval, report_id + ), + "DEBUG", + ) + time.sleep(retry_interval) + + def download_report_with_retry(self, report_id, execution_id): + """ + Download report content with retry mechanism for handling transient failures. + + This method attempts to download report content from Cisco Catalyst Center with + built-in retry logic to handle temporary network issues or API unavailability. + It provides robust download functionality with proper error handling and logging. + + Parameters: + report_id (str): Unique identifier for a report definition/configuration. + execution_id (str): Unique identifier for a specific execution/run of a report. + + Returns: + download_data: The downloaded report content if successfully downloaded. 
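+
+        Example:
+            If the download API raises an error with errorCode 4002 (report content
+            not yet generated), the call is retried after the configured poll
+            interval; any other error, or reaching the timeout, fails the operation.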
+ """ + + self.log( + f"Attempting to download report with report_id={report_id}, execution_id={execution_id}", + "INFO" + ) + + start_time = time.time() + retry_interval = int(self.payload.get("dnac_task_poll_interval", 5)) + resync_retry_count = int(self.payload.get("dnac_api_task_timeout", 100)) + + while True: + try: + download_response = self.dnac._exec( + family="reports", + function="download_report_content", + params={"report_id": report_id, "execution_id": execution_id} + ) + + download_data = download_response.data + self.log( + "Response from download_report_content: {0}".format(download_data), + "DEBUG" + ) + + # If data is present and not error, return it + if download_data and not isinstance(download_data, dict): + return download_data + + except Exception as e: + err_str = str(e) + error_code = None + error_msg = None + + # Try to extract JSON part from exception + match = re.search(r'(\{.*\})', err_str) + if match: + try: + err_json = json.loads(match.group(1)) + if "error" in err_json: + error_code = err_json["error"][0].get("errorCode") + error_msg = err_json["error"][0].get("errorMessage") + except json.JSONDecodeError: + pass + + if error_code == 4002: + self.log( + f"Report not ready yet (error {error_code}: {error_msg}), retrying...", + "WARNING" + ) + else: + self.msg = f"Exception during report download with retry: {err_str}" + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + + # Timeout check + if time.time() - start_time >= resync_retry_count: + self.msg = f"Max retries reached. Report file not available (report_id={report_id}, execution_id={execution_id})." + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + + # Wait before retry + self.log( + f"Waiting {retry_interval} seconds before retrying report download (report_id={report_id}, execution_id={execution_id})", + "DEBUG" + ) + time.sleep(retry_interval) + + def report_download(self, report_entry, report_id): + """ + Download the report content after it has been created or scheduled. + + This method manages the complete report download workflow including execution ID retrieval, + content download with retry mechanism, and local file storage. It handles both immediate + and scheduled report downloads with proper validation and error recovery. + + Parameters: + self (object): An instance of a class used for interacting with Cisco Catalyst Center. + report_entry (dict): The report entry containing details for downloading the report. + report_id (str): The unique identifier of the report to download from Catalyst Center. + + Returns: + self: The current instance of the class with updated 'result' attribute. + + Description: + - Validates report configuration and download requirements + - Retrieves execution ID for completed report instances + - Downloads report content using retry mechanism for reliability + - Saves report content to local file system with proper naming + - Handles various download scenarios including immediate and scheduled reports + - Logs all major decision points and download progress for traceability + - Updates operation results with success or failure status + """ + self.log("Downloading report content for report entry: {0}".format(self.pprint(report_entry)), "DEBUG") + + if not report_entry: + self.msg = "Report entry configuration is required for download operation." 
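+            # Without a report entry there is nothing to resolve (name, deliveries,
+            # file_path), so record the failure and stop before any API calls.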
+ self.set_operation_result("failed", False, self.msg, "ERROR") + self.log("Report download failed - no report_entry provided", "ERROR") + return self + + if not report_id: + self.msg = "Report ID is required for download operation." + self.set_operation_result("failed", False, self.msg, "ERROR") + self.log("Report download failed - no report_id provided", "ERROR") + return self + + report_name = report_entry.get("name", "unnamed") + + self.log( + "Starting report download workflow for report_id='{0}', report_name='{1}'".format( + report_id, report_name + ), + "INFO" + ) + + try: + file_path = report_entry.get("file_path", "./") + if not file_path: + self.msg = "File path is required for downloading the report." + self.set_operation_result("failed", False, self.msg, "ERROR") + return self + + execution_id = self.get_execution_id_for_report(report_id) + if not execution_id: + self.msg = "Failed to retrieve execution ID for report '{0}'.".format(report_entry.get("name")) + self.set_operation_result("failed", False, self.msg, "ERROR") + return self + + download_data = self.download_report_with_retry(report_id, execution_id) + + # Validate file_path + deliveries = report_entry.get("deliveries", []) + view = report_entry.get("view", {}) + file_format = view.get("format", {}).get("format_type") + default_format = ".csv" # Default file format if not specified + + for delivery in deliveries: + if delivery.get("type", "").upper() == "DOWNLOAD" and "file_path" in delivery: + file_path = delivery["file_path"] + break # Found it, no need to continue + + if not file_path: + self.log("No 'file_path' provided. Cannot save the downloaded file.", "WARNING") + self.msg = "File path is required for saving the downloaded report." + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + + # Determine file format + if not file_format.startswith("."): + file_format = "." + file_format # Ensure it starts with "." + + # Determine file name (download_id or default name) + report_name = report_entry.get("name", "report") + + # Construct full path + full_path = os.path.join(file_path, f"{report_name}{file_format}") + + # Save the file + try: + os.makedirs(file_path, exist_ok=True) + with open(full_path, "wb") as f: + f.write(download_data) + self.log(f"File saved successfully at {full_path}", "INFO") + except Exception as e: + self.msg = "Failed to save the downloaded file: {0}".format(str(e)) + self.log(self.msg, "ERROR") + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + return self + + result = { + "response": { + "reportId": report_id, + "reportName": report_entry.get("name"), + "filePath": file_path + }, + "msg": "Successfully downloaded report '{0}' to '{1}'.".format(report_entry.get("name"), file_path), + } + self.result["response"].append({"download_report": result}) + self.log("Successfully downloaded report: {0}".format(report_entry.get("name")), "INFO") + self.status = "success" + self.result["changed"] = True + except Exception as e: + self.msg = "An error occurred while downloading the report: {0}".format(str(e)) + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + return self + + def get_diff_deleted(self, config): + """ + Generate and apply configuration differences for deleted state operations. + + This method processes the configuration to identify and remove existing reports + from Cisco Catalyst Center that are marked for deletion. 
It handles the complete
+        deletion workflow including validation, existence checking, and cleanup operations.
+
+        Parameters:
+            self (object): An instance of a class used for interacting with Cisco Catalyst Center.
+            config (dict): The configuration dictionary containing report details.
+
+        Returns:
+            self: The current instance of the class with updated 'diff' attributes.
+
+        Description:
+            - Validates presence of report deletion configuration
+            - Identifies existing reports that need to be deleted
+            - Removes scheduled reports and their associated configurations
+            - Cleans up related resources and execution histories
+            - Logs all major decision points and deletion steps for traceability
+            - Ensures complete cleanup for deleted state operations
+        """
+        self.log("Starting deletion from configuration: {0}".format(self.pprint(config)), "DEBUG")
+        generate_report = config.get("generate_report", [])
+        if not generate_report:
+            self.msg = "The 'generate_report' field is missing or empty in the configuration."
+            self.set_operation_result("failed", False, self.msg, "ERROR")
+            return self
+
+        self.log(
+            "Processing {0} report configurations for deletion state operations".format(
+                len(generate_report)
+            ),
+            "DEBUG"
+        )
+
+        try:
+            for report_entry in generate_report:
+                report_name = report_entry.get("name", "unnamed")
+                self.log("Attempting to delete report: {0}".format(report_name), "DEBUG")
+                if not report_entry.get("exists", False):
+                    self.log("Report '{0}' does not exist, skipping deletion.".format(report_name), "DEBUG")
+                    result = {
+                        "response": {},
+                        "msg": "Report '{0}' does not exist.".format(report_name),
+                    }
+                    self.result["response"].append({"delete_report": result})
+                    self.msg = "Report '{0}' does not exist.".format(report_name)
+                    continue
+
+                if not report_entry.get("report_id"):
+                    self.msg = "The 'report_id' field is mandatory in the 'generate_report' configuration for deletion."
+                    self.set_operation_result("failed", False, self.msg, "ERROR")
+                    return self
+
+                response = self.dnac._exec(
+                    family="reports",
+                    function="delete_a_scheduled_report",
+                    params={"report_id": report_entry.get("report_id")},
+                )
+                self.log("Response from delete_a_scheduled_report: {0}".format(self.pprint(response)), "DEBUG")
+                if response.get("status") != 200:
+                    self.msg = "Failed to delete report with ID '{0}'.".format(report_entry.get("report_id"))
+                    self.set_operation_result("failed", False, self.msg, "ERROR")
+                    return self
+
+                result = {
+                    "response": {"report_id": report_entry.get("report_id")},
+                    "msg": "Report '{0}' has been successfully deleted.".format(report_entry.get("name")),
+                }
+                self.result["response"].append({"delete_report": result})
+                self.msg = "Successfully deleted report with ID: {0}".format(report_entry.get("report_id"))
+                self.log(self.msg, "INFO")
+                self.status = "success"
+                self.result["changed"] = True
+        except Exception as e:
+            self.msg = "An error occurred while deleting the report: {0}".format(str(e))
+            self.set_operation_result("failed", False, self.msg, "ERROR")
+
+        self.log(
+            "Completed deleted state difference generation and processing successfully",
+            "INFO"
+        )
+        return self
+
+    def verify_diff_merged(self, config):
+        """
+        Verify merged state configuration against current state in Cisco Catalyst Center.
+ + This method validates that the desired report configurations match the current + state in Catalyst Center, ensuring idempotency and confirming successful + deployment of report generation workflows. + + Parameters: + self (object): An instance of a class used for interacting with Cisco + Catalyst Center. + config (dict): The configuration dictionary containing report generation + details including generate_report list with all report specifications + that need to be verified against current state. + + Returns: + self: The current instance of the class with updated 'result' attributes + containing the verification outcomes and any discrepancies found. + + Description: + - Validates presence of report generation configuration + - Compares desired state against current state in Catalyst Center + - Verifies report existence, configuration accuracy, and operational status + - Identifies configuration drift or deployment issues + - Validates webhook destinations, view groups, and delivery configurations + - Logs all major decision points and verification steps for traceability + - Ensures configuration compliance and operational readiness + """ + self.log( + "Starting merged state verification for {0} report entries against Catalyst Center".format( + len(config.get("generate_report", [])) + ), + "INFO" + ) + getattr(self, "get_have")(self.validated_config[0]) + generate_report = self.have.get("generate_report", []) + + if not generate_report: + self.msg = "No reports found in the current state after creation." + self.set_operation_result("failed", False, self.msg, "ERROR") + return self + + self.log( + "Processing {0} report configurations for merged state verification".format( + len(generate_report) + ), + "DEBUG" + ) + + # Log summary of reports to be verified + verification_summary = { + "total_reports": len(generate_report), + "existing_reports": 0, + "new_reports": 0, + "webhook_deliveries": 0, + "notification_deliveries": 0, + "download_deliveries": 0 + } + for report_index, report_entry in enumerate(generate_report): + report_name = report_entry.get("name", "unnamed") + exists = report_entry.get("exists", False) + + if exists: + verification_summary["existing_reports"] += 1 + status = "verify existing configuration" + else: + verification_summary["new_reports"] += 1 + status = "verify new deployment" + + # Count delivery types for verification complexity assessment + deliveries = report_entry.get("deliveries", []) + for delivery in deliveries: + delivery_type = delivery.get("type", "").upper() + if delivery_type == "WEBHOOK": + verification_summary["webhook_deliveries"] += 1 + elif delivery_type == "NOTIFICATION": + verification_summary["notification_deliveries"] += 1 + elif delivery_type == "DOWNLOAD": + verification_summary["download_deliveries"] += 1 + + self.log( + "Report {0}/{1}: '{2}' - {3}".format( + report_index + 1, len(generate_report), report_name, status + ), + "DEBUG" + ) + + self.log( + "Verification summary - Total: {0}, Existing: {1}, New: {2}, Webhook: {3}, Notification: {4}, Download: {5}".format( + verification_summary["total_reports"], + verification_summary["existing_reports"], + verification_summary["new_reports"], + verification_summary["webhook_deliveries"], + verification_summary["notification_deliveries"], + verification_summary["download_deliveries"] + ), + "INFO" + ) + + # Validate configuration integrity before verification + self.log("Validating configuration integrity before state verification", "DEBUG") + + validation_errors = [] + for report_entry in 
generate_report: + report_name = report_entry.get("name", "unnamed") + + # Validate required fields for verification + if not report_entry.get("view_group_name"): + validation_errors.append("Report '{0}' missing view_group_name".format(report_name)) + + if not report_entry.get("view", {}).get("view_name"): + validation_errors.append("Report '{0}' missing view.view_name".format(report_name)) + + if not report_entry.get("deliveries"): + validation_errors.append("Report '{0}' missing deliveries configuration".format(report_name)) + + if validation_errors: + self.msg = "Configuration validation failed: {0}".format("; ".join(validation_errors)) + self.set_operation_result("failed", False, self.msg, "ERROR") + self.log( + "Merged state verification failed - configuration validation errors: {0}".format( + "; ".join(validation_errors) + ), + "ERROR" + ) + return self + + self.log("Configuration integrity validation passed successfully", "DEBUG") + + # Delegate to report verification workflow + self.log("Delegating to report verification workflow for detailed state comparison", "DEBUG") + + try: + self.log( + "Report verification workflow completed - checking operation status", + "DEBUG" + ) + + # Log verification results summary + if hasattr(self, 'result') and self.result.get("response"): + verification_results = len(self.result["response"]) + self.log( + "Verification completed with {0} result entries processed".format( + verification_results + ), + "INFO" + ) + + except Exception as e: + self.msg = "Error during report verification workflow: {0}".format(str(e)) + self.set_operation_result("failed", False, self.msg, "ERROR") + self.log( + "Exception during report verification workflow: {0}".format(str(e)), + "ERROR" + ) + return self + + self.log( + "Completed merged state verification for {0} report entries successfully".format( + len(generate_report) + ), + "INFO" + ) + return self + + def verify_diff_deleted(self, config): + """ Verify deleted state configuration against current state in Cisco Catalyst Center. + + This method validates that reports marked for deletion have been successfully + removed from Catalyst Center, ensuring complete cleanup and confirming the + absence of scheduled reports and their associated configurations. + + Parameters: + self (object): An instance of a class used for interacting with Cisco + Catalyst Center. + config (dict): The configuration dictionary containing report generation + details including generate_report list with reports that should be + verified as deleted from the system. + + Returns: + self: The current instance of the class with updated 'result' attributes + containing the deletion verification outcomes and any cleanup issues found. + + Description: + - Validates presence of report deletion configuration + - Verifies complete removal of reports from Catalyst Center + - Confirms cleanup of scheduled reports, executions, and related resources + - Identifies incomplete deletions or orphaned configurations + - Validates webhook destinations cleanup and delivery configuration removal + - Logs all major decision points and verification steps for traceability + - Ensures complete state cleanup and deletion compliance + """ + self.log( + "Starting deleted state verification for {0} report entries against Catalyst Center".format( + len(config.get("generate_report", [])) + ), + "INFO" + ) + + generate_report = config.get("generate_report", []) + if not generate_report: + self.msg = "The 'generate_report' field is missing or empty in the configuration." 
+ self.set_operation_result("failed", False, self.msg, "ERROR") + self.log("Deleted state verification failed - no generate_report entries found", "ERROR") + return self + + self.log( + "Processing {0} report configurations for deleted state verification".format( + len(generate_report) + ), + "DEBUG" + ) + + # Log summary of reports to be verified for deletion + verification_summary = { + "total_reports": len(generate_report), + "should_be_deleted": 0, + "already_absent": 0, + "webhook_deliveries": 0, + "notification_deliveries": 0, + "download_deliveries": 0 + } + + for report_index, report_entry in enumerate(generate_report): + report_name = report_entry.get("name", "unnamed") + exists = report_entry.get("exists", False) + + if exists: + verification_summary["should_be_deleted"] += 1 + status = "verify successful deletion" + else: + verification_summary["already_absent"] += 1 + status = "confirm already deleted" + + # Count delivery types for verification complexity assessment + deliveries = report_entry.get("deliveries", []) + if deliveries: + for delivery in deliveries: + delivery_type = delivery.get("type", "").upper() + if delivery_type == "WEBHOOK": + verification_summary["webhook_deliveries"] += 1 + elif delivery_type == "NOTIFICATION": + verification_summary["notification_deliveries"] += 1 + elif delivery_type == "DOWNLOAD": + verification_summary["download_deliveries"] += 1 + + self.log( + "Report {0}/{1}: '{2}' - {3}".format( + report_index + 1, len(generate_report), report_name, status + ), + "DEBUG" + ) + + self.log( + "Deletion verification summary - Total: {0}, Should be deleted: {1}, Already absent: {2}, Webhook: {3}, Notification: {4}, Download: {5}".format( + verification_summary["total_reports"], + verification_summary["should_be_deleted"], + verification_summary["already_absent"], + verification_summary["webhook_deliveries"], + verification_summary["notification_deliveries"], + verification_summary["download_deliveries"] + ), + "INFO" + ) + + # Validate configuration integrity before deletion verification + self.log("Validating configuration integrity before deletion state verification", "DEBUG") + + validation_errors = [] + for report_entry in generate_report: + report_name = report_entry.get("name", "unnamed") + + # Validate required fields for deletion verification + if not report_name or report_name == "unnamed": + validation_errors.append("Report entry missing valid name for deletion verification") + + if validation_errors: + self.msg = "Configuration validation failed for deletion verification: {0}".format("; ".join(validation_errors)) + self.set_operation_result("failed", False, self.msg, "ERROR") + self.log( + "Deleted state verification failed - configuration validation errors: {0}".format( + "; ".join(validation_errors) + ), + "ERROR" + ) + return self + + self.log("Configuration integrity validation passed for deletion verification", "DEBUG") + + # Verify current state to confirm deletions + self.log("Checking current state in Catalyst Center to verify report deletions", "DEBUG") + + try: + # Get current state to verify deletions + current_state_config = {"generate_report": generate_report} + self.get_have(current_state_config) + + self.log("Current state retrieval completed for deletion verification", "DEBUG") + + # Analyze deletion verification results + deletion_verification_results = [] + for report_entry in generate_report: + report_name = report_entry.get("name", "unnamed") + currently_exists = report_entry.get("exists", False) + + if currently_exists: 
+ deletion_verification_results.append( + "Report '{0}' still exists - deletion not completed".format(report_name) + ) + else: + deletion_verification_results.append( + "Report '{0}' successfully deleted or already absent".format(report_name) + ) + + # Log deletion verification results + for result in deletion_verification_results: + if "still exists" in result: + self.log(result, "WARNING") + else: + self.log(result, "DEBUG") + + # Check if any reports still exist that shouldn't + remaining_reports = [ + entry.get("name", "unnamed") for entry in generate_report + if entry.get("exists", False) + ] + + if remaining_reports: + self.log( + "Deletion verification found {0} reports still existing: {1}".format( + len(remaining_reports), ", ".join(remaining_reports) + ), + "WARNING" + ) + else: + self.log( + "Deletion verification confirmed all {0} reports are successfully deleted or absent".format( + len(generate_report) + ), + "INFO" + ) + + except Exception as e: + self.msg = "Error during deletion verification state check: {0}".format(str(e)) + self.set_operation_result("failed", False, self.msg, "ERROR") + self.log( + "Exception during deletion verification state check: {0}".format(str(e)), + "ERROR" + ) + return self + + # Update result with verification summary + if hasattr(self, 'result') and 'response' in self.result: + verification_result = { + "verification_type": "deleted_state", + "total_reports_checked": len(generate_report), + "reports_verified_deleted": len([r for r in generate_report if not r.get("exists", False)]), + "reports_still_existing": len([r for r in generate_report if r.get("exists", False)]) + } + self.result["response"].append({"deletion_verification": verification_result}) + + self.log( + "Completed deleted state verification for {0} report entries successfully".format( + len(generate_report) + ), + "INFO" + ) + return self + + +def main(): + """main entry point for module execution""" + element_spec = { + "dnac_host": {"type": "str", "required": True}, + "dnac_port": {"type": "str", "default": "443"}, + "dnac_username": {"type": "str", "default": "admin", "aliases": ["user"]}, + "dnac_password": {"type": "str", "no_log": True}, + "dnac_verify": {"type": "bool", "default": True}, + "dnac_version": {"type": "str", "default": "2.2.3.3"}, + "dnac_debug": {"type": "bool", "default": False}, + "dnac_log": {"type": "bool", "default": False}, + "dnac_log_level": {"type": "str", "default": "WARNING"}, + "dnac_log_file_path": {"type": "str", "default": "dnac.log"}, + "dnac_log_append": {"type": "bool", "default": True}, + "config_verify": {"type": "bool", "default": False}, + "dnac_api_task_timeout": {"type": "int", "default": 1200}, + "dnac_task_poll_interval": {"type": "int", "default": 2}, + "config": {"type": "list", "required": True, "elements": "dict"}, + "state": {"default": "merged", "choices": ["merged", "deleted"], "type": "str"}, + "validate_response_schema": {"type": "bool", "default": True}, + } + + module = AnsibleModule(argument_spec=element_spec, supports_check_mode=False) + + ccc_report = Reports(module) + state = ccc_report.params.get("state") + + if state not in ccc_report.supported_states: + ccc_report.status = "invalid" + ccc_report.msg = "State '{0}' is invalid. 
Supported states: {1}".format( + state, ", ".join(ccc_report.supported_states) + ) + ccc_report.check_return_status() + + ccc_version = ccc_report.get_ccc_version() + if ccc_report.compare_dnac_versions(ccc_version, "2.3.7.9") < 0: + ccc_report.msg = ( + "The specified version '{0}' does not support the Flexible Report features. " + "Supported versions start from '2.3.7.9' onwards.".format(ccc_version) + ) + ccc_report.status = "failed" + ccc_report.check_return_status() + ccc_report.validate_input().check_return_status() + config_verify = ccc_report.params.get("config_verify") + + for config in ccc_report.validated_config: + if state != "deleted": + ccc_report.input_data_validation(config).check_return_status() + ccc_report.get_want(config).check_return_status() + ccc_report.get_have(config).check_return_status() + ccc_report.get_diff_state_apply[state](config).check_return_status() + if config_verify: + ccc_report.verify_diff_state_apply[state](config).check_return_status() + + module.exit_json(**ccc_report.result) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/sda_fabric_sites_zones_workflow_manager.py b/plugins/modules/sda_fabric_sites_zones_workflow_manager.py index d89fe735d5..239d0ab5ae 100644 --- a/plugins/modules/sda_fabric_sites_zones_workflow_manager.py +++ b/plugins/modules/sda_fabric_sites_zones_workflow_manager.py @@ -748,9 +748,8 @@ def get_have(self, config): site_exists, site_id = self.get_site_id(site_name) if not site_exists: self.msg = ( - "Given site '{0}' does not exist in the Catalyst Center.".format( - site_name - ) + f"The site '{site_name}' does not exist in the Catalyst Center. " + "A site must be created first before it can be converted into a Fabric Site." ) self.set_operation_result( "failed", False, self.msg, "ERROR" @@ -881,9 +880,8 @@ def create_fabric_site(self, site): site_exists, site_id = self.get_site_id(site_name) if not site_exists: self.msg = ( - "Given site '{0}' does not exist in the Catalyst Center.".format( - site_name - ) + f"The site '{site_name}' does not exist in the Catalyst Center. " + "A site must be created first before it can be converted into a Fabric Site." ) self.set_operation_result( "failed", False, self.msg, "ERROR" @@ -1054,9 +1052,8 @@ def create_fabric_zone(self, zone): site_exists, site_id = self.get_site_id(site_name) if not site_exists: self.msg = ( - "Given site '{0}' does not exist in the Catalyst Center.".format( - site_name - ) + f"The site '{site_name}' does not exist in the Catalyst Center. " + "A site must be created first before it can be converted into a Fabric Site." ) self.set_operation_result( "failed", False, self.msg, "ERROR" @@ -2346,9 +2343,8 @@ def get_diff_merged(self, config): site_exists, site_id = self.get_site_id(site_name) if not site_exists: self.msg = ( - "Given site '{0}' does not exist in the Catalyst Center.".format( - site_name - ) + f"The site '{site_name}' does not exist in the Catalyst Center. " + "A site must be created first before it can be converted into a Fabric Site." ) self.set_operation_result( "failed", False, self.msg, "ERROR" @@ -2669,9 +2665,8 @@ def get_diff_deleted(self, config): site_exists, site_id = self.get_site_id(site_name) if not site_exists: self.msg = ( - "Given site '{0}' does not exist in the Catalyst Center.".format( - site_name - ) + f"The site '{site_name}' does not exist in the Catalyst Center. " + "A site must be created first before it can be converted into a Fabric Site." 
) self.set_operation_result( "failed", False, self.msg, "ERROR" @@ -2753,8 +2748,9 @@ def verify_diff_merged(self, config): fabric_type = site.get("fabric_type", "fabric_site") site_exists, site_id = self.get_site_id(site_name) if not site_exists: - self.msg = "Given site '{0}' does not exist in the Catalyst Center.".format( - site_name + self.msg = ( + f"The site '{site_name}' does not exist in the Catalyst Center. " + "A site must be created first before it can be converted into a Fabric Site." ) self.set_operation_result( "failed", False, self.msg, "ERROR" @@ -2846,9 +2842,8 @@ def verify_diff_deleted(self, config): site_exists, site_id = self.get_site_id(site_name) if not site_exists: self.msg = ( - "Given site '{0}' does not exist in the Catalyst Center.".format( - site_name - ) + f"The site '{site_name}' does not exist in the Catalyst Center. " + "A site must be created first before it can be converted into a Fabric Site." ) self.set_operation_result( "failed", False, self.msg, "ERROR" diff --git a/plugins/modules/swim_workflow_manager.py b/plugins/modules/swim_workflow_manager.py index 8c7d24df7c..d2b9f81a68 100644 --- a/plugins/modules/swim_workflow_manager.py +++ b/plugins/modules/swim_workflow_manager.py @@ -6,6 +6,7 @@ __metaclass__ = type __author__ = "Madhan Sankaranarayanan, Rishita Chowdhary, Abhishek Maheshwari, Syed Khadeer Ahmed, Ajith Andrew J" + DOCUMENTATION = r""" --- module: swim_workflow_manager @@ -33,6 +34,8 @@ - Provides an API to distribute a software image to a device. The software image must be imported into Catalyst Center before it can be distributed. + - Provides an API to delete software images from Catalyst Center. + - A golden tag is mandatory for a site, and the workflow cannot proceed without it version_added: '6.6.0' extends_documentation_fragment: - cisco.dnac.workflow_manager_params @@ -50,7 +53,7 @@ description: The state of Catalyst Center after module completion. type: str - choices: [merged] + choices: [ merged, deleted] default: merged config: description: List of details of SWIM image being @@ -59,6 +62,19 @@ elements: dict required: true suboptions: + image_name: + description: + - A list of image names to be deleted. + - This parameter is mandatory and exclusively used when the state is 'deleted'. + - Applicable for v3.1.3.0 and later. + type: list + elements: str + sync_cco: + description: + - Set to True to synchronize the image with Cisco Connection Online (CCO). + - Applicable for v3.1.3.0 and later. + type: bool + default: false import_image_details: description: Details of image being imported type: dict @@ -395,6 +411,36 @@ description: Specify the name of the device family such as Switches and Hubs, etc. type: str + image_distribution_timeout: + description: | + Timeout duration in seconds for image distribution API operations. + Controls how long the system waits for image distribution tasks to complete, + including image transfer to target devices, network propagation, and distribution validation. 
+ + Operation phases covered by this timeout: + - Image preparation and validation on Catalyst Center + - Network transfer to target devices + - Installation verification on target devices + - Distribution status confirmation + + Default of 1800 seconds (30 minutes) accounts for: + - Large image files (up to several GB) + - Multiple target devices in site-based operations + - Network latency and bandwidth constraints + - Device processing and storage capabilities + + Recommended timeout values: + - Small networks (1-10 devices): 900-1800 seconds + - Medium networks (10-50 devices): 1800-3600 seconds + - Large networks (50+ devices): 3600-7200 seconds + + Note: This timeout is independent of the global 'dnac_api_task_timeout' parameter + and specifically applies to distribution operations only. + + default: 1800 + type: int + version_added: 3.1.3.0 + site_name: description: Used to get device details associated to this site. @@ -420,6 +466,9 @@ description: Device serial number where the image needs to be distributed type: str + device_tag: + description: Device tag for filtering the target device(s) + type: str device_ip_address: description: Device IP address where the image needs to be distributed @@ -451,6 +500,39 @@ ACCESS, BORDER ROUTER, DISTRIBUTION, and CORE. type: str + image_activation_timeout: + description: | + Timeout duration in seconds for image activation API operations. + Controls how long the system waits for image activation processes to complete + before timing out, including device reboot and startup verification. + + Operation phases covered by this timeout: + - Image validation and preparation for activation + - Device upgrade mode processing (install/bundle/currentlyExists) + - Device reboot and startup sequence (if required) + - Post-activation connectivity and status verification + - Golden image validation (if applicable) + + Default of 1800 seconds (30 minutes) accommodates: + - Device boot time variations (switches: 5-15 min, routers: 10-20 min) + - Image installation and verification processes + - Network convergence and connectivity restoration + - Multiple devices in concurrent activation scenarios + + Recommended timeout values by device type: + - Access switches: 1200-1800 seconds (20-30 minutes) + - Distribution/Core switches: 1800-2700 seconds (30-45 minutes) + - Routers and complex devices: 2700-3600 seconds (45-60 minutes) + + Warning: Setting timeout too low may cause premature failure reporting + for successful but slow activation processes. + + Note: This timeout is independent of the global 'dnac_api_task_timeout' parameter + + type: int + default: 1800 + version_added: 3.1.3.0 + device_family_name: description: Specify the name of the device family such as Switches and Hubs, etc. @@ -522,6 +604,9 @@ description: Device serial number where the image needs to be activated type: str + device_tag: + description: Device tag for filtering the target device(s) + type: str device_ip_address: description: Device IP address where the image needs to be activated @@ -539,6 +624,20 @@ ScheduleValidate, validates data before schedule (optional). type: bool + compatible_features: + description: + - List of compatible feature key-value pairs. + - Applicable for v3.1.3.0 and later. + type: list + elements: dict + suboptions: + key: + description: Feature name. 
(e.g., "ISSU", "Rommon update") + type: str + value: + description: Feature status (e.g., Enable or Disable) + type: str + requirements: - dnacentersdk == 2.7.3 - python >= 3.9 @@ -548,16 +647,17 @@ software_image_management_swim.SoftwareImageManagementSwim.tag_as_golden_image, software_image_management_swim.SoftwareImageManagementSwim.trigger_software_image_distribution, software_image_management_swim.SoftwareImageManagementSwim.trigger_software_image_activation, + software_image_management_swim.SoftwareImageManagementSwim.delete_image_v1, + - Paths used are post /dna/intent/api/v1/image/importation/source/url, post /dna/intent/api/v1/image/importation/golden, post /dna/intent/api/v1/image/distribution, - post - /dna/intent/api/v1/image/activation/device, - - Added - the parameter 'dnac_api_task_timeout', - 'dnac_task_poll_interval' - options in v6.13.2. + post /dna/intent/api/v1/image/activation/device, + delete /dna/intent/api/v1/images/{id}, + + - Added the parameter 'dnac_api_task_timeout', 'dnac_task_poll_interval' options in v6.13.2. + """ EXAMPLES = r""" --- @@ -800,6 +900,51 @@ device_family_name: Switches and Hubs device_series_name: Cisco Catalyst 9300 Series Switches + +- name: Distribute the given image on devices associated with device tag + to that site with specified role. + cisco.dnac.swim_workflow_manager: + dnac_host: "{{dnac_host}}" + dnac_username: "{{dnac_username}}" + dnac_password: "{{dnac_password}}" + dnac_verify: "{{dnac_verify}}" + dnac_port: "{{dnac_port}}" + dnac_version: "{{dnac_version}}" + dnac_debug: "{{dnac_debug}}" + dnac_log_level: "{{dnac_log_level}}" + dnac_log: true + config: + - image_distribution_details: + image_name: cat9k_iosxe.17.12.01.SPA.bin + site_name: Global/a_swim/swim_test1 + device_role: ALL + device_family_name: Switches and Hubs + device_tag: AUTO_INV_EVENT_SYNC_DISABLED + +- name: Activate the given image on devices associated with device tag + to that site with specified role. + cisco.dnac.swim_workflow_manager: + dnac_host: "{{dnac_host}}" + dnac_username: "{{dnac_username}}" + dnac_password: "{{dnac_password}}" + dnac_verify: "{{dnac_verify}}" + dnac_port: "{{dnac_port}}" + dnac_version: "{{dnac_version}}" + dnac_debug: "{{dnac_debug}}" + dnac_log_level: "{{dnac_log_level}}" + dnac_log: true + config: + - image_activation_details: + image_name: cat9k_iosxe.17.12.01.SPA.bin + site_name: Global/USA/San Francisco/BGL_18 + device_role: ALL + device_family_name: Switches and Hubs + device_series_name: Cisco Catalyst 9300 Series Switches + device_tag: AUTO_INV_EVENT_SYNC_DISABLED + schedule_validate: false + activate_lower_image_version: true + distribute_if_needed: true + - name: Activate the given image on devices associated to that site with specified role. cisco.dnac.swim_workflow_manager: @@ -818,11 +963,109 @@ site_name: Global/USA/San Francisco/BGL_18 device_role: ALL device_family_name: Switches and Hubs - device_series_name: Cisco Catalyst 9300 Series - Switches - scehdule_validate: false + device_series_name: Cisco Catalyst 9300 Series Switches + schedule_validate: false activate_lower_image_version: true distribute_if_needed: true + +- name: Activate the golden image on devices associated + to that site with specified role. 
+ cisco.dnac.swim_workflow_manager: + dnac_host: "{{dnac_host}}" + dnac_username: "{{dnac_username}}" + dnac_password: "{{dnac_password}}" + dnac_verify: "{{dnac_verify}}" + dnac_port: "{{dnac_port}}" + dnac_version: "{{dnac_version}}" + dnac_debug: "{{dnac_debug}}" + dnac_log_level: "{{dnac_log_level}}" + dnac_log: true + config: + - image_activation_details: + site_name: Global/USA/San Francisco/BGL_18 + device_role: ALL + device_family_name: Switches and Hubs + device_series_name: Cisco Catalyst 9300 Series Switches + schedule_validate: false + activate_lower_image_version: true + distribute_if_needed: true + +- name: distribute the golden image on devices associated + to that site with specified role with custom api timeout. + cisco.dnac.swim_workflow_manager: + dnac_host: "{{dnac_host}}" + dnac_username: "{{dnac_username}}" + dnac_password: "{{dnac_password}}" + dnac_verify: "{{dnac_verify}}" + dnac_port: "{{dnac_port}}" + dnac_version: "{{dnac_version}}" + dnac_debug: "{{dnac_debug}}" + dnac_log_level: "{{dnac_log_level}}" + dnac_log: true + config: + - image_activation_details: + site_name: Global/USA/San Francisco/BGL_18 + image_activation_timeout: 2500 + device_role: ALL + device_family_name: Switches and Hubs + device_series_name: Cisco Catalyst 9300 Series Switches + schedule_validate: false + activate_lower_image_version: true + distribute_if_needed: true + +- name: Activate the given image on devices associated + to that site for v3.1.3.0. + cisco.dnac.swim_workflow_manager: + dnac_host: "{{dnac_host}}" + dnac_username: "{{dnac_username}}" + dnac_password: "{{dnac_password}}" + dnac_verify: "{{dnac_verify}}" + dnac_port: "{{dnac_port}}" + dnac_version: "{{dnac_version}}" + dnac_debug: "{{dnac_debug}}" + dnac_log_level: "{{dnac_log_level}}" + dnac_log: true + config: + - image_activation_details: + image_name: cat9k_iosxe.17.12.01.SPA.bin + site_name: Global/USA/San Francisco/BGL_18 + compatible_features: + - key: "ISSU" + value: Enable + - key: "Rommon update" + value: Disable + +- name: Sync cco images to Cisco Catalyst Center for v3.1.3.0. + cisco.dnac.swim_workflow_manager: + dnac_host: "{{dnac_host}}" + dnac_username: "{{dnac_username}}" + dnac_password: "{{dnac_password}}" + dnac_verify: "{{dnac_verify}}" + dnac_port: "{{dnac_port}}" + dnac_version: "{{dnac_version}}" + dnac_debug: "{{dnac_debug}}" + dnac_log_level: "{{dnac_log_level}}" + dnac_log: true + config: + - sync_cco: true + +- name: Delete image from Cisco Catalyst Center for v3.1.3.0. + cisco.dnac.swim_workflow_manager: + dnac_host: "{{dnac_host}}" + dnac_username: "{{dnac_username}}" + dnac_password: "{{dnac_password}}" + dnac_verify: "{{dnac_verify}}" + dnac_port: "{{dnac_port}}" + dnac_version: "{{dnac_version}}" + dnac_debug: "{{dnac_debug}}" + dnac_log: true + dnac_log_level: DEBUG + config_verify: true + dnac_api_task_timeout: 1000 + dnac_task_poll_interval: 1 + state: deleted + config: + - image_name: [C9800-SW-iosxe-wlc.17.07.01.SPA.bin] """ RETURN = r""" #Case: SWIM image is successfully imported, tagged as golden, distributed and activated on a device @@ -865,7 +1108,7 @@ class Swim(DnacBase): def __init__(self, module): super().__init__(module) - self.supported_states = ["merged"] + self.supported_states = ["merged", "deleted"] self.images_to_import, self.existing_images = [], [] def validate_input(self): @@ -886,7 +1129,6 @@ def validate_input(self): will contain the validated configuration. If it fails, 'self.status' will be 'failed', 'self.msg' will describe the validation issues. 
""" - if not self.config: self.status = "success" self.msg = "Configuration is not available in the playbook for validation" @@ -894,10 +1136,12 @@ def validate_input(self): return self temp_spec = dict( - import_image_details=dict(type="dict"), - tagging_details=dict(type="dict"), - image_distribution_details=dict(type="dict"), - image_activation_details=dict(type="dict"), + image_name=dict(type='list', elements='str'), + sync_cco=dict(type='bool', default=False), + import_image_details=dict(type='dict'), + tagging_details=dict(type='dict'), + image_distribution_details=dict(type='dict'), + image_activation_details=dict(type='dict'), ) # Validate swim params @@ -997,6 +1241,56 @@ def get_image_id(self, name): return image_id + def get_image_id_v1(self, name): + """ + Retrieve the unique image ID based on the provided image name. + Parameters: + self (object): An instance of a class used for interacting with Cisco Catalyst Center. + name (str): The name of the software image to search for. + Returns: + str or None: The unique image ID (UUID) if a single matching image is found, otherwise None. + Description: + This function sends a request to Cisco Catalyst Center to retrieve details about a software image based on its name. + It extracts and returns the image ID if a single matching image is found. If no image or multiple + images are found with the same name, it raises an exception. + """ + + self.log("Attempting to find image ID for image with name: '{0}'".format(name), "DEBUG") + try: + image_response = self.dnac._exec( + family="software_image_management_swim", + function="get_software_image_details", + op_modifies=True, + params={"image_name": name}, + ) + + self.log("Received API response from 'get_software_image_details': {0}".format(str(image_response)), "DEBUG") + + image_list = image_response.get("response", []) if isinstance(image_response, dict) else [] + + if len(image_list) == 1: + image_id = image_list[0].get("imageUuid") + if image_id: + self.log("Successfully found SWIM image '{0}' with ID: {1}".format(name, image_id), "INFO") + return image_id + else: + self.log("Image found but missing imageUuid field for '{0}'".format(name), "WARNING") + return None + + if len(image_list) == 0: + self.log("No SWIM image found with name '{0}'".format(name), "INFO") + return None + + self.log( + "Multiple SWIM images ({0}) found with name '{1}' - unable to uniquely identify".format(len(image_list), name), + "WARNING", + ) + return None + + except Exception as e: + self.log("An exception occurred while retrieving image ID for '{0}': {1}".format(name, str(e)), "ERROR") + return None + def get_cco_image_id(self, cco_image_name): """ Retrieve the unique image ID from Cisco.com based on the provided image name. 
@@ -1230,11 +1524,21 @@ def get_device_uuids( device_uuid_list = [] device_id_list, site_response_list = [], [] if not site_name: - site_names = "Global/.*" - self.log( - "Site name not specified; defaulting to 'Global' to fetch all devices under this category", - "INFO", - ) + current_version = self.get_ccc_version() + if self.compare_dnac_versions(current_version, "2.3.5.3") <= 0: + site_name = "Global/.*" + self.log( + "Catalyst Center version {0} (≤2.3.5.3) detected - using wildcard pattern 'Global/.*' " + "to fetch devices from Global site and all child sites via legacy API".format(current_version), + "INFO", + ) + else: + site_name = "Global" + self.log( + "Catalyst Center version {0} (>2.3.5.3) detected - using 'Global' site name " + "to fetch devices via enhanced site hierarchy API".format(current_version), + "INFO", + ) (site_exists, site_id) = self.site_exists(site_name) if not site_exists: @@ -1293,319 +1597,323 @@ def get_device_uuids( for item_dict in item["response"]: site_response_list.append(item_dict) else: - site_type = self.get_sites_type(site_name) - site_info = {} - - self.log("Starting site hierarchy processing for: '{0}' (Type: {1})".format(site_name, site_type), "INFO") - if site_type == "building": + if site_name: self.log( - "Processing site as a building: {site_name}".format(site_name=site_name), - "DEBUG", + "Fetching devices for site '{0}'".format(site_name), "DEBUG" ) - + site_type = self.get_sites_type(site_name) + self.log("Determined site type: {0}".format(site_type), "DEBUG") site_info = {} - self.log("Fetching parent site data for building: {0}".format(site_name), "DEBUG") - parent_site_data = self.get_site(site_name) + self.log("Starting site hierarchy processing for: '{0}' (Type: {1})".format(site_name, site_type), "INFO") + if site_type == "building": + self.log( + "Processing site as a building: {site_name}".format(site_name=site_name), + "DEBUG", + ) + site_info = {} + + self.log("Fetching parent site data for building: {0}".format(site_name), "DEBUG") + parent_site_data = self.get_site(site_name) + + if parent_site_data.get("response"): + self.log( + "Parent site data found for building: '{0}'. Processing {1} items.".format( + site_name, + len(parent_site_data.get('response') or []) + ), + "DEBUG" + ) + for item in parent_site_data["response"]: + if "nameHierarchy" in item and "id" in item: + site_info[item["nameHierarchy"]] = item["id"] + self.log("Added parent site '{0}' with ID '{1}' to site_info.".format(item['nameHierarchy'], item['id']), "DEBUG") + else: + self.log( + "Missing 'nameHierarchy' or 'id' in parent site item: {0}".format(str(item)), + "WARNING" + ) + self.log("Parent site data: {0}".format(str(parent_site_data)), "DEBUG") + else: + self.log("No data found for parent site: {0}".format(site_name), "WARNING") + + self.log("Current site_info after parent processing: {0}".format(site_info), "DEBUG") + wildcard_site_name = site_name + "/.*" + self.log("Attempting to fetch child sites for building with wildcard: {0}".format(wildcard_site_name), "DEBUG") + child_site_data = self.get_site(wildcard_site_name) + + if child_site_data and child_site_data.get("response"): + self.log( + "Child site data found for building: '{0}'. 
Processing {1} items.".format( + wildcard_site_name, + len(child_site_data.get('response') or []) + ), + "DEBUG" + ) + for item in child_site_data["response"]: + if "nameHierarchy" in item and "id" in item: + site_info[item["nameHierarchy"]] = item["id"] + self.log("Added child site '{0}' with ID '{1}' to site_info.".format(item['nameHierarchy'], item['id']), "DEBUG") + else: + self.log( + "Missing 'nameHierarchy' or 'id' in child site item: {0}".format(str(item)), + "WARNING" + ) + self.log("Child site data found and logged for: {0}".format(wildcard_site_name), "DEBUG") + site_names = wildcard_site_name + else: + self.log("No child site data found under: {0}".format(wildcard_site_name), "DEBUG") + site_names = site_name + + elif site_type in ["area", "global"]: + self.log( + "Processing site as an area: {site_name}".format(site_name=site_name), + "DEBUG", + ) + + wildcard_site_name = site_name + "/.*" + self.log("Attempting to fetch child sites for area using wildcard:: {0}".format(wildcard_site_name), "DEBUG") + child_site_data = self.get_site(wildcard_site_name) + self.log("Child site data: {0}".format(str(child_site_data)), "DEBUG") + + if child_site_data and child_site_data.get("response"): + self.log("Child sites found for area: '{0}'. Setting site_names to wildcard.".format(wildcard_site_name), "DEBUG") + site_names = wildcard_site_name + else: + self.log("No child sites found under area: '{0}'. Using original site name: '{1}'.".format(wildcard_site_name, site_name), "DEBUG") + site_names = site_name - if parent_site_data.get("response"): + elif site_type == "floor": self.log( - "Parent site data found for building: '{0}'. Processing {1} items.".format( - site_name, - len(parent_site_data.get('response') or []) + "Processing site as a floor: {site_name}".format( + site_name=site_name ), - "DEBUG" + "DEBUG", ) - for item in parent_site_data["response"]: - if "nameHierarchy" in item and "id" in item: - site_info[item["nameHierarchy"]] = item["id"] - self.log("Added parent site '{0}' with ID '{1}' to site_info.".format(item['nameHierarchy'], item['id']), "DEBUG") - else: - self.log( - "Missing 'nameHierarchy' or 'id' in parent site item: {0}".format(str(item)), - "WARNING" - ) - self.log("Parent site data: {0}".format(str(parent_site_data)), "DEBUG") - else: - self.log("No data found for parent site: {0}".format(site_name), "WARNING") - self.log("Current site_info after parent processing: {0}".format(site_info), "DEBUG") - wildcard_site_name = site_name + "/.*" - self.log("Attempting to fetch child sites for building with wildcard: {0}".format(wildcard_site_name), "DEBUG") - child_site_data = self.get_site(wildcard_site_name) + site_names = site_name - if child_site_data and child_site_data.get("response"): + else: self.log( - "Child site data found for building: '{0}'. 
Processing {1} items.".format( - wildcard_site_name, - len(child_site_data.get('response') or []) + "Unknown site type '{site_type}' for site '{site_name}'.".format( + site_type=site_type, site_name=site_name ), - "DEBUG" + "ERROR", ) - for item in child_site_data["response"]: - if "nameHierarchy" in item and "id" in item: - site_info[item["nameHierarchy"]] = item["id"] - self.log("Added child site '{0}' with ID '{1}' to site_info.".format(item['nameHierarchy'], item['id']), "DEBUG") + + if site_type in ["area", "floor", "global"]: + self.log("Fetching site names for pattern: {0}".format(site_names), "DEBUG") + get_site_names = self.get_site(site_names) + self.log("Fetched site names: {0}".format(str(get_site_names)), "DEBUG") + + for item in get_site_names.get('response', []): + if 'nameHierarchy' in item and 'id' in item: + site_info[item['nameHierarchy']] = item['id'] else: self.log( - "Missing 'nameHierarchy' or 'id' in child site item: {0}".format(str(item)), + "Missing 'nameHierarchy' or 'id' in site item: {0}".format(str(item)), "WARNING" ) - self.log("Child site data found and logged for: {0}".format(wildcard_site_name), "DEBUG") - site_names = wildcard_site_name - else: - self.log("No child site data found under: {0}".format(wildcard_site_name), "DEBUG") - site_names = site_name + self.log("Site information retrieved: {0}".format(str(site_info)), "DEBUG") - elif site_type == "area": - self.log( - "Processing site as an area: {site_name}".format(site_name=site_name), - "DEBUG", - ) + for site_name, site_id in site_info.items(): + offset = 1 + limit = self.get_device_details_limit() - wildcard_site_name = site_name + "/.*" - self.log("Attempting to fetch child sites for area using wildcard:: {0}".format(wildcard_site_name), "DEBUG") - child_site_data = self.get_site(wildcard_site_name) - self.log("Child site data: {0}".format(str(child_site_data)), "DEBUG") + while True: + try: + response = self.dnac._exec( + family="site_design", + function="get_site_assigned_network_devices", + params={ + "site_id": site_id, + "offset": offset, + "limit": limit, + }, + ) + self.log( + "Received API response from 'get_site_assigned_network_devices' for site '{0}': {1}".format( + site_name, response + ), + "DEBUG", + ) - if child_site_data and child_site_data.get("response"): - self.log("Child sites found for area: '{0}'. Setting site_names to wildcard.".format(wildcard_site_name), "DEBUG") - site_names = wildcard_site_name - else: - self.log("No child sites found under area: '{0}'. 
Using original site name: '{1}'.".format(wildcard_site_name, site_name), "DEBUG") - site_names = site_name + devices = response.get("response", []) + if not devices: + self.log( + "No more devices found for site '{0}'.".format( + site_name + ), + "INFO", + ) + break - elif site_type == "floor": - self.log( - "Processing site as a floor: {site_name}".format( - site_name=site_name - ), - "DEBUG", - ) - site_names = site_name + for device in devices: + device_id_list.append(device.get("deviceId")) - else: - self.log( - "Unknown site type '{site_type}' for site '{site_name}'.".format( - site_type=site_type, site_name=site_name - ), - "ERROR", - ) + offset += limit - if site_type in ["area", "floor"]: - self.log("Fetching site names for pattern: {0}".format(site_names), "DEBUG") - get_site_names = self.get_site(site_names) - self.log("Fetched site names: {0}".format(str(get_site_names)), "DEBUG") + except Exception as e: + self.log( + "Unable to fetch devices for site '{0}' due to '{1}'".format( + site_name, e + ), + "WARNING", + ) + break - for item in get_site_names.get('response', []): - if 'nameHierarchy' in item and 'id' in item: - site_info[item['nameHierarchy']] = item['id'] - else: - self.log( - "Missing 'nameHierarchy' or 'id' in site item: {0}".format(str(item)), - "WARNING" - ) - self.log("Site information retrieved: {0}".format(str(site_info)), "DEBUG") - - for site_name, site_id in site_info.items(): - offset = 1 - limit = self.get_device_details_limit() - - while True: + for device_id in device_id_list: + self.log("Processing device_id: {0}".format(device_id)) try: - response = self.dnac._exec( - family="site_design", - function="get_site_assigned_network_devices", - params={ - "site_id": site_id, - "offset": offset, - "limit": limit, - }, + device_list_response = self.dnac._exec( + family="devices", + function="get_device_list", + params={"id": device_id}, ) + self.log( - "Received API response from 'get_site_assigned_network_devices' for site '{0}': {1}".format( - site_name, response + "Received API response from 'get_device_list': {0}".format( + str(device_list_response) ), "DEBUG", ) - devices = response.get("response", []) - if not devices: + device_response = device_list_response.get("response") + if not device_response: self.log( - "No more devices found for site '{0}'.".format( - site_name - ), + "No device data found for device_id: {0}".format(device_id), "INFO", ) - break + continue - for device in devices: - device_id_list.append(device.get("deviceId")) - - offset += limit + for device in device_response: + if device.get("instanceUuid") in device_id_list: + if ( + device_family is None + or device.get("family") == device_family + ): + site_response_list.append(device) except Exception as e: self.log( - "Unable to fetch devices for site '{0}' due to '{1}'".format( - site_name, e + "Unable to fetch devices for site '{0}' due to: {1}".format( + site_name, str(e) ), "WARNING", ) - break + return device_uuid_list - for device_id in device_id_list: - self.log("Processing device_id: {0}".format(device_id)) - try: - device_list_response = self.dnac._exec( - family="devices", - function="get_device_list", - params={"id": device_id}, - ) + self.device_ips = [] + for item in site_response_list: + device_ip = item["managementIpAddress"] + self.device_ips.append(device_ip) + + if device_role.upper() == "ALL": + device_role = None + + device_params = { + "series": device_series_name, + "family": device_family, + "role": device_role, + } + offset = 0 + limit = 
self.get_device_details_limit() + initial_exec = False + site_memberships_ids, device_response_ids = [], [] + while True: + try: + if initial_exec: + device_params["limit"] = limit + device_params["offset"] = offset * limit + device_list_response = self.dnac._exec( + family="devices", + function="get_device_list", + params=device_params, + ) + else: + initial_exec = True + device_list_response = self.dnac._exec( + family="devices", + function="get_device_list", + params=device_params, + ) self.log( - "Received API response from 'get_device_list': {0}".format( + "Received API response from 'device_list_response': {0}".format( str(device_list_response) ), "DEBUG", ) - + offset = offset + 1 device_response = device_list_response.get("response") + if not device_response: self.log( - "No device data found for device_id: {0}".format(device_id), + "Failed to retrieve devices associated with the site '{0}' due to empty API response.".format( + site_name + ), "INFO", ) - continue - - for device in device_response: - if device.get("instanceUuid") in device_id_list: - if ( - device_family is None - or device.get("family") == device_family - ): - site_response_list.append(device) - - except Exception as e: - self.log( - "Unable to fetch devices for site '{0}' due to: {1}".format( - site_name, str(e) - ), - "WARNING", - ) - return device_uuid_list - - self.device_ips = [] - for item in site_response_list: - device_ip = item["managementIpAddress"] - self.device_ips.append(device_ip) - - if device_role.upper() == "ALL": - device_role = None - - device_params = { - "series": device_series_name, - "family": device_family, - "role": device_role, - } - offset = 0 - limit = self.get_device_details_limit() - initial_exec = False - site_memberships_ids, device_response_ids = [], [] - - while True: - try: - if initial_exec: - device_params["limit"] = limit - device_params["offset"] = offset * limit - device_list_response = self.dnac._exec( - family="devices", - function="get_device_list", - params=device_params, - ) - else: - initial_exec = True - device_list_response = self.dnac._exec( - family="devices", - function="get_device_list", - op_modifies=True, - params=device_params, - ) - self.log( - "Received API response from 'device_list_response': {0}".format( - str(device_list_response) - ), - "DEBUG", - ) - offset = offset + 1 - device_response = device_list_response.get("response") - - if not device_response: - self.log( - "Failed to retrieve devices associated with the site '{0}' due to empty API response.".format( - site_name - ), - "INFO", - ) - break + break - for item in site_response_list: - if item["reachabilityStatus"] != "Reachable": + for item in site_response_list: + if item["reachabilityStatus"] != "Reachable": + self.log( + """Device '{0}' is currently '{1}' and cannot be included in the SWIM distribution/activation + process.""".format( + item["managementIpAddress"], item["reachabilityStatus"] + ), + "INFO", + ) + continue self.log( - """Device '{0}' is currently '{1}' and cannot be included in the SWIM distribution/activation + """Device '{0}' from site '{1}' is ready for the SWIM distribution/activation process.""".format( - item["managementIpAddress"], item["reachabilityStatus"] + item["managementIpAddress"], site_name ), "INFO", ) - continue - self.log( - """Device '{0}' from site '{1}' is ready for the SWIM distribution/activation - process.""".format( - item["managementIpAddress"], site_name - ), - "INFO", - ) - site_memberships_ids.append(item["instanceUuid"]) + 
site_memberships_ids.append(item["instanceUuid"]) - for item in device_response: - if item["reachabilityStatus"] != "Reachable": + for item in device_response: + if item["reachabilityStatus"] != "Reachable": + self.log( + """Unable to proceed with the device '{0}' for SWIM distribution/activation as its status is + '{1}'.""".format( + item["managementIpAddress"], item["reachabilityStatus"] + ), + "INFO", + ) + continue self.log( - """Unable to proceed with the device '{0}' for SWIM distribution/activation as its status is - '{1}'.""".format( - item["managementIpAddress"], item["reachabilityStatus"] + """Device '{0}' matches to the specified filter requirements and is set for SWIM + distribution/activation.""".format( + item["managementIpAddress"] ), "INFO", ) - continue - self.log( - """Device '{0}' matches to the specified filter requirements and is set for SWIM - distribution/activation.""".format( - item["managementIpAddress"] - ), - "INFO", + device_response_ids.append(item["instanceUuid"]) + except Exception as e: + self.msg = "An exception occured while fetching the device uuids from Cisco Catalyst Center: {0}".format( + str(e) ) - device_response_ids.append(item["instanceUuid"]) - except Exception as e: - self.msg = "An exception occured while fetching the device uuids from Cisco Catalyst Center: {0}".format( - str(e) + self.log(self.msg, "ERROR") + return device_uuid_list + + if not device_response_ids or not site_memberships_ids: + self.log( + "Failed to retrieve devices associated with the site '{0}' due to empty API response.".format( + site_name + ), + "INFO", ) - self.log(self.msg, "ERROR") return device_uuid_list - if not device_response_ids or not site_memberships_ids: - self.log( - "Failed to retrieve devices associated with the site '{0}' due to empty API response.".format( - site_name - ), - "INFO", + # Find the intersection of device IDs with the response get from get_membership api and get_device_list api with provided filters + device_uuid_list = set(site_memberships_ids).intersection( + set(device_response_ids) ) - return device_uuid_list - - # Find the intersection of device IDs with the response get from get_membership api and get_device_list api with provided filters - device_uuid_list = set(site_memberships_ids).intersection( - set(device_response_ids) - ) - return device_uuid_list + return device_uuid_list def get_device_family_identifier(self, family_name): """ @@ -1679,6 +1987,25 @@ def get_have(self): device families, distribution devices, and activation devices based on user-provided data in the 'want' dictionary. It validates and retrieves the necessary information from Cisco Catalyst Center to support later actions. 
""" + self.log("Retrieving and storing software image and device details from Cisco Catalyst Center", "DEBUG") + + if self.want.get("image_name"): + self.log("Processing bulk image names for ID resolution", "DEBUG") + have = {} + names = self.want.get("image_name") + image_id_map = {} + + for name in names: + image_id = self.get_image_id(name) + if image_id: + image_id_map[name] = image_id + self.log("Successfully resolved image ID for '{0}': {1}".format(name, image_id), "DEBUG") + else: + self.log("Failed to resolve image ID for '{0}'".format(name), "WARNING") + + have["image_ids"] = image_id_map + self.have.update(have) + self.log("Processed {0} image names for bulk operations".format(len(image_id_map)), "INFO") if self.want.get("tagging_details"): have = {} @@ -1741,7 +2068,7 @@ def get_have(self): # check if image for distributon is available if distribution_details.get("image_name"): name = distribution_details.get("image_name").split("/")[-1] - image_id = self.get_image_id(name) + image_id = self.get_image_id_v1(name) have["distribution_image_id"] = image_id elif self.have.get("imported_image_id"): @@ -1749,13 +2076,9 @@ def get_have(self): else: self.log( - "Image details required for distribution have not been provided", + "Image details for distribution have not been provided, will proceed with the golden image", "ERROR", ) - self.module.fail_json( - msg="Image details required for distribution have not been provided", - response=[], - ) device_params = { "hostname": distribution_details.get("device_hostname"), @@ -1800,7 +2123,7 @@ def get_have(self): # check if image for activation is available if activation_details.get("image_name"): name = activation_details.get("image_name").split("/")[-1] - image_id = self.get_image_id(name) + image_id = self.get_image_id_v1(name) have["activation_image_id"] = image_id elif self.have.get("imported_image_id"): @@ -1810,10 +2133,6 @@ def get_have(self): "Image details required for activation have not been provided", "ERROR", ) - self.module.fail_json( - msg="Image details required for activation have not been provided", - response=[], - ) site_name = activation_details.get("site_name") if site_name: @@ -1893,7 +2212,6 @@ def get_want(self, config): import, tagging, distribution, and activation. It stores these details in the 'want' dictionary for later use in the Ansible module. """ - want = {} import_image_details = config.get("import_image_details", {}) if import_image_details: @@ -1956,6 +2274,8 @@ def get_want(self, config): want["tagging_details"] = config.get("tagging_details") want["distribution_details"] = config.get("image_distribution_details") want["activation_details"] = config.get("image_activation_details") + want["image_name"] = config.get("image_name") + want["sync_cco"] = config.get("sync_cco") self.want = want self.log("Desired State (want): {0}".format(str(self.want)), "INFO") @@ -2825,6 +3145,221 @@ def check_swim_task_status(self, swim_task_dict, swim_task_name): return device_ips_list, device_count + def filter_device_uuids_by_tag(self, device_uuid_list, device_tag): + """ + Filter device UUIDs based on a specified device tag. + Parameters: + self (object): An instance of a class used for interacting with Cisco Catalyst Center. + device_uuid_list (list): A list of device UUIDs to be filtered. + device_tag (str): The tag used to filter the devices. + Returns: + list: A list of device UUIDs that match the specified device tag. 
+ Description: + This function filters the provided list of device UUIDs based on the specified device tag. + It retrieves the tags associated with each device UUID and checks if the specified tag is present. + If the tag is found, the device UUID is added to the filtered list. The function returns the list of filtered device UUIDs. + """ + # Validate input parameters + + self.log( + "Starting device UUID filtering based on tag criteria for SWIM operations", + "INFO" + ) + + self.log( + "Processing tag-based device filtering with parameters - " + "device_uuid_list: {0} devices, device_tag: '{1}'".format( + len(device_uuid_list), device_tag + ), + "DEBUG" + ) + + if not device_uuid_list: + self.log("Empty device UUID list provided for tag filtering", "DEBUG") + return [] + + if not device_tag or not isinstance(device_tag, str): + self.log("Invalid device tag provided: {0}".format(device_tag), "WARNING") + return [] + + filtered_device_uuids = [] + + # Statistics tracking + statistics = { + 'devices_processed': 0, + 'devices_with_matching_tags': 0, + 'devices_without_tags': 0, + 'devices_with_api_errors': 0, + 'invalid_uuids': 0 + } + + for device_index, device_uuid in enumerate(device_uuid_list, start=1): + statistics['devices_processed'] += 1 + + self.log( + "Processing device {0}/{1} - UUID: {2}".format( + device_index, len(device_uuid_list), device_uuid + ), + "DEBUG" + ) + + # Validate device UUID format + if not device_uuid or not isinstance(device_uuid, str): + self.log( + "Skipping invalid device UUID at index {0}: {1}".format( + device_index, device_uuid + ), + "WARNING" + ) + statistics['invalid_uuids'] += 1 + continue + + self.log( + "Retrieving device tags for UUID: {0}".format(device_uuid), + "DEBUG" + ) + + try: + response = self.dnac_apply["exec"]( + family="devices", + function="get_device_detail", + params={"search_by": device_uuid, "identifier": "uuid"}, + ) + + self.log( + "Response collected from API 'get_device_detail' for UUID {0}: {1}".format( + device_uuid, response + ), + "DEBUG", + ) + + # Validate API response structure + if not response or not isinstance(response, dict): + self.log( + "Invalid API response structure for device UUID {0} - " + "expected dict, got: {1}".format( + device_uuid, type(response).__name__ + ), + "WARNING" + ) + statistics['devices_with_api_errors'] += 1 + continue + + device_response = response.get("response", {}) + + if not device_response: + self.log( + "Empty device response for UUID {0} - device may not exist".format( + device_uuid + ), + "WARNING" + ) + statistics['devices_with_api_errors'] += 1 + continue + + device_tags = device_response.get("tagIdList", []) + + if not device_tags: + self.log( + "No tags found for device UUID {0} - excluding from filtered results".format( + device_uuid + ), + "DEBUG" + ) + statistics['devices_without_tags'] += 1 + continue + + self.log( + "Retrieved {0} tags for device UUID {1}: {2}".format( + len(device_tags), device_uuid, device_tags + ), + "DEBUG", + ) + + # Check if specified tag exists in device tags + if device_tag in device_tags: + self.log( + "Device UUID {0} matches the specified tag '{1}' - " + "adding to filtered results".format(device_uuid, device_tag), + "DEBUG", + ) + filtered_device_uuids.append(device_uuid) + statistics['devices_with_matching_tags'] += 1 + else: + self.log( + "Device UUID {0} does not contain the specified tag '{1}' - " + "excluding from filtered results. 
Available tags: {2}".format( + device_uuid, device_tag, device_tags + ), + "DEBUG" + ) + + except Exception as e: + self.log( + "Failed to process device UUID {0} due to API error: {1}".format( + device_uuid, str(e) + ), + "ERROR" + ) + statistics['devices_with_api_errors'] += 1 + continue + + # Log comprehensive filtering statistics + self.log( + "Device tag filtering completed - " + "processed: {0}, matching tags: {1}, without tags: {2}, API errors: {3}, invalid UUIDs: {4}".format( + statistics['devices_processed'], + statistics['devices_with_matching_tags'], + statistics['devices_without_tags'], + statistics['devices_with_api_errors'], + statistics['invalid_uuids'] + ), + "INFO" + ) + + self.log( + "Tag-based device filtering results for tag '{0}': {1} devices matched " + "out of {2} total devices processed".format( + device_tag, len(filtered_device_uuids), len(device_uuid_list) + ), + "INFO", + ) + + # Log warnings for problematic scenarios + if statistics['devices_with_api_errors'] > 0: + self.log( + "Warning: {0} devices encountered API errors during tag filtering process".format( + statistics['devices_with_api_errors'] + ), + "WARNING" + ) + + if statistics['invalid_uuids'] > 0: + self.log( + "Warning: {0} invalid device UUIDs were skipped during filtering".format( + statistics['invalid_uuids'] + ), + "WARNING" + ) + + if len(filtered_device_uuids) == 0: + self.log( + "No devices found matching the specified tag '{0}'. " + "Consider checking if the tag exists or if devices are properly tagged.".format( + device_tag + ), + "WARNING" + ) + + self.log( + "Final filtered device UUIDs based on tag '{0}': {1}".format( + device_tag, filtered_device_uuids + ), + "DEBUG", + ) + + return filtered_device_uuids + def get_diff_distribution(self): """ Get image distribution parameters from the playbook and trigger image distribution. 
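The filter_device_uuids_by_tag helper introduced in the hunk above is consumed by the distribution flow in the next hunk through the new optional device_tag attribute of image_distribution_details, alongside the new image_distribution_timeout override. A minimal playbook sketch of how these attributes could be supplied (image, site, and tag values are illustrative, and the dnac_* connection arguments used elsewhere in this collection are omitted for brevity):

    - name: Distribute an image only to devices carrying a specific tag
      cisco.dnac.swim_workflow_manager:
        # dnac_* connection parameters omitted
        state: merged
        config:
          - image_distribution_details:
              image_name: cat9k_iosxe.17.12.01.SPA.bin   # illustrative image name
              site_name: Global/USA/SAN JOSE             # illustrative site hierarchy
              device_family_name: Switches and Hubs
              device_role: ALL
              device_tag: NY-BRANCH                      # matched against each device's tag list
              image_distribution_timeout: 2400           # overrides the 1800-second default

Devices whose tag list does not contain the given tag are logged and excluded before any distribution task is triggered.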
@@ -2839,7 +3374,7 @@ def get_diff_distribution(self): """ self.log("Retrieving distribution details from the playbook.", "DEBUG") - + self.bulk_distribution_success = False distribution_details = self.want.get("distribution_details") if not distribution_details: self.log( @@ -2849,9 +3384,10 @@ def get_diff_distribution(self): site_name = distribution_details.get("site_name") device_family = distribution_details.get("device_family_name") + device_tag = distribution_details.get("device_tag") device_role = distribution_details.get("device_role", "ALL") device_series_name = distribution_details.get("device_series_name") - + self.max_timeout = distribution_details.get("image_distribution_timeout", 1800) self.log( "Fetching device UUIDs for site '{0}', family '{1}', role '{2}', and series '{3}'.".format( site_name, device_family, device_role, device_series_name @@ -2862,6 +3398,22 @@ def get_diff_distribution(self): device_uuid_list = self.get_device_uuids( site_name, device_family, device_role, device_series_name ) + + self.log( + "Initial device UUIDs retrieved for distribution: {0}".format( + device_uuid_list + ), + "DEBUG", + ) + if device_tag: + device_uuid_list = self.filter_device_uuids_by_tag( + device_uuid_list, device_tag + ) + self.log( + "Retrieved device UUIDs for distribution: {0}".format(device_uuid_list), + "DEBUG", + ) + image_id = self.have.get("distribution_image_id") distribution_device_id = self.have.get("distribution_device_id") device_ip = self.get_device_ip_from_id(distribution_device_id) @@ -2905,7 +3457,7 @@ def get_diff_distribution(self): ) image_ids = { - image: self.get_image_id(image) for image in all_images_for_distribution + image: self.get_image_id_v1(image) for image in all_images_for_distribution } self.log("Resolved image IDs: {0}".format(image_ids), "DEBUG") @@ -2940,83 +3492,161 @@ def get_diff_distribution(self): self.set_operation_result("success", False, self.msg, "INFO") return self - success_distribution_list = [] - failed_distribution_list = [] - - for image_name, image_id in image_ids.items(): - self.log( - "Initiating image distribution for '{0}' (ID: {1}) to device {2}".format( - image_name, image_id, elg_device_ip - ), - "INFO", - ) - distribution_params = { - "payload": [{"deviceUuid": device_id, "imageUuid": image_id}] - } - self.log( - "Generated distribution parameters: {0}".format( - distribution_params - ), - "DEBUG", - ) - - response = self.dnac._exec( - family="software_image_management_swim", - function="trigger_software_image_distribution", - op_modifies=True, - params=distribution_params, - ) - self.log( - "Received API response from 'trigger_software_image_distribution': {0}".format( - str(response) - ), - "DEBUG", - ) + if ( + self.compare_dnac_versions(self.get_ccc_version(), "2.3.7.9") + <= 0 + ): + success_distribution_list = [] + failed_distribution_list = [] - if ( - not response - or "response" not in response - or "taskId" not in response["response"] - ): - failed_msg = "Failed to initiate image distribution for '{0}' (ID: {1}) to the device with IP {2}.".format( - image_name, image_id, elg_device_ip + for image_name, image_id in image_ids.items(): + self.log( + "Initiating image distribution for '{0}' (ID: {1}) to device {2}".format( + image_name, image_id, elg_device_ip + ), + "INFO", + ) + distribution_params = { + "payload": [{"deviceUuid": device_id, "imageUuid": image_id}] + } + self.log( + "Generated distribution parameters: {0}".format( + distribution_params + ), + "DEBUG", ) - failed_msg_parts.append(failed_msg) - 
failed_distribution_list.append(image_name) - self.log(failed_msg, "ERROR") - continue - task_id = response["response"]["taskId"] - self.log( - "Tracking distribution task with Task ID: {0}".format(task_id), - "INFO", - ) + response = self.dnac._exec( + family="software_image_management_swim", + function="trigger_software_image_distribution", + op_modifies=True, + params=distribution_params, + ) + self.log( + "Received API response from 'trigger_software_image_distribution': {0}".format( + str(response) + ), + "DEBUG", + ) + deviceip = self.get_device_ip_from_id + if ( + not response + or "response" not in response + or "taskId" not in response["response"] + ): + failed_msg = "Failed to initiate image distribution for '{0}' (ID: {1}) to the device with IP {2}.".format( + image_name, image_id, elg_device_ip + ) + failed_msg_parts.append(failed_msg) + failed_distribution_list.append(image_name) + self.log(failed_msg, "ERROR") + continue - while True: - task_details = self.get_task_details(task_id) - self.log("Task details received: {0}".format(task_details), "DEBUG") + task_id = response["response"]["taskId"] + self.log( + "Tracking distribution task with Task ID: {0}".format(task_id), + "INFO", + ) + + while True: + task_details = self.get_task_details(task_id) + self.log("Task details received: {0}".format(task_details), "DEBUG") + + if not task_details.get( + "isError" + ) and "completed successfully" in task_details.get("progress"): + if image_id: + success_msg = ( + "'{0}' (ID: {1}) successfully distributed for device {2}.".format( + image_name, image_id, elg_device_ip + ) + ) + else: + success_msg = ( + "Golden image successfully distributed to device {0}.".format( + elg_device_ip + ) + ) + success_msg_parts.append(success_msg) + success_distribution_list.append(image_name) + self.log(success_msg, "INFO") + break - if not task_details.get( - "isError" - ) and "completed successfully" in task_details.get("progress"): - success_msg = ( - "'{0}' (ID: {1}) successfully distributed.".format( - image_name, image_id + if task_details.get("isError"): + failed_msg = "Image '{0}' (ID: {1}) distribution failed for device {2}.".format( + image_name, image_id, elg_device_ip ) + failed_msg_parts.append(failed_msg) + failed_distribution_list.append(image_name) + self.log(failed_msg, "ERROR") + break + else: + self.log( + "Distribution device ID provided. 
Starting image distribution for device IP {0} (ID: {1}) with software version >= 2.3.7.9.".format( + elg_device_ip, device_id + ), + "DEBUG", + ) + + distributed_images = [{"id": img_id} for img_id in image_ids.values()] + + payload = [ + { + "id": distribution_device_id, + "distributedImages": distributed_images, + "networkValidationIds": None # Update after confirmation from DNAC team + } + ] + + self.log( + "Payload for image distribution: {0}".format(str(payload)), "DEBUG" + ) + + try: + response = self.dnac._exec( + family="software_image_management_swim", + function="distribute_images_on_the_network_device", + op_modifies=True, + id=distribution_device_id, + params=payload + ) + + self.log( + "Received API response from 'distribute_images_on_the_network_device': {0}".format( + str(response) + ), + "DEBUG", + ) + + self.check_swim_tasks_response_status( + response, "distribute_images_on_the_network_device" + ) + + if self.status not in ["failed", "exited"]: + self.msg = ( + "Image distribution completed successfully for the device IP {0} " + "(ID: {1}).".format(elg_device_ip, device_id) ) - success_msg_parts.append(success_msg) - success_distribution_list.append(image_name) - self.log(success_msg, "INFO") - break + self.set_operation_result("success", True, self.msg, "INFO") + return self - if task_details.get("isError"): - failed_msg = "Image '{0}' (ID: {1}) distribution failed for device {2}.".format( - image_name, image_id, elg_device_ip + if self.status == "failed": + fail_reason = self.msg + self.msg = ( + "Image distribution failed due to - {0}".format(fail_reason) ) - failed_msg_parts.append(failed_msg) - failed_distribution_list.append(image_name) - self.log(failed_msg, "ERROR") - break + self.set_operation_result( + "failed", False, self.msg, "ERROR" + ).check_return_status() + + except Exception as e: + self.log( + "Error occurred while distributing image: {0}".format(str(e)), + "ERROR", + ) + self.set_operation_result( + "failed", False, str(e), "ERROR" + ).check_return_status() if success_msg_parts: final_msg += "Successfully distributed: " + "; ".join(success_msg_parts) @@ -3064,111 +3694,159 @@ def get_diff_distribution(self): already_distributed_devices = [] elg_device_list = [] device_ip_for_not_elg_list = [] + bulk_payload = [] - for device_uuid in device_uuid_list: - device_ip = self.get_device_ip_from_id(device_uuid) - self.log("Processing device: {0}".format(device_ip), "DEBUG") - distributed = False + if self.compare_dnac_versions(self.get_ccc_version(), "2.3.7.9") <= 0: + # -------- OLD VERSION (Sequential Distribution) -------- # + self.log( + "Using old version of SWIM API for image distribution (before 2.3.7.9)" + ) + for device_uuid in device_uuid_list: + device_ip = self.get_device_ip_from_id(device_uuid) + self.log("Processing device: {0}".format(device_ip), "DEBUG") + distributed = False - for img_name, img_id in image_ids.items(): - self.log( - "Checking compliance for image '{0}' on device {1}".format( - img_name, device_ip - ), - "DEBUG", - ) - elg_device_ip, device_id = self.check_device_compliance( - device_uuid, img_name - ) + for img_name, img_id in image_ids.items(): + self.log("Checking compliance for image '{0}' on device {1}".format(img_name, device_ip), "DEBUG") + elg_device_ip, device_id = self.check_device_compliance(device_uuid, img_name) - if not elg_device_ip: - device_ip_for_not_elg = self.get_device_ip_from_id(device_uuid) - device_ip_for_not_elg_list.append(device_ip_for_not_elg) - self.log( - "Device {0} is not eligible for image 
'{1}'".format( - device_ip, img_name - ), - "WARNING", + if not elg_device_ip: + device_ip_for_not_elg_list.append(device_ip) + self.log("Device {0} is not eligible for image '{1}'".format(device_ip, img_name), "WARNING") + continue + + self.log("Device {0} is eligible for distribution of image {1}".format(elg_device_ip, img_name), "INFO") + elg_device_list.append(elg_device_ip) + + distribution_params = {"payload": [{"deviceUuid": device_id, "imageUuid": img_id}]} + self.log("Distribution Params: {0}".format(str(distribution_params)), "INFO") + + response = self.dnac._exec( + family="software_image_management_swim", + function="trigger_software_image_distribution", + op_modifies=True, + params=distribution_params, ) + self.log("Received API response: {0}".format(str(response)), "DEBUG") + + if response: + task_id = response.get("response", {}).get("taskId") + distribution_task_dict[(device_ip, img_name)] = task_id + distributed = True + + if not distributed: + already_distributed_devices.append(device_ip) + + # -------- Task Status Tracking -------- + for (device_ip, img_name), task_id in distribution_task_dict.items(): + task_name = "Distribution to {0}".format(device_ip) + success_msg = "Successfully distributed image {0} to device {1}".format(img_name, device_ip) + + status_check = self.get_task_status_from_tasks_by_id(task_id, task_name, success_msg) + + if status_check.status == "success": + success_distribution_list.append((device_ip, img_name)) + else: + failed_distribution_list.append((device_ip, img_name)) + + else: + # -------- NEW VERSION (Bulk Distribution) -------- # + for device_uuid in device_uuid_list: + device_ip = self.get_device_ip_from_id(device_uuid) + self.log("Processing device: {0}".format(device_ip), "DEBUG") + device_distributed_images = [] + + elg_device_ip, elg_device_uuid = self.check_device_compliance(device_uuid) + + if not elg_device_ip: + device_ip_for_not_elg_list.append(device_ip) + self.log("Device {0} is not eligible for image distribution".format(device_ip), "WARNING") continue - self.log( - "Device {0} is eligible for distribution of image {1}".format( - elg_device_ip, image_name - ), - "INFO", - ) - elg_device_list.append(elg_device_ip) + for img_name, img_id in image_ids.items(): - self.log( - "Starting distribution of '{0}' to device {1}".format( - img_name, device_ip - ), - "INFO", - ) - distribution_params = dict( - payload=[dict(deviceUuid=device_id, imageUuid=img_id)] - ) - self.log( - "Distribution Params: {0}".format(str(distribution_params)), "INFO" - ) + self.log("Device {0} is eligible for bulk image distribution of '{1}'".format(elg_device_ip, img_name), "INFO") + elg_device_list.append(elg_device_ip) + + device_distributed_images.append({"id": img_id}) + + # Build payload only with non-empty values + bulk_payload_entry = {} + if device_uuid: + bulk_payload_entry["id"] = device_uuid + if device_distributed_images: + bulk_payload_entry["distributedImages"] = device_distributed_images + network_validation_ids = distribution_details.get("network_validation_ids") + if network_validation_ids: + bulk_payload_entry["networkValidationIds"] = network_validation_ids + + if bulk_payload_entry: + bulk_payload.append(bulk_payload_entry) + + if not bulk_payload: + if device_ip_for_not_elg_list: + self.msg = "No eligible devices for bulk distribution. 
Devices not eligible: {0}".format( + ", ".join(device_ip_for_not_elg_list) + ) + self.set_operation_result("success", False, self.msg, "ERROR").check_return_status() + else: + self.msg = "No images or devices to distribute (empty payload)." + self.set_operation_result("success", False, self.msg, "ERROR").check_return_status() + return self + # -------- Bulk API Call -------- + self.log("Bulk Payload for Distribution: {0}".format(str(bulk_payload)), "DEBUG") + try: response = self.dnac._exec( family="software_image_management_swim", - function="trigger_software_image_distribution", + function="bulk_distribute_images_on_network_devices", op_modifies=True, - params=distribution_params, + params={"payload": bulk_payload}, ) - self.log( - "Received API response from 'trigger_software_image_distribution': {0}".format( - str(response) - ), - "DEBUG", - ) - - if response: - task_id = response.get("response", {}).get("taskId") - distribution_task_dict[(device_ip, img_name)] = task_id - distributed = True - if not distributed: - already_distributed_devices.append(device_ip) + self.log("API response from 'bulk_distribute_images_on_network_devices': {0}".format(str(response)), "DEBUG") - # Check task status sequentially - self.log("Checking task statuses for distributed images", "INFO") - - for (device_ip, img_name), task_id in distribution_task_dict.items(): - task_name = "Distribution to {0}".format(device_ip) - success_msg = "Successfully distributed image {0} to device {1}".format( - img_name, device_ip - ) + self.check_swim_tasks_response_status( + response, "bulk_distribute_images_on_network_devices" + ) - status_check = self.get_task_status_from_tasks_by_id( - task_id, task_name, success_msg - ) + if response and self.status not in ["failed", "exited"]: + device_ip = ", ".join(elg_device_list) + self.bulk_distribution_success_ips = device_ip + self.msg = "Bulk image distribution completed successfully - {0}.".format(device_ip) + self.bulk_distribution_success = True + success_distribution_list.extend([(ip, None) for ip in elg_device_list]) + self.set_operation_result("success", True, self.msg, "INFO") + return self + else: + self.msg = "Bulk image distribution failed." 
+ self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() - if status_check.status == "success": - success_distribution_list.append((device_ip, img_name)) - else: - failed_distribution_list.append((device_ip, img_name)) + except Exception as e: + self.msg = "Exception occurred during bulk image distribution: {0}".format(str(e)) + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + self.log(self.msg, "ERROR") + # -------- Final Summary Logging -------- success_image_map = {} failed_image_map = {} for device_ip, img_name in success_distribution_list: - if img_name not in success_image_map: - success_image_map[img_name] = [] - success_image_map[img_name].append(device_ip) + success_image_map.setdefault(img_name, []).append(device_ip) for device_ip, img_name in failed_distribution_list: - if img_name not in failed_image_map: - failed_image_map[img_name] = [] - failed_image_map[img_name].append(device_ip) + failed_image_map.setdefault(img_name, []).append(device_ip) - success_msg_parts = [ - "{} to {}".format(img, ", ".join(devices)) - for img, devices in success_image_map.items() - ] + if list(success_image_map.keys()) == [None]: + success_msg_parts = [ + "Golden image has been sent to {}".format(", ".join(devices)) + for devices in success_image_map.values() + ] + else: + success_msg_parts = [ + "{} to {}".format(img, ", ".join(devices)) + for img, devices in success_image_map.items() + ] failed_msg_parts = [ "{} to {}".format(img, ", ".join(devices)) @@ -3204,7 +3882,7 @@ def get_diff_distribution(self): return self - def check_device_compliance(self, device_uuid, image_name): + def check_device_compliance(self, device_uuid, image_name=None): """ Check the compliance status of a device's image. 
Parameters: @@ -3286,6 +3964,8 @@ def get_diff_activation(self): device_family = activation_details.get("device_family_name") device_role = activation_details.get("device_role", "ALL") device_series_name = activation_details.get("device_series_name") + device_tag = activation_details.get("device_tag") + self.max_timeout = activation_details.get("image_activation_timeout", 1800) self.log( "Fetching device UUIDs for site '{0}', family '{1}', role '{2}', and series '{3}'.".format( @@ -3297,6 +3977,22 @@ def get_diff_activation(self): device_uuid_list = self.get_device_uuids( site_name, device_family, device_role, device_series_name ) + + self.log( + "Initial device UUIDs retrieved for distribution: {0}".format( + device_uuid_list + ), + "DEBUG", + ) + if device_tag: + device_uuid_list = self.filter_device_uuids_by_tag( + device_uuid_list, device_tag + ) + self.log( + "Retrieved device UUIDs for distribution: {0}".format(device_uuid_list), + "DEBUG", + ) + image_id = self.have.get("activation_image_id") activation_device_id = self.have.get("activation_device_id") device_ip = self.get_device_ip_from_id(activation_device_id) @@ -3336,12 +4032,14 @@ def get_diff_activation(self): all_images_for_activation.extend([str(img) for img in sub_package_images]) image_ids = { - image: self.get_image_id(image) for image in all_images_for_activation + image: self.get_image_id_v1(image) for image in all_images_for_activation } - self.log( - "Images identified for activation: {0}".format(", ".join(image_ids.keys())), - "INFO", - ) + + if image_ids and not (len(image_ids) == 1 and None in image_ids): + self.log( + "Images identified for activation: {0}".format(", ".join(str(k) for k in image_ids.keys())), + "INFO", + ) if activation_device_id: success_msg_parts = [] @@ -3375,92 +4073,123 @@ def get_diff_activation(self): success_activation_list = [] failed_activation_list = [] - for image_name, image_id in image_ids.items(): - payload = [ - { - "activateLowerImageVersion": activation_details.get( - "activate_lower_image_version" - ), - "deviceUpgradeMode": activation_details.get( - "device_upgrade_mode" - ), - "distributeIfNeeded": activation_details.get( - "distribute_if_needed" - ), - "deviceUuid": self.have.get("activation_device_id"), - "imageUuidList": [image_id], + activation_payload_list = [] + + # OLD FLOW (for DNAC < 2.3.7.9) + if self.compare_dnac_versions(self.get_ccc_version(), "2.3.7.9") <= 0: + for image_name, image_id in image_ids.items(): + payload = [ + { + "activateLowerImageVersion": activation_details.get("activate_lower_image_version"), + "deviceUpgradeMode": activation_details.get("device_upgrade_mode"), + "distributeIfNeeded": activation_details.get("distribute_if_needed"), + "deviceUuid": self.have.get("activation_device_id"), + "imageUuidList": [image_id], + } + ] + + activation_params = { + "schedule_validate": activation_details.get("schedule_validate"), + "payload": payload, } - ] - activation_params = { - "schedule_validate": activation_details.get("schedule_validate"), - "payload": payload, - } - - self.log( - "Activation Params: {0}".format(str(activation_params)), "INFO" - ) + self.log("Activation Params: {0}".format(str(activation_params)), "INFO") - response = self.dnac._exec( - family="software_image_management_swim", - function="trigger_software_image_activation", - op_modifies=True, - params=activation_params, - ) - self.log( - "Received API response from 'trigger_software_image_activation': {0}".format( - str(response) - ), - "DEBUG", - ) - - if ( - not response - or 
"response" not in response - or "taskId" not in response["response"] - ): - failed_msg = "Failed to initiate activation for image '{0}' (ID: {1}) on device with IP {2}.".format( - image_name, image_id, elg_device_ip + response = self.dnac._exec( + family="software_image_management_swim", + function="trigger_software_image_activation", + op_modifies=True, + params=activation_params, ) - failed_msg_parts.append(failed_msg) - failed_activation_list.append(image_name) - self.log(failed_msg, "ERROR") - continue - task_id = response["response"]["taskId"] - self.log( - "Tracking activation task with Task ID: {0}".format(task_id), "INFO" - ) + self.log("Received API response from 'trigger_software_image_activation': {0}".format(str(response)), "DEBUG") - while True: - task_details = self.get_task_details(task_id) - - if not task_details.get( - "isError" - ) and "completed successfully" in task_details.get("progress"): - success_msg = "'{0}' (ID: {1})".format(image_name, image_id) - success_msg_parts.append(success_msg) - success_activation_list.append(image_name) - self.log( - "Image '{0}' (ID: {1}) activation success.".format( - image_name, image_id - ), - "INFO", - ) - break - - if task_details.get("isError"): - failed_msg = "Activation of image '{0}' (ID: {1}) to the device with IP {2} has failed. Error: {3}".format( - image_name, - image_id, - elg_device_ip, - task_details.get("progress", "Unknown error"), + if not response or "response" not in response or "taskId" not in response["response"]: + failed_msg = "Failed to initiate activation for image '{0}' (ID: {1}) on device with IP {2}.".format( + image_name, image_id, self.get_device_ip_from_id(self.have.get("activation_device_id")) ) failed_msg_parts.append(failed_msg) failed_activation_list.append(image_name) self.log(failed_msg, "ERROR") - break + continue + + task_id = response["response"]["taskId"] + self.log("Tracking activation task with Task ID: {0}".format(task_id), "INFO") + + while True: + task_details = self.get_task_details(task_id) + + if not task_details.get("isError") and "completed successfully" in task_details.get("progress"): + success_msg = "'{0}' (ID: {1})".format(image_name, image_id) + success_msg_parts.append(success_msg) + success_activation_list.append(image_name) + self.log("Image '{0}' (ID: {1}) activation success.".format(image_name, image_id), "INFO") + break + if task_details.get("isError"): + failed_msg = "Activation of image '{0}' (ID: {1}) to the device with IP {2} has failed. 
Error: {3}".format( + image_name, + image_id, + self.get_device_ip_from_id(self.have.get("activation_device_id")), + task_details.get("progress", "Unknown error"), + ) + failed_msg_parts.append(failed_msg) + failed_activation_list.append(image_name) + self.log(failed_msg, "ERROR") + break + + # NEW FLOW (for Catalyst Center > 2.3.7.9) + else: + self.log("Using new SWIM API for image activation (after 2.3.7.9)", "INFO") + + activation_device_id = self.have.get("activation_device_id") + + # Correct: Combine all image IDs into one installedImages list + activation_payload = { + "id": activation_device_id + } + + # Add installedImages only if image_ids has values + if image_ids: + activation_payload["installedImages"] = [{"id": image_id} for image_id in image_ids.values() if image_id] + + # Add compatibleFeatures only if available + compatible_features = activation_details.get("compatible_features") + if compatible_features: + activation_payload["compatibleFeatures"] = compatible_features + + # Add networkValidationIds only if available + network_validation_ids = activation_details.get("network_validation_ids") + if network_validation_ids: + activation_payload["networkValidationIds"] = network_validation_ids + + self.log("Payload for 'update_images_on_the_network_device': {0}".format(str(activation_payload)), "DEBUG") + + try: + response = self.dnac._exec( + family="software_image_management_swim", + function="update_images_on_the_network_device", + op_modifies=True, + params=activation_payload + ) + + self.log("API response from 'update_images_on_the_network_device': {0}".format(str(response)), "DEBUG") + self.check_swim_tasks_response_status(response, "update_images_on_the_network_device") + + device_ip = self.get_device_ip_from_id(activation_device_id) + if response and self.status not in ["failed", "exited"]: + success_msg_parts = ["All images activated successfully on device {0}".format(device_ip)] + success_activation_list = list(image_ids.keys()) + else: + failed_msg_parts = ["Image activation failed on device {0}".format(device_ip)] + failed_activation_list = list(image_ids.keys()) + + except Exception as e: + self.log("Exception during activation: {0}".format(str(e)), "ERROR") + failed_msg_parts = ["Exception during activation: {0}".format(str(e))] + failed_activation_list = list(image_ids.keys()) + + # Final status summary final_msg = "" if success_msg_parts: final_msg += "Successfully activated: " + "; ".join(success_msg_parts) @@ -3470,6 +4199,7 @@ def get_diff_activation(self): final_msg += "Failed to activate: " + "; ".join(failed_msg_parts) + "." 
self.log("Final activation status: {0}".format(final_msg), "INFO") + self.msg = final_msg if not success_activation_list and failed_activation_list: self.msg = final_msg @@ -3509,148 +4239,176 @@ def get_diff_activation(self): elg_device_list = [] device_ip_for_not_elg_list = [] - for device_uuid in device_uuid_list: - device_ip = self.get_device_ip_from_id(device_uuid) - activated = False - self.log("Checking compliance for device {0}".format(device_ip), "INFO") + # OLD FLOW (for DNAC <= 2.3.7.9) + if self.compare_dnac_versions(self.get_ccc_version(), "2.3.7.9") <= 0: + for device_uuid in device_uuid_list: + device_ip = self.get_device_ip_from_id(device_uuid) + activated = False + self.log("Checking compliance for device {0}".format(device_ip), "INFO") - for image_name, image_id in image_ids.items(): + for image_name, image_id in image_ids.items(): + elg_device_ip, device_id = self.check_device_compliance(device_uuid, image_name) - elg_device_ip, device_id = self.check_device_compliance( - device_uuid, image_name - ) + if not elg_device_ip: + device_ip_for_not_elg = self.get_device_ip_from_id(device_uuid) + device_ip_for_not_elg_list.append(device_ip_for_not_elg) + self.log("Device {0} is not eligible for activation of image '{1}'".format(device_ip, image_name), "WARNING") + continue - if not elg_device_ip: - device_ip_for_not_elg = self.get_device_ip_from_id(device_uuid) - device_ip_for_not_elg_list.append(device_ip_for_not_elg) - self.log( - "Device {0} is not eligible for activation of image '{1}'".format( - device_ip, image_name - ), - "WARNING", - ) - continue + self.log("Device {0} is eligible for activation of image {1}".format(elg_device_ip, image_name), "INFO") + elg_device_list.append(elg_device_ip) - self.log( - "Device {0} is eligible for activation of image {1}".format( - elg_device_ip, image_name - ), - "INFO", - ) - elg_device_list.append(elg_device_ip) + self.log("Starting activation of image '{0}' on device {1}".format(image_name, device_ip), "INFO") - self.log( - "Starting activation of image '{0}' on device {1}".format( - image_name, device_ip - ), - "INFO", - ) + payload = [ + dict( + activateLowerImageVersion=activation_details.get("activate_lower_image_version"), + deviceUpgradeMode=activation_details.get("device_upgrade_mode"), + distributeIfNeeded=activation_details.get("distribute_if_needed"), + deviceUuid=device_id, + imageUuidList=[image_id], + ) + ] - payload = [ - dict( - activateLowerImageVersion=activation_details.get( - "activate_lower_image_version" - ), - deviceUpgradeMode=activation_details.get("device_upgrade_mode"), - distributeIfNeeded=activation_details.get( - "distribute_if_needed" - ), - deviceUuid=device_id, - imageUuidList=[image_id], + activation_params = dict( + schedule_validate=activation_details.get("schedule_validate"), + payload=payload, ) - ] + self.log("Activation Params: {0}".format(str(activation_params)), "INFO") - activation_params = dict( - schedule_validate=activation_details.get("schedule_validate"), - payload=payload, - ) - self.log( - "Activation Params: {0}".format(str(activation_params)), "INFO" - ) + response = self.dnac._exec( + family="software_image_management_swim", + function="trigger_software_image_activation", + op_modifies=True, + params=activation_params, + ) + self.log("Received API from from 'trigger_software_image_activation': {0}".format(str(response)), "DEBUG") - response = self.dnac._exec( - family="software_image_management_swim", - function="trigger_software_image_activation", - op_modifies=True, - 
params=activation_params, - ) - self.log( - "Received API from from 'trigger_software_image_activation': {0}".format( - str(response) - ), - "DEBUG", - ) + if response: + task_id = response.get("response", {}).get("taskId") + activation_task_dict[(device_ip, image_name)] = task_id + self.log("Task ID {0} assigned for image {1} activation on device {2}".format(task_id, image_name, device_ip), "INFO") + activated = True - if response: - task_id = response.get("response", {}).get("taskId") - activation_task_dict[(device_ip, image_name)] = task_id - self.log( - "Task ID {0} assigned for image {1} activation on device {2}".format( - task_id, image_name, device_ip - ), - "INFO", - ) - activated = True + if not activated: + already_activated_devices.append(device_ip) + self.log("Image already activated on device {0}".format(device_ip), "INFO") - if not activated: - already_activated_devices.append(device_ip) - self.log( - "Image already activated on device {0}".format(device_ip), "INFO" - ) + # Check activation status sequentially + for (device_ip, img_name), task_id in activation_task_dict.items(): + task_name = "Activation for {0}".format(device_ip) + self.log("Checking activation status for device {0}, image {1}, Task ID {2}".format(device_ip, img_name, task_id), "INFO") + success_msg = "Successfully activated image {0} on device {1}".format(img_name, device_ip) - # Check activation status sequentially - for (device_ip, img_name), task_id in activation_task_dict.items(): - task_name = "Activation for {0}".format(device_ip) - self.log( - "Checking activation status for device {0}, image {1}, Task ID {2}".format( - device_ip, img_name, task_id - ), - "INFO", - ) - success_msg = "Successfully activated image {0} on device {1}".format( - img_name, device_ip - ) + status_check = self.get_task_status_from_tasks_by_id(task_id, task_name, success_msg) - status_check = self.get_task_status_from_tasks_by_id( - task_id, task_name, success_msg - ) + if status_check.status == "success": + success_activation_list.append((device_ip, img_name)) + self.log("Activation successful for device {0}, image {1}".format(device_ip, img_name), "INFO") + else: + failed_activation_list.append((device_ip, img_name)) + self.log("Activation failed for device {0}, image {1}".format(device_ip, img_name), "ERROR") - if status_check.status == "success": - success_activation_list.append((device_ip, img_name)) - self.log( - "Activation successful for device {0}, image {1}".format( - device_ip, img_name - ), - "INFO", - ) - else: - failed_activation_list.append((device_ip, img_name)) - self.log( - "Activation failed for device {0}, image {1}".format( - device_ip, img_name - ), - "ERROR", - ) + success_image_map = {} + failed_image_map = {} - success_image_map = {} - failed_image_map = {} + for device_ip, img_name in success_activation_list: + success_image_map.setdefault(img_name, []).append(device_ip) - for device_ip, img_name in success_activation_list: - success_image_map.setdefault(img_name, []).append(device_ip) + for device_ip, img_name in failed_activation_list: + failed_image_map.setdefault(img_name, []).append(device_ip) - for device_ip, img_name in failed_activation_list: - failed_image_map.setdefault(img_name, []).append(device_ip) + success_msg_parts = [ + "{} to {}".format(img, ", ".join(devices)) for img, devices in success_image_map.items() + ] - # Building message parts - success_msg_parts = [ - "{} to {}".format(img, ", ".join(devices)) - for img, devices in success_image_map.items() - ] + failed_msg_parts = [ + 
"{} to {}".format(img, ", ".join(devices)) for img, devices in failed_image_map.items() + ] - failed_msg_parts = [ - "{} to {}".format(img, ", ".join(devices)) - for img, devices in failed_image_map.items() - ] + # NEW FLOW (for DNAC > 2.3.7.9) + else: + image_id_base = self.have.get("activation_image_id") + # Resolve sub-package ids (if any) + sub_image_ids = [self.get_image_id_v1(pkg) for pkg in sub_package_images] if sub_package_images else [] + device_ips = [] + activation_payload_list = [] + device_ip_for_not_elg_list = [] + + for device_uuid in device_uuid_list: + device_ip = self.get_device_ip_from_id(device_uuid) + self.log("Processing device: {0}".format(device_ip), "DEBUG") + + # Aggregate all image ids for this device + installed_image_ids = set() + + if image_id_base: + installed_image_ids.add(image_id_base) + + for sid in sub_image_ids: + if sid: + installed_image_ids.add(sid) + + elg_device_ip, device_id = self.check_device_compliance(device_uuid) + + if not elg_device_ip: + self.log("Device not eligible for activation: {0}".format(device_ip), "INFO") + device_ip_for_not_elg_list.append(device_ip) + continue + + device_ips.append(elg_device_ip) + + activation_payload = {} + if device_id: + activation_payload["id"] = device_id + + activation_payload["installedImages"] = [{"id": iid} for iid in installed_image_ids] + + compatible_features = activation_details.get("compatible_features") + if compatible_features: + activation_payload["compatibleFeatures"] = compatible_features + + network_validation_ids = activation_details.get("network_validation_ids") + if network_validation_ids: + activation_payload["networkValidationIds"] = network_validation_ids + + activation_payload_list.append(activation_payload) + + self.log("Activation Payload List: {0}".format(str(activation_payload_list)), "DEBUG") + + if not activation_payload_list: + self.msg = "No eligible devices found for activation. Devices not eligible: {0}".format( + ", ".join(device_ip_for_not_elg_list) if device_ip_for_not_elg_list else "None" + ) + self.log(self.msg, "INFO") + self.set_operation_result("success", False, self.msg, "ERROR") + return self + + try: + response = self.dnac._exec( + family="software_image_management_swim", + function="bulk_update_images_on_network_devices", + op_modifies=True, + params={"payload": activation_payload_list}, + ) + self.log("API response from 'bulk_update_images_on_network_devices': {0}".format(str(response)), "DEBUG") + self.check_swim_tasks_response_status(response, "bulk_update_images_on_network_devices") + + if response and self.status not in ["failed", "exited"]: + self.msg = "All eligible images activated successfully on the devices {0}.".format(", ".join(device_ips)) + self.set_operation_result("success", True, self.msg, "INFO") + return self + else: + self.msg = "Some or all image activations failed for the devices {0}.".format(", ".join(device_ips)) + failed_activation_list = device_ips + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + except Exception as e: + self.log("Exception during bulk activation: {0}".format(str(e)), "ERROR") + failed_msg_parts = ["Exception during bulk activation: {0}".format(str(e))] + failed_activation_list = device_ips + self.msg = "Exception during bulk activation: {0}".format(str(e)) + self.set_operation_result("failed", False, self.msg, "ERROR") + return self # Final single-line message formation final_msg = "" @@ -3660,20 +4418,90 @@ def get_diff_activation(self): if final_msg: final_msg += ". 
" final_msg += "Failed to activate: " + "; ".join(failed_msg_parts) + "." + if device_ip_for_not_elg_list: + if final_msg: + final_msg += ". " + final_msg += "Devices not eligible for activation: " + ", ".join(device_ip_for_not_elg_list) + "." - if not success_activation_list and failed_activation_list: - self.msg = final_msg - self.set_operation_result( - "failed", False, self.msg, "ERROR" - ).check_return_status() - elif success_activation_list and failed_activation_list: - self.msg = final_msg + self.msg = final_msg + self.log("Final activation status: {0}".format(final_msg), "INFO") + + if not success_activation_list and failed_activation_list and not device_ip_for_not_elg_list: + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + elif failed_activation_list and not success_activation_list and device_ip_for_not_elg_list: + self.set_operation_result("failed", False, self.msg, "ERROR") + elif (success_activation_list and failed_activation_list) or device_ip_for_not_elg_list: self.set_operation_result("success", True, self.msg, "INFO") self.partial_successful_activation = True else: - self.msg = final_msg self.set_operation_result("success", True, self.msg, "INFO") self.complete_successful_activation = True + return self + + def check_swim_tasks_response_status(self, response, api_name): + """ + Get the task response status from taskId + Args: + self: The current object details. + response (dict): API response. + api_name (str): API name. + Returns: + self (object): The current object with updated desired Fabric Transits information. + Description: + Poll the function 'get_tasks_by_id' until it returns either 'SUCCESS' or 'FAILURE' + state or till it reaches the maximum timeout. + Log the task details and return self. + """ + self.log("Starting SWIM task status monitoring for API operation: {0}".format(api_name), "DEBUG") + self.log("Input response: {0}".format(response), "DEBUG") + self.log("Max timeout for task monitoring is set to {0} seconds.".format(self.max_timeout), "DEBUG") + if not response: + self.msg = "response is empty" + self.status = "exited" + return self + + if not isinstance(response, dict): + self.msg = "response is not a dictionary" + self.status = "exited" + return self + + task_info = response.get("response") + if task_info.get("errorcode") is not None: + self.msg = response.get("response").get("detail") + self.status = "failed" + return self + + task_id = task_info.get("taskId") + start_time = time.time() + while True: + elapsed_time = time.time() - start_time + if elapsed_time >= self.max_timeout: + self.msg = "Max timeout of {0} sec has reached for the task id '{1}'. " \ + .format(self.max_timeout, task_id) + \ + "Exiting the loop due to unexpected API '{0}' status.".format(api_name) + self.log(self.msg, "WARNING") + self.status = "failed" + break + + task_details = self.get_tasks_by_id(task_id) + self.log('Getting tasks details from task ID {0}: {1}' + .format(task_id, task_details), "DEBUG") + + task_status = task_details.get("status") + if task_status == "FAILURE": + details = self.get_task_details_by_id(task_id) + self.msg = details.get("failureReason") + self.status = "failed" + break + + elif task_status == "SUCCESS": + self.result["changed"] = True + self.log("The task with task ID '{0}' is executed successfully." 
+ .format(task_id), "INFO") + break + + self.log("Progress is {0} for task ID: {1}" + .format(task_status, task_id), "DEBUG") return self @@ -3692,6 +4520,10 @@ def get_diff_merged(self, config): operations are successful, 'changed' is set to True. """ + config_cco = config.get("sync_cco", False) + if config_cco: + self.sync_cco_image() + if config.get("tagging_details"): self.get_diff_tagging().check_return_status() @@ -3703,6 +4535,53 @@ def get_diff_merged(self, config): return self + def sync_cco_image(self): + """ + Synchronize software images from Cisco CCO to Cisco Catalyst Center. + Args: + self (object): An instance of a class used for interacting with Cisco Catalyst Center. + Returns: + self (object): An instance of a class used for interacting with Cisco Catalyst Center. + Description: + This method synchronizes software images from Cisco CCO to Cisco Catalyst Center. + It retrieves the image details from the playbook and triggers the synchronization process. + The method logs the status of the synchronization and updates the result accordingly. + """ + self.log("Starting synchronization of software images from Cisco CCO.", "INFO") + + sync_cco = self.want.get("sync_cco") + self.log("CCO synchronization configuration: {0}".format(sync_cco), "DEBUG") + + if not sync_cco: + self.log("No CCO synchronization details found. Skipping synchronization.", "INFO") + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + return None + try: + response = self.dnac._exec( + family="software_image_management_swim", + function="initiates_sync_of_software_images_from_cisco_com_v1", + op_modifies=True, + ) + self.log("Received API response from 'initiates_sync_of_software_images_from_cisco_com_v1' for Update: {0}".format(response), "DEBUG") + self.check_tasks_response_status(response, "initiates_sync_of_software_images_from_cisco_com_v1") + + # Handle successful update + if self.status not in ["failed", "exited"]: + self.msg = ("Synchronization of software images from Cisco CCO initiated successfully, Fetched recommended image(s) from cisco.com") + self.set_operation_result("success", True, self.msg, "INFO") + return self + + # Handle failed update + if self.status == "failed": + fail_reason = self.msg + self.msg = "Synchronization of software images from Cisco CCO failed: {}".format(fail_reason) + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + + except Exception as e: + self.msg = ("Error occurred during CCO image synchronization: {}".format(e), "ERROR") + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + return self + def verify_diff_imported(self, import_type): """ Verify the successful import of a software image into Cisco Catalyst Center. 
@@ -3893,7 +4772,8 @@ def verify_diff_distributed(self): """ image_id = self.have.get("distribution_image_id") - image_name = self.get_image_name_from_id(image_id) + if image_id: + image_name = self.get_image_name_from_id(image_id) if self.have.get("distribution_device_id"): if self.single_device_distribution: @@ -3911,11 +4791,27 @@ def verify_diff_distributed(self): ), "INFO", ) + elif self.bulk_distribution_success: + if image_id: + self.msg = """The requested image '{0}', with ID '{1}', has been successfully distributed + to all specified devices - '{2}' in the Cisco Catalyst Center.""".format( + image_name, image_id, self.bulk_distribution_success_ips + ) + else: + self.msg = """The golden image has been successfully distributed + to all specified devices '{0}' in the Cisco Catalyst Center.""".format(self.bulk_distribution_success_ips) + + self.log(self.msg, "INFO") + elif self.complete_successful_distribution: - self.msg = """The requested image '{0}', with ID '{1}', has been successfully distributed to all devices within the specified - site in the Cisco Catalyst Center.""".format( - image_name, image_id - ) + if image_id: + self.msg = """The requested image '{0}', with ID '{1}', has been successfully distributed to all devices within the specified + site in the Cisco Catalyst Center.""".format( + image_name, image_id + ) + else: + self.msg = """The golden image has been successfully distributed to all devices within the specified site in the Cisco Catalyst Center.""" + self.log(self.msg, "INFO") elif self.partial_successful_distribution: self.msg = """T"The requested image '{0}', with ID '{1}', has been partially distributed across some devices in the Cisco Catalyst @@ -3946,7 +4842,8 @@ def verify_diff_activated(self): """ image_id = self.have.get("activation_image_id") - image_name = self.get_image_name_from_id(image_id) + if image_id: + image_name = self.get_image_name_from_id(image_id) if self.have.get("activation_device_id"): if self.single_device_activation: @@ -3976,6 +4873,9 @@ def verify_diff_activated(self): image_name, image_id ) self.log(self.msg, "INFO") + elif image_id is None: + self.msg = """The golden image has been successfully activated on all devices within the specified site in the Cisco Catalyst Center.""" + self.log(self.msg, "INFO") else: self.msg = """The activation of the requested image '{0}', with ID '{1}', failed on devices in the Cisco Catalyst Center.""".format( @@ -4021,6 +4921,201 @@ def verify_diff_merged(self, config): return self + def get_diff_deleted(self, config): + """ + Deletes software images from Cisco Catalyst Center based on the image names provided in the configuration. + + Args: + config (dict): A dictionary containing the configuration for image deletion. + Expected to have a key "image_name" with a list of image names to be deleted. + + Returns: + self (object): Returns the current object instance after processing the deletion request. + + Raises: + None explicitly, but exceptions during API execution are caught and logged. + + Description: + 1. Extract the list of image names from the config. + 2. For each image name: + - Retrieve the corresponding image ID using `get_image_id`. + - If image ID is found: + - Attempt to delete the image using Cisco Catalyst Center API. + - Log the response and update status based on API result. + - If image ID is not found or an exception occurs, log it as a failure. + 3. After processing all images: + - Summarize the results into success and failure messages. 
+ - Set final operation result status (`success` or `failed`) based on outcomes. + """ + self.log("Initiating software image deletion process from Cisco Catalyst Center", "DEBUG") + image_names = config.get("image_name", []) + self.log("Image names to be deleted: {0}".format(image_names), "INFO") + + if not image_names: + self.msg = "No image names provided for deletion." + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + + results = [] + success_deletions = [] + failed_deletions = [] + + for image_name in image_names: + self.log("Processing deletion request for image: '{0}'".format(image_name), "DEBUG") + image_id = self.get_image_id(image_name) + + if not image_id: + msg = "Image '{0}' does not exist in Cisco Catalyst Center.".format(image_name) + failed_deletions.append(image_name) + results.append({"image": image_name, "status": "failed", "message": msg}) + continue + + try: + self.log("Attempting to delete image '{0}' with ID '{1}'.".format(image_name, image_id), "INFO") + response = self.dnac._exec( + family="software_image_management_swim", + function='delete_image_v1', + op_modifies=True, + params={"id": image_id} + ) + + self.check_tasks_response_status(response, "delete_image_v1") + self.log("Received API response from 'delete_image_v1': {0}".format(str(response)), "DEBUG") + + if self.status not in ["failed", "exited"]: + msg = "Image '{0}' deleted successfully.".format(image_name) + success_deletions.append(image_name) + results.append({"image": image_name, "status": "success", "message": msg}) + else: + msg = "Image '{0}' failed to delete: {1}".format(image_name, self.msg) + failed_deletions.append(image_name) + results.append({"image": image_name, "status": "failed", "message": msg}) + + except Exception as e: + msg = "Image '{0}' failed to delete due to exception: {1}".format(image_name, str(e)) + failed_deletions.append(image_name) + results.append({"image": image_name, "status": "failed", "message": msg}) + + # Summarize results + self.log("Image deletion process completed - generating final status report", "DEBUG") + + success_count = len(success_deletions) + failed_count = len(failed_deletions) + total_count = len(image_names) + + self.log("Deletion summary: {0} successful, {1} failed out of {2} total images".format( + success_count, failed_count, total_count), "INFO") + + # Build final status message + status_parts = [] + + if success_deletions: + success_list = "', '".join(success_deletions) + status_parts.append("Successfully deleted image(s): '{0}'".format(success_list)) + + if failed_deletions: + failed_list = "', '".join(failed_deletions) + status_parts.append("Failed to delete image(s): '{0}'".format(failed_list)) + + final_message = ". ".join(status_parts) + "." 
+ + # Determine final operation status + if success_deletions and not failed_deletions: + # All deletions successful + self.msg = final_message + self.log("All image deletion operations completed successfully", "INFO") + self.set_operation_result("success", True, self.msg, "INFO") + return self + + if success_deletions and failed_deletions: + # Partial success + self.msg = final_message + self.log("Image deletion completed with partial success", "WARNING") + self.set_operation_result("success", True, self.msg, "WARNING") + return self + + # All deletions failed + self.msg = final_message + self.log("All image deletion operations failed", "ERROR") + self.set_operation_result("failed", False, self.msg, "ERROR").check_return_status() + return self + + def verify_diff_deleted(self, config): + """ + Verify the successful deletion of software images from Cisco Catalyst Center. + Args: + self (object): An instance of a class used for interacting with Cisco Catalyst Center. + config (dict): A dictionary containing the configuration with image names to verify deletion. + Returns: + self (object): An instance of a class used for interacting with Cisco Catalyst Center. + Description: + This function verifies that software images have been successfully deleted from Cisco Catalyst Center + by checking their existence status. It processes multiple images and provides comprehensive verification + results, logging the status of each image and providing a final summary of the verification process. + """ + self.log("Initiating verification process for deleted software images from Cisco Catalyst Center", "DEBUG") + + image_names = config.get("image_name", []) + self.log("Processing deletion verification for {0} image(s): {1}".format(len(image_names), image_names), "INFO") + + verified_deleted = [] + still_existing = [] + + for image_name in image_names: + self.log("Verifying deletion status for image: '{0}'".format(image_name), "DEBUG") + + image_id = self.get_image_id(image_name) + + if not image_id: + self.log("Verification successful: Image '{0}' no longer exists in Cisco Catalyst Center".format(image_name), "INFO") + verified_deleted.append(image_name) + else: + self.log("Verification failed: Image '{0}' still exists in Cisco Catalyst Center with ID '{1}'".format(image_name, image_id), "ERROR") + still_existing.append(image_name) + + # Generate comprehensive verification summary + self.log("Deletion verification process completed - generating final status report", "DEBUG") + + verified_count = len(verified_deleted) + existing_count = len(still_existing) + total_count = len(image_names) + + self.log("Verification summary: {0} confirmed deleted, {1} still existing out of {2} total images".format( + verified_count, existing_count, total_count), "INFO") + + # Build final status message + status_parts = [] + + if verified_deleted: + verified_list = "', '".join(verified_deleted) + status_parts.append("Successfully verified deletion of image(s): '{0}'".format(verified_list)) + + if still_existing: + existing_list = "', '".join(still_existing) + status_parts.append("Image(s) still exist and deletion not verified: '{0}'".format(existing_list)) + + final_message = ". ".join(status_parts) + "." 
+ + # Determine final verification status + if verified_deleted and not still_existing: + # All deletions verified successfully + self.msg = final_message + self.log("All image deletion operations have been successfully verified", "INFO") + self.set_operation_result("success", True, self.msg, "INFO") + return self + + if verified_deleted and still_existing: + # Partial verification success + self.msg = final_message + self.log("Image deletion verification completed with partial success", "WARNING") + self.set_operation_result("success", True, self.msg, "WARNING") + return self + + # All verifications failed (all images still exist) + self.msg = final_message + self.log("All image deletion verification attempts failed - no images were successfully deleted", "ERROR") + self.set_operation_result("failed", False, self.msg, "ERROR") + return self + def update_swim_profile_messages(self): """ Verify the merged status (Importing/Tagging/Distributing/Activating) of the SWIM Image in devices in Cisco Catalyst Center. @@ -4068,27 +5163,27 @@ def update_swim_profile_messages(self): def main(): - """main entry point for module execution""" - - element_spec = { - "dnac_host": {"required": True, "type": "str"}, - "dnac_port": {"type": "str", "default": "443"}, - "dnac_username": {"type": "str", "default": "admin", "aliases": ["user"]}, - "dnac_password": {"type": "str", "no_log": True}, - "dnac_verify": {"type": "bool", "default": "True"}, - "dnac_version": {"type": "str", "default": "2.2.3.3"}, - "dnac_debug": {"type": "bool", "default": False}, - "dnac_log_level": {"type": "str", "default": "WARNING"}, - "dnac_log_file_path": {"type": "str", "default": "dnac.log"}, - "dnac_log_append": {"type": "bool", "default": True}, - "dnac_log": {"type": "bool", "default": False}, - "validate_response_schema": {"type": "bool", "default": True}, - "config_verify": {"type": "bool", "default": False}, - "dnac_api_task_timeout": {"type": "int", "default": 1200}, - "dnac_task_poll_interval": {"type": "int", "default": 2}, - "config": {"required": True, "type": "list", "elements": "dict"}, - "state": {"default": "merged", "choices": ["merged"]}, - } + + """ main entry point for module execution """ + + element_spec = {'dnac_host': {'required': True, 'type': 'str'}, + 'dnac_port': {'type': 'str', 'default': '443'}, + 'dnac_username': {'type': 'str', 'default': 'admin', 'aliases': ['user']}, + 'dnac_password': {'type': 'str', 'no_log': True}, + 'dnac_verify': {'type': 'bool', 'default': 'True'}, + 'dnac_version': {'type': 'str', 'default': '2.2.3.3'}, + 'dnac_debug': {'type': 'bool', 'default': False}, + 'dnac_log_level': {'type': 'str', 'default': 'WARNING'}, + "dnac_log_file_path": {"type": 'str', "default": 'dnac.log'}, + "dnac_log_append": {"type": 'bool', "default": True}, + 'dnac_log': {'type': 'bool', 'default': False}, + 'validate_response_schema': {'type': 'bool', 'default': True}, + 'config_verify': {'type': 'bool', "default": False}, + 'dnac_api_task_timeout': {'type': 'int', "default": 1200}, + 'dnac_task_poll_interval': {'type': 'int', "default": 2}, + 'config': {'required': True, 'type': 'list', 'elements': 'dict'}, + 'state': {'default': 'merged', 'choices': ['merged', 'deleted']} + } module = AnsibleModule(argument_spec=element_spec, supports_check_mode=False) diff --git a/plugins/modules/template_workflow_manager.py b/plugins/modules/template_workflow_manager.py index 893b0501b0..892da77eea 100644 --- a/plugins/modules/template_workflow_manager.py +++ b/plugins/modules/template_workflow_manager.py @@ 
-114,6 +114,7 @@ can be applied to. type: list elements: dict + required: true suboptions: product_family: description: Denotes the family @@ -132,6 +133,7 @@ - Voice and Telephony - Wireless Controller type: str + required: true product_series: description: Specifies the series classification of the device. @@ -166,6 +168,27 @@ description: Narrative that elaborates on the purpose and scope of the project. type: str + profile_names: + description: | + - List of profile names to be associated with the Configuration Template during creation or update operations. + - Enables assignment of one or more network profiles to CLI templates for enhanced device configuration management. + - Profile names must correspond to existing network profiles in Cisco Catalyst Center for the specified device types. + - Supports assignment of multiple profiles simultaneously for comprehensive device configuration coverage. + - Profiles are validated against the device types specified in the template configuration to ensure compatibility. + - When combined with existing profile assignments, new profiles are added while preserving existing assignments. + - Profile assignment operations are idempotent - re-assigning existing profiles will not cause errors or duplicate assignments. + - Requires Cisco Catalyst Center version 3.1.3.0 or later for profile assignment functionality. + - Profile names are case-sensitive and must match exactly as configured in Cisco Catalyst Center. + - Each profile in the list must be a valid string representing an existing network profile name. + - If no profiles are specified, the template will not be associated with any profiles by default. + - Profile names can be detached from the template based on deleted state operations. + - C(examples): + - ["Enterprise_Security_Profile", "QoS_Voice_Profile"] + - ["Campus_Switching_Profile"] + - ["WAN_Edge_Profile", "Security_Baseline_Profile", "Monitoring_Profile"] + type: list + elements: str + required: false tags: description: A list of dictionaries representing tags associated with @@ -1416,6 +1439,8 @@ template_name: string project_name: string project_description: string + profile_names: + - string software_type: string software_version: string tags: @@ -1454,6 +1479,8 @@ new_template_name: string project_name: string project_description: string + profile_names: + - string software_type: string software_version: string tags: @@ -1795,6 +1822,147 @@ config: - projects: - name: Wireless_Template_Management + +- name: Creating complete configuration template with profiles + response in Case_9 + cisco.dnac.template_workflow_manager: + dnac_host: "{{dnac_host}}" + dnac_username: "{{dnac_username}}" + dnac_password: "{{dnac_password}}" + dnac_verify: "{{dnac_verify}}" + dnac_port: "{{dnac_port}}" + dnac_version: "{{dnac_version}}" + dnac_debug: "{{dnac_debug}}" + dnac_log: true + dnac_log_level: "{{dnac_log_level}}" + state: merged + config_verify: true + config: + - configuration_templates: + author: Test_User + composite: false + custom_params_order: true + template_description: Template to configure access + VLAN and access interfaces + device_types: + - product_family: Switches and Hubs + product_series: Cisco Catalyst 9300 Series + Switches + failure_policy: ABORT_TARGET_ON_ERROR + language: JINJA + template_name: PnP-Upstream-SW1 + profile_names: + - TestProfile + - PNP_Onboarding_Template + project_name: access_vlan_template_9300_switches + project_description: This project contains + all the templates for Access Switches + 
software_type: IOS-XE + template_content: | + {% raw %} + vlan {{ vlan }} + interface {{ interface }} + no shutdown + switchport access vlan {{ vlan }} + switchport mode access + description {{ interface_description }} + {% endraw %} + version: "1.0" + +- name: Update configuration template with additional profile + response in Case_10 + cisco.dnac.template_workflow_manager: + dnac_host: "{{dnac_host}}" + dnac_username: "{{dnac_username}}" + dnac_password: "{{dnac_password}}" + dnac_verify: "{{dnac_verify}}" + dnac_port: "{{dnac_port}}" + dnac_version: "{{dnac_version}}" + dnac_debug: "{{dnac_debug}}" + dnac_log: true + dnac_log_level: "{{dnac_log_level}}" + state: merged + config_verify: true + config: + - configuration_templates: + author: Test_User + composite: false + custom_params_order: true + template_description: Template to configure access + VLAN and access interfaces + device_types: + - product_family: Switches and Hubs + product_series: Cisco Catalyst 9300 Series + Switches + failure_policy: ABORT_TARGET_ON_ERROR + language: JINJA + template_name: PnP-Upstream-SW1 + profile_names: + - TestProfile + - PNP_Onboarding_Template + project_name: access_vlan_template_9300_switches + project_description: This project contains + all the templates for Access Switches + software_type: IOS-XE + template_content: | + {% raw %} + vlan {{ vlan }} + interface {{ interface }} + no shutdown + switchport access vlan {{ vlan }} + switchport mode access + description {{ interface_description }} + {% endraw %} + version: "1.0" + +- name: Detach a profile from the configuration template on deleted state + response in Case_11 + cisco.dnac.template_workflow_manager: + dnac_host: "{{ dnac_host }}" + dnac_port: "{{ dnac_port }}" + dnac_username: "{{ dnac_username }}" + dnac_password: "{{ dnac_password }}" + dnac_verify: "{{ dnac_verify }}" + dnac_version: "{{ dnac_version }}" + dnac_debug: "{{ dnac_debug }}" + dnac_log_level: "{{ dnac_log_level }}" + dnac_log: true + config_verify: true + state: deleted + config: + configuration_templates: + project_name: "access_vlan_template_9300_switches" + template_name: "AA_PnP-Upstream-SW1" + language: "JINJA" + software_type: "IOS-XE" + profile_names: + - TestProfile + device_types: + - product_family: "Switches and Hubs" + +- name: Deleting configuration template no need to attach profiles + it will unassign profiles and delete the template without impacting profiles + response in Case_12 + cisco.dnac.template_workflow_manager: + dnac_host: "{{ dnac_host }}" + dnac_port: "{{ dnac_port }}" + dnac_username: "{{ dnac_username }}" + dnac_password: "{{ dnac_password }}" + dnac_verify: "{{ dnac_verify }}" + dnac_version: "{{ dnac_version }}" + dnac_debug: "{{ dnac_debug }}" + dnac_log_level: "{{ dnac_log_level }}" + dnac_log: true + config_verify: true + state: deleted + config: + configuration_templates: + project_name: "access_vlan_template_9300_switches" + template_name: "AA_PnP-Upstream-SW1" + language: "JINJA" + software_type: "IOS-XE" + device_types: + - product_family: "Switches and Hubs" """ RETURN = r""" @@ -1904,6 +2072,66 @@ ], "status": "success" } + +# Case_9: Response for Creating a Complete Configuration Template with profiles +response_9: + description: Response when a complete configuration template is created successfully with profiles. + returned: always + type: dict + sample: > + { + "msg": "Template '['AA_PnP-Upstream-SW1']' created successfully in the Cisco Catalyst Center. 
+ Template '['AA_PnP-Upstream-SW1']' committed successfully in the Cisco Catalyst Center. + Profile(s) '['TestProfile', 'PNP_Onboarding_Template']' assigned successfully to the template.", + "response": "Template '['AA_PnP-Upstream-SW1']' created successfully in the Cisco Catalyst Center. + Template '['AA_PnP-Upstream-SW1']' committed successfully in the Cisco Catalyst Center. + Profile(s) '['TestProfile', 'PNP_Onboarding_Template']' assigned successfully to the template.", + "status": "success" + } + +# Case_10: Response for Updating a Configuration Template with Additional Profile +response_10: + description: Response when a configuration template is updated successfully with an additional profile. + returned: always + type: dict + sample: > + { + "msg": "Template '['AA_PnP-Upstream-SW1']' updated successfully in the Cisco Catalyst Center. + Template '['AA_PnP-Upstream-SW1']' committed successfully in the Cisco Catalyst Center. + Profile(s) '['PNP_Onboarding_Template']' assigned successfully to the template. + Profile(s) '['TestProfile']' already exist and cannot be assigned to the template.", + "response": "Template '['AA_PnP-Upstream-SW1']' updated successfully in the Cisco Catalyst Center. + Template '['AA_PnP-Upstream-SW1']' committed successfully in the Cisco Catalyst Center. + Profile(s) '['PNP_Onboarding_Template']' assigned successfully to the template. + Profile(s) '['TestProfile']' already exist and cannot be assigned to the template.", + "status": "success" + } + +# Case_11: Response for Detach a profile from the configuration template on deleted state +response_11: + description: Response when a profile is detached from the configuration template on deleted state. + returned: always + type: dict + sample: > + { + "msg": "Profile(s) '['TestProfile']' detached successfully from the template.", + "response": "Profile(s) '['TestProfile']' detached successfully from the template.", + "status": "success" + } + +# Case_12: Response for Deleting a configuration template without affecting profiles +response_12: + description: Response when a configuration template is deleted without affecting profiles. 
+ returned: always + type: dict + sample: > + { + "msg": "Task: deletes_the_template is successful for parameters: + {'template_id': '9a68dfa3-86ac-442b-bc92-957bfbd76ca7', 'active_validation': False}", + "response": "Task: deletes_the_template is successful for parameters: + {'template_id': '9a68dfa3-86ac-442b-bc92-957bfbd76ca7', 'active_validation': False}", + "status": "success" + } """ import copy @@ -1912,15 +2140,17 @@ import re from ansible.module_utils.basic import AnsibleModule from ansible_collections.cisco.dnac.plugins.module_utils.dnac import ( - DnacBase, validate_list_of_dicts, get_dict_result, dnac_compare_equality, validate_str ) +from ansible_collections.cisco.dnac.plugins.module_utils.network_profiles import ( + NetworkProfileFunctions, +) -class Template(DnacBase): +class Template(NetworkProfileFunctions): """Class containing member attributes for template_workflow_manager module""" def __init__(self, module): @@ -1933,6 +2163,8 @@ def __init__(self, module): self.max_timeout = self.params.get('dnac_api_task_timeout') self.template_created, self.no_update_template, self.template_updated = [], [], [] self.project_created, self.template_committed = [], [] + self.profile_assigned, self.no_profile_assigned, self.profile_exists = [], [], [] + self.profile_detached, self.profile_not_detached, self.profile_already_detached = [], [], [] self.result['response'] = [ {"configurationTemplate": {"response": {}, "msg": {}}}, {"export": {"response": {}}}, @@ -1986,6 +2218,7 @@ def validate_input(self): "name": {"type": "str"}, "project_name": {"type": "str"}, "project_description": {"type": "str"}, + "profile_names": {"type": "list", "elements": "str"}, "software_type": {"type": "str"}, "software_version": {"type": "str"}, "template_content": {"type": "str"}, @@ -2111,7 +2344,7 @@ def validate_input(self): self.validated_config = valid_temp self.log( - "Successfully validated playbook config params: {0}".format(valid_temp), + "Successfully validated playbook config params: {0}".format(self.pprint(valid_temp)), "INFO", ) self.msg = "Successfully validated input" @@ -2162,6 +2395,28 @@ def input_data_validation(self, config): if description and isinstance(description, str): validate_str(description, param_spec_str, "description", errormsg) + self.log("Initiating profile assignment validation for CLI templates", "DEBUG") + configuration_templates = config[0].get("configuration_templates") + if configuration_templates and isinstance(configuration_templates, dict): + profile_names = configuration_templates.get("profile_names") + ccc_version = self.get_ccc_version() + self.log("Processing profile assignment configuration - profiles: {0}".format( + profile_names), "DEBUG") + + if profile_names and isinstance(profile_names, list): + if self.compare_dnac_versions(ccc_version, "3.1.3.0") < 0: + msg = ( + "Profile assignment feature is not supported in Cisco Catalyst Center version '{0}'. " + "Supported versions start from '3.1.3.0' onwards. 
Current configuration includes " + "profiles: {1}".format(ccc_version, bool(profile_names)) + ) + errormsg.append(msg) + else: + self.log("Validating profiles configuration for template profile assignment", "DEBUG") + for each_profile in profile_names: + if each_profile and isinstance(each_profile, str): + validate_str(each_profile, param_spec_str, "profile_names", errormsg) + if errormsg: msg = "Invalid parameters in playbook config: '{0}' ".format(errormsg) self.log(msg, "ERROR") @@ -2441,33 +2696,6 @@ def get_template_info(self, template_params): return templateParams - def get_templates_details(self, name): - """ - Get the template details from the template name provided in the playbook. - - Parameters: - name (str) - Name of the template provided in the playbook. - - Returns: - result (dict) - Template details for the given template name. - """ - - result = None - items = self.dnac_apply["exec"]( - family="configuration_templates", - function="get_templates_details", - op_modifies=True, - params={"name": name}, - ) - if items: - result = items - - self.log( - "Received API response from 'get_templates_details': {0}".format(items), - "DEBUG", - ) - return result - def get_project_defined_template_details(self, project_name, template_name): """ Get the template details from the template name provided in the playbook. @@ -3027,6 +3255,264 @@ def get_have_template(self, config, template_available): self.status = "success" return self + def _retrieve_all_profiles_with_pagination(self, device_type): + """ + Retrieves all profiles for the specified device type using pagination. + + Parameters: + device_type (str): The type of device for which to retrieve profiles. + """ + + self.log("Starting profile retrieval with pagination for device type: '{0}'".format( + device_type), "DEBUG") + + offset = 1 + limit = 500 + api_timeout = int(self.payload.get("dnac_api_task_timeout", 1200)) + poll_interval = int(self.payload.get("dnac_task_poll_interval", 2)) + + start_time = time.time() + + while True: + # Check timeout + elapsed_time = time.time() - start_time + if elapsed_time >= api_timeout: + self.msg = "Timeout exceeded ({0}s) while retrieving profiles for device type '{1}'".format( + api_timeout, device_type) + self.log(self.msg, "ERROR") + self.fail_and_exit(self.msg) + + self.log("Retrieving profiles with offset={0}, limit={1} for device type '{2}'".format( + offset, limit, device_type), "DEBUG") + + profiles = self._get_profiles_by_device_type(device_type, offset, limit) + + if not profiles: + self.log("No more profiles received from API (offset={0}). Pagination complete.".format(offset), "DEBUG") + break + + self.log("Retrieved {0} profile(s) from API (offset={1})".format(len(profiles), offset), "DEBUG") + self.have["profile_list"].extend(profiles) + + # Check if we've received all available profiles + if len(profiles) < limit: + self.log("Received fewer profiles than limit ({0} < {1}). Last page reached.".format( + len(profiles), limit), "DEBUG") + break + + # Prepare for next iteration + offset += limit + self.log("Incrementing offset to {0} for next API request".format(offset), "DEBUG") + + # Rate limiting + self.log("Applying rate limiting delay of {0} seconds before next API call".format(poll_interval), "DEBUG") + time.sleep(poll_interval) + + def _get_profiles_by_device_type(self, device_type, offset, limit): + """ + Maps device type to appropriate network profile category and retrieves profiles. + + Parameters: + device_type (str): The type of device. + offset (int): Pagination offset. 
+ limit (int): Pagination limit. + + Returns: + list: List of profiles for the specified device type. + """ + + # Device type to profile category mapping + device_type_mapping = { + "Switches and Hubs": "Switching", + "Wireless Controller": "Wireless", + "Routers": "Routing", + "Security and VPN": "Firewall" + } + + profile_category = device_type_mapping.get(device_type, "Assurance") + + self.log("Mapping device type '{0}' to profile category '{1}'".format( + device_type, profile_category), "DEBUG") + + try: + profiles = self.get_network_profile(profile_category, offset, limit) + self.log("Successfully retrieved profiles for category '{0}'".format( + profile_category), "DEBUG") + return profiles + + except Exception as e: + self.log("Error retrieving profiles for category '{0}': {1}".format( + profile_category, str(e)), "ERROR") + return [] + + def _process_individual_profile(self, profile_name, template_name): + """ + Processes an individual profile to determine its assignment status. + + Parameters: + profile_name (str): Name of the profile to process. + template_name (str): Name of the template to check assignment against. + + Returns: + dict: Profile information including assignment status. + """ + self.log("Processing individual profile: '{0}' for template: '{1}'".format( + profile_name, template_name), "DEBUG") + + profile_info = { + "profile_name": profile_name, + "template_name": template_name + } + + # Validate profile existence + if not self.value_exists(self.have["profile_list"], "name", profile_name): + self.msg = "Profile '{0}' does not exist in Cisco Catalyst Center".format(profile_name) + self.log(self.msg, "ERROR") + self.fail_and_exit(self.msg) + + # Get profile ID + profile_index = next( + (index for index, profile in enumerate(self.have["profile_list"]) + if profile.get("name") == profile_name), -1 + ) + + if profile_index == -1: + self.msg = "Failed to locate profile '{0}' in retrieved profile list".format(profile_name) + self.log(self.msg, "ERROR") + self.fail_and_exit(self.msg) + + profile_id = self.have["profile_list"][profile_index]["id"] + profile_info["profile_id"] = profile_id + + self.log("Successfully resolved profile '{0}' to ID: '{1}'".format( + profile_name, profile_id), "DEBUG") + + # Check template assignment + assignment_status = self._check_profile_template_assignment( + profile_name, profile_id, template_name) + profile_info["profile_status"] = assignment_status + + if assignment_status == "already assigned": + self.profile_exists.append(profile_name) + self.log("Profile '{0}' marked as existing (already assigned)".format( + profile_name), "DEBUG") + + self.log("Profile processing completed for '{0}': status='{1}'".format( + profile_name, assignment_status), "DEBUG") + return profile_info + + def _check_profile_template_assignment(self, profile_name, profile_id, template_name): + """ + Checks if a profile is assigned to the specified template. + + Parameters: + profile_name (str): Name of the profile. + profile_id (str): ID of the profile. + template_name (str): Name of the template. + + Returns: + str: Assignment status ('Not Assigned' or 'already assigned'). 
+ """ + + self.log("Checking template assignment for profile '{0}' (ID: {1}) against template '{2}'".format( + profile_name, profile_id, template_name), "DEBUG") + + try: + template_details = self.get_templates_for_profile(profile_id) + + if not template_details: + self.log("No templates found assigned to profile '{0}'".format( + profile_name), "INFO") + return "Not Assigned" + + self.log("Found {0} template(s) assigned to profile '{1}'".format( + len(template_details), profile_name), "DEBUG") + + # Check if the specific template is assigned + if self.value_exists(template_details, "name", template_name): + self.log("Profile '{0}' is already assigned to template '{1}'".format( + profile_name, template_name), "INFO") + return "already assigned" + else: + self.log("Profile '{0}' is not assigned to template '{1}' (assigned to other templates)".format( + profile_name, template_name), "INFO") + return "Not Assigned" + + except Exception as e: + self.log("Error checking template assignment for profile '{0}': {1}".format( + profile_name, str(e)), "ERROR") + return "Not Assigned" + + def get_profile_details(self, device_type, input_profiles, template_name): + """ + Retrieves profile details and assignment status for given profile names from Cisco Catalyst Center. + + Parameters: + device_type (str) - The type of device for which to retrieve profile details. + input_profiles (list) - List of profile names to retrieve details for. + template_name (str) - The name of the template for which to retrieve profile details. + + Returns: + list: A list of dictionaries containing profile information including: + - profile_name (str): Name of the profile + - profile_id (str): UUID of the profile + - profile_status (str): Assignment status ('Not Assigned' or 'already assigned') + - template_name (str): Name of the template + + Description: + This function retrieves comprehensive profile information from Cisco Catalyst Center and determines + the assignment status of each profile to the specified template. It handles pagination for large + profile datasets, validates profile existence, and checks current template assignments. The function + supports multiple device types and maps them to appropriate network profile categories for API calls. 
+ """ + self.log("Initiating profile details collection for device type '{0}' with profiles: {1} and template '{2}'".format( + device_type, input_profiles, template_name), "DEBUG") + + # Input validation + if not device_type: + self.msg = "Device type is required but not provided for profile details collection" + self.log(self.msg, "ERROR") + self.fail_and_exit(self.msg) + + if not input_profiles or not isinstance(input_profiles, list): + self.msg = "Input profiles must be provided as a non-empty list for profile details collection" + self.log(self.msg, "ERROR") + self.fail_and_exit(self.msg) + + if not template_name: + self.msg = "Template name is required but not provided for profile details collection" + self.log(self.msg, "ERROR") + self.fail_and_exit(self.msg) + + self.log("Collecting profile information for device type '{0}', profiles: {1}, template: '{2}'".format( + device_type, input_profiles, template_name), "INFO") + + # Initialize profile storage + self.have["profile"] = [] + self.have["profile_list"] = [] + + # Retrieve all profiles with pagination + self._retrieve_all_profiles_with_pagination(device_type) + + if not self.have["profile_list"]: + self.msg = "No profiles found for device type '{0}' in Cisco Catalyst Center".format(device_type) + self.log(self.msg, "ERROR") + self.fail_and_exit(self.msg) + + self.log("Successfully retrieved {0} total profile(s) for device type '{1}'".format( + len(self.have["profile_list"]), device_type), "INFO") + + # Process each input profile + processed_profiles = [] + for profile_name in input_profiles: + profile_info = self._process_individual_profile(profile_name, template_name) + processed_profiles.append(profile_info) + + self.log("Profile details collection completed successfully. Processed {0} profile(s): {1}".format( + len(processed_profiles), self.pprint(processed_profiles)), "INFO") + + return processed_profiles + def get_have(self, config): """ Get the current project and template details from Cisco Catalyst Center. @@ -3040,7 +3526,12 @@ def get_have(self, config): have = {} configuration_templates = config.get("configuration_templates") if configuration_templates: - if not configuration_templates.get("project_name"): + profile_names = configuration_templates.get("profile_names") + template_name = configuration_templates.get("template_name") + device_types = configuration_templates.get("device_types") + project_name = configuration_templates.get("project_name") + + if not project_name: self.msg = "The parameter 'project_name' is required but not provided." 
self.status = "failed" return self @@ -3048,6 +3539,21 @@ def get_have(self, config): if template_available: self.get_have_template(config, template_available) + if profile_names and template_name and device_types: + self.log("Initiating profile assignment collection for template profile management", "DEBUG") + + if device_types: + parsed_current_profile = [] + for each_type in device_types: + each_family = each_type.get("product_family") + parsed_current_profile.extend( + self.get_profile_details(each_family, + profile_names, + template_name) + ) + + have["current_profile"] = self.deduplicate_list_of_dict(parsed_current_profile) + project_config = config.get("projects", []) if project_config and isinstance(project_config, list): have["projects"] = [] @@ -3211,6 +3717,13 @@ def get_want(self, config): if self.params.get("state") == "merged": self.update_mandatory_parameters(template_params) + ccc_version = self.get_ccc_version() + if ( + self.compare_dnac_versions(ccc_version, "3.1.3.0") >= 0 + and configuration_templates.get("profile_names") + ): + want["profile_names"] = configuration_templates.get("profile_names") + want["template_params"] = template_params want["project_params"] = project_params want["comments"] = version_comments @@ -4099,9 +4612,82 @@ def update_configuration_templates(self, config, configuration_templates): if is_commit: name = self.want.get("template_params").get("name") self.log("Attempting to commit template '{0}' with ID '{1}'.".format(name, template_id), "INFO") + self.commit_the_template(template_id, name).check_return_status() self.log("Template '{0}' committed successfully in the Cisco Catalyst Center.".format(name), "INFO") + self.log("Initiating profile assignment and detachment processing for template '{0}'".format( + name), "DEBUG") + current_profiles = self.have.get("current_profile", []) + self.log("Processing {0} profile(s) for template '{1}'.".format( + len(current_profiles), name), "INFO") + + for profile_index, each_profile in enumerate(current_profiles): + # Extract profile information once per iteration + each_profile_name = each_profile.get("profile_name") + each_profile_id = each_profile.get("profile_id") + profile_template_name = each_profile.get("template_name") + profile_status = each_profile.get("profile_status") + + # Skip profiles not associated with the current template + if profile_template_name != name: + self.log("Skipping profile '{0}' - not associated with template '{1}' (associated with '{2}')".format( + each_profile_name, name, profile_template_name), "DEBUG") + continue + + self.log("Processing profile '{0}' (index {1}) with status '{2}' for template '{3}'".format( + each_profile_name, profile_index, profile_status, name), "DEBUG") + + # Case 1: Assign profile to template + if profile_status == "Not Assigned": + self.log("Assigning profile '{0}' to template '{1}' - profile not currently assigned".format( + each_profile_name, name), "INFO") + + try: + template_status = self.attach_networkprofile_cli_template( + each_profile_name, each_profile_id, name, template_id) + self.log("Received response from profile attachment API for profile '{0}': {1}".format( + each_profile_name, template_status), "DEBUG") + + if template_status and template_status.get("progress"): + success_msg = "Profile '{0}' successfully attached to template '{1}'".format( + each_profile_name, name) + self.log(success_msg, "INFO") + self.profile_assigned.append(each_profile_name) + else: + error_msg = "Failed to attach profile '{0}' to template '{1}' - API response 
indicates failure".format( + each_profile_name, name) + self.log(error_msg, "ERROR") + self.no_profile_assigned.append(each_profile_name) + + except Exception as e: + error_msg = "Exception occurred while attaching profile '{0}' to template '{1}': {2}".format( + each_profile_name, name, str(e)) + self.log(error_msg, "ERROR") + self.no_profile_assigned.append(each_profile_name) + + # Case 2: Profile already assigned (idempotent case) + elif profile_status == "already assigned": + self.log("Profile '{0}' already assigned to template '{1}' - no action required".format( + each_profile_name, name), "DEBUG") + + # Case 3: Unexpected scenario + else: + self.log("Unexpected scenario for profile '{0}' on template '{1}': status='{2}'".format( + each_profile_name, name, profile_status), "WARNING") + + # Log summary of operations + total_assigned = len(getattr(self, 'profile_assigned', [])) + total_assignment_failures = len(getattr(self, 'no_profile_assigned', [])) + + self.log("Profile operation summary for template '{0}':".format(name), "INFO") + self.log(" - Profiles assigned: {0} {1}".format(total_assigned, + getattr(self, 'profile_assigned', [])), "INFO") + self.log(" - Assignment failures: {0} {1}".format(total_assignment_failures, + getattr(self, 'no_profile_assigned', [])), "INFO") + + self.log("Completed profile assignment and processing for template '{0}'".format(name), "INFO") + return self def handle_export(self, export): @@ -5343,9 +5929,40 @@ def update_template_projects_message(self): commit_template_msg = "Template '{0}' committed successfully in the Cisco Catalyst Center.".format(self.template_committed) result_msg_list.append(commit_template_msg) + if self.profile_assigned: + profile_assign_msg = "Profile(s) '{0}' assigned successfully to the template.".format(str( + self.profile_assigned)) + result_msg_list.append(profile_assign_msg) + + if self.no_profile_assigned: + no_profile_assign_msg = "Unable to assign the profile(s) '{0}' to the template.".format(str( + self.no_profile_assigned)) + result_msg_list.append(no_profile_assign_msg) + + if (self.profile_exists and not self.profile_detached + and not self.profile_not_detached and not self.profile_already_detached): + profile_exists_msg = "Profile(s) '{0}' already exist and cannot be assigned to the template.".format(str( + self.profile_exists)) + result_msg_list.append(profile_exists_msg) + + if self.profile_detached: + profile_detach_msg = "Profile(s) '{0}' detached successfully from the template.".format(str( + self.profile_detached)) + result_msg_list.append(profile_detach_msg) + + if self.profile_not_detached: + profile_not_detach_msg = "Profile(s) '{0}' could not be detached from the template.".format(str( + self.profile_not_detached)) + result_msg_list.append(profile_not_detach_msg) + + if self.profile_already_detached: + profile_already_detach_msg = "Profile(s) '{0}' were already detached from the template.".format(str( + self.profile_already_detached)) + result_msg_list.append(profile_already_detach_msg) + if ( self.project_created or self.template_created or self.template_updated - or self.template_committed + or self.template_committed or self.profile_assigned or self.profile_detached ): self.result["changed"] = True @@ -5715,6 +6332,16 @@ def delete_project_or_template(self, config, is_delete_project=False): ) time.sleep(sleep_duration) else: + current_profiles = self.have.get("current_profile", []) + if current_profiles and self.compare_dnac_versions(ccc_version, "3.1.3.0") >= 0: + template_name = 
self.want.get("template_params").get("name") + self.log("Detaching profile from template", "DEBUG") + detach_status = self.detach_profiles_from_template(template_name, current_profiles) + if detach_status: + self.log("Received response from detach profile.", "DEBUG") + self.update_template_projects_message().check_return_status() + return self + self.log( "Deleting '{0}' using function '{1}' with parameters: '{2}' on Catalyst version: {3} (> 2.3.5.3)".format( name, deletion_value, params_key, ccc_version @@ -5744,6 +6371,92 @@ def delete_project_or_template(self, config, is_delete_project=False): return self + def detach_profiles_from_template(self, name, current_profiles): + """ + Detach profiles from a specific template in Cisco Catalyst Center. + + Args: + name (str): The name of the template. + current_profiles (list): A list of profile names to detach. + + Returns: + bool: Returns True if the detachment was successful execution. + """ + self.log("Detaching profiles from template '{0}': {1}".format(name, current_profiles), "INFO") + + for profile_index, each_profile in enumerate(current_profiles): + # Extract profile information once per iteration + each_profile_name = each_profile.get("profile_name") + each_profile_id = each_profile.get("profile_id") + profile_template_name = each_profile.get("template_name") + profile_status = each_profile.get("profile_status") + template_id = self.have_template.get("id") + + # Skip profiles not associated with the current template + if profile_template_name != name: + self.log("Skipping profile '{0}' - not associated with template '{1}' (associated with '{2}')".format( + each_profile_name, name, profile_template_name), "DEBUG") + continue + + self.log("Processing profile '{0}' (index {1}) with status '{2}' for template '{3}'".format( + each_profile_name, profile_index, profile_status, name), "DEBUG") + + # Case 1: Detach profile from template + if profile_status == "already assigned": + self.log("Detaching profile '{0}' from template '{1}' - profile currently assigned and detach requested".format( + each_profile_name, name), "INFO") + + try: + template_status = self.detach_networkprofile_cli_template( + each_profile_name, each_profile_id, name, template_id) + self.log("Received response from profile detachment API for profile '{0}': {1}".format( + each_profile_name, template_status), "DEBUG") + + if template_status and template_status.get("progress"): + success_msg = "Profile '{0}' successfully detached from template '{1}'".format( + each_profile_name, name) + self.log(success_msg, "INFO") + self.profile_detached.append(each_profile_name) + else: + error_msg = "Failed to detach profile '{0}' from template '{1}' - API response indicates failure".format( + each_profile_name, name) + self.log(error_msg, "ERROR") + self.profile_not_detached.append(each_profile_name) + + except Exception as e: + error_msg = "Exception occurred while detaching profile '{0}' from template '{1}': {2}".format( + each_profile_name, name, str(e)) + self.log(error_msg, "ERROR") + self.profile_not_detached.append(each_profile_name) + + # Case 2: Profile already detached (idempotent case) + elif profile_status == "Not Assigned": + self.log("Profile '{0}' already detached from template '{1}' - no action required".format( + each_profile_name, name), "INFO") + self.profile_already_detached.append(each_profile_name) + + # Case 3: Unexpected scenario + else: + self.log("Unexpected scenario for profile '{0}' on template '{1}': status='{2}'".format( + each_profile_name, name, 
profile_status), "WARNING") + + # Log summary of operations + total_detached = len(getattr(self, 'profile_detached', [])) + total_detachment_failures = len(getattr(self, 'profile_not_detached', [])) + total_already_detached = len(getattr(self, 'profile_already_detached', [])) + + self.log("Profile operation summary for template '{0}':".format(name), "INFO") + self.log(" - Profiles detached: {0} {1}".format(total_detached, + getattr(self, 'profile_detached', [])), "INFO") + self.log(" - Detachment failures: {0} {1}".format(total_detachment_failures, + getattr(self, 'profile_not_detached', [])), "INFO") + self.log(" - Already detached: {0} {1}".format(total_already_detached, + getattr(self, 'profile_already_detached', [])), "INFO") + + self.log("Completed profile detachment processing for template '{0}'".format(name), "INFO") + + return True + def get_diff_deleted(self, config): """ Delete projects or templates in Cisco Catalyst Center with fields provided in playbook. diff --git a/plugins/modules/wired_campus_automation_workflow_manager.py b/plugins/modules/wired_campus_automation_workflow_manager.py new file mode 100644 index 0000000000..4c72ece1f2 --- /dev/null +++ b/plugins/modules/wired_campus_automation_workflow_manager.py @@ -0,0 +1,14662 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright (c) 2025, Cisco Systems +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +"""Ansible module to manage wired campus automation in Cisco Catalyst Center.""" +from __future__ import absolute_import, division, print_function + +__metaclass__ = type +__author__ = "Rugvedi Kapse, Madhan Sankaranarayanan" + + +DOCUMENTATION = r""" +--- +module: wired_campus_automation_workflow_manager +short_description: Manage wired campus automation operations in Cisco Catalyst Center +description: + - BETA MODULE, CISCO INTERNAL USE ONLY + - This module is currently in beta and is intended for Cisco internal purposes only. + - It is not available for customer consumption and should not be used in production environments. + - This module provides comprehensive management of Layer 2 wired network configurations in + - Cisco Catalyst Center. + - Configure VLANs, STP, CDP, LLDP, VTP, DHCP Snooping, IGMP/MLD Snooping, authentication, + - port channels, and interface settings. + - Supports both creation and updating of configurations on network devices. + - Provides automated deployment of intended configurations to devices. + - Includes comprehensive validation of all configuration parameters before applying changes. + + - Feature Support Matrix + - C(VLANs) - create, update, delete + - C(CDP) - create, update, delete + - C(LLDP) - create, update, delete + - C(STP) - create, update (delete not supported due to API limitations) + - C(VTP) - create, update, delete + - C(DHCP Snooping) - create, update, delete + - C(IGMP Snooping) - create, update (delete not supported due to API limitations) + - C(MLD Snooping) - create, update (delete not supported due to API limitations) + - C(Authentication) - create, update, delete + - C(Logical Ports) - create, update (delete not supported due to API limitations) + - C(Port Configuration) - create, update (delete not supported due to API limitations) + + - Known API Limitations & Issues + - The deleted state is not supported for STP, IGMP Snooping, MLD Snooping, + Port Configuration, and Logical Ports due to underlying beta API limitations. + - Several known issues exist with the beta APIs that may affect functionality. 
+ + - VLANs (vlanConfig) - + - VLAN configuration may silently fail when VTP mode is SERVER (CSCwr00884) + - VLAN name cannot be reset to empty string once set + + - STP (stpGlobalConfig) - + - STP instance deletion does not properly remove deployed configuration (CSCwr01764) + - Incorrect payload structure validation for isStpEnabled parameter (CSCwr0107) + + - VTP (vtpGlobalConfig) - + - Domain name cannot be removed once set (expected behavior) + - Configuration file name and source interface cannot be reset to empty string (CSCwr01195) + - Misleading validation error when attempting to remove VTP domain name (CSCwr01131) + + - DHCP Snooping (dhcpSnoopingGlobalConfig) - + - Global configuration not fully reset to defaults after intent deletion (CSCwr01309) + - Agent URL, proxy bridge VLANs, and snooping VLANs cannot be reset using empty strings (CSCwr01255, CSCwr01321, CSCwr01327) + + - IGMP/MLD Snooping (igmpSnoopingGlobalConfig, mldSnoopingGlobalConfig) - + - Querier address does not reset to default on intent deletion (CSCwr01879) + - MLD snooping rejects empty querier address in update operations (CSCwr06296) + + - Logical Ports (portchannelConfig) - + - Port channel configuration may fail silently without proper error response (CSCwr01895) + - Optional fields incorrectly enforced as required during validation (CSCwr08060) + + - Port Configuration (switchportInterfaceConfig) - + - Switchport configuration may silently fail during comprehensive port updates + - Storm Control, Port Security, and UDLD interface configurations are not supported (available in 3.2.x release) +version_added: "6.20.0" +extends_documentation_fragment: + - cisco.dnac.workflow_manager_params +author: + - Rugvedi Kapse (@rukapse) + - Madhan Sankaranarayanan (@madhansansel) +options: + config_verify: + description: Set to true to verify the Cisco Catalyst + Center configuration after applying the playbook + configuration. + type: bool + default: false + state: + description: The desired state of Cisco Catalyst Center after module execution. + type: str + choices: [merged, deleted] + default: "merged" + config: + description: List of wired campus automation configurations to be applied to network devices. + type: list + elements: dict + required: true + suboptions: + ip_address: + description: + - The management IP address of the network device to configure. + - Must be a valid IPv4 address format. + - Either "ip_address" or "hostname" must be provided to identify the device. + - If both are provided, ip_address takes precedence. + - Example - "192.168.1.1" + type: str + required: false + hostname: + description: + - The hostname of the network device to configure. + - Used when IP address is not available or preferred. + - Must match the hostname registered in Catalyst Center. + - Either "ip_address" or "hostname" must be provided to identify the device. + type: str + required: false + device_collection_status_check: + description: + - Controls whether to verify the device's collection status before applying configurations. + - When true, ensures the device is in "Managed" or "In Progress" state before proceeding. + - When false, skips the collection status check (useful for devices being onboarded). + - Recommended to keep as true for production environments. + type: bool + required: false + default: true + layer2_configuration: + description: + - Comprehensive Layer 2 configuration settings for the network device. + - Contains all supported Layer 2 protocols and features. 
+ - Each feature is optional and can be configured independently. + type: dict + required: false + suboptions: + vlans: + description: + - List of VLAN configurations to create or modify on the device. + - VLANs are fundamental building blocks for network segmentation. + - Each VLAN must have a unique ID within the valid range (1-4094). + - Default VLANs (1, 1002-1005) are typically pre-configured and should not be modified. + type: list + elements: dict + required: false + suboptions: + vlan_id: + description: + - Unique identifier for the VLAN. + - Must be within the valid range of 1 to 4094. + - VLAN 1 is the default VLAN and exists on all switches. + - VLANs 1002-1005 are reserved for legacy protocols. + - Extended VLANs (1006-4094) may require VTP version 3. + type: int + required: true + vlan_name: + description: + - Descriptive name for the VLAN to aid in identification and management. + - Maximum length depends on VTP version (32 chars for v1/v2, 128 chars for v3). + - Should be descriptive and follow organizational naming conventions. + - If not specified, defaults to "VLAN" followed by the VLAN ID with leading zeros. + - Must contain only ASCII characters (0-127) as per Catalyst Center API requirements. + - Cannot contain whitespace characters (spaces, tabs, newlines) or question marks (?). + - Use underscores (_) or hyphens (-) instead of spaces for better compatibility. + - Empty strings are not allowed and will cause API validation errors. + - Examples - "SALES_VLAN", "IOT_DEVICES", "GUEST_NETWORK" + type: str + required: false + vlan_admin_status: + description: + - Administrative status of the VLAN (enabled or disabled). + - When true, the VLAN is active and can carry traffic. + - When false, the VLAN is administratively shut down. + - Disabled VLANs do not forward traffic but retain their configuration. + - NOTE - "vlan_admin_status" Can only be modified for VLAN IDs 2-1001. + - Extended range VLANs (1002-4094) do not support admin status updates. + type: bool + required: false + default: true + cdp: + description: + - Cisco Discovery Protocol (CDP) global configuration settings. + - CDP is a Cisco proprietary protocol for discovering neighboring Cisco devices. + - Runs over Layer 2 and provides device information like platform, capabilities, and addresses. + - Useful for network topology discovery and troubleshooting. + type: dict + required: false + suboptions: + cdp_admin_status: + description: + - Globally enable or disable CDP on the device. + - When true, CDP is enabled globally (equivalent to "cdp run" command). + - When false, CDP is disabled globally on all interfaces. + - Individual interfaces can still override this setting. + type: bool + required: false + default: true + cdp_hold_time: + description: + - Time in seconds that receiving devices should hold CDP information before discarding it. + - Must be between 10 and 255 seconds. + - Should be set higher than the timer interval to prevent information loss. + - Typical values are 180 seconds (3 times the default timer). + - Equivalent to "cdp holdtime" command. + type: int + required: false + default: 180 + cdp_timer: + description: + - Frequency in seconds at which CDP advertisements are sent. + - Must be between 5 and 254 seconds. + - Lower values provide more current information but increase network overhead. + - Higher values reduce overhead but may delay topology discovery. + - Equivalent to "cdp timer" command. 
+ type: int + required: false + default: 60 + cdp_advertise_v2: + description: + - Enable CDP version 2 advertisements. + - When true, sends CDP version 2 advertisements (default and recommended). + - When false, sends CDP version 1 advertisements (legacy compatibility). + - Version 2 provides additional information and error detection. + - Equivalent to "cdp advertise-v2" command. + type: bool + required: false + default: true + cdp_log_duplex_mismatch: + description: + - Enable logging of duplex mismatches detected by CDP. + - When true, logs warnings when CDP detects duplex mismatches with neighbors. + - When false, duplex mismatch detection is disabled. + - Useful for identifying and troubleshooting duplex configuration issues. + - Equivalent to "cdp log mismatch duplex" command. + type: bool + required: false + default: true + lldp: + description: + - Link Layer Discovery Protocol (LLDP) global configuration settings. + - LLDP is an IEEE 802.1AB standard protocol for discovering neighboring devices. + - Vendor-neutral alternative to CDP, supported by multiple vendors. + - Provides device identification, capabilities, and management information. + type: dict + required: false + suboptions: + lldp_admin_status: + description: + - Globally enable or disable LLDP on the device. + - When true, LLDP is enabled globally (equivalent to "lldp run" command). + - When false, LLDP is disabled globally on all interfaces. + - Individual interfaces can still override this setting. + type: bool + required: false + default: false + lldp_hold_time: + description: + - Time in seconds that receiving devices should hold LLDP information before discarding it. + - Must be between 0 and 32767 seconds. + - Should be set higher than the timer interval to prevent information loss. + - A value of 0 means the information should not be aged out. + - Equivalent to "lldp holdtime" command. + type: int + required: false + default: 120 + lldp_timer: + description: + - Frequency in seconds at which LLDP advertisements are sent. + - Must be between 5 and 32767 seconds. + - Lower values provide more current information but increase network overhead. + - Higher values reduce overhead but may delay topology discovery. + - Equivalent to "lldp timer" command. + type: int + required: false + default: 30 + lldp_reinitialization_delay: + description: + - Delay in seconds for LLDP initialization on any interface. + - Must be between 2 and 5 seconds. + - Prevents rapid enable/disable cycles during interface initialization. + - Provides stability during interface state changes. + - Equivalent to "lldp reinit" command. + type: int + required: false + default: 2 + stp: + description: + - Spanning Tree Protocol (STP) global and per-VLAN configuration settings. + - STP prevents loops in redundant network topologies while providing path redundancy. + - Supports PVST+, RSTP, and MST modes for different network requirements. + - Critical for network stability in environments with redundant paths. + type: dict + required: false + suboptions: + stp_mode: + description: + - Spanning Tree Protocol mode to operate in. + - C(PVST) (Per-VLAN Spanning Tree Plus) - Cisco proprietary, one instance per VLAN. + - C(RSTP) (Rapid Spanning Tree Protocol) - IEEE 802.1w, faster convergence than PVST. + - C(MST) (Multiple Spanning Tree) - IEEE 802.1s, maps multiple VLANs to instances. + - Choose based on network size, convergence requirements, and vendor compatibility. 
+ type: str + required: false + choices: ["PVST", "RSTP", "MST"] + default: "RSTP" + stp_portfast_mode: + description: + - Global PortFast mode configuration for edge ports. + - C(ENABLE) - Enables PortFast on all access ports globally. + - C(DISABLE) - Disables PortFast globally. + - C(EDGE) - Enables PortFast on edge ports (recommended for end devices). + - C(NETWORK) - Configures network ports (inter-switch links). + - C(TRUNK) - Enables PortFast on trunk ports (use with caution). + - PortFast bypasses listening and learning states for faster convergence. + - Advanced portfast modes (EDGE, NETWORK, TRUNK) are only supported on + Catalyst 9600 Series and specific Catalyst 9500 Series models (C9500-32C, + C9500-32QC, C9500-48Y4C, C9500-24Y4C, C9500X-28C8D). + type: str + required: false + choices: ["ENABLE", "DISABLE", "EDGE", "NETWORK", "TRUNK"] + stp_bpdu_guard: + description: + - Global BPDU Guard configuration for PortFast-enabled ports. + - When true, shuts down PortFast ports that receive BPDUs. + - Protects against accidental switch connections to access ports. + - Essential security feature for edge port protection. + - Equivalent to "spanning-tree portfast bpduguard default" command. + type: bool + required: false + default: false + stp_bpdu_filter: + description: + - Global BPDU Filter configuration for PortFast-enabled ports. + - When true, prevents sending and receiving BPDUs on PortFast ports. + - Should be used with caution as it can create loops if misconfigured. + - Typically used in environments where STP is not needed on edge ports. + - Equivalent to "spanning-tree portfast bpdufilter default" command. + type: bool + required: false + default: false + stp_backbonefast: + description: + - Enable BackboneFast for faster convergence on indirect link failures. + - When true, enables BackboneFast to detect indirect failures quickly. + - Reduces convergence time from 50 seconds to 30 seconds for indirect failures. + - Works in conjunction with UplinkFast for optimal convergence. + - Equivalent to "spanning-tree backbonefast" command. + type: bool + required: false + default: false + stp_extended_system_id: + description: + - Enable extended system ID for bridge priority calculation. + - When true, uses VLAN ID as part of bridge ID calculation. + - Required for PVST plus operation with more than 64 VLANs. + - Changes bridge priority calculation to include VLAN ID. + - Equivalent to "spanning-tree extend system-id" command. + type: bool + required: false + default: true + stp_logging: + description: + - Enable STP event logging for troubleshooting. + - When true, logs STP state changes and events. + - Useful for monitoring STP behavior and troubleshooting issues. + - May increase log verbosity in environments with frequent topology changes. + - Equivalent to "spanning-tree logging" command. + type: bool + required: false + default: false + stp_loopguard: + description: + - Global Loop Guard configuration to prevent loops from unidirectional failures. + - When true, prevents alternate/root ports from becoming designated ports. + - Protects against loops caused by unidirectional link failures. + - Complements UDLD for comprehensive loop prevention. + - Equivalent to "spanning-tree loopguard default" command. + type: bool + required: false + default: false + stp_transmit_hold_count: + description: + - Maximum number of BPDUs sent per hello interval. + - Must be between 1 and 20. + - Controls BPDU transmission rate to prevent overwhelming neighbors. 
+ - Higher values allow more BPDUs but may impact performance. + - Equivalent to "spanning-tree transmit hold-count" command. + type: int + required: false + default: 6 + stp_uplinkfast: + description: + - Enable UplinkFast for faster convergence on direct link failures. + - When true, enables UplinkFast for access layer switches. + - Provides sub-second convergence for direct uplink failures. + - Should only be enabled on access layer switches, not distribution/core. + - Equivalent to "spanning-tree uplinkfast" command. + type: bool + required: false + default: false + stp_uplinkfast_max_update_rate: + description: + - Maximum rate of update packets sent when UplinkFast is enabled. + - Must be between 0 and 32000 packets per second. + - Controls the rate of multicast packets sent during convergence. + - Higher rates provide faster convergence but may impact performance. + - Only applicable when UplinkFast is enabled. + type: int + required: false + default: 150 + stp_etherchannel_guard: + description: + - Enable EtherChannel Guard to detect EtherChannel misconfigurations. + - When true, detects when one side has EtherChannel configured but the other doesn't. + - Prevents loops and inconsistencies in EtherChannel configurations. + - Essential for maintaining EtherChannel integrity. + - Equivalent to "spanning-tree etherchannel guard misconfig" command. + type: bool + required: false + default: true + stp_instances: + description: + - List of per-VLAN STP instance configurations. + - Allows customization of STP parameters for specific VLANs. + - Each instance can have different priorities and timers. + - Useful for load balancing and fine-tuning STP behavior. + type: list + elements: dict + required: false + suboptions: + stp_instance_vlan_id: + description: + - VLAN ID for this STP instance configuration. + - Must be between 1 and 4094. + - Each VLAN can have its own STP parameters. + - VLAN must exist before STP instance configuration. + type: int + required: true + stp_instance_priority: + description: + - Bridge priority for this VLAN's STP instance. + - Must be between 0 and 61440 in increments of 4096. + - Lower values have higher priority (more likely to be root). + - Default is 32768. Common values 4096, 8192, 16384, 24576. + - Used for load balancing across multiple VLANs. + type: int + required: false + default: 32768 + enable_stp: + description: + - Enable or disable STP for this specific VLAN. + - When true, STP is active for this VLAN. + - When false, STP is disabled for this VLAN (use with caution). + - Disabling STP can create loops if redundant paths exist. + type: bool + required: false + default: true + stp_instance_max_age_timer: + description: + - Maximum age timer for this STP instance in seconds. + - Must be between 6 and 40 seconds. + - Time to wait for BPDUs before aging out port information. + - Should be coordinated with hello interval and forward delay. + - Affects convergence time and stability. + type: int + required: false + default: 20 + stp_instance_hello_interval_timer: + description: + - Hello interval timer for this STP instance in seconds. + - Must be between 1 and 10 seconds. + - Frequency of BPDU transmission by the root bridge. + - Lower values provide faster detection but increase overhead. + - Should be coordinated with max age and forward delay. + type: int + required: false + default: 2 + stp_instance_forward_delay_timer: + description: + - Forward delay timer for this STP instance in seconds. + - Must be between 4 and 30 seconds. 
+ - Time spent in listening and learning states during convergence. + - Should be coordinated with max age and hello interval. + - Affects convergence time, shorter delays mean faster convergence. + type: int + required: false + default: 15 + vtp: + description: + - VLAN Trunking Protocol (VTP) configuration settings. + - VTP synchronizes VLAN configuration across switches in a domain. + - Enables centralized VLAN management for large switched networks. + - Requires careful planning to avoid accidental VLAN deletion. + type: dict + required: false + suboptions: + vtp_mode: + description: + - VTP operational mode for this switch. + - C(SERVER) - Can create, modify, and delete VLANs; propagates changes. + - C(CLIENT) - Cannot modify VLANs locally; accepts updates from servers. + - C(TRANSPARENT) - Can modify VLANs locally; forwards but doesn't process updates. + - C(OFF) - VTP is disabled; no VTP processing or forwarding. + - Choose based on network role and VLAN management strategy. + - VTP modes SERVER and CLIENT do not support extended range VLANs (1006-4094). + - If extended range VLANs are configured on the device, VTP mode + must be set to TRANSPARENT or OFF. + type: str + required: false + choices: ["SERVER", "CLIENT", "TRANSPARENT", "OFF"] + default: "SERVER" + vtp_version: + description: + - VTP protocol version to use. + - C(VERSION_1) - Original VTP implementation, basic functionality. + - C(VERSION_2) - Adds support for Token Ring and unrecognized TLVs. + - C(VERSION_3) - Adds extended VLANs, private VLANs, and MST support. + - Higher versions provide more features but require compatible switches. + type: str + required: false + choices: ["VERSION_1", "VERSION_2", "VERSION_3"] + default: "VERSION_1" + vtp_domain_name: + description: + - VTP domain name for switch participation. + - Maximum 32 characters for VTP domains. + - All switches in the same domain share VLAN information. + - Case-sensitive and must match exactly across all domain switches. + - Required for VTP version 3 operation. + - Once domain name is set, it can be updated but cannot be reset. + type: str + required: false + vtp_configuration_file_name: + description: + - Custom filename for VTP configuration storage. + - Default is "vlan.dat" in the flash file system. + - Maximum 244 characters for custom filenames. + - Useful for backup and recovery procedures. + - Should include full path if not in default location. + - NOTE - Due to API limitations, this parameter does not support + empty string values ("") for resetting to default. + - To reset this parameter, the entire VTP configuration has + to be reset using the "deleted" state. + type: str + required: false + vtp_source_interface: + description: + - Interface to use as the source for VTP updates. + - Specifies which interface IP becomes the VTP updater address. + - Useful for identifying which switch made the last update. + - Should be a consistently available interface like a loopback. + - Format interface type and number (Example, "GigabitEthernet1/0/1"). + - NOTE - Due to API limitations, this parameter does not support + empty string values ("") for resetting to default. + - To reset this parameter, the entire VTP configuration + has to be reset using the "deleted" state. + type: str + required: false + vtp_pruning: + description: + - Enable VTP pruning to optimize bandwidth usage. + - When true, restricts flooded traffic to only necessary trunk links. + - Reduces unnecessary broadcast traffic in the VTP domain. 
+ - Only affects VLANs 2-1001; VLAN 1 and extended VLANs are not pruned. + - Can only be configured when "vtp_mode" is "SERVER". + type: bool + required: false + default: false + dhcp_snooping: + description: + - DHCP Snooping configuration for securing DHCP operations. + - Prevents rogue DHCP servers and protects against DHCP-based attacks. + - Maintains a binding table of legitimate DHCP assignments. + - Foundation for other security features like IP Source Guard. + type: dict + required: false + suboptions: + dhcp_admin_status: + description: + - Globally enable or disable DHCP Snooping on the device. + - When true, enables DHCP Snooping globally. + - When false, disables DHCP Snooping on all VLANs. + - Must be enabled before configuring per-VLAN or per-interface settings. + - Equivalent to "ip dhcp snooping" command. + type: bool + required: false + default: false + dhcp_snooping_vlans: + description: + - List of VLAN IDs where DHCP Snooping should be enabled. + - Each VLAN ID must be between 1 and 4094. + - Only VLANs in this list will have DHCP packets inspected. + - VLANs not in the list will forward DHCP packets normally. + - Can be configured as individual VLANs or ranges. + - All VLANs specified in "dhcp_snooping_proxy_bridge_vlans" + must also be included in this list. + type: list + elements: int + required: false + dhcp_snooping_glean: + description: + - Enable DHCP gleaning for learning bindings from DHCP traffic. + - When true, learns DHCP bindings by monitoring DHCP acknowledgments. + - Useful for populating the binding table in existing networks. + - Should be used temporarily during initial deployment. + - Equivalent to "ip dhcp snooping glean" command. + type: bool + required: false + default: false + dhcp_snooping_database_agent_url: + description: + - URL for storing DHCP Snooping binding database remotely. + - Supports TFTP, FTP, and other file transfer protocols. + - Provides persistence of bindings across switch reboots. + - Minimum 5 characters, maximum 227 characters. + - Format for the URL - "protocol://server_ip/filename" + - The URL must start with one of the following protocol prefixes + ("bootflash:", "crashinfo:", "flash:", "ftp:", "http:", "https:" + "rcp:", "scp:", "sftp:", "tftp:") + - Examples of valid URLs + - tftp URL - "tftp://192.168.1.100/dhcp_bindings.db", + - ftp URL - "ftp://server.example.com/backups/dhcp_bindings.db", + - flash URL - "flash:dhcp_bindings.db", + - bootflash URL - "bootflash:dhcp_bindings.db" + type: str + required: false + dhcp_snooping_database_timeout: + description: + - Timeout in seconds for database operations. + - Must be between 0 and 86400 seconds (24 hours). + - Time to wait for database read/write operations to complete. + - 0 means no timeout (wait indefinitely). + - Should be set based on network latency and server performance. + type: int + required: false + default: 300 + dhcp_snooping_database_write_delay: + description: + - Delay in seconds between database write operations. + - Must be between 15 and 86400 seconds. + - Batches multiple binding changes to reduce I/O overhead. + - Lower values provide more current data but increase overhead. + - Should balance between data currency and performance. + type: int + required: false + default: 300 + dhcp_snooping_proxy_bridge_vlans: + description: + - List of VLAN IDs to enable in bridge mode for DHCP relay. + - Each VLAN ID must be between 1 and 4094. + - Enables DHCP relay functionality in bridge mode. 
+ - Useful for environments with DHCP servers on different subnets. + - Works in conjunction with DHCP relay configuration. + - All VLANs specified here must also be included in "dhcp_snooping_vlans" list. + type: list + elements: int + required: false + igmp_snooping: + description: + - Internet Group Management Protocol (IGMP) Snooping configuration. + - Optimizes multicast traffic delivery in Layer 2 networks. + - Prevents unnecessary multicast flooding by learning group memberships. + - Essential for efficient multicast application delivery. + type: dict + required: false + suboptions: + enable_igmp_snooping: + description: + - Globally enable or disable IGMP Snooping. + - When true, enables IGMP Snooping globally on the switch. + - When false, disables IGMP Snooping and floods all multicast traffic. + - When disabling IGMP snooping globally, first disable IGMP + snooping on all VLANs where it is currently enabled + - Enabled by default on most modern switches. + - Equivalent to "ip igmp snooping" command. + type: bool + required: false + default: true + igmp_snooping_querier: + description: + - Enable IGMP Querier functionality globally. + - When true, the switch can act as an IGMP querier. + - When false, relies on external queriers (routers). + - Required when no multicast router is present in the VLAN. + - Equivalent to "ip igmp snooping querier" command. + type: bool + required: false + default: false + igmp_snooping_querier_address: + description: + - Source IP address for IGMP query messages. + - Must be a valid IPv4 or IPv6 address. + - Used when the switch acts as an IGMP querier. + - Should be an address reachable by all multicast receivers. + - Helps identify the querier in network troubleshooting. + type: str + required: false + igmp_snooping_querier_version: + description: + - IGMP version for query messages. + - C(VERSION_1) - Basic join/leave functionality. + - C(VERSION_2) - Adds leave group messages and group-specific queries. + - C(VERSION_3) - Adds source-specific multicast (SSM) support. + - Choose based on receiver capabilities and application requirements. + type: str + required: false + choices: ["VERSION_1", "VERSION_2", "VERSION_3"] + default: "VERSION_2" + igmp_snooping_querier_query_interval: + description: + - Interval in seconds between IGMP general query messages. + - Must be between 1 and 18000 seconds. + - Lower values provide faster detection of membership changes. + - Higher values reduce network overhead but slow detection. + - Should be coordinated with receiver timeout settings. + type: int + required: false + default: 125 + igmp_snooping_vlans: + description: + - List of per-VLAN IGMP Snooping configurations. + - Allows customization of IGMP Snooping parameters per VLAN. + - Each VLAN can have different querier settings and mrouter ports. + - Useful for optimizing multicast delivery per network segment. + type: list + elements: dict + required: false + suboptions: + igmp_snooping_vlan_id: + description: + - VLAN ID for this IGMP Snooping configuration. + - Must be between 1 and 4094. + - VLAN must exist before configuring IGMP Snooping. + - Each VLAN can have independent IGMP Snooping settings. + type: int + required: true + enable_igmp_snooping: + description: + - Enable IGMP Snooping for this specific VLAN. + - When true, IGMP Snooping is active for this VLAN. + - When false, multicast traffic is flooded in this VLAN. + - Overrides the global IGMP Snooping setting for this VLAN. 
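+ # Reminder - this per-VLAN flag overrides the global enable_igmp_snooping; per the note above, disable it on every VLAN before turning IGMP snooping off globally.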
+ type: bool + required: false + default: true + igmp_snooping_immediate_leave: + description: + - Enable immediate leave processing for IGMP in this VLAN. + - When true, immediately removes port from multicast group upon leave message. + - When false, waits for query timeout before removing port from group. + - Use with caution in shared media environments where multiple devices may be on same port. + - Provides faster leave processing for point-to-point links and single device connections. + - Equivalent to "ip igmp snooping immediate-leave" command per VLAN. + type: bool + required: false + default: true + igmp_snooping_querier: + description: + - Enable IGMP Querier for this specific VLAN. + - When true, this VLAN can have its own querier. + - When false, relies on external queriers for this VLAN. + - Useful when different VLANs have different querier requirements. + - If any VLAN in "igmp_snooping_vlans" has "igmp_snooping_querier" set to true, this must also be true. + type: bool + required: false + default: false + igmp_snooping_querier_address: + description: + - Source IP address for IGMP queries in this VLAN. + - Must be a valid IPv4 or IPv6 address. + - Should be an address within the VLAN's subnet. + - Used for VLAN-specific querier identification. + type: str + required: false + igmp_snooping_querier_version: + description: + - IGMP version for this VLAN's query messages. + - C(VERSION_1) - Basic join/leave functionality. + - C(VERSION_2) - Adds leave group messages and group-specific queries. + - C(VERSION_3) - Adds source-specific multicast (SSM) support. + - Can be different from the global IGMP version. + - Choose based on VLAN-specific application requirements. + type: str + required: false + choices: ["VERSION_1", "VERSION_2", "VERSION_3"] + default: "VERSION_2" + igmp_snooping_querier_query_interval: + description: + - Query interval for this specific VLAN in seconds. + - Must be between 1 and 18000 seconds. + - Can be optimized based on VLAN's multicast traffic patterns. + - Lower intervals for VLANs with dynamic memberships. + type: int + required: false + igmp_snooping_mrouter_port_list: + description: + - List of interface names that connect to multicast routers. + - Interfaces in this list are treated as mrouter ports. + - Multicast traffic is always forwarded to these ports. + - Format interface type and number (Example, "GigabitEthernet1/0/1"). + - Essential for proper multicast routing integration. + type: list + elements: str + required: false + mld_snooping: + description: + - Multicast Listener Discovery (MLD) Snooping configuration for IPv6. + - IPv6 equivalent of IGMP Snooping for optimizing IPv6 multicast traffic. + - Prevents unnecessary IPv6 multicast flooding in Layer 2 networks. + - Essential for efficient IPv6 multicast application delivery. + type: dict + required: false + suboptions: + enable_mld_snooping: + description: + - Globally enable or disable MLD Snooping. + - When true, enables MLD Snooping globally on the switch. + - When false, disables MLD Snooping and floods all IPv6 multicast traffic. + - Disabled by default on most switches. + - Equivalent to "ipv6 mld snooping" command. + type: bool + required: false + default: false + mld_snooping_querier: + description: + - Enable MLD Querier functionality globally. + - When true, the switch can act as an MLD querier. + - When false, relies on external queriers (IPv6 routers). + - Required when no IPv6 multicast router is present in the VLAN. + - Equivalent to "ipv6 mld snooping querier" command. 
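+ # Enable this only when no IPv6 multicast router serves the VLANs, and pair it with a link-local mld_snooping_querier_address (for example, fe80::1 as used in the examples below).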
+ type: bool + required: false + default: false + mld_snooping_querier_address: + description: + - Source IPv6 address for MLD query messages. + - Querier Address must be a valid IPv6 Link-Local address. + - Used when the switch acts as an MLD querier. + - Should be an address reachable by all IPv6 multicast listeners. + - Helps identify the querier in network troubleshooting. + type: str + required: false + mld_snooping_querier_version: + description: + - MLD version for query messages. + - C(VERSION_1) - Basic IPv6 multicast listener functionality. + - C(VERSION_2) - Adds source-specific multicast and enhanced features. + - Choose based on IPv6 application requirements and receiver capabilities. + - C(VERSION_2) is recommended for modern IPv6 networks. + type: str + required: false + choices: ["VERSION_1", "VERSION_2"] + default: "VERSION_2" + mld_snooping_listener: + description: + - Enable listener message suppression for MLD. + - When true, suppresses duplicate listener reports to reduce overhead. + - When false, forwards all listener reports to queriers. + - Helps optimize bandwidth usage in dense IPv6 multicast environments. + - Equivalent to "ipv6 mld snooping listener-message-suppression" command. + type: bool + required: false + default: true + mld_snooping_querier_query_interval: + description: + - Interval in seconds between MLD general query messages. + - Must be between 1 and 18000 seconds. + - Lower values provide faster detection of IPv6 membership changes. + - Higher values reduce network overhead but slow detection. + - Should be coordinated with IPv6 receiver timeout settings. + type: int + required: false + default: 125 + mld_snooping_vlans: + description: + - List of per-VLAN MLD Snooping configurations. + - Allows customization of MLD Snooping parameters per VLAN. + - Each VLAN can have different querier settings and mrouter ports. + - Useful for optimizing IPv6 multicast delivery per network segment. + type: list + elements: dict + required: false + suboptions: + mld_snooping_vlan_id: + description: + - VLAN ID for this MLD Snooping configuration. + - Must be between 1 and 4094. + - VLAN must exist before configuring MLD Snooping. + - Each VLAN can have independent MLD Snooping settings. + type: int + required: true + enable_mld_snooping: + description: + - Enable MLD Snooping for this specific VLAN. + - When true, MLD Snooping is active for this VLAN. + - When false, IPv6 multicast traffic is flooded in this VLAN. + - Overrides the global MLD Snooping setting for this VLAN. + type: bool + required: false + default: false + mld_snooping_enable_immediate_leave: + description: + - Enable immediate leave processing for MLDv1 in this VLAN. + - When true, immediately removes port from multicast group upon leave. + - When false, waits for query timeout before removing port. + - Use with caution in shared media environments. + - Provides faster leave processing for point-to-point links. + type: bool + required: false + default: false + mld_snooping_querier: + description: + - Enable MLD Querier for this specific VLAN. + - When true, this VLAN can have its own MLD querier. + - When false, relies on external queriers for this VLAN. + - Useful when different VLANs have different querier requirements. + type: bool + required: false + default: false + mld_snooping_querier_address: + description: + - Source IPv6 address for MLD queries in this VLAN. + - Must be a valid IPv6 address format. + - Should be an address within the VLAN's IPv6 prefix. 
+ - Used for VLAN-specific querier identification. + type: str + required: false + mld_snooping_querier_version: + description: + - MLD version for this VLAN's query messages. + - C(VERSION_1) - Basic IPv6 multicast listener functionality. + - C(VERSION_2) - Adds source-specific multicast and enhanced features. + - Can be different from the global MLD version. + - Choose based on VLAN-specific IPv6 application requirements. + type: str + required: false + choices: ["VERSION_1", "VERSION_2"] + default: "VERSION_1" + mld_snooping_querier_query_interval: + description: + - Query interval for this specific VLAN in seconds. + - Must be between 1 and 18000 seconds. + - Can be optimized based on VLAN's IPv6 multicast traffic patterns. + - Lower intervals for VLANs with dynamic IPv6 memberships. + type: int + required: false + mld_snooping_mrouter_port_list: + description: + - List of interface names that connect to IPv6 multicast routers. + - Interfaces in this list are treated as IPv6 mrouter ports. + - IPv6 multicast traffic is always forwarded to these ports. + - Format interface type and number (Example, "GigabitEthernet1/0/1"). + - Essential for proper IPv6 multicast routing integration. + type: list + elements: str + required: false + authentication: + description: + - IEEE 802.1X authentication configuration settings. + - Provides port-based network access control for enhanced security. + - Authenticates devices before granting network access. + - Foundation for Identity-Based Networking Services (IBNS). + type: dict + required: false + suboptions: + enable_dot1x_authentication: + description: + - Globally enable or disable 802.1X authentication. + - When true, enables 802.1X authentication globally. + - When false, disables 802.1X authentication on all ports. + - Must be enabled before configuring per-port authentication. + - Equivalent to "dot1x system-auth-control" command. + type: bool + required: false + default: false + authentication_config_mode: + description: + - Authentication configuration mode (legacy vs. new style). + - C(LEGACY) - Traditional authentication manager configuration mode. + - C(NEW_STYLE) - Identity-Based Networking Services (IBNS) mode. + - NEW_STYLE is recommended for modern authentication deployments. + - Affects how authentication policies are configured and applied. + - Once the authentication configuration mode is set, it cannot be changed. + type: str + required: false + choices: ["LEGACY", "NEW_STYLE"] + default: "LEGACY" + logical_ports: + description: + - Port channel (EtherChannel) configuration for link aggregation. + - Combines multiple physical links into a single logical interface. + - Provides increased bandwidth and redundancy for critical connections. + - Supports LACP, PAgP, and static (manual) aggregation methods. + type: dict + required: false + suboptions: + port_channel_auto: + description: + - Enable automatic port channel creation (Auto-LAG). + - When true, enables automatic detection and creation of port channels. + - When false, requires manual port channel configuration. + - Auto-LAG can simplify configuration but may not suit all environments. + - Equivalent to "port-channel auto" command. + type: bool + required: false + default: false + port_channel_lacp_system_priority: + description: + - System priority for LACP protocol negotiation. + - Must be between 0 and 65535. + - Lower values have higher priority in LACP negotiations. + - Used to determine which switch controls the port channel. 
+ - Should be consistent across switches for predictable behavior. + type: int + required: false + default: 32768 + port_channel_load_balancing_method: + description: + - Method for distributing traffic across port channel members. + - Based on MAC addresses - "SRC_MAC", "DST_MAC", "SRC_DST_MAC". + - Based on IP addresses - "SRC_IP", "DST_IP", "SRC_DST_IP". + - Based on TCP/UDP ports - "SRC_PORT", "DST_PORT", "SRC_DST_PORT". + - VLAN-based load balancing methods - "VLAN_SRC_IP", "VLAN_DST_IP", "VLAN_SRC_DST_IP", + "VLAN_SRC_MIXED_IP_PORT", "VLAN_DST_MIXED_IP_PORT", "VLAN_SRC_DST_MIXED_IP_PORT". + - VLAN-based load balancing methods for port channels are only + supported on Cisco Catalyst 9600 Series Switches. + - Choose based on traffic patterns and load balancing requirements. + - Mixed options combine multiple criteria for better distribution. + type: str + required: false + choices: ["SRC_MAC", "DST_MAC", "SRC_DST_MAC", "SRC_IP", "DST_IP", + "SRC_DST_IP", "SRC_PORT", "DST_PORT", "SRC_DST_PORT", "SRC_DST_MIXED_IP_PORT", + "SRC_MIXED_IP_PORT", "DST_MIXED_IP_PORT", "VLAN_SRC_IP", "VLAN_DST_IP", + "VLAN_SRC_DST_IP", "VLAN_SRC_MIXED_IP_PORT", "VLAN_DST_MIXED_IP_PORT", + "VLAN_SRC_DST_MIXED_IP_PORT"] + default: "SRC_DST_IP" + port_channels: + description: + - List of port channel configurations to create. + - Each port channel aggregates multiple physical interfaces. + - Supports different protocols (LACP, PAgP, static). + - Each port channel has unique members and configuration. + - Port channels can only be configured when "port_channel_auto" is false. + type: list + elements: dict + required: false + suboptions: + port_channel_protocol: + description: + - Protocol to use for this port channel. + - C(LACP) - IEEE 802.3ad standard, recommended for most environments. + - C(PAGP) - Cisco proprietary protocol, for Cisco-only environments. + - C(NONE) - Static port channel without negotiation protocol. + - LACP provides better standards compliance and interoperability. + type: str + required: true + choices: ["LACP", "PAGP", "NONE"] + port_channel_name: + description: + - Name identifier for the port channel interface. + - Must be between 13 and 15 characters. + - Format typically follows "Port-channelX" where X is the number. + - Must be unique within the switch configuration. + - Used in interface configuration and monitoring. + type: str + required: true + port_channel_min_links: + description: + - Minimum number of active links required for port channel to be operational. + - Must be between 2 and 8. + - Port channel goes down if active links fall below this threshold. + - Provides guaranteed bandwidth and redundancy requirements. + - Should be set based on application bandwidth and availability needs. + type: int + required: false + default: 1 + port_channel_members: + description: + - List of physical interfaces that belong to this port channel. + - All member interfaces must have compatible configuration. + - Includes interface names and protocol-specific parameters. + - Member configuration varies based on the chosen protocol. + type: list + elements: dict + required: true + suboptions: + port_channel_interface_name: + description: + - Name of the physical interface to add to the port channel. + - Must be a valid interface on the switch. + - Format interface type and number (Example, "GigabitEthernet1/0/1"). + - Interface must not be a member of another port channel. + - Interface configuration must be compatible with other members. 
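+ # For example, bundling GigabitEthernet1/0/10 and GigabitEthernet1/0/11 into one port channel assumes both members share the same speed, duplex, and switchport settings.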
+ type: str + required: true + port_channel_mode: + description: + - Port channel mode for this member interface. + - For "LACP" protocol + - C(ACTIVE) - (initiates negotiation) + - C(PASSIVE) - (responds only) + - For "PAgP" protocol + - C(AUTO) - (responds only) + - C(AUTO_NON_SILENT) - (responds only, with more frequent messages) + - C(DESIRABLE) - (initiates negotiation) + - C(DESIRABLE_NON_SILENT) - (initiates negotiation, with more frequent messages) + - For "NONE" protocol + - C(ON) - (static aggregation without negotiation) + - Choose based on desired negotiation behavior and protocol. + type: str + required: false + choices: ["ACTIVE", "PASSIVE", "AUTO", "AUTO_NON_SILENT", "DESIRABLE", "DESIRABLE_NON_SILENT", "ON"] + port_channel_port_priority: + description: + - Priority for this interface in port channel selection. + - For "LACP" protocol - 0-65535 (lower values have higher priority). + - For "PAgP" protocol - 0-255 (lower values have higher priority). + - Used when more interfaces are available than can be active. + - Helps determine which interfaces carry traffic in standby scenarios. + type: int + required: false + port_channel_rate: + description: + - LACP packet transmission rate (LACP protocol only). + - C(1) - Fast rate (1 second intervals). + - C(30) - Normal rate (30 second intervals). + - Fast rate provides quicker failure detection but increases overhead. + - Only applicable when using LACP protocol. + type: int + required: false + choices: [1, 30] + default: 30 + port_channel_learn_method: + description: + - Learning method for PAgP protocol (PAgP only). + - C(AGGREGATION_PORT) - Learn on the port channel interface. + - C(PHYSICAL_PORT) - Learn on individual physical interfaces. + - Affects MAC address learning and forwarding behavior. + - Only applicable when using PAgP protocol. + type: str + required: false + choices: ["AGGREGATION_PORT", "PHYSICAL_PORT"] + default: "AGGREGATION_PORT" + port_configuration: + description: + - Individual interface configuration settings for all port types. + - Allows per-interface customization of Layer 2 features. + - Each interface can have unique switchport, security, and protocol settings. + - Essential for fine-grained network access control and optimization. + - NOTE - configure switchport_interface_config FIRST before other interface features. + type: list + elements: dict + required: false + suboptions: + interface_name: + description: + - Name of the interface to configure. + - Must be a valid interface identifier on the target switch. + - Format interface type and number (Example, "GigabitEthernet1/0/1"). + - Interface must exist on the device and be configurable. + - Used as the key to identify which interface to configure. + type: str + required: true + switchport_interface_config: + description: + - Basic switchport configuration for Layer 2 operation. + - Defines interface mode, VLAN assignments, and administrative settings. + - Essential for connecting end devices and configuring trunk links. + - Forms the foundation of Layer 2 connectivity. + type: dict + required: false + suboptions: + switchport_description: + description: + - Descriptive text for interface documentation and identification. + - Maximum 230 characters of descriptive text. + - Should follow organizational naming conventions. + - Useful for documentation, monitoring, and troubleshooting. + - Cannot include non-ASCII characters. + type: str + required: false + switchport_mode: + description: + - Switchport operational mode. 
+ - C(ACCESS) - Interface carries traffic for a single VLAN. + - C(TRUNK) - Interface carries traffic for multiple VLANs. + - C(DYNAMIC_AUTO) - Negotiates mode with neighbor (becomes trunk if neighbor is trunk/desirable). + - C(DYNAMIC_DESIRABLE) - Actively negotiates to become trunk. + - C(DOT1Q_TUNNEL) - Interface acts as a tunnel port for service provider networks. + type: str + required: false + choices: ["ACCESS", "TRUNK", "DYNAMIC_AUTO", "DYNAMIC_DESIRABLE", "DOT1Q_TUNNEL"] + default: "ACCESS" + access_vlan: + description: + - VLAN ID for untagged traffic when interface is in access mode. + - Must be between 1 and 4094. + - Only applicable when switchport_mode is "ACCESS". + - VLAN must exist before assigning to interface. + - Defines which VLAN untagged traffic will be placed in. + type: int + required: false + default: 1 + voice_vlan: + description: + - VLAN ID for IP phone traffic on access ports. + - Must be between 1 and 4094. + - Allows IP phones to use a separate VLAN for voice traffic. + - Enables QoS prioritization and security separation for voice. + - Only applicable on access ports with connected IP phones. + type: int + required: false + admin_status: + description: + - Administrative status of the interface. + - When true, interface is administratively enabled (no shutdown). + - When false, interface is administratively disabled (shutdown). + - Disabled interfaces do not pass traffic but retain configuration. + - Used for maintenance and security purposes. + type: bool + required: false + default: true + allowed_vlans: + description: + - List of VLAN IDs allowed on trunk interfaces. + - Each VLAN ID must be between 1 and 4094. + - Only applicable when switchport_mode is TRUNK. + - Controls which VLANs can traverse the trunk link. + - Helps optimize bandwidth and enhance security. + type: list + elements: int + required: false + native_vlan_id: + description: + - Native VLAN ID for trunk interfaces (untagged traffic). + - Must be between 1 and 4094. + - Only applicable when switchport_mode is TRUNK. + - Defines which VLAN untagged traffic belongs to on trunk. + - Should be changed from default (VLAN 1) for security. + type: int + required: false + default: 1 + vlan_trunking_interface_config: + description: + - VLAN trunking specific configuration for trunk interfaces. + - Controls DTP negotiation, protection, and VLAN pruning. + - Optimizes trunk operation and enhances security. + type: dict + required: false + suboptions: + enable_dtp_negotiation: + description: + - Dynamic Trunking Protocol (DTP) negotiation setting. + - Controls whether the interface participates in DTP negotiation. + - When enabled, interface can negotiate trunking with neighbor. + - When disabled, prevents DTP packet transmission (recommended for security). + - Disable DTP when connecting to non-Cisco devices or for security. + - DTP negotiation control REQUIRES "switchport_mode" to be "TRUNK" (not "DYNAMIC") + type: bool + required: false + default: true + protected: + description: + - Enable protected port functionality. + - When true, prevents traffic between protected ports at Layer 2. + - Traffic between protected ports must traverse a Layer 3 device. + - Useful for isolating ports within the same VLAN. + - Enhances security in shared network environments. + type: bool + required: false + default: false + pruning_vlan_ids: + description: + - List of VLAN IDs eligible for VTP pruning on this trunk. + - Each VLAN ID must be between 1 and 4094. 
+ - Controls which VLANs can be pruned from this trunk. + - Helps optimize bandwidth by removing unnecessary VLAN traffic. + - Works in conjunction with global VTP pruning settings. + type: list + elements: int + required: false + dot1x_interface_config: + description: + - 802.1X authentication configuration for the interface. + - Configures authentication settings, timers, and behavior for network access control. + type: dict + required: false + suboptions: + dot1x_interface_authentication_mode: + description: + - Sets the 802.1X authentication mode for the interface. + - C(AUTO) - Interface can authenticate both 802.1X and non-802.1X devices. + - C(FORCE_AUTHORIZED) - Interface only allows authenticated devices. + - C(FORCE_UNAUTHORIZED) - Interface only allows unauthenticated devices. + - Determines how the interface handles authentication requests. + type: str + choices: ["AUTO", "FORCE_AUTHORIZED", "FORCE_UNAUTHORIZED"] + required: false + dot1x_interface_pae_type: + description: + - Port Access Entity (PAE) type for 802.1X authentication. + - C(AUTHENTICATOR) - Interface acts as an authenticator (common for switches). + - C(SUPPLICANT) - Interface acts as a supplicant (common for client + devices). + - C(BOTH) - Interface can act as both authenticator and supplicant. + - Defines the role of the interface in the authentication process. + type: str + choices: ["AUTHENTICATOR", "SUPPLICANT", "BOTH"] + required: false + dot1x_interface_control_direction: + description: + - Control direction for 802.1X authentication on the interface. + - When set to C(BOTH), controls both inbound and outbound traffic. + - When set to C(IN), only controls inbound traffic. + - Specifies which traffic direction is controlled by authentication. + type: str + choices: ["BOTH", "IN"] + required: false + dot1x_interface_host_mode: + description: + - Host mode for 802.1X authentication on the interface. + - C(SINGLE_HOST) - Only one host can authenticate on the port. + - C(MULTI_HOST) - Multiple hosts can authenticate, but only one at a + time. + - C(MULTI_AUTH) - Multiple hosts can authenticate simultaneously. + - C(MULTI_DOMAIN) - Multiple hosts from different domains can authenticate. + - Determines how many hosts can authenticate on a single port. + type: str + choices: ["SINGLE_HOST", "MULTI_HOST", "MULTI_AUTH", "MULTI_DOMAIN"] + required: false + dot1x_interface_enable_inactivity_timer_from_server: + description: + - Enable receiving inactivity timer value from RADIUS server. + - When enabled, uses server-provided inactivity timeout values. + type: bool + required: false + dot1x_interface_inactivity_timer: + description: + - Inactivity timer value in seconds for 802.1X authentication. + - Time after which an inactive authenticated session is terminated. + - Valid range is 1-65535 seconds. + type: int + required: false + dot1x_interface_authentication_order: + description: + - Authentication method order for the interface. + - C(DOT1X) - 802.1X authentication method. + - C(MAB) - MAC Authentication Bypass method. + - C(WEBAUTH) - Web-based authentication method. + - Defines the sequence in which authentication methods are tried. + - Methods are attempted in the order specified in the list. + type: list + elements: str + choices: ["DOT1X", "MAB", "WEBAUTH"] + required: false + dot1x_interface_enable_reauth: + description: + - Enable periodic re-authentication for 802.1X on the interface. + - When enabled, authenticated clients are re-authenticated periodically. 
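+ # Pair this with dot1x_interface_reauth_timer (or dot1x_interface_enable_reauth_timer_from_server) below to control how often authenticated clients are re-authenticated.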
+ type: bool + required: false + dot1x_interface_port_control: + description: + - Port control mode for 802.1X authentication. + - C(AUTO) - Port automatically authorizes or unauthorizes based on + authentication state. + - C(FORCE_AUTHORIZED) - Port is always authorized regardless of + authentication state. + - C(FORCE_UNAUTHORIZED) - Port is always unauthorized regardless of + authentication state. + - Determines the initial authorization state of the port. + type: str + choices: ["AUTO", "FORCE_AUTHORIZED", "FORCE_UNAUTHORIZED"] + required: false + dot1x_interface_priority: + description: + - Authentication priority list for the interface. + - Defines priority order for authentication methods when multiple are configured. + type: list + elements: str + required: false + dot1x_interface_max_reauth_requests: + description: + - Maximum number of re-authentication requests sent to a client. + - After this limit, the client is considered unreachable. + - Valid range is 1-10 requests. + type: int + required: false + dot1x_interface_enable_reauth_timer_from_server: + description: + - Enable receiving re-authentication timer value from RADIUS server. + - When enabled, uses server-provided re-authentication timeout values. + type: bool + required: false + dot1x_interface_reauth_timer: + description: + - Re-authentication timer value in seconds for 802.1X authentication. + - Time interval between periodic re-authentication attempts. + - Valid range is 1-65535 seconds. + type: int + required: false + dot1x_interface_tx_period: + description: + - Transmission period for EAP Request/Identity frames. + - Time interval between successive EAP Request/Identity transmissions. + - Valid range is 1-65535 seconds. + type: int + required: false + mab_interface_config: + description: + - MAC Authentication Bypass (MAB) configuration for this interface. + - Provides authentication for devices that don't support 802.1X. + - Uses device MAC address as the authentication credential. + - Common for printers, cameras, and legacy devices. + type: dict + required: false + suboptions: + enable_mab: + description: + - Enable MAC Authentication Bypass on this interface. + - When true, allows authentication using device MAC address. + - When false, disables MAB authentication method. + - Useful for devices that cannot perform 802.1X authentication. + - Often used in combination with 802.1X authentication. + type: bool + required: false + default: false + stp_interface_config: + description: + - Spanning Tree Protocol configuration for this specific interface. + - Controls STP behavior, timers, and protection features per port. + - Allows fine-tuning of STP operation for different interface types. + - Essential for optimizing convergence and preventing loops. + type: dict + required: false + suboptions: + stp_interface_portfast_mode: + description: + - PortFast mode configuration for this interface. + - C(NONE) - No PortFast configuration (uses global setting). + - C(DISABLE) - Explicitly disable PortFast on this interface. + - C(EDGE) - Enable PortFast for edge ports (end device connections). + - C(EDGE_TRUNK) - Enable PortFast on trunk ports to edge devices. + - C(NETWORK) - Configure as network port (inter-switch links). + - C(TRUNK) - Enable PortFast on all trunk ports. + - Advanced portfast modes (EDGE_TRUNK, NETWORK, TRUNK) are only supported on + Catalyst 9600 Series switches and specific Catalyst 9500 Series models + (C9500-32C, C9500-32QC, C9500-48Y4C, C9500-24Y4C, C9500X-28C8D). 
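+ # A common edge-port hardening sketch - stp_interface_portfast_mode EDGE together with stp_interface_bpdu_guard true (documented below), so a stray switch BPDU shuts the port down.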
+ type: str + required: false + choices: ["NONE", "DISABLE", "EDGE", "EDGE_TRUNK", "NETWORK", "TRUNK"] + stp_interface_bpdu_filter: + description: + - BPDU Filter configuration for this interface. + - When true, prevents sending and receiving BPDUs on PortFast ports. + - When false, allows normal BPDU processing. + - Use with caution as it can create loops if misconfigured. + - Typically used on ports connected to end devices. + type: bool + required: false + default: false + stp_interface_bpdu_guard: + description: + - BPDU Guard configuration for this interface. + - When true, shuts down PortFast ports that receive BPDUs. + - When false, disables BPDU Guard protection. + - Protects against accidental switch connections to access ports. + - Essential security feature for edge port protection. + type: bool + required: false + default: false + stp_interface_cost: + description: + - Path cost for this interface in STP calculations. + - Must be between 1 and 20000000. + - Lower costs are preferred paths in STP topology. + - Allows manual control of STP path selection. + - Should reflect actual link bandwidth and desired traffic flow. + type: int + required: false + stp_interface_guard: + description: + - Guard mode configuration for this interface + - C(LOOP) - Enable Loop Guard to prevent loops from unidirectional failures. + - C(ROOT) - Enable Root Guard to prevent inferior BPDUs. + - C(NONE) - Disable guard features on this interface. + - Choose based on interface role and protection requirements. + type: str + required: false + choices: ["LOOP", "ROOT", "NONE"] + stp_interface_priority: + description: + - Port priority for this interface in STP tie-breaking. + - Must be between 0 and 240 in increments of 16. + - Lower values have higher priority for forwarding state. + - Used when multiple ports have equal cost to root bridge. + - Helps control which ports forward traffic in redundant topologies. + type: int + required: false + default: 128 + stp_interface_per_vlan_cost: + description: + - Per-VLAN cost configuration for this interface. + - Allows different costs for different VLANs on the same interface. + - Enables per-VLAN load balancing in PVST plus environments. + - Useful for optimizing traffic flow across VLANs. + type: dict + required: false + suboptions: + priority: + description: + - Cost value to apply to the specified VLANs. + - Must be between 1 and 20000000. + - Lower costs make this path preferred for the specified VLANs. + - Should be coordinated with overall STP design. + type: int + required: false + vlan_ids: + description: + - List of VLAN IDs to apply this cost setting to. + - Each VLAN ID must be between 1 and 4094. + - Allows grouping VLANs with the same cost requirements. + - VLANs must exist before applying cost settings. + type: list + elements: int + required: false + stp_interface_per_vlan_priority: + description: + - Per-VLAN priority configuration for this interface. + - Allows different priorities for different VLANs on the same interface. + - Enables per-VLAN load balancing and traffic engineering. + - Useful for optimizing port selection across VLANs. + type: dict + required: false + suboptions: + priority: + description: + - Priority value to apply to the specified VLANs. + - Must be between 0 and 240 in increments of 16. + - Lower values have higher priority for forwarding state. + - Should be coordinated with overall STP design. + type: int + required: false + vlan_ids: + description: + - List of VLAN IDs to apply this priority setting to. 
+ - Each VLAN ID must be between 1 and 4094. + - Allows grouping VLANs with the same priority requirements. + - VLANs must exist before applying priority settings. + type: list + elements: int + required: false + dhcp_snooping_interface_config: + description: + - DHCP Snooping interface configuration for this specific interface. + - Controls DHCP security features and trust settings per interface. + - Provides granular control over DHCP packet processing on individual ports. + - Essential for securing DHCP operations against rogue servers and attacks. + type: dict + required: false + suboptions: + dhcp_snooping_interface_rate: + description: + - Maximum rate of DHCP packets per second allowed on this interface. + - Must be between 1 and 2048 packets per second. + - Helps prevent DHCP flooding attacks by rate-limiting DHCP traffic. + - Higher rates may be needed for interfaces connecting to DHCP servers. + - Lower rates are typically sufficient for client access ports. + type: int + required: false + default: 100 + dhcp_snooping_interface_trust: + description: + - Configure this interface as trusted for DHCP operations. + - When true, interface is trusted and DHCP packets are forwarded without inspection. + - When false, interface is untrusted and DHCP packets are inspected and filtered. + - Trusted interfaces typically connect to legitimate DHCP servers or uplinks. + - Untrusted interfaces typically connect to end devices that should not offer DHCP. + type: bool + required: false + default: false + cdp_interface_config: + description: + - Cisco Discovery Protocol (CDP) interface configuration for this specific interface. + - Controls CDP operation on individual interfaces independent of global settings. + - Allows per-interface customization of CDP behavior and logging. + - Useful for selectively enabling/disabling CDP on specific ports. + type: dict + required: false + suboptions: + cdp_interface_admin_status: + description: + - Enable or disable CDP on this specific interface. + - When true, CDP is enabled on this interface (sends and receives CDP packets). + - When false, CDP is disabled on this interface. + - Overrides the global CDP setting for this specific interface. + - Recommended to disable on interfaces connecting to untrusted devices. + type: bool + required: false + default: true + cdp_interface_log_duplex_mismatch: + description: + - Enable logging of duplex mismatches detected by CDP on this interface. + - When true, logs warnings when CDP detects duplex mismatches with the neighbor. + - When false, duplex mismatch detection logging is disabled for this interface. + - Useful for troubleshooting connectivity issues and performance problems. + - Helps identify configuration inconsistencies between connected devices. + type: bool + required: false + default: true + lldp_interface_config: + description: + - Link Layer Discovery Protocol (LLDP) interface configuration for this specific interface. + - Controls LLDP packet transmission and reception behavior per interface. + - Provides granular control over LLDP operation on individual ports. + - Allows optimization of LLDP behavior based on interface usage. + type: dict + required: false + suboptions: + lldp_interface_receive_transmit: + description: + - Configure LLDP transmission and reception behavior for this interface. + - C(TRANSMIT_ONLY) - Only send LLDP packets, do not process received packets. + - C(RECEIVE_ONLY) - Only receive and process LLDP packets, do not transmit. 
+ - C(TRANSMIT_AND_RECEIVE) - Both send and receive LLDP packets (default behavior). + - C(DISABLED) - Completely disable LLDP on this interface. + - Choose based on security requirements and interface role in the network. + type: str + required: false + choices: ["TRANSMIT_ONLY", "RECEIVE_ONLY", "TRANSMIT_AND_RECEIVE", "DISABLED"] + default: "TRANSMIT_AND_RECEIVE" + vtp_interface_config: + description: + - VLAN Trunking Protocol (VTP) interface configuration for this specific interface. + - Controls VTP advertisement processing on individual interfaces. + - Allows per-interface control of VTP participation. + - Useful for securing VTP domains and preventing unauthorized updates. + type: dict + required: false + suboptions: + vtp_interface_admin_status: + description: + - Enable or disable VTP on this specific interface. + - When true, VTP advertisements are processed on this interface. + - When false, VTP advertisements are blocked on this interface. + - Helps prevent VTP updates from untrusted sources. + - Recommended to disable on interfaces connecting to untrusted switches. + type: bool + required: false + default: true +requirements: + - dnacentersdk >= 2.10.1 + - python >= 3.9 +notes: + - SDK Methods used are + - devices.get_device_list + - wired.Wired.get_configurations_for_an_intended_layer2_feature_on_a_wired_device + - wired.Wired.get_configurations_for_a_deployed_layer2_feature_on_a_wired_device + - wired.Wired.create_configurations_for_an_intended_layer2_feature_on_a_wired_device + - wired.Wired.update_configurations_for_an_intended_layer2_feature_on_a_wired_device + - wired.Wired.delete_configurations_for_an_intended_layer2_feature_on_a_wired_device + - wired.Wired.deploy_the_intended_configuration_features_on_a_wired_device + - Paths used are + - GET /dna/intent/api/v1/networkDevices + - GET /dna/intent/api/v1/networkDevices/${id}/configFeatures/intended/layer2/${feature} + - GET /dna/intent/api/v1/networkDevices/${id}/configFeatures/deployed/layer2/${feature} + - POST /dna/intent/api/v1/networkDevices/${id}/configFeatures/intended/layer2/${feature} + - PUT /dna/intent/api/v1/networkDevices/${id}/configFeatures/intended/layer2/${feature} + - DELETE /dna/intent/api/v1/networkDevices/${id}/configFeatures/intended/layer2/${feature} + - POST /dna/intent/api/v1/networkDevices/${id}/configFeatures/deploy +""" + +EXAMPLES = r""" +- name: Create multiple VLANs with comprehensive settings + cisco.dnac.wired_campus_automation_workflow_manager: + dnac_host: "{{ dnac_host }}" + dnac_username: "{{ dnac_username }}" + dnac_password: "{{ dnac_password }}" + dnac_verify: "{{ dnac_verify }}" + dnac_port: "{{ dnac_port }}" + dnac_version: "{{ dnac_version }}" + dnac_debug: "{{ dnac_debug }}" + state: merged + config: + - ip_address: 204.1.2.3 + device_collection_status_check: false + layer2_configuration: + vlans: + - vlan_id: 100 + vlan_name: Production_Network + vlan_admin_status: true + - vlan_id: 200 + vlan_name: Development_Network + vlan_admin_status: true + - vlan_id: 300 + vlan_name: Guest_Network + vlan_admin_status: false + +- name: Update VLAN settings + cisco.dnac.wired_campus_automation_workflow_manager: + dnac_host: "{{ dnac_host }}" + dnac_username: "{{ dnac_username }}" + dnac_password: "{{ dnac_password }}" + dnac_verify: "{{ dnac_verify }}" + dnac_port: "{{ dnac_port }}" + dnac_version: "{{ dnac_version }}" + dnac_debug: "{{ dnac_debug }}" + state: merged + config: + - ip_address: 204.1.2.3 + layer2_configuration: + vlans: + - vlan_id: 300 + vlan_name: Guest_Network_Updated + 
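# vlan_name must contain only ASCII characters with no whitespace or "?" (see the vlan_name description above); underscores keep the name API-safe.
+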
vlan_admin_status: true + +- name: Delete VLANs + cisco.dnac.wired_campus_automation_workflow_manager: + dnac_host: "{{ dnac_host }}" + dnac_username: "{{ dnac_username }}" + dnac_password: "{{ dnac_password }}" + dnac_verify: "{{ dnac_verify }}" + dnac_port: "{{ dnac_port }}" + dnac_version: "{{ dnac_version }}" + dnac_debug: "{{ dnac_debug }}" + state: deleted + config: + - ip_address: 204.1.2.3 + layer2_configuration: + vlans: + - vlan_id: 300 + +- name: Configure CDP discovery protocol + cisco.dnac.wired_campus_automation_workflow_manager: + dnac_host: "{{ dnac_host }}" + dnac_username: "{{ dnac_username }}" + dnac_password: "{{ dnac_password }}" + dnac_verify: "{{ dnac_verify }}" + dnac_port: "{{ dnac_port }}" + dnac_version: "{{ dnac_version }}" + dnac_debug: "{{ dnac_debug }}" + state: merged + config: + - ip_address: 204.1.2.3 + layer2_configuration: + cdp: + cdp_admin_status: true + cdp_hold_time: 180 + cdp_timer: 60 + cdp_advertise_v2: true + cdp_log_duplex_mismatch: true + +- name: Configure LLDP discovery protocol + cisco.dnac.wired_campus_automation_workflow_manager: + dnac_host: "{{ dnac_host }}" + dnac_username: "{{ dnac_username }}" + dnac_password: "{{ dnac_password }}" + dnac_verify: "{{ dnac_verify }}" + dnac_port: "{{ dnac_port }}" + dnac_version: "{{ dnac_version }}" + dnac_debug: "{{ dnac_debug }}" + state: merged + config: + - ip_address: 204.1.2.3 + layer2_configuration: + lldp: + lldp_admin_status: true + lldp_hold_time: 240 + lldp_timer: 30 + lldp_reinitialization_delay: 3 + +- name: Configure Spanning Tree Protocol + cisco.dnac.wired_campus_automation_workflow_manager: + dnac_host: "{{ dnac_host }}" + dnac_username: "{{ dnac_username }}" + dnac_password: "{{ dnac_password }}" + dnac_verify: "{{ dnac_verify }}" + dnac_port: "{{ dnac_port }}" + dnac_version: "{{ dnac_version }}" + dnac_debug: "{{ dnac_debug }}" + state: merged + config: + - ip_address: 204.1.2.3 + layer2_configuration: + stp: + stp_mode: MST + stp_portfast_mode: ENABLE + stp_bpdu_guard: true + stp_bpdu_filter: false + stp_backbonefast: true + stp_extended_system_id: true + stp_logging: true + stp_loopguard: false + stp_transmit_hold_count: 8 + stp_uplinkfast: false + stp_uplinkfast_max_update_rate: 200 + stp_etherchannel_guard: true + stp_instances: + - stp_instance_vlan_id: 100 + stp_instance_priority: 32768 + enable_stp: true + stp_instance_max_age_timer: 20 + stp_instance_hello_interval_timer: 2 + stp_instance_forward_delay_timer: 15 + - stp_instance_vlan_id: 200 + stp_instance_priority: 16384 + enable_stp: true + +- name: Configure VLAN Trunking Protocol + cisco.dnac.wired_campus_automation_workflow_manager: + dnac_host: "{{ dnac_host }}" + dnac_username: "{{ dnac_username }}" + dnac_password: "{{ dnac_password }}" + dnac_verify: "{{ dnac_verify }}" + dnac_port: "{{ dnac_port }}" + dnac_version: "{{ dnac_version }}" + dnac_debug: "{{ dnac_debug }}" + state: merged + config: + - ip_address: 204.1.2.3 + layer2_configuration: + vtp: + vtp_mode: TRANSPARENT + vtp_version: VERSION_2 + vtp_domain_name: CORPORATE_DOMAIN + vtp_pruning: true + vtp_configuration_file_name: flash:vtp_config.dat + vtp_source_interface: Loopback0 + +- name: Configure DHCP Snooping + cisco.dnac.wired_campus_automation_workflow_manager: + dnac_host: "{{ dnac_host }}" + dnac_username: "{{ dnac_username }}" + dnac_password: "{{ dnac_password }}" + dnac_verify: "{{ dnac_verify }}" + dnac_port: "{{ dnac_port }}" + dnac_version: "{{ dnac_version }}" + dnac_debug: "{{ dnac_debug }}" + state: merged + config: + - ip_address: 
204.1.2.3 + layer2_configuration: + dhcp_snooping: + dhcp_admin_status: true + dhcp_snooping_vlans: + - 100 + - 200 + - 300 + dhcp_snooping_glean: true + dhcp_snooping_database_agent_url: tftp://192.168.1.100/dhcp_binding.db + dhcp_snooping_database_timeout: 600 + dhcp_snooping_database_write_delay: 300 + dhcp_snooping_proxy_bridge_vlans: + - 100 + - 200 + +- name: Configure IGMP Snooping for multicast + cisco.dnac.wired_campus_automation_workflow_manager: + dnac_host: "{{ dnac_host }}" + dnac_username: "{{ dnac_username }}" + dnac_password: "{{ dnac_password }}" + dnac_verify: "{{ dnac_verify }}" + dnac_port: "{{ dnac_port }}" + dnac_version: "{{ dnac_version }}" + dnac_debug: "{{ dnac_debug }}" + state: merged + config: + - ip_address: 204.1.2.3 + layer2_configuration: + igmp_snooping: + enable_igmp_snooping: true + igmp_snooping_querier: false + igmp_snooping_querier_address: 192.168.1.10 + igmp_snooping_querier_version: VERSION_2 + igmp_snooping_querier_query_interval: 125 + igmp_snooping_vlans: + - igmp_snooping_vlan_id: 100 + enable_igmp_snooping: true + igmp_snooping_querier: false + igmp_snooping_querier_address: 192.168.1.11 + igmp_snooping_querier_version: VERSION_2 + igmp_snooping_querier_query_interval: 125 + igmp_snooping_mrouter_port_list: + - GigabitEthernet1/0/1 + - GigabitEthernet1/0/2 + - igmp_snooping_vlan_id: 200 + enable_igmp_snooping: true + igmp_snooping_querier: true + igmp_snooping_querier_version: VERSION_3 + igmp_snooping_querier_query_interval: 90 + +- name: Configure MLD Snooping for IPv6 multicast + cisco.dnac.wired_campus_automation_workflow_manager: + dnac_host: "{{ dnac_host }}" + dnac_username: "{{ dnac_username }}" + dnac_password: "{{ dnac_password }}" + dnac_verify: "{{ dnac_verify }}" + dnac_port: "{{ dnac_port }}" + dnac_version: "{{ dnac_version }}" + dnac_debug: "{{ dnac_debug }}" + state: merged + config: + - ip_address: 204.1.2.3 + layer2_configuration: + mld_snooping: + enable_mld_snooping: true + mld_snooping_querier: false + mld_snooping_querier_address: fe80::1 + mld_snooping_querier_version: VERSION_2 + mld_snooping_listener: true + mld_snooping_querier_query_interval: 125 + mld_snooping_vlans: + - mld_snooping_vlan_id: 100 + enable_mld_snooping: true + mld_snooping_enable_immediate_leave: false + mld_snooping_querier: false + mld_snooping_querier_address: fe80::10 + mld_snooping_querier_version: VERSION_2 + mld_snooping_querier_query_interval: 125 + mld_snooping_mrouter_port_list: + - GigabitEthernet1/0/3 + - GigabitEthernet1/0/4 + +- name: Configure 802.1X Authentication + cisco.dnac.wired_campus_automation_workflow_manager: + dnac_host: "{{ dnac_host }}" + dnac_username: "{{ dnac_username }}" + dnac_password: "{{ dnac_password }}" + dnac_verify: "{{ dnac_verify }}" + dnac_port: "{{ dnac_port }}" + dnac_version: "{{ dnac_version }}" + dnac_debug: "{{ dnac_debug }}" + state: merged + config: + - ip_address: 204.1.2.3 + layer2_configuration: + authentication: + enable_dot1x_authentication: true + authentication_config_mode: NEW_STYLE + +- name: Configure LACP and PAGP Port Channels + cisco.dnac.wired_campus_automation_workflow_manager: + dnac_host: "{{ dnac_host }}" + dnac_username: "{{ dnac_username }}" + dnac_password: "{{ dnac_password }}" + dnac_verify: "{{ dnac_verify }}" + dnac_port: "{{ dnac_port }}" + dnac_version: "{{ dnac_version }}" + dnac_debug: "{{ dnac_debug }}" + state: merged + config: + - ip_address: 204.1.2.3 + layer2_configuration: + logical_ports: + port_channel_auto: false + port_channel_lacp_system_priority: 4096 + 
port_channel_load_balancing_method: SRC_DST_MIXED_IP_PORT + port_channels: + - port_channel_protocol: LACP + port_channel_name: Port-channel1 + port_channel_min_links: 2 + port_channel_members: + - port_channel_interface_name: GigabitEthernet1/0/10 + port_channel_mode: ACTIVE + port_channel_port_priority: 128 + port_channel_rate: 30 + - port_channel_interface_name: GigabitEthernet1/0/11 + port_channel_mode: ACTIVE + port_channel_port_priority: 128 + port_channel_rate: 30 + - port_channel_protocol: PAGP + port_channel_name: Port-channel2 + port_channel_min_links: 1 + port_channel_members: + - port_channel_interface_name: GigabitEthernet1/0/12 + port_channel_mode: DESIRABLE + port_channel_port_priority: 128 + port_channel_learn_method: AGGREGATION_PORT + +- name: Configure Access Port with authentication and security + cisco.dnac.wired_campus_automation_workflow_manager: + dnac_host: "{{ dnac_host }}" + dnac_username: "{{ dnac_username }}" + dnac_password: "{{ dnac_password }}" + dnac_verify: "{{ dnac_verify }}" + dnac_port: "{{ dnac_port }}" + dnac_version: "{{ dnac_version }}" + dnac_debug: "{{ dnac_debug }}" + state: merged + config: + - ip_address: 204.1.2.3 + layer2_configuration: + port_configuration: + - interface_name: GigabitEthernet1/0/5 + switchport_interface_config: + switchport_description: Access Port - Production Network + switchport_mode: ACCESS + access_vlan: 100 + admin_status: true + voice_vlan: 200 + vlan_trunking_interface_config: + enable_dtp_negotiation: false + protected: false + dot1x_interface_config: + dot1x_interface_authentication_order: + - DOT1X + - MAB + dot1x_interface_authentication_mode: OPEN + dot1x_interface_pae_type: AUTHENTICATOR + dot1x_interface_control_direction: BOTH + dot1x_interface_host_mode: MULTI_AUTHENTICATION + dot1x_interface_port_control: AUTO + dot1x_interface_inactivity_timer: 300 + dot1x_interface_max_reauth_requests: 3 + dot1x_interface_reauth_timer: 3600 + mab_interface_config: + mab_interface_enable: true + stp_interface_config: + stp_interface_enable_portfast: true + stp_interface_enable_bpdu_guard: true + stp_interface_enable_bpdu_filter: false + stp_interface_enable_root_guard: false + stp_interface_enable_loop_guard: false + stp_interface_port_priority: 128 + stp_interface_cost: 19 + dhcp_snooping_interface_config: + dhcp_snooping_interface_rate_limit: 100 + dhcp_snooping_interface_trust: true + cdp_interface_config: + cdp_interface_admin_status: true + cdp_interface_logging: true + lldp_interface_config: + lldp_interface_transmit: true + lldp_interface_receive: true + vtp_interface_config: + vtp_interface_admin_status: true + +- name: Configure Trunk Port for inter-switch links + cisco.dnac.wired_campus_automation_workflow_manager: + dnac_host: "{{ dnac_host }}" + dnac_username: "{{ dnac_username }}" + dnac_password: "{{ dnac_password }}" + dnac_verify: "{{ dnac_verify }}" + dnac_port: "{{ dnac_port }}" + dnac_version: "{{ dnac_version }}" + dnac_debug: "{{ dnac_debug }}" + state: merged + config: + - ip_address: 204.1.2.3 + layer2_configuration: + port_configuration: + - interface_name: GigabitEthernet1/0/6 + switchport_interface_config: + switchport_description: Trunk Port - Inter-Switch Link + switchport_mode: TRUNK + allowed_vlans: + - 100 + - 200 + - 300 + - 400 + native_vlan_id: 100 + admin_status: true + vlan_trunking_interface_config: + enable_dtp_negotiation: true + protected: true + pruning_vlan_ids: + - 300 + - 400 + stp_interface_config: + stp_interface_enable_portfast: false + stp_interface_enable_bpdu_guard: false + 
stp_interface_enable_bpdu_filter: false + stp_interface_enable_root_guard: true + stp_interface_enable_loop_guard: true + stp_interface_port_priority: 64 + stp_interface_cost: 100 + +- name: Comprehensive network configuration with all Layer 2 features + cisco.dnac.wired_campus_automation_workflow_manager: + dnac_host: "{{ dnac_host }}" + dnac_username: "{{ dnac_username }}" + dnac_password: "{{ dnac_password }}" + dnac_verify: "{{ dnac_verify }}" + dnac_port: "{{ dnac_port }}" + dnac_version: "{{ dnac_version }}" + dnac_debug: "{{ dnac_debug }}" + state: merged + config: + - ip_address: 204.1.2.3 + device_collection_status_check: false + layer2_configuration: + vlans: + - vlan_id: 10 + vlan_name: Management + vlan_admin_status: true + - vlan_id: 20 + vlan_name: Production + vlan_admin_status: true + - vlan_id: 30 + vlan_name: Development + vlan_admin_status: true + - vlan_id: 40 + vlan_name: Guest + vlan_admin_status: true + cdp: + cdp_admin_status: true + cdp_hold_time: 180 + cdp_timer: 60 + cdp_advertise_v2: true + cdp_log_duplex_mismatch: true + lldp: + lldp_admin_status: true + lldp_hold_time: 240 + lldp_timer: 30 + lldp_reinitialization_delay: 3 + stp: + stp_mode: RSTP + stp_portfast_mode: ENABLE + stp_bpdu_guard: true + stp_bpdu_filter: false + stp_backbonefast: true + stp_extended_system_id: true + stp_logging: true + stp_instances: + - stp_instance_vlan_id: 10 + stp_instance_priority: 32768 + enable_stp: true + - stp_instance_vlan_id: 20 + stp_instance_priority: 16384 + enable_stp: true + vtp: + vtp_mode: SERVER + vtp_version: VERSION_2 + vtp_domain_name: ENTERPRISE_DOMAIN + vtp_pruning: true + dhcp_snooping: + dhcp_admin_status: true + dhcp_snooping_vlans: + - 20 + - 30 + - 40 + dhcp_snooping_glean: true + igmp_snooping: + enable_igmp_snooping: true + igmp_snooping_querier: false + igmp_snooping_querier_version: VERSION_2 + igmp_snooping_vlans: + - igmp_snooping_vlan_id: 20 + enable_igmp_snooping: true + igmp_snooping_querier: false + authentication: + enable_dot1x_authentication: true + authentication_config_mode: NEW_STYLE + logical_ports: + port_channel_auto: false + port_channel_lacp_system_priority: 8192 + port_channel_load_balancing_method: SRC_DST_IP + port_channels: + - port_channel_protocol: LACP + port_channel_name: Port-channel10 + port_channel_min_links: 2 + port_channel_members: + - port_channel_interface_name: GigabitEthernet1/0/16 + port_channel_mode: ACTIVE + port_channel_port_priority: 128 + port_channel_rate: 30 + - port_channel_interface_name: GigabitEthernet1/0/17 + port_channel_mode: ACTIVE + port_channel_port_priority: 128 + port_channel_rate: 30 + port_configuration: + - interface_name: GigabitEthernet1/0/1 + switchport_interface_config: + switchport_description: Management Port + switchport_mode: ACCESS + access_vlan: 10 + admin_status: true + stp_interface_config: + stp_interface_enable_portfast: true + stp_interface_enable_bpdu_guard: true + dhcp_snooping_interface_config: + dhcp_snooping_interface_trust: true + - interface_name: GigabitEthernet1/0/2 + switchport_interface_config: + switchport_description: Production User Port + switchport_mode: ACCESS + access_vlan: 20 + admin_status: true + dot1x_interface_config: + dot1x_interface_authentication_order: + - DOT1X + - MAB + dot1x_interface_port_control: AUTO + stp_interface_config: + stp_interface_enable_portfast: true + +- name: Reset CDP to default settings + cisco.dnac.wired_campus_automation_workflow_manager: + dnac_host: "{{ dnac_host }}" + dnac_username: "{{ dnac_username }}" + dnac_password: "{{ 
dnac_password }}" + dnac_verify: "{{ dnac_verify }}" + dnac_port: "{{ dnac_port }}" + dnac_version: "{{ dnac_version }}" + dnac_debug: "{{ dnac_debug }}" + state: deleted + config: + - ip_address: 204.1.2.3 + layer2_configuration: + cdp: {} + +- name: Reset LLDP to default settings + cisco.dnac.wired_campus_automation_workflow_manager: + dnac_host: "{{ dnac_host }}" + dnac_username: "{{ dnac_username }}" + dnac_password: "{{ dnac_password }}" + dnac_verify: "{{ dnac_verify }}" + dnac_port: "{{ dnac_port }}" + dnac_version: "{{ dnac_version }}" + dnac_debug: "{{ dnac_debug }}" + state: deleted + config: + - ip_address: 204.1.2.3 + layer2_configuration: + lldp: {} + +- name: Comprehensive cleanup of all Layer 2 configurations + cisco.dnac.wired_campus_automation_workflow_manager: + dnac_host: "{{ dnac_host }}" + dnac_username: "{{ dnac_username }}" + dnac_password: "{{ dnac_password }}" + dnac_verify: "{{ dnac_verify }}" + dnac_port: "{{ dnac_port }}" + dnac_version: "{{ dnac_version }}" + dnac_debug: "{{ dnac_debug }}" + state: deleted + config: + - ip_address: 204.1.2.3 + layer2_configuration: + vlans: + - vlan_id: 10 + - vlan_id: 20 + - vlan_id: 30 + - vlan_id: 40 + - vlan_id: 100 + - vlan_id: 200 + - vlan_id: 300 + cdp: {} + lldp: {} + vtp: {} + dhcp_snooping: {} + authentication: {} + +- name: Configure using device hostname + cisco.dnac.wired_campus_automation_workflow_manager: + dnac_host: "{{ dnac_host }}" + dnac_username: "{{ dnac_username }}" + dnac_password: "{{ dnac_password }}" + dnac_verify: "{{ dnac_verify }}" + dnac_port: "{{ dnac_port }}" + dnac_version: "{{ dnac_version }}" + dnac_debug: "{{ dnac_debug }}" + state: merged + config: + - hostname: switch01.example.com + device_collection_status_check: true + config_verification_wait_time: 15 + layer2_configuration: + vlans: + - vlan_id: 100 + vlan_name: Finance_VLAN + vlan_admin_status: true + cdp: + cdp_admin_status: true + cdp_hold_time: 200 + cdp_timer: 90 +""" + +RETURN = r""" +# Case_1: Success Scenario +response_1: + description: A dictionary with with the response returned by the Cisco Catalyst Center Python SDK + returned: always + type: dict + sample: > + { + "response": + { + "response": String, + "version": String + }, + "msg": String + } +# Case_2: Error Scenario +response_2: + description: A string with the response returned by the Cisco Catalyst Center Python SDK + returned: always + type: list + sample: > + { + "response": [], + "msg": String + } +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.cisco.dnac.plugins.module_utils.dnac import ( + DnacBase, + validate_list_of_dicts, +) +import copy + + +class WiredCampusAutomation(DnacBase): + """ + A class for managing Wired Campus Automation within the Cisco DNA Center. + """ + + def __init__(self, module): + """ + Initialize an instance of the class. + Args: + - module: The module associated with the class instance. + Returns: + The method does not return a value. + """ + self.supported_states = ["merged", "deleted"] + self.is_default_rf_profile_in_config = False + super().__init__(module) + + def validate_input(self): + """ + Validates the input configuration parameters for the playbook. + Returns: + object: An instance of the class with updated attributes: + - self.msg: A message describing the validation result. + - self.status: The status of the validation (either "success" or "failed"). + - self.validated_config: If successful, a validated version of the "config" parameter. 
+ """ + self.log("Starting validation of input configuration parameters.", "DEBUG") + + # Check if configuration is available + if not self.config: + self.msg = "The playbook configuration is empty or missing." + self.set_operation_result("failed", False, self.msg, "ERROR") + return self + + # Expected schema for configuration parameters + self.temp_spec = { + "ip_address": {"type": "str", "required": False}, + "hostname": {"type": "str", "required": False}, + "layer2_configuration": { + "type": "dict", + "required": False, + "vlans": { + "type": "list", + "elements": "dict", + "suboptions": { + "vlan_id": {"type": "int", "required": True}, + "vlan_name": {"type": "str"}, + "vlan_admin_status": {"type": "bool"}, + }, + }, + "cdp": { + "type": "dict", + "cdp_admin_status": {"type": "bool"}, + "cdp_hold_time": {"type": "int"}, + "cdp_timer": {"type": "int"}, + "cdp_advertise_v2": {"type": "bool"}, + "cdp_log_duplex_mismatch": {"type": "bool"}, + }, + "lldp": { + "type": "dict", + "lldp_admin_status": {"type": "bool"}, + "lldp_hold_time": {"type": "int"}, + "lldp_timer": {"type": "int"}, + "lldp_reinitialization_delay": {"type": "int"}, + }, + "stp": { + "type": "dict", + "stp_mode": {"type": "str"}, + "stp_portfast_mode": {"type": "str"}, + "stp_bpdu_guard": {"type": "bool"}, + "stp_bpdu_filter": {"type": "bool"}, + "stp_backbonefast": {"type": "bool"}, + "stp_extended_system_id": {"type": "bool"}, + "stp_logging": {"type": "bool"}, + "stp_loopguard": {"type": "bool"}, + "stp_transmit_hold_count": {"type": "int"}, + "stp_uplinkfast": {"type": "bool"}, + "stp_uplinkfast_max_update_rate": {"type": "int"}, + "stp_etherchannel_guard": {"type": "bool"}, + "stp_instances": { + "type": "list", + "elements": "dict", + "suboptions": { + "stp_instance_vlan_id": {"type": "int", "required": True}, + "stp_instance_priority": {"type": "int"}, + "enable_stp": {"type": "bool"}, + "stp_instance_max_age_timer": {"type": "int"}, + "stp_instance_hello_interval_timer": {"type": "int"}, + "stp_instance_forward_delay_timer": {"type": "int"}, + }, + }, + }, + "vtp": { + "type": "dict", + "vtp_mode": {"type": "str"}, + "vtp_version": {"type": "str"}, + "vtp_domain_name": {"type": "str"}, + "vtp_configuration_file_name": {"type": "str"}, + "vtp_source_interface": {"type": "str"}, + "vtp_pruning": {"type": "bool"}, + }, + "dhcp_snooping": { + "type": "dict", + "dhcp_admin_status": {"type": "bool"}, + "dhcp_snooping_vlans": {"type": "list", "elements": "int"}, + "dhcp_snooping_glean": {"type": "bool"}, + "dhcp_snooping_database_agent_url": {"type": "str"}, + "dhcp_snooping_database_timeout": {"type": "int"}, + "dhcp_snooping_database_write_delay": {"type": "int"}, + "dhcp_snooping_proxy_bridge_vlans": { + "type": "list", + "elements": "int", + }, + }, + "igmp_snooping": { + "type": "dict", + "enable_igmp_snooping": {"type": "bool"}, + "igmp_snooping_querier": {"type": "bool"}, + "igmp_snooping_querier_address": {"type": "str"}, + "igmp_snooping_querier_version": {"type": "str"}, + "igmp_snooping_querier_query_interval": {"type": "int"}, + "igmp_snooping_vlans": { + "type": "list", + "elements": "dict", + "suboptions": { + "igmp_snooping_vlan_id": {"type": "int", "required": True}, + "enable_igmp_snooping": {"type": "bool"}, + "igmp_snooping_querier": {"type": "bool"}, + "igmp_snooping_querier_address": {"type": "str"}, + "igmp_snooping_querier_version": {"type": "str"}, + "igmp_snooping_querier_query_interval": {"type": "int"}, + "igmp_snooping_mrouter_port_list": { + "type": "list", + "elements": "str", + }, + }, + }, 
+ }, + "mld_snooping": { + "type": "dict", + "enable_mld_snooping": {"type": "bool"}, + "mld_snooping_querier": {"type": "bool"}, + "mld_snooping_querier_address": {"type": "str"}, + "mld_snooping_querier_version": {"type": "str"}, + "mld_snooping_querier_query_interval": {"type": "int"}, + "mld_snooping_listener": {"type": "bool"}, + "mld_snooping_vlans": { + "type": "list", + "elements": "dict", + "suboptions": { + "mld_snooping_vlan_id": {"type": "int", "required": True}, + "enable_mld_snooping": {"type": "bool"}, + "mld_snooping_enable_immediate_leave": {"type": "bool"}, + "mld_snooping_querier": {"type": "bool"}, + "mld_snooping_querier_address": {"type": "str"}, + "mld_snooping_querier_version": {"type": "str"}, + "mld_snooping_querier_query_interval": {"type": "int"}, + "mld_snooping_mrouter_port_list": { + "type": "list", + "elements": "str", + }, + }, + }, + }, + "authentication": { + "type": "dict", + "enable_dot1x_authentication": {"type": "bool"}, + "authentication_config_mode": {"type": "str"}, + }, + "logical_ports": { + "type": "dict", + "port_channel_auto": {"type": "bool"}, + "port_channel_lacp_system_priority": {"type": "int"}, + "port_channel_load_balancing_method": {"type": "str"}, + "port_channels": { + "type": "list", + "elements": "dict", + "suboptions": { + "port_channel_protocol": {"type": "str"}, + "port_channel_name": {"type": "str"}, + "port_channel_min_links": {"type": "int"}, + "port_channel_members": { + "type": "list", + "elements": "dict", + "suboptions": { + "port_channel_interface_name": {"type": "str"}, + "port_channel_mode": {"type": "str"}, + "port_channel_port_priority": {"type": "int"}, + "port_channel_rate": {"type": "int"}, + "port_channel_learn_method": {"type": "str"}, + }, + }, + }, + }, + }, + "port_configuration": { + "type": "list", + "elements": "dict", + "suboptions": { + "interface_name": {"type": "str", "required": True}, + "switchport_interface_config": { + "type": "dict", + "switchport_description": {"type": "str"}, + "switchport_mode": {"type": "str"}, + "access_vlan": {"type": "int"}, + "voice_vlan": {"type": "int"}, + "admin_status": {"type": "bool"}, + "allowed_vlans": {"type": "list", "elements": "int"}, + "native_vlan_id": {"type": "int"}, + }, + "vlan_trunking_interface_config": { + "type": "dict", + "enable_dtp_negotiation": {"type": "bool"}, + "protected": {"type": "bool"}, + "pruning_vlan_ids": {"type": "list"}, + }, + "dot1x_interface_config": { + "type": "dict", + "dot1x_interface_authentication_mode": {"type": "str"}, + "dot1x_interface_pae_type": {"type": "str"}, + "dot1x_interface_control_direction": {"type": "str"}, + "dot1x_interface_host_mode": {"type": "str"}, + "dot1x_interface_enable_inactivity_timer_from_server": { + "type": "bool" + }, + "dot1x_interface_inactivity_timer": {"type": "int"}, + "dot1x_interface_authentication_order": { + "type": "list", + "elements": "str", + }, + "dot1x_interface_enable_reauth": {"type": "bool"}, + "dot1x_interface_port_control": {"type": "str"}, + "dot1x_interface_priority": { + "type": "list", + "elements": "str", + }, + "dot1x_interface_max_reauth_requests": {"type": "int"}, + "dot1x_interface_enable_reauth_timer_from_server": { + "type": "bool" + }, + "dot1x_interface_reauth_timer": {"type": "int"}, + "dot1x_interface_tx_period": {"type": "int"}, + }, + "mab_interface_config": { + "type": "dict", + "enable_mab": {"type": "bool"}, + }, + "stp_interface_config": { + "type": "dict", + "stp_interface_portfast_mode": {"type": "str"}, + "stp_interface_bpdu_filter": {"type": "bool"}, + 
"stp_interface_bpdu_guard": {"type": "bool"}, + "stp_interface_cost": {"type": "int"}, + "stp_interface_guard": {"type": "str"}, + "stp_interface_priority": { + "range": (0, 240), + "multiple_of": 16, + "required": False, + }, + "stp_interface_per_vlan_cost": { + "type": "dict", + "priority": {"type": "int"}, + "vlan_ids": {"type": "list", "elements": "int"}, + }, + "stp_interface_per_vlan_priority": { + "type": "dict", + "priority": {"type": "int"}, + "vlan_ids": {"type": "list", "elements": "int"}, + }, + }, + "dhcp_snooping_interface_config": { + "type": "dict", + "dhcp_snooping_interface_rate": {"type": "int"}, + "dhcp_snooping_interface_trust": {"type": "bool"}, + }, + "cdp_interface_config": { + "type": "dict", + "cdp_interface_admin_status": {"type": "bool"}, + "cdp_interface_log_duplex_mismatch": {"type": "bool"}, + }, + "lldp_interface_config": { + "type": "dict", + "lldp_interface_receive_transmit": {"type": "str"}, + }, + "vtp_interface_config": { + "type": "dict", + "vtp_interface_admin_status": {"type": "bool"}, + }, + }, + }, + }, + "device_collection_status_check": { + "type": "bool", + "required": False, + "default": True, + }, + "config_verification_wait_time": { + "type": "int", + "required": False, + "default": 10, + }, + } + + # Validate params against the expected schema + valid_temp, invalid_params = validate_list_of_dicts(self.config, self.temp_spec) + + # Check if any invalid parameters were found + if invalid_params: + self.msg = "Invalid parameters in playbook: {0}".format(invalid_params) + self.set_operation_result("failed", False, self.msg, "ERROR") + return self + + # Set the validated configuration and update the result with success status + self.validated_config = valid_temp + self.msg = "Successfully validated playbook configuration parameters using 'validated_input': {0}".format( + str(valid_temp) + ) + self.set_operation_result("success", False, self.msg, "INFO") + return self + + def get_device_list_params(self, ip_address, hostname): + """ + Generates a dictionary of device list parameters based on the provided IP address or hostname. + Args: + ip_address (str): The management IP address of the device. + hostname (str): The hostname of the device. + Returns: + dict: A dictionary containing the device list parameters with either 'management_ip_address' or 'hostname'. + """ + # Return a dictionary with 'management_ip_address' if ip_address is provided + if ip_address: + self.log( + "Using IP address '{0}' for device list parameters".format(ip_address), + "DEBUG", + ) + return {"management_ip_address": ip_address} + + # Return a dictionary with 'hostname' if hostname is provided + if hostname: + self.log( + "Using hostname '{0}' for device list parameters".format(hostname), + "DEBUG", + ) + return {"hostname": hostname} + + # Return an empty dictionary if neither is provided + self.log( + "No IP address or hostname provided, returning empty parameters", "DEBUG" + ) + return {} + + def get_device_ids_by_params(self, get_device_list_params): + """ + Fetches device IDs from Cisco Catalyst Center based on provided parameters. + Args: + get_device_list_params (dict): Parameters for querying the device list, such as IP address or hostname. + Returns: + dict: A dictionary mapping management IP addresses to device instance IDs. + Description: + This method queries Cisco Catalyst Center using the provided parameters to retrieve device information. + It checks if the device is reachable, managed, and not a Unified AP. 
If valid, it maps the management IP + address to the device instance ID. If any error occurs or no valid device is found, it logs an error message + and sets the validation status to "failed". + """ + # Initialize the dictionary to map management IP to instance ID + mgmt_ip_to_instance_id_map = {} + self.log( + "Parameters for 'get_device_list API call: {0}".format( + get_device_list_params + ), + "DEBUG", + ) + try: + # Query Cisco Catalyst Center for device information using the parameters + response = self.dnac._exec( + family="devices", + function="get_device_list", + op_modifies=False, + params=get_device_list_params, + ) + self.log( + "Response received from 'get_device_list' API call: {0}".format( + str(response) + ), + "DEBUG", + ) + + response = response.get("response") + # Check if a valid response is received + if not response: + self.log( + "No devices were returned for the given parameters: {0}".format( + get_device_list_params + ), + "ERROR", + ) + return mgmt_ip_to_instance_id_map + + # Get the device information from the response + device_info = response[0] + device_ip = device_info.get("managementIpAddress") + + # Check if the device is reachable, not a Unified AP, and in a managed state + if ( + device_info.get("reachabilityStatus") == "Reachable" + and device_info.get("collectionStatus") in ["Managed", "In Progress"] + and device_info.get("family") != "Unified AP" + ): + device_id = device_info["id"] + mgmt_ip_to_instance_id_map[device_ip] = device_id + self.log( + "Device {0} is valid and added to the map.".format(device_ip), + "INFO", + ) + else: + self.log( + "Device {0} is not valid (either unreachable, not managed, or a Unified AP).".format( + device_ip + ), + "ERROR", + ) + + except Exception as e: + # Log an error message if any exception occurs during the process + self.log( + "Error while fetching device ID from Cisco Catalyst Center using API 'get_device_list' for Device: {0}. " + "Error: {1}".format(get_device_list_params, str(e)), + "ERROR", + ) + # Log an error if no valid device is found + if not mgmt_ip_to_instance_id_map: + self.msg = ("Unable to retrieve details for the Device: {0}.").format( + get_device_list_params.get("management_ip_address") + or get_device_list_params.get("hostname") + ) + self.fail_and_exit(self.msg) + + return mgmt_ip_to_instance_id_map + + def get_network_device_id(self, ip_address, hostname): + """ + Retrieves the network device ID for a given IP address or hostname. + Args: + ip_address (str): The IP address of the device to be queried. + hostname (str): The hostname of the device to be queried. + Returns: + dict: A dictionary mapping management IP addresses to device IDs. + Returns an empty dictionary if no devices are found. 
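+         Example (illustrative; the value shown is a placeholder, not a real device instance id):
+             >>> self.get_network_device_id("204.1.2.3", None)
+             {"204.1.2.3": "<device-instance-uuid>"}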
+ """ + # Get Device IP Address and Id (networkDeviceId required) + self.log( + "Starting device ID retrieval for IP: '{0}' or Hostname: '{1}'.".format( + ip_address, hostname + ), + "DEBUG", + ) + get_device_list_params = self.get_device_list_params(ip_address, hostname) + self.log( + "get_device_list_params constructed: {0}".format(get_device_list_params), + "DEBUG", + ) + mgmt_ip_to_instance_id_map = self.get_device_ids_by_params( + get_device_list_params + ) + self.log( + "Collected mgmt_ip_to_instance_id_map: {0}".format( + mgmt_ip_to_instance_id_map + ), + "DEBUG", + ) + + return mgmt_ip_to_instance_id_map + + def validate_device_exists_and_reachable( + self, ip_address, hostname, device_collection_status_check + ): + """ + Validates whether a device is present in the Catalyst Center, is reachable, and has an acceptable collection status. + Args: + ip_address (str): The IP address of the device to be validated. + hostname (str): The hostname of the device to be validated. + device_collection_status_check (bool): If True, skips the check for the device's collection status. + Returns: + bool: True if the device is reachable and has an acceptable collection status (or the check is skipped). + False if the device is unreachable or has an unacceptable collection status. + """ + device_identifier = ip_address or hostname + self.log( + "Initiating validation for device: '{0}'.".format(device_identifier), "INFO" + ) + + if ip_address: + get_device_list_params = {"management_ip_address": ip_address} + elif hostname: + get_device_list_params = {"hostname": hostname} + + self.log( + "Executing 'get_device_list' API call with parameters: {0}".format( + get_device_list_params + ), + "DEBUG", + ) + + response = self.execute_get_request( + "devices", "get_device_list", get_device_list_params + ) + + if not response or not response.get("response"): + self.msg = ( + "Failed to retrieve details for the specified device: {0}. " + "Please verify that the device exists in the Catalyst Center." + ).format(device_identifier) + self.fail_and_exit(self.msg) + + device_info = response["response"][0] + reachability_status = device_info.get("reachabilityStatus") + collection_status = device_info.get("collectionStatus") + + # Device is not reachable + if reachability_status != "Reachable": + self.msg = ( + "Device '{0}' is not reachable. Cannot proceed with port onboarding. " + "reachabilityStatus: '{1}', collectionStatus: '{2}'.".format( + device_identifier, reachability_status, collection_status + ) + ) + return False + + self.log("Device '{0}' is reachable.".format(device_identifier), "INFO") + + # Skip collection status check + if not device_collection_status_check: + self.log( + "Skipping collection status check for device '{0}' as 'device_collection_status_check' is set to False.".format( + device_identifier + ), + "INFO", + ) + return True + + # Check collection status + if collection_status in ["In Progress", "Managed"]: + self.log( + "Device '{0}' has an acceptable collection status: '{1}'.".format( + device_identifier, collection_status + ), + "INFO", + ) + return True + + # Unacceptable collection status + self.msg = ( + "Device '{0}' does not have an acceptable collection status. " + "Current collection status: '{1}'.".format( + device_identifier, collection_status + ) + ) + return False + + def validate_ip_and_hostname( + self, ip_address, hostname, device_collection_status_check + ): + """ + Validates the provided IP address and hostname. + Args: + ip_address (str): The IP address to be validated. 
+ hostname (str): The hostname to be validated. + Returns: + None: This method does not return a value. It updates the instance attributes: + - self.msg: A message describing the validation result. + - self.status: The status of the validation (either "success" or "failed"). + """ + self.log( + "Validating IP address: '{0}' and hostname: '{1}'".format( + ip_address, hostname + ), + "DEBUG", + ) + + # Check if both IP address and hostname are not provided + if not ip_address and not hostname: + self.msg = "Provided IP address: {0}, hostname: {1}. Either an IP address or a hostname is required.".format( + ip_address, hostname + ) + self.fail_and_exit(self.msg) + + # Check if an IP address is provided but it is not valid + if ip_address and not self.is_valid_ipv4(ip_address): + self.msg = "IP address: {0} is not a valid IP Address.".format(ip_address) + self.fail_and_exit(self.msg) + + # Check if device exists and is reachable in Catalyst Center + if not self.validate_device_exists_and_reachable( + ip_address, hostname, device_collection_status_check + ): + self.fail_and_exit(self.msg) + + self.log("Validation successful: Provided IP address or hostname are valid") + + def get_layer2_configuration_validation_rules(self): + """ + Returns the validation rules for Layer 2 configurations. + """ + return { + "vlans": { + "vlan_id": {"type": "int", "range": (2, 4094), "required": True}, + "vlan_name": {"type": "str", "maxLength": 128, "required": False}, + "vlan_admin_status": {"type": "bool", "required": False}, + }, + "cdp": { + "cdp_admin_status": {"type": "bool", "required": False}, + "cdp_hold_time": {"type": "int", "range": (10, 255), "required": False}, + "cdp_timer": { + "type": "int", + "range": (5, 254), + "required": False, + }, # Added type: int + "cdp_advertise_v2": {"type": "bool", "required": False}, + "cdp_log_duplex_mismatch": {"type": "bool", "required": False}, + }, + "lldp": { + "lldp_admin_status": {"type": "bool", "required": False}, + "lldp_hold_time": { + "type": "int", + "range": (0, 32767), + "required": False, + }, # Added type: int + "lldp_timer": { + "type": "int", + "range": (5, 32767), + "required": False, + }, # Added type: int + "lldp_reinitialization_delay": { + "type": "int", + "range": (2, 5), + "required": False, + }, # Added type: int + }, + "stp": { + "stp_mode": { + "type": "str", + "choices": ["PVST", "RSTP", "MST"], + "required": False, + }, + "stp_portfast_mode": { + "type": "str", + "choices": ["ENABLE", "DISABLE", "EDGE", "NETWORK", "TRUNK"], + "required": False, + }, + "stp_bpdu_guard": {"type": "bool", "required": False}, + "stp_bpdu_filter": {"type": "bool", "required": False}, + "stp_backbonefast": {"type": "bool", "required": False}, + "stp_extended_system_id": {"type": "bool", "required": False}, + "stp_logging": {"type": "bool", "required": False}, + "stp_loopguard": {"type": "bool", "required": False}, + "stp_transmit_hold_count": { + "type": "int", + "range": (1, 20), + "required": False, + }, # Added type: int + "stp_uplinkfast": {"type": "bool", "required": False}, + "stp_uplinkfast_max_update_rate": { + "type": "int", + "range": (0, 32000), + "required": False, + }, # Added type: int + "stp_etherchannel_guard": {"type": "bool", "required": False}, + "stp_instances": {"type": "list", "required": False}, + }, + "stp_instance": { + "stp_instance_vlan_id": { + "type": "int", + "range": (1, 4094), + "required": True, + }, # Added type: int + "stp_instance_priority": { + "type": "int", + "range": (0, 61440), + "multiple_of": 4096, + "required": 
False, + }, # Added type: int + "enable_stp": {"type": "bool", "required": False}, + "stp_instance_max_age_timer": { + "type": "int", + "range": (6, 40), + "required": False, + }, # Added type: int + "stp_instance_hello_interval_timer": { + "type": "int", + "range": (1, 10), + "required": False, + }, # Added type: int + "stp_instance_forward_delay_timer": { + "type": "int", + "range": (4, 30), + "required": False, + }, # Added type: int + }, + "vtp": { + "vtp_mode": { + "type": "str", + "choices": ["SERVER", "CLIENT", "TRANSPARENT", "OFF"], + "required": False, + }, + "vtp_version": { + "type": "str", + "choices": ["VERSION_1", "VERSION_2", "VERSION_3"], + "required": False, + }, + "vtp_domain_name": {"type": "str", "maxLength": 32, "required": False}, + "vtp_pruning": {"type": "bool", "required": False}, + "vtp_configuration_file_name": { + "type": "str", + "maxLength": 244, + "required": False, + }, + "vtp_source_interface": {"type": "str", "required": False}, + }, + "dhcp_snooping": { + "dhcp_admin_status": {"type": "bool", "required": False}, + "dhcp_snooping_vlans": { + "type": "list", + "elements": "int", + "vlan_range": (1, 4094), + "required": False, + }, + "dhcp_snooping_glean": {"type": "bool", "required": False}, + "dhcp_snooping_database_agent_url": { + "type": "str", + "minLength": 0, + "maxLength": 227, + "required": False, + }, + "dhcp_snooping_database_timeout": { + "type": "int", + "range": (0, 86400), + "required": False, + }, # Added type: int + "dhcp_snooping_database_write_delay": { + "type": "int", + "range": (15, 86400), + "required": False, + }, # Added type: int + "dhcp_snooping_proxy_bridge_vlans": { + "type": "list", + "elements": "int", + "vlan_range": (1, 4094), + "required": False, + }, + }, + "igmp_snooping": { + "enable_igmp_snooping": {"type": "bool", "required": False}, + "igmp_snooping_querier": {"type": "bool", "required": False}, + "igmp_snooping_querier_address": {"type": "str", "required": False}, + "igmp_snooping_querier_version": { + "type": "str", + "choices": ["VERSION_1", "VERSION_2", "VERSION_3"], + "required": False, + }, + "igmp_snooping_querier_query_interval": { + "type": "int", + "range": (1, 18000), + "required": False, + }, # Added type: int + "igmp_snooping_vlans": {"type": "list", "required": False}, + }, + "igmp_snooping_vlan": { + "igmp_snooping_vlan_id": { + "type": "int", + "range": (1, 4094), + "required": True, + }, # Added type: int + "enable_igmp_snooping": {"type": "bool", "required": False}, + "igmp_snooping_immediate_leave": {"type": "bool", "required": False}, + "igmp_snooping_querier": {"type": "bool", "required": False}, + "igmp_snooping_querier_address": {"type": "str", "required": False}, + "igmp_snooping_querier_version": { + "type": "str", + "choices": ["VERSION_1", "VERSION_2", "VERSION_3"], + "required": False, + }, + "igmp_snooping_querier_query_interval": { + "type": "int", + "range": (1, 18000), + "required": False, + }, # Added type: int + "igmp_snooping_mrouter_port_list": {"type": "list", "required": False}, + }, + "mld_snooping": { + "enable_mld_snooping": {"type": "bool", "required": False}, + "mld_snooping_querier": {"type": "bool", "required": False}, + "mld_snooping_querier_address": {"type": "str", "required": False}, + "mld_snooping_querier_version": { + "type": "str", + "choices": ["VERSION_1", "VERSION_2"], + "required": False, + }, + "mld_snooping_querier_query_interval": { + "type": "int", + "range": (1, 18000), + "required": False, + }, # Added type: int + "mld_snooping_listener": {"type": "bool", 
"required": False}, + "mld_snooping_vlans": {"type": "list", "required": False}, + }, + "mld_snooping_vlan": { + "mld_snooping_vlan_id": { + "type": "int", + "range": (1, 4094), + "required": True, + }, # Added type: int + "enable_mld_snooping": {"type": "bool", "required": False}, + "mld_snooping_enable_immediate_leave": { + "type": "bool", + "required": False, + }, + "mld_snooping_querier": {"type": "bool", "required": False}, + "mld_snooping_querier_address": {"type": "str", "required": False}, + "mld_snooping_querier_version": { + "type": "str", + "choices": ["VERSION_1", "VERSION_2"], + "required": False, + }, + "mld_snooping_querier_query_interval": { + "type": "int", + "range": (1, 18000), + "required": False, + }, # Added type: int + "mld_snooping_mrouter_port_list": {"type": "list", "required": False}, + }, + "authentication": { + "enable_dot1x_authentication": {"type": "bool", "required": False}, + "authentication_config_mode": { + "type": "str", + "choices": ["LEGACY", "NEW_STYLE"], + "required": False, + }, + }, + "logical_ports": { + "port_channel_auto": {"type": "bool", "required": False}, + "port_channel_lacp_system_priority": { + "type": "int", + "range": (0, 65535), + "required": False, + }, # Added type: int + "port_channel_load_balancing_method": { + "type": "str", + "choices": [ + "SRC_MAC", + "DST_MAC", + "SRC_DST_MAC", + "SRC_IP", + "DST_IP", + "SRC_DST_IP", + "SRC_PORT", + "DST_PORT", + "SRC_DST_PORT", + "SRC_DST_MIXED_IP_PORT", + "SRC_MIXED_IP_PORT", + "DST_MIXED_IP_PORT", + "VLAN_SRC_IP", + "VLAN_DST_IP", + "VLAN_SRC_DST_IP", + "VLAN_SRC_MIXED_IP_PORT", + "VLAN_DST_MIXED_IP_PORT", + "VLAN_SRC_DST_MIXED_IP_PORT", + ], + "required": False, + }, + "port_channels": {"type": "list", "required": False}, + }, + "port_channel": { + "port_channel_protocol": { + "type": "str", + "choices": ["LACP", "PAGP", "NONE"], + "required": True, + }, + "port_channel_name": { + "type": "str", + "minLength": 13, + "maxLength": 15, + "required": False, + }, + "port_channel_min_links": { + "type": "int", + "range": (2, 8), + "required": False, + }, # Added type: int + "port_channel_members": {"type": "list", "required": True}, + }, + "port_channel_member_lacp": { + "port_channel_interface_name": {"type": "str", "required": False}, + "port_channel_mode": { + "type": "str", + "choices": ["ACTIVE", "PASSIVE"], + "required": False, + }, + "port_channel_port_priority": { + "type": "int", + "range": (0, 65535), + "required": False, + }, # Added type: int + "port_channel_rate": { + "type": "int", + "range": (1, 30), + "required": False, + }, # Added type: int + }, + "port_channel_member_pagp": { + "port_channel_interface_name": {"type": "str", "required": True}, + "port_channel_mode": { + "type": "str", + "choices": [ + "AUTO", + "AUTO_NON_SILENT", + "DESIRABLE", + "DESIRABLE_NON_SILENT", + ], + "required": False, + }, + "port_channel_port_priority": { + "type": "int", + "range": (0, 255), + "required": False, + }, # Added type: int + "port_channel_learn_method": { + "type": "str", + "choices": ["AGGREGATION_PORT", "PHYSICAL_PORT"], + "required": False, + }, + }, + "port_channel_member_none": { + "port_channel_interface_name": {"type": "str", "required": True}, + "port_channel_mode": { + "type": "str", + "choices": ["ON"], + "required": False, + }, + }, + "switchport_interface_config": { + "switchport_description": { + "type": "str", + "maxLength": 230, + "required": False, + }, + "switchport_mode": { + "type": "str", + "choices": [ + "ACCESS", + "TRUNK", + "DYNAMIC_AUTO", + "DYNAMIC_DESIRABLE", 
+ "DOT1Q_TUNNEL", + ], + "required": False, + }, + "access_vlan": { + "type": "int", + "range": (1, 4094), + "required": False, + }, # Added type: int + "voice_vlan": { + "type": "int", + "range": (1, 4094), + "required": False, + }, # Added type: int + "admin_status": {"type": "bool", "required": False}, + "allowed_vlans": { + "type": "list", + "elements": "int", + "vlan_range": (1, 4094), + "required": False, + }, + "native_vlan_id": { + "type": "int", + "range": (1, 4094), + "required": False, + }, # Added type: int + }, + "vlan_trunking_interface_config": { + "enable_dtp_negotiation": {"type": "bool", "required": False}, + "protected": {"type": "bool", "required": False}, + "pruning_vlan_ids": { + "type": "list", + "elements": "int", + "required": False, + }, + }, + "dot1x_interface_config": { + "dot1x_interface_authentication_mode": { + "type": "str", + "choices": ["OPEN", "CLOSED"], + "required": False, + }, + "dot1x_interface_pae_type": { + "type": "str", + "choices": ["NONE", "AUTHENTICATOR", "SUPPLICANT", "BOTH"], + "required": False, + }, + "dot1x_interface_control_direction": { + "type": "str", + "choices": ["IN", "BOTH"], + "required": False, + }, + "dot1x_interface_host_mode": { + "type": "str", + "choices": [ + "MULTI_AUTHENTICATION", + "MULTI_HOST", + "SINGLE_HOST", + "MULTI_DOMAIN", + ], + "required": False, + }, + "dot1x_interface_inactivity_timer_from_server": { + "type": "bool", + "required": False, + }, + "dot1x_interface_inactivity_timer": { + "type": "int", + "range": (0, 65535), + "required": False, + }, # Added type: int + "dot1x_interface_authentication_order": { + "type": "list", + "elements": "str", + "choices": ["DOT1X", "MAB", "WEBAUTH"], + "max_items": 3, + "required": False, + }, + "dot1x_interface_reauthentication": {"type": "bool", "required": False}, + "dot1x_interface_port_control": { + "type": "str", + "choices": ["AUTO", "FORCE_AUTHORIZED", "FORCE_UNAUTHORIZED"], + "required": False, + }, + "dot1x_interface_priority": { + "type": "list", + "elements": "str", + "choices": ["DOT1X", "MAB", "WEBAUTH"], + "required": False, + }, + "dot1x_interface_max_reauth_requests": { + "type": "int", + "range": (1, 10), + "required": False, + }, # Added type: int + "dot1x_interface_reauth_timer_from_server": { + "type": "bool", + "required": False, + }, + "dot1x_interface_reauth_timer": { + "type": "int", + "range": (1, 1073741823), + "required": False, + }, # Added type: int + "dot1x_interface_tx_period": { + "type": "int", + "range": (1, 65535), + "required": False, + }, # Added type: int + }, + "mab_interface_config": { + "enable_mab": {"type": "bool", "required": False}, + }, + "stp_interface_config": { + "stp_interface_portfast_mode": { + "type": "str", + "choices": [ + "NONE", + "DISABLE", + "EDGE", + "EDGE_TRUNK", + "NETWORK", + "TRUNK", + ], + "required": False, + }, + "stp_interface_bpdu_filter": {"type": "bool", "required": False}, + "stp_interface_bpdu_guard": {"type": "bool", "required": False}, + "stp_interface_cost": { + "type": "int", + "range": (1, 20000000), + "required": False, + }, # Added type: int + "stp_interface_guard": { + "type": "str", + "choices": ["LOOP", "ROOT", "NONE"], + "required": False, + }, + "stp_interface_priority": { + "type": "int", + "range": (0, 240), + "multiple_of": 16, + "required": False, + }, # Added type: int + }, + "dhcp_snooping_interface_config": { + "dhcp_snooping_interface_rate": { + "type": "int", + "range": (1, 2048), + "required": False, + }, # Added type: int + "dhcp_snooping_interface_trust": {"type": "bool", 
"required": False}, + }, + "cdp_interface_config": { + "cdp_interface_admin_status": {"type": "bool", "required": False}, + "cdp_interface_log_duplex_mismatch": { + "type": "bool", + "required": False, + }, + }, + "lldp_interface_config": { + "lldp_interface_receive_transmit": { + "type": "str", + "choices": [ + "TRANSMIT_ONLY", + "RECEIVE_ONLY", + "TRANSMIT_AND_RECEIVE", + "DISABLED", + ], + "required": False, + }, + }, + "vtp_interface_config": { + "vtp_interface_admin_status": {"type": "bool", "required": False}, + }, + "stp_interface_per_vlan_cost": { + "priority": { + "type": "int", + "range": (1, 20000000), + "required": False, + }, # Added type: int + "vlan_ids": { + "type": "list", + "elements": "int", + "vlan_range": (1, 4094), + "required": False, + }, + }, + "stp_interface_per_vlan_priority": { + "priority": { + "type": "int", + "range": (0, 240), + "multiple_of": 16, + "required": False, + }, # Added type: int + "vlan_ids": { + "type": "list", + "elements": "int", + "vlan_range": (1, 4094), + "required": False, + }, + }, + } + + def validate_config_against_rules(self, config_name, config_values, rules): + """ + Validates a specific configuration against the provided validation rules. + Args: + config_name (str): The name of the configuration (Example, "vlan"). + config_values (dict): The configuration values provided by the user. + rules (dict): The validation rules for the configuration. + Raises: + ValueError: If any validation fails. + """ + self.log( + "Starting validation for configuration '{0}'.".format(config_name), "INFO" + ) + self.log("Configuration values: {0}".format(config_values), "DEBUG") + self.log("Validation rules: {0}".format(rules), "DEBUG") + + # First check if config_values is the expected type (dictionary) + if not isinstance(config_values, dict): + self.msg = "Configuration '{0}' must be of type dictionary. Provided value: '{1}' (type: {2}).".format( + config_name, config_values, type(config_values).__name__ + ) + self.fail_and_exit(self.msg) + + for param, rule in rules.items(): + value = config_values.get(param) + self.log( + "Validating parameter '{0}' with value '{1}' against rule '{2}'.".format( + param, value, rule + ), + "DEBUG", + ) + + # Check if the parameter is required but missing + if rule.get("required") and value is None: + self.msg = "Missing required parameter '{0}' for configuration '{1}'. Full configuration: {2}".format( + param, config_name, config_values + ) + self.fail_and_exit(self.msg) + + # Skip further validation if value is None (and not required) + if value is None: + self.log( + "Parameter '{0}' has None value and is not required. Skipping validation.".format( + param + ), + "DEBUG", + ) + continue + + # Validate data type if specified + if "type" in rule and value is not None: + expected_type = rule["type"] + + # Check for boolean first since isinstance(False, int) returns True in Python + if expected_type == "bool" and not isinstance(value, bool): + self.msg = ( + "Parameter '{0}' in configuration '{1}' must be of type boolean. " + "Provided value: '{2}' (type: {3}). Full configuration: {4}" + ).format(param, config_name, value, type(value).__name__, config_values) + self.fail_and_exit(self.msg) + elif expected_type == "str" and not isinstance(value, str): + self.msg = ( + "Parameter '{0}' in configuration '{1}' must be of type string. " + "Provided value: '{2}' (type: {3}). 
Full configuration: {4}" + ).format(param, config_name, value, type(value).__name__, config_values) + self.fail_and_exit(self.msg) + elif expected_type == "int" and ( + isinstance(value, bool) or not isinstance(value, int) + ): + # Explicitly reject boolean values for integer fields + self.msg = ( + "Parameter '{0}' in configuration '{1}' must be of type integer. " + "Provided value: '{2}' (type: {3}). Full configuration: {4}" + ).format(param, config_name, value, type(value).__name__, config_values) + self.fail_and_exit(self.msg) + elif expected_type == "list" and not isinstance(value, list): + self.msg = ( + "Parameter '{0}' in configuration '{1}' must be of type list. " + "Provided value: '{2}' (type: {3}). Full configuration: {4}" + ).format(param, config_name, value, type(value).__name__, config_values) + self.fail_and_exit(self.msg) + elif expected_type == "dict" and not isinstance(value, dict): + self.msg = ( + "Parameter '{0}' in configuration '{1}' must be of type dictionary. " + "Provided value: '{2}' (type: {3}). Full configuration: {4}" + ).format(param, config_name, value, type(value).__name__, config_values) + self.fail_and_exit(self.msg) + + # Validate the range of the parameter + if "range" in rule and value is not None: + min_val, max_val = rule["range"] + if not (min_val <= value <= max_val): + self.msg = "Parameter '{0}' in configuration '{1}' must be within the range {2}. Provided value: {3}. Full configuration: {4}".format( + param, config_name, rule["range"], value, config_values + ) + self.fail_and_exit(self.msg) + + # Validate if value is a multiple of a specific number + if "multiple_of" in rule and value is not None: + multiple = rule["multiple_of"] + if value % multiple != 0: + self.msg = "Parameter '{0}' in configuration '{1}' must be a multiple of {2}. Provided value: '{3}'. Full configuration: {4}".format( + param, config_name, multiple, value, config_values + ) + self.fail_and_exit(self.msg) + + # Validate the minimum length of the parameter + if "minLength" in rule and value is not None and isinstance(value, str): + if len(value) < rule["minLength"]: + self.msg = ( + "Parameter '{0}' in configuration '{1}' must be at least {2} characters long. " + "Provided value length: {3}. Full configuration: {4}" + ).format(param, config_name, rule["minLength"], len(value), config_values) + self.fail_and_exit(self.msg) + + # Validate the maximum length of the parameter + if "maxLength" in rule and value is not None: + if len(value) > rule["maxLength"]: + self.msg = ( + "Parameter '{0}' in configuration '{1}' exceeds maximum length of {2}. " + "Provided value length: {3}. Full configuration: {4}" + ).format(param, config_name, rule["maxLength"], len(value), config_values) + self.fail_and_exit(self.msg) + + # Validate maximum number of items in list + if "max_items" in rule and isinstance(value, list): + max_items = rule["max_items"] + if len(value) > max_items: + self.msg = "Parameter '{0}' in configuration '{1}' exceeds maximum of {2} items. Provided {3} items: {4}. 
Full configuration: {5}".format( + param, config_name, max_items, len(value), value, config_values + ) + self.fail_and_exit(self.msg) + + # Validate the choices for parameters (handle both single values and lists) + if "choices" in rule and value is not None: + # Convert choices to uppercase for case-insensitive comparison + valid_choices = [ + choice.upper() if isinstance(choice, str) else choice + for choice in rule["choices"] + ] + + if isinstance(value, list): + # For list parameters, validate each element + for i, item in enumerate(value): + # Convert item to uppercase for case-insensitive comparison + item_upper = item.upper() if isinstance(item, str) else item + if item_upper not in valid_choices: + self.msg = ( + "Item '{0}' at index {1} in parameter '{2}' of configuration '{3}' must be one of {4}. " + "Provided value: '{5}'. Full configuration: {6}" + ).format( + item, + i, + param, + config_name, + rule["choices"], + value, + config_values, + ) + self.fail_and_exit(self.msg) + else: + # For single values, validate directly + value_upper = value.upper() if isinstance(value, str) else value + if value_upper not in valid_choices: + self.msg = "Parameter '{0}' in configuration '{1}' must be one of {2}. Provided value: '{3}'. Full configuration: {4}".format( + param, config_name, rule["choices"], value, config_values + ) + self.fail_and_exit(self.msg) + + # Validate list elements with VLAN range check + if ( + "elements" in rule + and rule["elements"] == "int" + and "vlan_range" in rule + and isinstance(value, list) + ): + min_vlan, max_vlan = rule["vlan_range"] + for i, item in enumerate(value): + if not isinstance(item, int) or not (min_vlan <= item <= max_vlan): + self.msg = ( + "Item {0} in list parameter '{1}' of configuration '{2}' must be an integer " + "between {3} and {4}. Provided value: '{5}'. Full configuration: {6}" + ).format( + i, + param, + config_name, + min_vlan, + max_vlan, + item, + config_values, + ) + self.fail_and_exit(self.msg) + + self.log("Parameter '{0}' passed validation.".format(param), "DEBUG") + + self.log( + "Validation for configuration '{0}' completed successfully.".format( + config_name + ), + "INFO", + ) + + def _validate_vlans_config(self, vlan_config, rules): + """ + Validates VLAN configuration which is a list of dictionaries. + Args: + vlan_config (list): A list of VLAN configurations. + rules (dict): Validation rules for VLAN parameters. + Raises: + Exception: If the validation fails, an exception is raised with a descriptive message. + """ + self.log( + "Starting validation for VLAN configurations list with {0} items".format( + len(vlan_config) + ), + "INFO", + ) + + # Iterate over each VLAN configuration in the list + for index, vlan in enumerate(vlan_config): + self.log( + "Processing VLAN configuration at index {0}".format(index), "DEBUG" + ) + + # Validate that each VLAN configuration is a dictionary + if not isinstance(vlan, dict): + self.msg = ( + "Each VLAN configuration must be a dictionary. 
Found: {0}".format( + type(vlan).__name__ + ) + ) + self.log( + "VLAN configuration type validation failed at index {0}".format( + index + ), + "ERROR", + ) + self.fail_and_exit(self.msg) + + self.log("Validating VLAN configuration: {0}".format(vlan), "DEBUG") + + # Validate the individual VLAN configuration against the provided rules + self.validate_config_against_rules("vlans", vlan, rules) + + self.log( + "VLAN configuration at index {0} validated successfully".format(index), + "DEBUG", + ) + + self.log("All VLAN configurations validated successfully", "INFO") + + def _validate_cdp_config(self, cdp_config, rules): + """ + Validates CDP global configuration parameters. + Args: + cdp_config (dict): The CDP configuration. + rules (dict): Validation rules for CDP parameters. + Raises: + Exception: If the validation fails, an exception is raised with a descriptive message. + """ + self.log("Starting validation for CDP global configuration", "INFO") + self.log("Validating CDP configuration: {0}".format(cdp_config), "DEBUG") + + # Validate the CDP configuration against the provided validation rules + self.validate_config_against_rules("cdp", cdp_config, rules) + + self.log("CDP configuration validation completed successfully", "INFO") + + def _validate_lldp_config(self, lldp_config, rules): + """ + Validates LLDP global configuration parameters. + Args: + lldp_config (dict): The LLDP configuration. + rules (dict): Validation rules for LLDP parameters. + Raises: + Exception: If the validation fails, an exception is raised with a descriptive message. + """ + self.log("Starting validation for LLDP global configuration", "INFO") + self.log("Validating LLDP configuration: {0}".format(lldp_config), "DEBUG") + + # Validate the LLDP configuration against the provided validation rules + self.validate_config_against_rules("lldp", lldp_config, rules) + + self.log("LLDP configuration validation completed successfully", "INFO") + + def _validate_stp_config(self, stp_config, rules): + """ + Validates STP configuration which includes both global params and instances. + Args: + stp_config (dict): The STP configuration. + rules (dict): Validation rules for STP parameters. + Raises: + Exception: If the validation fails, an exception is raised with a descriptive message. + """ + self.log("Starting validation for STP global configuration", "INFO") + self.log("Validating STP configuration: {0}".format(stp_config), "DEBUG") + + # First check if stp_config is the expected type (dictionary) + if not isinstance(stp_config, dict): + self.msg = "STP configuration must be of type dictionary. 
Provided value: '{0}' (type: {1}).".format( + stp_config, type(stp_config).__name__ + ) + self.log("STP configuration type validation failed", "ERROR") + self.fail_and_exit(self.msg) + + # Create a copy of stp_config without the stp_instances for global validation + stp_global_config = stp_config.copy() + stp_instances = stp_global_config.pop("stp_instances", None) + + self.log( + "Extracted STP instances for separate validation: {0}".format( + bool(stp_instances) + ), + "DEBUG", + ) + + # Validate the STP global configuration against the provided rules + self.validate_config_against_rules("stp", stp_global_config, rules) + + self.log("STP global configuration validation completed successfully", "DEBUG") + + # Validate STP instances if present + if stp_instances: + self.log("Validating STP instances configuration", "DEBUG") + self._validate_stp_instances(stp_instances) + self.log("STP instances validation completed successfully", "DEBUG") + else: + self.log("No STP instances found to validate", "DEBUG") + + self.log("STP configuration validation completed successfully", "INFO") + + def _validate_stp_instances(self, stp_instances): + """ + Validates STP instance configurations. + Args: + stp_instances (list): A list of STP instance configurations. + Raises: + Exception: If the validation fails, an exception is raised with a descriptive message. + """ + self.log("Starting validation for STP instances configuration", "INFO") + + # Ensure stp_instances is a list + if not isinstance(stp_instances, list): + self.msg = "STP instances configuration must be a list of dictionaries. Provided: {0}".format( + type(stp_instances).__name__ + ) + self.log( + "STP instances type validation failed - expected list but got {0}".format( + type(stp_instances).__name__ + ), + "ERROR", + ) + self.fail_and_exit(self.msg) + + self.log( + "STP instances list contains {0} items".format(len(stp_instances)), "DEBUG" + ) + + # Get validation rules for STP instance + rules = self.get_layer2_configuration_validation_rules().get("stp_instance") + self.log( + "Validation rules for STP instance configurations: {0}".format(rules), + "DEBUG", + ) + + # Iterate over each STP instance configuration in the list + for index, instance in enumerate(stp_instances): + self.log("Processing STP instance at index {0}".format(index), "DEBUG") + + # Validate that each instance is a dictionary + if not isinstance(instance, dict): + self.msg = "Each STP instance configuration must be a dictionary. Found: {0}".format( + type(instance).__name__ + ) + self.log( + "STP instance type validation failed at index {0} - expected dict but got {1}".format( + index, type(instance).__name__ + ), + "ERROR", + ) + self.fail_and_exit(self.msg) + + self.log( + "Validating STP instance configuration: {0}".format(instance), "DEBUG" + ) + + # Validate the individual STP instance configuration against the validation rules + self.validate_config_against_rules("stp_instance", instance, rules) + + self.log( + "STP instance at index {0} validated successfully".format(index), + "DEBUG", + ) + + self.log("All STP instance configurations validated successfully", "INFO") + + def _validate_vtp_config(self, vtp_config, rules): + """ + Validates VTP global configuration parameters. + Args: + vtp_config (dict): The VTP configuration. + rules (dict): Validation rules for VTP parameters. + Raises: + Exception: If the validation fails, an exception is raised with a descriptive message. 
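            Example (illustrative sketch only; parameter names mirror the _map_vtp_config
            mapping further below, values are hypothetical, and 'rules' is assumed to come
            from get_layer2_configuration_validation_rules().get("vtp")):
                vtp_config = {
                    "vtp_mode": "TRANSPARENT",
                    "vtp_version": 2,
                    "vtp_domain_name": "campus-lab",
                    "vtp_pruning": False,
                }
                self._validate_vtp_config(vtp_config, rules)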
+ """ + self.log("Starting validation for VTP global configuration", "INFO") + self.log("Validating VTP configuration: {0}".format(vtp_config), "DEBUG") + + # Validate the VTP configuration against the provided validation rules + self.validate_config_against_rules("vtp", vtp_config, rules) + + self.log("VTP configuration validation completed successfully", "INFO") + + def _validate_dhcp_snooping_config(self, dhcp_snooping_config, rules): + """ + Validates DHCP Snooping global configuration parameters. + Args: + dhcp_snooping_config (dict): The DHCP Snooping configuration. + rules (dict): Validation rules for DHCP Snooping parameters. + Raises: + Exception: If the validation fails, an exception is raised with a descriptive message. + """ + self.log("Starting validation for DHCP Snooping global configuration", "INFO") + self.log( + "Validating DHCP Snooping configuration: {0}".format(dhcp_snooping_config), + "DEBUG", + ) + + # Add type check BEFORE trying to use dictionary methods + if not isinstance(dhcp_snooping_config, dict): + self.msg = "DHCP Snooping configuration must be of type dictionary. Provided value: '{0}' (type: {1}).".format( + dhcp_snooping_config, type(dhcp_snooping_config).__name__ + ) + self.log("DHCP Snooping configuration type validation failed", "ERROR") + self.fail_and_exit(self.msg) + + # Validate VLAN lists if provided + vlan_params = ["dhcp_snooping_vlans", "dhcp_snooping_proxy_bridge_vlans"] + for param in vlan_params: + value = dhcp_snooping_config.get(param) + if value: + self.log( + "Validating VLAN parameter '{0}' with value: {1}".format( + param, value + ), + "DEBUG", + ) + + # Check that the value is a list + if not isinstance(value, list): + self.msg = "Parameter '{0}' must be a list. Provided: {1}. Full configuration: {2}".format( + param, type(value).__name__, dhcp_snooping_config + ) + self.log(self.msg, "ERROR") + self.fail_and_exit(self.msg) + + # Check that all elements are integers and within valid VLAN range + for index, vlan_id in enumerate(value): + if not isinstance(vlan_id, int): + self.msg = "All elements in '{0}' must be integers. Found: {1} at index {2}. Full configuration: {3}".format( + param, type(vlan_id).__name__, index, dhcp_snooping_config + ) + self.log(self.msg, "ERROR") + self.fail_and_exit(self.msg) + + if vlan_id < 1 or vlan_id > 4094: + self.msg = "VLAN ID in '{0}' must be between 1 and 4094. Found: {1} at index {2}. Full configuration: {3}".format( + param, vlan_id, index, dhcp_snooping_config + ) + self.log(self.msg, "ERROR") + self.fail_and_exit(self.msg) + + self.log( + "VLAN parameter '{0}' validated successfully with {1} VLAN IDs".format( + param, len(value) + ), + "DEBUG", + ) + + # Validate the DHCP Snooping configuration against rules + self.validate_config_against_rules("dhcp_snooping", dhcp_snooping_config, rules) + + self.log( + "DHCP Snooping configuration validation completed successfully", "INFO" + ) + + def _validate_igmp_snooping_config(self, igmp_snooping_config, rules): + """ + Validates IGMP Snooping configuration which includes both global params and VLAN-specific settings. + Args: + igmp_snooping_config (dict): The IGMP Snooping configuration. + rules (dict): Validation rules for IGMP Snooping parameters. + Raises: + Exception: If the validation fails, an exception is raised with a descriptive message. 
+ """ + self.log("Starting validation for IGMP Snooping global configuration", "INFO") + self.log( + "Validating IGMP Snooping configuration: {0}".format(igmp_snooping_config), + "DEBUG", + ) + + # Add type check BEFORE trying to use dictionary methods + if not isinstance(igmp_snooping_config, dict): + self.msg = "IGMP Snooping configuration must be of type dictionary. Provided value: '{0}' (type: {1}).".format( + igmp_snooping_config, type(igmp_snooping_config).__name__ + ) + self.log("IGMP Snooping configuration type validation failed", "ERROR") + self.fail_and_exit(self.msg) + + # Create a copy of igmp_snooping_config without the igmp_snooping_vlans for global validation + igmp_global_config = igmp_snooping_config.copy() + igmp_snooping_vlans = igmp_global_config.pop("igmp_snooping_vlans", None) + + self.log( + "Extracted IGMP Snooping VLANs for separate validation: {0}".format( + bool(igmp_snooping_vlans) + ), + "DEBUG", + ) + + # Validate the IGMP Snooping global configuration against the provided rules + self.validate_config_against_rules("igmp_snooping", igmp_global_config, rules) + + self.log( + "IGMP Snooping global configuration validation completed successfully", + "DEBUG", + ) + + # Validate IGMP Snooping VLAN settings if present + if igmp_snooping_vlans: + self.log("Validating IGMP Snooping VLAN settings", "DEBUG") + self._validate_igmp_snooping_vlans(igmp_snooping_vlans) + self.log( + "IGMP Snooping VLAN settings validation completed successfully", "DEBUG" + ) + else: + self.log("No IGMP Snooping VLAN settings found to validate", "DEBUG") + + self.log( + "IGMP Snooping configuration validation completed successfully", "INFO" + ) + + def _validate_igmp_snooping_vlans(self, igmp_snooping_vlans): + """ + Validates IGMP Snooping VLAN configurations. + Args: + igmp_snooping_vlans (list): A list of IGMP Snooping VLAN configurations. + Raises: + Exception: If the validation fails, an exception is raised with a descriptive message. + """ + self.log("Starting validation for IGMP Snooping VLAN configurations", "INFO") + + # Ensure igmp_snooping_vlans is a list + if not isinstance(igmp_snooping_vlans, list): + self.msg = "IGMP Snooping VLANs configuration must be a list of dictionaries. Provided: {0}".format( + type(igmp_snooping_vlans).__name__ + ) + self.log( + "IGMP Snooping VLANs type validation failed - expected list but got {0}".format( + type(igmp_snooping_vlans).__name__ + ), + "ERROR", + ) + self.fail_and_exit(self.msg) + + self.log( + "IGMP Snooping VLANs list contains {0} items".format( + len(igmp_snooping_vlans) + ), + "DEBUG", + ) + + # Get validation rules for IGMP Snooping VLAN + rules = self.get_layer2_configuration_validation_rules().get( + "igmp_snooping_vlan" + ) + self.log( + "Validation rules for IGMP Snooping VLAN configurations: {0}".format(rules), + "DEBUG", + ) + + # Iterate over each IGMP Snooping VLAN configuration in the list + for index, vlan_config in enumerate(igmp_snooping_vlans): + self.log( + "Processing IGMP Snooping VLAN configuration at index {0}".format( + index + ), + "DEBUG", + ) + + # Validate that each VLAN configuration is a dictionary + if not isinstance(vlan_config, dict): + self.msg = "Each IGMP Snooping VLAN configuration must be a dictionary. 
Found: {0}".format( + type(vlan_config).__name__ + ) + self.log( + "IGMP Snooping VLAN configuration type validation failed at index {0} - expected dict but got {1}".format( + index, type(vlan_config).__name__ + ), + "ERROR", + ) + self.fail_and_exit(self.msg) + + self.log( + "Validating IGMP Snooping VLAN configuration: {0}".format(vlan_config), + "DEBUG", + ) + + # Validate the individual IGMP Snooping VLAN configuration against the validation rules + self.validate_config_against_rules("igmp_snooping_vlan", vlan_config, rules) + + self.log( + "IGMP Snooping VLAN configuration at index {0} validated successfully".format( + index + ), + "DEBUG", + ) + + self.log("All IGMP Snooping VLAN configurations validated successfully", "INFO") + + def _validate_mld_snooping_config(self, mld_snooping_config, rules): + """ + Validates MLD Snooping configuration which includes both global params and VLAN-specific settings. + Args: + mld_snooping_config (dict): The MLD Snooping configuration. + rules (dict): Validation rules for MLD Snooping parameters. + Raises: + Exception: If the validation fails, an exception is raised with a descriptive message. + """ + self.log("Starting validation for MLD Snooping global configuration", "INFO") + self.log( + "Validating MLD Snooping configuration: {0}".format(mld_snooping_config), + "DEBUG", + ) + + # Add type check BEFORE trying to use dictionary methods + if not isinstance(mld_snooping_config, dict): + self.msg = "MLD Snooping configuration must be of type dictionary. Provided value: '{0}' (type: {1}).".format( + mld_snooping_config, type(mld_snooping_config).__name__ + ) + self.log("MLD Snooping configuration type validation failed", "ERROR") + self.fail_and_exit(self.msg) + + # Create a copy of mld_snooping_config without the mld_snooping_vlans for global validation + mld_global_config = mld_snooping_config.copy() + mld_snooping_vlans = mld_global_config.pop("mld_snooping_vlans", None) + + self.log( + "Extracted MLD Snooping VLANs for separate validation: {0}".format( + bool(mld_snooping_vlans) + ), + "DEBUG", + ) + + # Validate the MLD Snooping global configuration against the provided rules + self.validate_config_against_rules("mld_snooping", mld_global_config, rules) + + self.log( + "MLD Snooping global configuration validation completed successfully", + "DEBUG", + ) + + # Validate MLD Snooping VLAN settings if present + if mld_snooping_vlans: + self.log("Validating MLD Snooping VLAN settings", "DEBUG") + self._validate_mld_snooping_vlans(mld_snooping_vlans) + self.log( + "MLD Snooping VLAN settings validation completed successfully", "DEBUG" + ) + else: + self.log("No MLD Snooping VLAN settings found to validate", "DEBUG") + + self.log("MLD Snooping configuration validation completed successfully", "INFO") + + def _validate_mld_snooping_vlans(self, mld_snooping_vlans): + """ + Validates MLD Snooping VLAN configurations. + Args: + mld_snooping_vlans (list): A list of MLD Snooping VLAN configurations. + Raises: + Exception: If the validation fails, an exception is raised with a descriptive message. + """ + self.log("Starting validation for MLD Snooping VLAN configurations", "INFO") + + # Ensure mld_snooping_vlans is a list + if not isinstance(mld_snooping_vlans, list): + self.msg = "MLD Snooping VLANs configuration must be a list of dictionaries. 
Provided: {0}".format( + type(mld_snooping_vlans).__name__ + ) + self.log( + "MLD Snooping VLANs type validation failed - expected list but got {0}".format( + type(mld_snooping_vlans).__name__ + ), + "ERROR", + ) + self.fail_and_exit(self.msg) + + self.log( + "MLD Snooping VLANs list contains {0} items".format( + len(mld_snooping_vlans) + ), + "DEBUG", + ) + + # Get validation rules for MLD Snooping VLAN + rules = self.get_layer2_configuration_validation_rules().get( + "mld_snooping_vlan" + ) + self.log( + "Validation rules for MLD Snooping VLAN configurations: {0}".format(rules), + "DEBUG", + ) + + # Iterate over each MLD Snooping VLAN configuration in the list + for index, vlan_config in enumerate(mld_snooping_vlans): + self.log( + "Processing MLD Snooping VLAN configuration at index {0}".format(index), + "DEBUG", + ) + + # Validate that each VLAN configuration is a dictionary + if not isinstance(vlan_config, dict): + self.msg = "Each MLD Snooping VLAN configuration must be a dictionary. Found: {0}".format( + type(vlan_config).__name__ + ) + self.log( + "MLD Snooping VLAN configuration type validation failed at index {0} - expected dict but got {1}".format( + index, type(vlan_config).__name__ + ), + "ERROR", + ) + self.fail_and_exit(self.msg) + + self.log( + "Validating MLD Snooping VLAN configuration: {0}".format(vlan_config), + "DEBUG", + ) + + # Validate the individual MLD Snooping VLAN configuration against the validation rules + self.validate_config_against_rules("mld_snooping_vlan", vlan_config, rules) + + self.log( + "MLD Snooping VLAN configuration at index {0} validated successfully".format( + index + ), + "DEBUG", + ) + + self.log("All MLD Snooping VLAN configurations validated successfully", "INFO") + + def _validate_authentication_config(self, authentication_config, rules): + """ + Validates authentication configuration parameters. + Args: + authentication_config (dict): The authentication configuration. + rules (dict): Validation rules for authentication parameters. + Raises: + Exception: If the validation fails, an exception is raised with a descriptive message. + """ + self.log("Starting validation for authentication configuration", "INFO") + self.log( + "Validating authentication configuration: {0}".format( + authentication_config + ), + "DEBUG", + ) + + # Validate the authentication configuration against the provided validation rules + self.validate_config_against_rules( + "authentication", authentication_config, rules + ) + + self.log( + "Authentication configuration validation completed successfully", "INFO" + ) + + def _validate_logical_ports_config(self, logical_ports_config, rules): + """ + Validates logical ports configuration which includes both global params and port channels. + Args: + logical_ports_config (dict): The logical ports configuration. + rules (dict): Validation rules for logical ports parameters. + Raises: + Exception: If the validation fails, an exception is raised with a descriptive message. 
+ """ + self.log("Starting validation for logical ports configuration", "INFO") + self.log( + "Validating logical ports configuration: {0}".format(logical_ports_config), + "DEBUG", + ) + + # Check if logical_ports_config is a dictionary + if not isinstance(logical_ports_config, dict): + msg = "logical_ports configuration must be of type dict, got {0}".format( + type(logical_ports_config).__name__ + ) + self.log(msg, "ERROR") + self.module.fail_json(msg=msg) + + # Create a copy of logical_ports_config without the port_channels for global validation + logical_ports_global_config = logical_ports_config.copy() + port_channels = logical_ports_global_config.pop("port_channels", None) + + self.log( + "Extracted port channels for separate validation: {0}".format( + bool(port_channels) + ), + "DEBUG", + ) + + # Validate the logical ports global configuration against the provided rules + self.validate_config_against_rules( + "logical_ports", logical_ports_global_config, rules + ) + + self.log( + "Logical ports global configuration validation completed successfully", + "DEBUG", + ) + + # Validate port channels if present + if port_channels: + self.log("Validating port channels configuration", "DEBUG") + self._validate_port_channels(port_channels) + self.log("Port channels validation completed successfully", "DEBUG") + else: + self.log("No port channels found to validate", "DEBUG") + + self.log( + "Logical ports configuration validation completed successfully", "INFO" + ) + + def _validate_port_channels(self, port_channels): + """ + Validates port channel configurations. + Args: + port_channels (list): A list of port channel configurations. + Raises: + Exception: If the validation fails, an exception is raised with a descriptive message. + """ + self.log("Starting validation for port channel configurations", "INFO") + + # Ensure port_channels is a list + if not isinstance(port_channels, list): + self.msg = "Port channels configuration must be a list of dictionaries. Provided: {0}".format( + type(port_channels).__name__ + ) + self.log( + "Port channels type validation failed - expected list but got {0}".format( + type(port_channels).__name__ + ), + "ERROR", + ) + self.fail_and_exit(self.msg) + + self.log( + "Port channels list contains {0} items".format(len(port_channels)), "DEBUG" + ) + + # Get validation rules for port channel + port_channel_rules = self.get_layer2_configuration_validation_rules().get( + "port_channel" + ) + self.log( + "Validation rules for port channel configurations: {0}".format( + port_channel_rules + ), + "DEBUG", + ) + + # Iterate over each port channel configuration in the list + for index, channel in enumerate(port_channels): + self.log( + "Processing port channel configuration at index {0}".format(index), + "DEBUG", + ) + + # Validate that each port channel configuration is a dictionary + if not isinstance(channel, dict): + self.msg = "Each port channel configuration must be a dictionary. 
Found: {0}".format( + type(channel).__name__ + ) + self.log( + "Port channel type validation failed at index {0} - expected dict but got {1}".format( + index, type(channel).__name__ + ), + "ERROR", + ) + self.fail_and_exit(self.msg) + + self.log( + "Validating port channel configuration: {0}".format(channel), "DEBUG" + ) + + # Validate the port channel configuration against rules + self.validate_config_against_rules( + "port_channel", channel, port_channel_rules + ) + + # Get the protocol of this port channel + protocol = channel.get("port_channel_protocol") + self.log("Port channel protocol identified: {0}".format(protocol), "DEBUG") + + # Validate port channel members based on the protocol + port_channel_members = channel.get("port_channel_members") + if port_channel_members: + self.log( + "Validating port channel members for protocol {0}".format(protocol), + "DEBUG", + ) + self._validate_port_channel_members(port_channel_members, protocol) + self.log( + "Port channel members validation completed for index {0}".format( + index + ), + "DEBUG", + ) + else: + self.log( + "No port channel members found at index {0}".format(index), "DEBUG" + ) + + self.log( + "Port channel configuration at index {0} validated successfully".format( + index + ), + "DEBUG", + ) + + self.log("All port channel configurations validated successfully", "INFO") + + def _validate_port_channel_members(self, port_channel_members, protocol): + """ + Validates port channel member configurations based on the protocol. + Args: + port_channel_members (list): A list of port channel member configurations. + protocol (str): The protocol of the port channel (LACP, PAGP, or NONE). + Raises: + Exception: If the validation fails, an exception is raised with a descriptive message. + """ + self.log( + "Starting validation for port channel members with protocol {0}".format( + protocol + ), + "INFO", + ) + self.log( + "Port channel members to validate: {0}".format(port_channel_members), + "DEBUG", + ) + + # Ensure port_channel_members is a list + if not isinstance(port_channel_members, list): + self.msg = "Port channel members configuration must be a list of dictionaries. Provided: {0}".format( + type(port_channel_members).__name__ + ) + self.log( + "Port channel members type validation failed - expected list but got {0}".format( + type(port_channel_members).__name__ + ), + "ERROR", + ) + self.fail_and_exit(self.msg) + + self.log( + "Port channel members list contains {0} items".format( + len(port_channel_members) + ), + "DEBUG", + ) + + # Map protocol to the appropriate validation rule set + protocol_rule_map = { + "LACP": "port_channel_member_lacp", + "PAGP": "port_channel_member_pagp", + "NONE": "port_channel_member_none", + } + + rule_set_name = protocol_rule_map.get(protocol) + if not rule_set_name: + self.msg = ( + "Invalid port channel protocol: '{0}'. 
Must be one of {1}.".format( + protocol, list(protocol_rule_map.keys()) + ) + ) + self.log("Invalid protocol specified: {0}".format(protocol), "ERROR") + self.fail_and_exit(self.msg) + + self.log( + "Using validation rule set: {0} for protocol {1}".format( + rule_set_name, protocol + ), + "DEBUG", + ) + + # Get validation rules for the specific protocol's member ports + member_rules = self.get_layer2_configuration_validation_rules().get( + rule_set_name + ) + self.log( + "Validation rules for {0} port channel member configurations: {1}".format( + protocol, member_rules + ), + "DEBUG", + ) + + # Enforce member limit for LACP (max 16 members) + if protocol == "LACP" and len(port_channel_members) > 16: + self.msg = "LACP port channel can have a maximum of 16 member ports. Found: {0}".format( + len(port_channel_members) + ) + self.log( + "LACP member limit exceeded: {0} members found".format( + len(port_channel_members) + ), + "ERROR", + ) + self.fail_and_exit(self.msg) + + # Iterate over each port channel member in the list + for index, member in enumerate(port_channel_members): + self.log( + "Processing port channel member at index {0}".format(index), "DEBUG" + ) + + # Validate that each member configuration is a dictionary + if not isinstance(member, dict): + self.msg = "Each port channel member configuration must be a dictionary. Found: {0}".format( + type(member).__name__ + ) + self.log( + "Port channel member type validation failed at index {0} - expected dict but got {1}".format( + index, type(member).__name__ + ), + "ERROR", + ) + self.fail_and_exit(self.msg) + + self.log( + "Validating {0} port channel member configuration: {1}".format( + protocol, member + ), + "DEBUG", + ) + + # Special validation for port_channel_rate in LACP + if protocol == "LACP" and "port_channel_rate" in member: + rate_value = member["port_channel_rate"] + self.log( + "Validating LACP port_channel_rate parameter: {0}".format( + rate_value + ), + "DEBUG", + ) + + if isinstance(rate_value, int): + # Check if rate value is valid for LACP (1 or 30) + if rate_value != 1 and rate_value != 30: + self.msg = "Invalid port_channel_rate for LACP: {0}. Must be 1 (FAST) or 30 (NORMAL). Full member configuration: {1}".format( + rate_value, member + ) + self.log( + "Invalid LACP rate value: {0}".format(rate_value), "ERROR" + ) + self.fail_and_exit(self.msg) + # We keep the value as-is (1 or 30) + self.log("LACP rate value {0} is valid".format(rate_value), "DEBUG") + else: + # If it's not an integer, fail with a detailed error message + self.msg = "port_channel_rate for LACP must be an integer (1 or 30). Received: {0} of type {1}. Full member configuration: {2}".format( + rate_value, type(rate_value).__name__, member + ) + self.log( + "LACP rate value type validation failed: expected int but got {0}".format( + type(rate_value).__name__ + ), + "ERROR", + ) + self.fail_and_exit(self.msg) + + # Validate the member configuration against appropriate rules for the protocol + self.validate_config_against_rules(rule_set_name, member, member_rules) + + self.log( + "Port channel member at index {0} validated successfully".format(index), + "DEBUG", + ) + + self.log( + "All port channel members validated successfully for protocol {0}".format( + protocol + ), + "INFO", + ) + + def _validate_port_configurations(self, port_configurations, rules=None): + """ + Validates port configurations which is a list of interface configurations. + Args: + port_configurations (list): A list of port configurations for various interfaces. 
+ rules (dict): Not used for this validation but included for consistency with other validators. + Raises: + Exception: If the validation fails, an exception is raised with a descriptive message. + """ + self.log("Starting validation for port configurations.", "INFO") + + # Ensure port_configurations is a list + if not isinstance(port_configurations, list): + self.msg = "Port configurations must be a list of dictionaries. Provided: {0}".format( + type(port_configurations).__name__ + ) + self.fail_and_exit(self.msg) + + # Get all the validation rules + validation_rules = self.get_layer2_configuration_validation_rules() + + # Iterate over each port configuration in the list + for port_config in port_configurations: + if not isinstance(port_config, dict): + self.msg = ( + "Each port configuration must be a dictionary. Found: {0}".format( + type(port_config).__name__ + ) + ) + self.fail_and_exit(self.msg) + + # Validate interface_name is present + if "interface_name" not in port_config: + self.msg = "Each port configuration must have an 'interface_name'. Missing in: {0}".format( + port_config + ) + self.fail_and_exit(self.msg) + + interface_name = port_config["interface_name"] + + # Validate interface_name is a string + if not isinstance(interface_name, str): + self.msg = "Parameter 'interface_name' must be of type string. Provided value: '{0}' (type: {1}). Full configuration: {2}".format( + interface_name, type(interface_name).__name__, port_config + ) + self.fail_and_exit(self.msg) + + # Check that interface_name is not an empty string + if not interface_name.strip(): + self.msg = "Parameter 'interface_name' must not be empty. Provided value: '{0}'. Full configuration: {1}".format( + interface_name, port_config + ) + self.fail_and_exit(self.msg) + + self.log( + "Validating port configuration for interface: {0}".format( + interface_name + ), + "DEBUG", + ) + + # Check each interface configuration type and validate it + config_types = [ + "switchport_interface_config", + "vlan_trunking_interface_config", + "dot1x_interface_config", + "mab_interface_config", + "stp_interface_config", + "dhcp_snooping_interface_config", + "cdp_interface_config", + "lldp_interface_config", + "vtp_interface_config", + ] + + for config_type in config_types: + if config_type in port_config: + config_value = port_config[config_type] + config_rules = validation_rules.get(config_type) + + if not config_rules: + self.msg = "No validation rules found for {0}. Available rules: {1}".format( + config_type, list(validation_rules.keys()) + ) + self.fail_and_exit(self.msg) + + self.log( + "Validating {0} for interface {1}".format( + config_type, interface_name + ), + "DEBUG", + ) + + # Special handling for certain config types + if config_type == "stp_interface_config": + self._validate_stp_interface_config( + interface_name, config_value, config_rules + ) + else: + # Standard validation for most config types + self.validate_config_against_rules( + config_type, config_value, config_rules + ) + + self.log("Completed validation for all port configurations.", "INFO") + + def _validate_stp_interface_config(self, interface_name, config, rules): + """ + Validates STP interface configuration, including special handling for per-VLAN settings. + Args: + interface_name (str): The name of the interface. + config (dict): The STP interface configuration. + rules (dict): Validation rules for STP interface parameters. + Raises: + Exception: If the validation fails, an exception is raised with a descriptive message. 
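            Example (illustrative sketch only; values are hypothetical):
                config = {
                    "stp_interface_bpdu_guard": True,
                    "stp_interface_bpdu_filter": False,
                }
            Any 'stp_interface_per_vlan_cost' or 'stp_interface_per_vlan_priority' entries
            are popped off first and validated against their own rule sets.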
+ """ + self.log( + "Validating STP interface configuration for {0}: {1}".format( + interface_name, config + ), + "DEBUG", + ) + + # First check if config is the expected type (dictionary) + if not isinstance(config, dict): + self.msg = "stp_interface_config must be a dictionary. Provided value: '{0}' (type: {1}). Interface: {2}".format( + config, type(config).__name__, interface_name + ) + self.fail_and_exit(self.msg) + + # Create a copy without the per-VLAN settings for standard validation + stp_interface_config = config.copy() + per_vlan_cost = stp_interface_config.pop("stp_interface_per_vlan_cost", None) + per_vlan_priority = stp_interface_config.pop( + "stp_interface_per_vlan_priority", None + ) + + self.log( + "Extracted per-VLAN cost settings: {0}".format(bool(per_vlan_cost)), "DEBUG" + ) + self.log( + "Extracted per-VLAN priority settings: {0}".format(bool(per_vlan_priority)), + "DEBUG", + ) + + # Validate the standard STP interface configuration + self.validate_config_against_rules( + "stp_interface_config", stp_interface_config, rules + ) + + self.log( + "Standard STP interface configuration validation completed successfully", + "DEBUG", + ) + + # Get validation rules for per-VLAN settings + validation_rules = self.get_layer2_configuration_validation_rules() + + # Validate per-VLAN cost settings if present + if per_vlan_cost: + self.log( + "Validating per-VLAN cost settings for interface {0}".format( + interface_name + ), + "DEBUG", + ) + self.validate_config_against_rules( + "stp_interface_per_vlan_cost", + per_vlan_cost, + validation_rules.get("stp_interface_per_vlan_cost"), + ) + self.log( + "Per-VLAN cost settings validation completed successfully", "DEBUG" + ) + else: + self.log("No per-VLAN cost settings found to validate", "DEBUG") + + # Validate per-VLAN priority settings if present + if per_vlan_priority: + self.log( + "Validating per-VLAN priority settings for interface {0}".format( + interface_name + ), + "DEBUG", + ) + self.validate_config_against_rules( + "stp_interface_per_vlan_priority", + per_vlan_priority, + validation_rules.get("stp_interface_per_vlan_priority"), + ) + self.log( + "Per-VLAN priority settings validation completed successfully", "DEBUG" + ) + else: + self.log("No per-VLAN priority settings found to validate", "DEBUG") + + self.log( + "STP interface configuration validation completed successfully for interface {0}".format( + interface_name + ), + "INFO", + ) + + def validate_layer2_config_params(self, layer2_configuration): + """ + Validates all Layer 2 configurations. + Args: + layer2_configuration (dict): The Layer 2 configuration provided by the user. + Raises: + Exception: If the validation fails, an exception is raised with a descriptive message. + """ + self.log("Starting validation for Layer 2 configurations.", "INFO") + + # Check if layer2_configuration is present, not None, and not an empty dictionary + if not layer2_configuration or not isinstance(layer2_configuration, dict): + self.log( + "Layer 2 configuration is either missing, None, or an empty dictionary. 
Skipping Layer 2 validation.", + "INFO", + ) + return + + # Map of feature names to their validation functions + feature_validators = { + "vlans": self._validate_vlans_config, + "stp": self._validate_stp_config, + "cdp": self._validate_cdp_config, + "lldp": self._validate_lldp_config, + "vtp": self._validate_vtp_config, + "dhcp_snooping": self._validate_dhcp_snooping_config, + "igmp_snooping": self._validate_igmp_snooping_config, + "mld_snooping": self._validate_mld_snooping_config, + "authentication": self._validate_authentication_config, + "logical_ports": self._validate_logical_ports_config, + "port_configuration": self._validate_port_configurations, + } + + # Validate each configuration + for config_name, config_values in layer2_configuration.items(): + self.log( + "Validating Layer 2 configuration: {0}".format(config_name), "DEBUG" + ) + + # Handle None values (when user specifies "cdp:" without a value) + if config_values is None: + self.log( + "Configuration '{0}' has None value. Treating as empty configuration and skipping validation.".format( + config_name + ), + "INFO", + ) + continue + + # Handle empty dictionaries or lists + if not config_values: + self.log( + "Configuration '{0}' is empty. Skipping validation.".format( + config_name + ), + "INFO", + ) + continue + + # Get validation rules for this feature + validation_rules = self.get_layer2_configuration_validation_rules().get( + config_name + ) + + self.log( + "Validation rules for {0} configuration: {1}".format( + config_name, validation_rules + ), + "DEBUG", + ) + + # Get the appropriate validator for this feature + validator = feature_validators.get(config_name) + + if validator: + # Call the specific validator with the config data and rules + validator(config_values, validation_rules) + else: + # This should never happen if our feature_validators dictionary is complete + self.msg = "No validator available for feature '{0}'.".format( + config_name + ) + self.fail_and_exit(self.msg) + + self.log( + "{0} configuration validated successfully.".format(config_name), + "INFO", + ) + + self.log("Completed validation for all Layer 2 configurations.", "INFO") + + def validate_params(self, config, state): + """ + Validates the input parameters for the playbook configuration. + Args: + config (dict): The configuration details from the playbook. + state (str): The desired state of the configuration. + Raises: + Exception: If the validation fails, an exception is raised with a descriptive message. + """ + # Validate the ip/hostname + ip_address = config.get("ip_address") + hostname = config.get("hostname") + device_collection_status_check = config.get("device_collection_status_check") + self.validate_ip_and_hostname( + ip_address, hostname, device_collection_status_check + ) + + self.log("Starting validation of the input parameters.", "INFO") + self.log("State: {0}".format(state), "DEBUG") + + self.log("Configuration to validate: {0}".format(config), "DEBUG") + + # Validate the Layer 2 configurations + layer2_configuration = config.get("layer2_configuration", {}) + if layer2_configuration: + # Validate Layer 2 configuration parameters + self.validate_layer2_config_params(layer2_configuration) + else: + self.log( + "No Layer 2 configurations provided. Skipping Layer 2 validation.", + "INFO", + ) + + self.log("Completed validation of all input parameters.", "INFO") + + def _map_parameters(self, user_config, mapping_schema): + """ + Maps user configuration parameters to API parameters based on a mapping schema. 
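            A mapping schema is a plain dictionary; an illustrative sketch (fields borrowed
            loosely from the CDP schema defined further below):
                mapping_schema = {
                    "output_structure": {},
                    "item_config_type": "CDP_GLOBAL",
                    "param_path": ["cdpGlobalConfig", "items"],
                    "param_mapping": {"cdp_timer": "timer"},
                }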
+ Args: + user_config (dict/list): The user configuration to map. + mapping_schema (dict): Schema defining how to map user parameters to API parameters. + Returns: + dict: The mapped configuration in API-compatible format. + Raises: + ValueError: If an invalid processing strategy is determined. + """ + self.log("Starting parameter mapping operation", "INFO") + self.log("Input user configuration: {0}".format(user_config), "DEBUG") + self.log("Mapping schema provided: {0}".format(mapping_schema), "DEBUG") + + # Initialize the base structure + mapped_config = self._initialize_mapped_config(mapping_schema) + + # Determine processing strategy based on input type and schema + processing_strategy = self._determine_processing_strategy( + user_config, mapping_schema + ) + self.log("Using processing strategy: {0}".format(processing_strategy), "DEBUG") + + # Apply the appropriate mapping strategy + if processing_strategy == "list_input": + self._process_list_input(user_config, mapping_schema, mapped_config) + elif processing_strategy == "dict_with_path": + self._process_dict_with_path(user_config, mapping_schema, mapped_config) + elif processing_strategy == "flat_dict": + self._process_flat_dict(user_config, mapping_schema, mapped_config) + else: + self.log( + "Unknown processing strategy: {0}".format(processing_strategy), "ERROR" + ) + raise ValueError("Invalid processing strategy determined") + + self.log("Parameter mapping operation completed successfully", "INFO") + self.log("Final mapped configuration: {0}".format(mapped_config), "DEBUG") + + return mapped_config + + def _initialize_mapped_config(self, mapping_schema): + """ + Initialize the base mapped configuration structure. + Args: + mapping_schema (dict): The mapping schema containing output_structure. + Returns: + dict: Initialized mapped configuration. + """ + output_structure = mapping_schema.get("output_structure", {}) + mapped_config = output_structure.copy() if output_structure else {} + self.log( + "Initialized base mapped configuration: {0}".format(mapped_config), "DEBUG" + ) + return mapped_config + + def _determine_processing_strategy(self, user_config, mapping_schema): + """ + Determine the appropriate processing strategy based on input type and schema. + Args: + user_config (dict/list): The user configuration + mapping_schema (dict): The mapping schema + Returns: + str: The processing strategy to use + """ + is_list_input = mapping_schema.get("is_list_input", False) + input_is_list = isinstance(user_config, list) + has_param_path = "param_path" in mapping_schema + + self.log( + "Input analysis - is_list_input: {0}, input_is_list: {1}, has_param_path: {2}".format( + is_list_input, input_is_list, has_param_path + ), + "DEBUG", + ) + + # Determine strategy based on schema configuration and input type + if is_list_input and input_is_list: + return "list_input" + elif has_param_path: + return "dict_with_path" + else: + return "flat_dict" + + def _process_list_input(self, user_config, mapping_schema, mapped_config): + """ + Process list input configuration (like VLANs). 
+ Args: + user_config (list): List of configuration items + mapping_schema (dict): Mapping schema + mapped_config (dict): The configuration being built + """ + self.log( + "Processing list input configuration with {0} items".format( + len(user_config) + ), + "INFO", + ) + + # Extract schema components + list_path = mapping_schema.get("list_path", []) + item_config_type = mapping_schema.get("item_config_type", "") + param_mapping = mapping_schema.get("param_mapping", {}) + + self.log( + "List processing parameters - path: {0}, config_type: {1}".format( + list_path, item_config_type + ), + "DEBUG", + ) + + # Process each item in the list + items_list = [] + for index, item in enumerate(user_config): + mapped_item = self._map_single_item( + item, param_mapping, item_config_type, index + ) + items_list.append(mapped_item) + + self.log("Created {0} mapped items".format(len(items_list)), "INFO") + + # Place the items list in the correct location + if list_path: + self._place_items_at_path(mapped_config, list_path, items_list) + else: + self.log("No list path specified, items not placed in structure", "WARNING") + + def _process_dict_with_path(self, user_config, mapping_schema, mapped_config): + """ + Process dictionary input with a specific parameter path (like CDP). + Args: + user_config (dict): Dictionary configuration + mapping_schema (dict): Mapping schema + mapped_config (dict): The configuration being built + """ + self.log("Processing dictionary input with parameter path", "INFO") + + param_path = mapping_schema.get("param_path", []) + item_config_type = mapping_schema.get("item_config_type", "") + param_mapping = mapping_schema.get("param_mapping", {}) + + self.log( + "Path processing parameters - path: {0}, config_type: {1}".format( + param_path, item_config_type + ), + "DEBUG", + ) + + # Navigate to the target location + target_container = self._navigate_and_create_path( + mapped_config, param_path, item_config_type + ) + + # Map parameters to the target container + mapped_count = self._apply_parameter_mapping( + user_config, param_mapping, target_container + ) + + self.log( + "Mapped {0} parameters using parameter path".format(mapped_count), "INFO" + ) + + def _process_flat_dict(self, user_config, mapping_schema, mapped_config): + """ + Process dictionary input with flat mapping (no nested paths). + Args: + user_config (dict): Dictionary configuration + mapping_schema (dict): Mapping schema + mapped_config (dict): The configuration being built + """ + self.log("Processing dictionary input with flat mapping", "INFO") + + param_mapping = mapping_schema.get("param_mapping", {}) + + # Apply flat mapping directly to mapped_config + mapped_count = self._apply_parameter_mapping( + user_config, param_mapping, mapped_config + ) + + self.log( + "Mapped {0} parameters using flat mapping".format(mapped_count), "INFO" + ) + + def _map_single_item(self, item, param_mapping, config_type, item_index=None): + """ + Map a single configuration item. 
+ Args: + item (dict): Single configuration item + param_mapping (dict): Parameter mapping rules + config_type (str): ConfigType for this item + item_index (int, optional): Index for logging purposes + Returns: + dict: Mapped item + """ + index_str = " {0}".format(item_index) if item_index is not None else "" + self.log("Mapping single item{0}: {1}".format(index_str, item), "DEBUG") + + # Create base item with configType + mapped_item = {"configType": config_type} if config_type else {} + + # Apply parameter mapping + mapped_count = self._apply_parameter_mapping(item, param_mapping, mapped_item) + + self.log( + "Mapped {0} parameters for item{1}".format(mapped_count, index_str), "DEBUG" + ) + self.log("Final mapped item{0}: {1}".format(index_str, mapped_item), "DEBUG") + + return mapped_item + + def _apply_parameter_mapping(self, source_config, param_mapping, target_container): + """ + Apply parameter mapping from source to target container. + Args: + source_config (dict): Source configuration containing user parameters + param_mapping (dict): Mapping from user params to API params + target_container (dict): Target container to receive mapped parameters + Returns: + int: Number of parameters successfully mapped + """ + mapped_count = 0 + + for user_param, api_param in param_mapping.items(): + if user_param in source_config: + original_value = source_config.get(user_param) + + # Handle special value transformations + transformed_value = self._transform_parameter_value( + user_param, original_value, api_param + ) + + target_container[api_param] = transformed_value + mapped_count += 1 + + self.log( + "Mapped parameter '{0}' -> '{1}' with value: {2}".format( + user_param, api_param, transformed_value + ), + "DEBUG", + ) + else: + self.log( + "Parameter '{0}' not found in source config, skipping".format( + user_param + ), + "DEBUG", + ) + + return mapped_count + + def _transform_parameter_value(self, user_param, original_value, api_param): + """ + Transform parameter values based on specific rules. 
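            Illustrative behaviour (the 'adminStatus' api_param name in the first call is
            hypothetical; the target name does not affect the transformation):
                _transform_parameter_value("admin_status", True, "adminStatus")  -> "UP"
                _transform_parameter_value("dhcp_snooping_vlans", [10, 20],
                                           "dhcpSnoopingVlans")                  -> "10,20"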
+ Args: + user_param (str): Original user parameter name + original_value: Original parameter value + api_param (str): Target API parameter name + Returns: + Transformed value for the API + """ + self.log( + "Starting parameter value transformation for parameter: {0}".format( + user_param + ), + "DEBUG", + ) + self.log( + "Original value: {0} (type: {1})".format( + original_value, type(original_value).__name__ + ), + "DEBUG", + ) + self.log("Target API parameter: {0}".format(api_param), "DEBUG") + + # Handle boolean to string transformations for specific parameters + boolean_to_string_mappings = { + "admin_status": lambda x: "UP" if x else "DOWN", + "stp_interface_bpdu_filter": lambda x: "ENABLE" if x else "DISABLE", + "stp_interface_bpdu_guard": lambda x: "ENABLE" if x else "DISABLE", + } + + # Handle list to string transformations + if isinstance(original_value, list) and "vlan" in user_param.lower(): + self.log("Applying VLAN list to string transformation", "DEBUG") + transformed_value = self._convert_vlan_list_to_string(original_value) + self.log("VLAN list transformed to: {0}".format(transformed_value), "DEBUG") + return transformed_value + + # Apply boolean transformations if applicable + if user_param in boolean_to_string_mappings: + self.log( + "Applying boolean to string transformation for parameter: {0}".format( + user_param + ), + "DEBUG", + ) + transformed_value = boolean_to_string_mappings[user_param](original_value) + self.log( + "Boolean value {0} transformed to: {1}".format( + original_value, transformed_value + ), + "DEBUG", + ) + return transformed_value + + # Return original value if no transformation needed + self.log( + "No transformation required for parameter: {0}".format(user_param), "DEBUG" + ) + self.log("Returning original value: {0}".format(original_value), "DEBUG") + return original_value + + def _navigate_and_create_path(self, base_config, path_list, final_config_type=""): + """ + Navigate through a nested path, creating missing sections as needed. + Args: + base_config (dict): Base configuration to navigate + path_list (list): List of path components to navigate + final_config_type (str): ConfigType for final container if creating new + Returns: + dict: Target container at the end of the path + """ + if not path_list: + return base_config + + self.log("Navigating path: {0}".format(path_list), "DEBUG") + + current = base_config + + # Navigate to all but the last path component + for path_index, path_part in enumerate(path_list[:-1]): + current = self._navigate_to_path_component(current, path_part, path_index) + + # Handle the final path component specially + final_key = path_list[-1] + return self._get_or_create_final_container( + current, final_key, final_config_type + ) + + def _navigate_to_path_component( + self, current_container, path_component, path_index + ): + """ + Navigate to a single path component, creating it if necessary. 
+ Args: + current_container (dict): Current container + path_component (str): Path component to navigate to + path_index (int): Index for logging + Returns: + dict: Container at the path component + """ + self.log( + "Navigating to path component {0}: '{1}'".format( + path_index, path_component + ), + "DEBUG", + ) + + if path_component not in current_container: + current_container[path_component] = {} + self.log("Created new path section '{0}'".format(path_component), "DEBUG") + else: + self.log( + "Path section '{0}' already exists".format(path_component), "DEBUG" + ) + + return current_container[path_component] + + def _get_or_create_final_container(self, parent_container, final_key, config_type): + """ + Get or create the final target container for parameter mapping. + Args: + parent_container (dict): Parent container + final_key (str): Final key in the path + config_type (str): ConfigType for new containers + Returns: + dict: Target container for parameter mapping + """ + if final_key not in parent_container: + # Create new container with configType in a list structure + parent_container[final_key] = ( + [{"configType": config_type}] if config_type else [{}] + ) + self.log( + "Created final container '{0}' with configType '{1}'".format( + final_key, config_type + ), + "DEBUG", + ) + else: + self.log("Final container '{0}' already exists".format(final_key), "DEBUG") + + # Return the first item in the list for parameter mapping + target = parent_container[final_key][0] + self.log("Target container for parameter mapping: {0}".format(target), "DEBUG") + + return target + + def _place_items_at_path(self, base_config, path_list, items_list): + """ + Place a list of items at the specified path in the configuration. + Args: + base_config (dict): Base configuration + path_list (list): Path where items should be placed + items_list (list): List of items to place + """ + if not path_list: + self.log("Empty path provided for item placement", "WARNING") + return + + self.log( + "Placing {0} items at path: {1}".format(len(items_list), path_list), "DEBUG" + ) + + # Navigate to the parent of the final location + current = base_config + for path_part in path_list[:-1]: + if path_part not in current: + current[path_part] = {} + current = current[path_part] + + # Set the items at the final location + final_key = path_list[-1] + current[final_key] = items_list + + self.log( + "Successfully placed items at path key '{0}'".format(final_key), "INFO" + ) + + def _convert_vlan_list_to_string(self, vlan_list): + """ + Converts a list of VLAN IDs to a comma-separated string. + Args: + vlan_list (list): List of VLAN IDs + Returns: + str: Comma-separated string of VLAN IDs + """ + self.log("Starting VLAN list to string conversion", "DEBUG") + self.log("Input VLAN list: {0}".format(vlan_list), "DEBUG") + + # Check if the VLAN list is empty or None + if not vlan_list: + self.log( + "Empty or None VLAN list provided, returning empty string", "DEBUG" + ) + return "" + + # Convert each VLAN ID to string and join with commas + result = ",".join(map(str, vlan_list)) + + self.log("VLAN list conversion completed successfully", "DEBUG") + self.log("Converted VLAN list to string: {0}".format(result), "DEBUG") + + return result + + def _map_vlans_config(self, vlan_config): + """ + Maps VLAN configuration parameters from user format to API format. + Args: + vlan_config (list): A list of VLAN configurations provided by the user. + Returns: + dict: Mapped VLAN configuration in API-compatible format with 'vlanConfig' as the key. 
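            Example (illustrative; VLAN values are hypothetical):
                input:  [{"vlan_id": 10, "vlan_name": "DATA", "vlan_admin_status": True}]
                output: {"vlanConfig": {"items": [
                            {"configType": "VLAN", "vlanId": 10,
                             "name": "DATA", "isVlanEnabled": True}]}}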
+ """ + self.log("Mapping VLAN configuration: {0}".format(vlan_config), "DEBUG") + + # Define the mapping schema for VLANs - corrected to match API format + vlan_mapping_schema = { + "output_structure": {"vlanConfig": {"items": []}}, + "item_config_type": "VLAN", + "is_list_input": True, + "list_path": ["vlanConfig", "items"], + "param_mapping": { + "vlan_id": "vlanId", + "vlan_name": "name", + "vlan_admin_status": "isVlanEnabled", + }, + } + + # Use the generic mapping function + return self._map_parameters(vlan_config, vlan_mapping_schema) + + def _map_cdp_config(self, cdp_config): + """ + Maps CDP configuration parameters from user format to API format. + Args: + cdp_config (dict): The CDP configuration provided by the user. + Returns: + dict: Mapped CDP configuration in API-compatible format. + """ + self.log("Mapping CDP configuration: {0}".format(cdp_config), "DEBUG") + + # Define the mapping schema for CDP + cdp_mapping_schema = { + "output_structure": {}, + "item_config_type": "CDP_GLOBAL", + "param_path": ["cdpGlobalConfig", "items"], + "param_mapping": { + "cdp_admin_status": "isCdpEnabled", + "cdp_hold_time": "holdTime", + "cdp_timer": "timer", + "cdp_advertise_v2": "isAdvertiseV2Enabled", + "cdp_log_duplex_mismatch": "isLogDuplexMismatchEnabled", + }, + } + + # Use the generic mapping function + return self._map_parameters(cdp_config, cdp_mapping_schema) + + def _map_lldp_config(self, lldp_config): + """ + Maps LLDP configuration parameters from user format to API format. + Args: + lldp_config (dict): The LLDP configuration provided by the user. + Returns: + dict: Mapped LLDP configuration in API-compatible format. + """ + self.log("Mapping LLDP configuration: {0}".format(lldp_config), "DEBUG") + + # Define the mapping schema for LLDP + lldp_mapping_schema = { + "output_structure": {}, + "item_config_type": "LLDP_GLOBAL", + "param_path": ["lldpGlobalConfig", "items"], + "param_mapping": { + "lldp_admin_status": "isLldpEnabled", + "lldp_hold_time": "holdTime", + "lldp_timer": "timer", + "lldp_reinitialization_delay": "reinitializationDelay", + }, + } + + # Use the generic mapping function + return self._map_parameters(lldp_config, lldp_mapping_schema) + + def _map_stp_config(self, stp_config): + """ + Maps STP configuration parameters from user format to API format. + Args: + stp_config (dict): The STP configuration provided by the user. + Returns: + dict: Mapped STP configuration in API-compatible format. 
+ """ + self.log("Mapping STP configuration: {0}".format(stp_config), "DEBUG") + + # Extract stp_instances from the config for separate handling + stp_instances = stp_config.get("stp_instances", []) + main_stp_config = {k: v for k, v in stp_config.items() if k != "stp_instances"} + + self.log( + "Extracted {0} STP instances for separate processing".format( + len(stp_instances) + ), + "DEBUG", + ) + self.log( + "Main STP global config parameters: {0}".format(main_stp_config), "DEBUG" + ) + + # Define the mapping schema for the main STP global config + stp_mapping_schema = { + "output_structure": {}, + "item_config_type": "STP_GLOBAL", + "param_path": ["stpGlobalConfig", "items"], + "param_mapping": { + "stp_mode": "stpMode", + "stp_backbonefast": "isBackboneFastEnabled", + "stp_etherchannel_guard": "isEtherChannelGuardEnabled", + "stp_extended_system_id": "isExtendedSystemIdEnabled", + "stp_logging": "isLoggingEnabled", + "stp_loopguard": "isLoopGuardEnabled", + "stp_portfast_mode": "portFastMode", + "stp_bpdu_filter": "isBpduFilterEnabled", + "stp_bpdu_guard": "isBpduGuardEnabled", + "stp_uplinkfast": "isUplinkFastEnabled", + "stp_transmit_hold_count": "transmitHoldCount", + "stp_uplinkfast_max_update_rate": "uplinkFastMaxUpdateRate", + }, + } + + # Use the generic mapping function for the main STP config + mapped_config = self._map_parameters(main_stp_config, stp_mapping_schema) + + self.log("Main STP global configuration mapped successfully", "DEBUG") + + # Handle STP instances separately if present + if stp_instances: + self.log( + "Processing {0} STP instances for integration".format( + len(stp_instances) + ), + "DEBUG", + ) + + # Get the stpGlobalConfig.items[0] reference to add instances to + stp_item = mapped_config["stpGlobalConfig"]["items"][0] + + # Create the instances container + stp_item["stpInstances"] = {"configType": "LIST", "items": []} + + self.log("Created STP instances container structure", "DEBUG") + + # Process each STP instance + for instance in stp_instances: + self.log( + "Processing STP instance for VLAN {0}".format( + instance.get("stp_instance_vlan_id") + ), + "DEBUG", + ) + + # Create a new instance with the required configType + instance_item = { + "configType": "STP_VLAN", + "vlanId": instance.get("stp_instance_vlan_id"), + } + + # Add priority if present + if "stp_instance_priority" in instance: + instance_item["priority"] = instance.get("stp_instance_priority") + self.log( + "Added priority {0} for VLAN {1}".format( + instance.get("stp_instance_priority"), + instance.get("stp_instance_vlan_id"), + ), + "DEBUG", + ) + + # Add enable_stp at the instance level (not in timers) + if "enable_stp" in instance: + instance_item["isStpEnabled"] = instance.get("enable_stp") + self.log( + "Set STP enabled status to {0} for VLAN {1}".format( + instance.get("enable_stp"), + instance.get("stp_instance_vlan_id"), + ), + "DEBUG", + ) + + # Create timers configuration if any timer parameters are present + timer_params = [ + "stp_instance_forward_delay_timer", + "stp_instance_hello_interval_timer", + "stp_instance_max_age_timer", + ] + + # Check if any timer parameters are provided + has_timer_params = any(param in instance for param in timer_params) + + if has_timer_params: + self.log( + "Timer parameters found for VLAN {0}, creating timers configuration".format( + instance.get("stp_instance_vlan_id") + ), + "DEBUG", + ) + + # Create the timers configuration (without isStpEnabled) + timers = {"configType": "STP_TIMERS"} + + # Map timer parameters (excluding enable_stp) + 
timer_mapping = { + "stp_instance_forward_delay_timer": "forwardDelay", + "stp_instance_hello_interval_timer": "helloInterval", + "stp_instance_max_age_timer": "maxAge", + } + + # Add timer parameters that are provided + for user_param, api_param in timer_mapping.items(): + if user_param in instance: + timers[api_param] = instance.get(user_param) + self.log( + "Mapped timer parameter {0} to {1} with value {2}".format( + user_param, api_param, instance.get(user_param) + ), + "DEBUG", + ) + + # Add timers to the instance + instance_item["timers"] = timers + self.log( + "Added timers configuration to VLAN {0} instance".format( + instance.get("stp_instance_vlan_id") + ), + "DEBUG", + ) + else: + self.log( + "No timer parameters found for VLAN {0}, skipping timers configuration".format( + instance.get("stp_instance_vlan_id") + ), + "DEBUG", + ) + + # Add the instance to the instances list + stp_item["stpInstances"]["items"].append(instance_item) + self.log( + "Successfully added STP instance for VLAN {0} to instances list".format( + instance.get("stp_instance_vlan_id") + ), + "DEBUG", + ) + + self.log( + "Completed processing all {0} STP instances".format(len(stp_instances)), + "DEBUG", + ) + else: + self.log("No STP instances found to process", "DEBUG") + + self.log("STP configuration mapping completed successfully", "INFO") + self.log("Final mapped STP configuration: {0}".format(mapped_config), "DEBUG") + + return mapped_config + + def _map_vtp_config(self, vtp_config): + """ + Maps VTP configuration parameters from user format to API format. + Args: + vtp_config (dict): The VTP configuration provided by the user. + Returns: + dict: Mapped VTP configuration in API-compatible format. + """ + self.log("Mapping VTP configuration: {0}".format(vtp_config), "DEBUG") + + # Define the mapping schema for VTP + vtp_mapping_schema = { + "output_structure": {}, + "item_config_type": "VTP_GLOBAL", + "param_path": ["vtpGlobalConfig", "items"], + "param_mapping": { + "vtp_mode": "mode", + "vtp_version": "version", + "vtp_domain_name": "domainName", + "vtp_pruning": "isPruningEnabled", + "vtp_configuration_file_name": "configurationFileName", + "vtp_source_interface": "sourceInterface", + }, + } + + # Use the generic mapping function + return self._map_parameters(vtp_config, vtp_mapping_schema) + + def _map_dhcp_snooping_config(self, dhcp_snooping_config): + """ + Maps DHCP Snooping configuration parameters from user format to API format. + Args: + dhcp_snooping_config (dict): The DHCP Snooping configuration provided by the user. + Returns: + dict: Mapped DHCP Snooping configuration in API-compatible format. 
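            Example (illustrative; the VLAN IDs and database URL are hypothetical):
                input:  {"dhcp_admin_status": True,
                         "dhcp_snooping_vlans": [10, 20],
                         "dhcp_snooping_database_agent_url": "tftp://10.0.0.5/snooping.db"}
                output: {"dhcpSnoopingGlobalConfig": {"items": [
                            {"configType": "DHCP_SNOOPING_GLOBAL",
                             "isDhcpSnoopingEnabled": True,
                             "dhcpSnoopingVlans": "10,20",
                             "databaseAgent": {
                                 "configType": "DHCP_SNOOPING_DATABASE_AGENT",
                                 "agentUrl": "tftp://10.0.0.5/snooping.db"}}]}}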
+ """ + self.log( + "Mapping DHCP Snooping configuration: {0}".format(dhcp_snooping_config), + "DEBUG", + ) + + # Create a copy of the configuration to avoid modifying the original + dhcp_config = dhcp_snooping_config.copy() + + # Extract database-related parameters for separate handling + database_params = {} + db_params_keys = [ + "dhcp_snooping_database_agent_url", + "dhcp_snooping_database_timeout", + "dhcp_snooping_database_write_delay", + ] + + for key in db_params_keys: + if key in dhcp_config: + # Remove from main config and add to database params + database_params[key] = dhcp_config.pop(key) + + self.log( + "Extracted {0} database parameters for separate processing".format( + len(database_params) + ), + "DEBUG", + ) + + # Handle lists for VLANs - convert to comma-separated strings + vlan_params = ["dhcp_snooping_vlans", "dhcp_snooping_proxy_bridge_vlans"] + for param in vlan_params: + if param in dhcp_config and isinstance(dhcp_config[param], list): + # Convert list of integers to simple comma-separated string + dhcp_config[param] = ",".join(map(str, dhcp_config[param])) + self.log( + "Converted VLAN parameter '{0}' from list to comma-separated string".format( + param + ), + "DEBUG", + ) + + # Define the mapping schema for the main DHCP Snooping config + dhcp_mapping_schema = { + "output_structure": {}, + "item_config_type": "DHCP_SNOOPING_GLOBAL", + "param_path": ["dhcpSnoopingGlobalConfig", "items"], + "param_mapping": { + "dhcp_admin_status": "isDhcpSnoopingEnabled", + "dhcp_snooping_glean": "isGleaningEnabled", + "dhcp_snooping_proxy_bridge_vlans": "proxyBridgeVlans", + "dhcp_snooping_vlans": "dhcpSnoopingVlans", + }, + } + + # Use the generic mapping function for the main DHCP Snooping config + mapped_config = self._map_parameters(dhcp_config, dhcp_mapping_schema) + + self.log("Main DHCP Snooping configuration mapped successfully", "DEBUG") + + # Add the database agent configuration if any database parameters were provided + if database_params: + self.log( + "Processing database agent configuration with {0} parameters".format( + len(database_params) + ), + "DEBUG", + ) + + # Create the database agent mapping schema + db_mapping_schema = { + "output_structure": {"configType": "DHCP_SNOOPING_DATABASE_AGENT"}, + "param_mapping": { + "dhcp_snooping_database_agent_url": "agentUrl", + "dhcp_snooping_database_timeout": "timeout", + "dhcp_snooping_database_write_delay": "writeDelay", + }, + } + + # Map the database parameters + mapped_db_config = self._map_parameters(database_params, db_mapping_schema) + + # Add the mapped database config to the main config + if ( + mapped_config + and "dhcpSnoopingGlobalConfig" in mapped_config + and "items" in mapped_config["dhcpSnoopingGlobalConfig"] + ): + mapped_config["dhcpSnoopingGlobalConfig"]["items"][0][ + "databaseAgent" + ] = mapped_db_config + self.log( + "Database agent configuration added to main DHCP Snooping config", + "DEBUG", + ) + else: + self.log( + "No database parameters found, skipping database agent configuration", + "DEBUG", + ) + + self.log("DHCP Snooping configuration mapping completed successfully", "INFO") + self.log( + "Final mapped DHCP Snooping configuration: {0}".format(mapped_config), + "DEBUG", + ) + + return mapped_config + + def _map_igmp_snooping_config(self, igmp_snooping_config): + """ + Maps IGMP Snooping configuration parameters from user format to API format. + Args: + igmp_snooping_config (dict): The IGMP Snooping configuration provided by the user. 
+ Returns: + dict: Mapped IGMP Snooping configuration in API-compatible format. + """ + self.log( + "Mapping IGMP Snooping configuration: {0}".format(igmp_snooping_config), + "DEBUG", + ) + + # Create a copy of the configuration to avoid modifying the original + igmp_config = igmp_snooping_config.copy() + + # Extract VLAN-specific settings for separate handling if present + # Using get() with empty list as default to handle the case when key doesn't exist + igmp_snooping_vlans = igmp_config.pop("igmp_snooping_vlans", []) + + self.log( + "Extracted {0} IGMP Snooping VLANs for separate processing".format( + len(igmp_snooping_vlans) + ), + "DEBUG", + ) + + # Define the mapping schema for the main IGMP Snooping global config + igmp_mapping_schema = { + "output_structure": {}, + "item_config_type": "IGMP_SNOOPING_GLOBAL", + "param_path": ["igmpSnoopingGlobalConfig", "items"], + "param_mapping": { + "enable_igmp_snooping": "isIgmpSnoopingEnabled", + "igmp_snooping_querier": "isQuerierEnabled", + "igmp_snooping_querier_address": "querierAddress", + "igmp_snooping_querier_query_interval": "querierQueryInterval", + "igmp_snooping_querier_version": "querierVersion", + }, + } + + # Use the generic mapping function for the main IGMP Snooping global config + mapped_config = self._map_parameters(igmp_config, igmp_mapping_schema) + + self.log("Main IGMP Snooping global configuration mapped successfully", "DEBUG") + + # Handle VLAN-specific IGMP Snooping settings if present + if igmp_snooping_vlans: + self.log( + "Processing {0} IGMP Snooping VLANs for integration".format( + len(igmp_snooping_vlans) + ), + "DEBUG", + ) + + # Get the reference to the global config item to add VLAN settings + if ( + mapped_config + and "igmpSnoopingGlobalConfig" in mapped_config + and "items" in mapped_config["igmpSnoopingGlobalConfig"] + ): + global_config_item = mapped_config["igmpSnoopingGlobalConfig"]["items"][ + 0 + ] + + # Create the VLAN settings container + global_config_item["igmpSnoopingVlanSettings"] = { + "configType": "SET", + "items": [], + } + + self.log( + "Created IGMP Snooping VLAN settings container structure", "DEBUG" + ) + + # Process each VLAN configuration + for vlan_index, vlan_config in enumerate(igmp_snooping_vlans): + self.log( + "Processing IGMP Snooping VLAN configuration at index {0}".format( + vlan_index + ), + "DEBUG", + ) + + # Create a new VLAN config item + vlan_item = { + "configType": "IGMP_SNOOPING_VLAN", + "vlanId": vlan_config.get("igmp_snooping_vlan_id"), + } + + # Map the standard VLAN parameters + vlan_param_mapping = { + "enable_igmp_snooping": "isIgmpSnoopingEnabled", + "igmp_snooping_immediate_leave": "isImmediateLeaveEnabled", + "igmp_snooping_querier": "isQuerierEnabled", + "igmp_snooping_querier_address": "querierAddress", + "igmp_snooping_querier_query_interval": "querierQueryInterval", + "igmp_snooping_querier_version": "querierVersion", + } + + # Add each parameter that exists in the VLAN config + for user_param, api_param in vlan_param_mapping.items(): + if user_param in vlan_config: + vlan_item[api_param] = vlan_config.get(user_param) + self.log( + "Mapped VLAN parameter '{0}' to '{1}' with value: {2}".format( + user_param, api_param, vlan_config.get(user_param) + ), + "DEBUG", + ) + + # Handle mrouter port list if present + mrouter_ports = vlan_config.get( + "igmp_snooping_mrouter_port_list", [] + ) + if mrouter_ports: + self.log( + "Processing {0} mrouter ports for VLAN {1}".format( + len(mrouter_ports), + vlan_config.get("igmp_snooping_vlan_id"), + ), + "DEBUG", + ) + 
+ # Create the mrouters container + vlan_item["igmpSnoopingVlanMrouters"] = { + "configType": "SET", + "items": [], + } + + # Add each mrouter port to the list + for port_index, port in enumerate(mrouter_ports): + mrouter_item = { + "configType": "IGMP_SNOOPING_VLAN_MROUTER", + "interfaceName": port, + } + vlan_item["igmpSnoopingVlanMrouters"]["items"].append( + mrouter_item + ) + self.log( + "Added mrouter port '{0}' at index {1} for VLAN {2}".format( + port, + port_index, + vlan_config.get("igmp_snooping_vlan_id"), + ), + "DEBUG", + ) + else: + self.log( + "No mrouter ports found for VLAN {0}".format( + vlan_config.get("igmp_snooping_vlan_id") + ), + "DEBUG", + ) + + # Add the VLAN config to the VLAN settings items list + global_config_item["igmpSnoopingVlanSettings"]["items"].append( + vlan_item + ) + self.log( + "Successfully added IGMP Snooping VLAN {0} configuration to settings list".format( + vlan_config.get("igmp_snooping_vlan_id") + ), + "DEBUG", + ) + + self.log( + "Completed processing all {0} IGMP Snooping VLANs".format( + len(igmp_snooping_vlans) + ), + "DEBUG", + ) + else: + self.log( + "Unable to add VLAN settings - global config structure not found", + "WARNING", + ) + else: + self.log("No IGMP Snooping VLANs found to process", "DEBUG") + + self.log("IGMP Snooping configuration mapping completed successfully", "INFO") + self.log( + "Final mapped IGMP Snooping configuration: {0}".format(mapped_config), + "DEBUG", + ) + + return mapped_config + + def _map_mld_snooping_config(self, mld_snooping_config): + """ + Maps MLD Snooping configuration parameters from user format to API format. + Args: + mld_snooping_config (dict): The MLD Snooping configuration provided by the user. + Returns: + dict: Mapped MLD Snooping configuration in API-compatible format. 
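+        Example (illustrative sketch; key names follow the mapping schema defined
+        below, values are hypothetical):
+            user input:
+                {"enable_mld_snooping": True,
+                 "mld_snooping_vlans": [{"mld_snooping_vlan_id": 30,
+                                         "mld_snooping_querier": True}]}
+            mapped output (abridged):
+                {"mldSnoopingGlobalConfig": {"items": [{
+                    "configType": "MLD_SNOOPING_GLOBAL",
+                    "isMldSnoopingEnabled": True,
+                    "mldSnoopingVlanSettings": {"configType": "SET", "items": [{
+                        "configType": "MLD_SNOOPING_VLAN",
+                        "vlanId": 30,
+                        "isQuerierEnabled": True,
+                        "mldSnoopingVlanMrouters": {"configType": "SET", "items": []}}]}}]}}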
+ """ + self.log( + "Mapping MLD Snooping configuration: {0}".format(mld_snooping_config), + "DEBUG", + ) + + # Create a copy of the configuration to avoid modifying the original + mld_config = mld_snooping_config.copy() + + # Extract VLAN-specific settings for separate handling if present + # Using get() with empty list as default to handle the case when key doesn't exist + mld_snooping_vlans = mld_config.pop("mld_snooping_vlans", []) + + self.log( + "Extracted {0} MLD Snooping VLANs for separate processing".format( + len(mld_snooping_vlans) + ), + "DEBUG", + ) + + # Define the mapping schema for the main MLD Snooping global config + mld_mapping_schema = { + "output_structure": {}, + "item_config_type": "MLD_SNOOPING_GLOBAL", + "param_path": ["mldSnoopingGlobalConfig", "items"], + "param_mapping": { + "enable_mld_snooping": "isMldSnoopingEnabled", + "mld_snooping_listener": "isSuppressListenerMessagesEnabled", + "mld_snooping_querier": "isQuerierEnabled", + "mld_snooping_querier_address": "querierAddress", + "mld_snooping_querier_query_interval": "querierQueryInterval", + "mld_snooping_querier_version": "querierVersion", + }, + } + + # Use the generic mapping function for the main MLD Snooping global config + mapped_config = self._map_parameters(mld_config, mld_mapping_schema) + + self.log("Main MLD Snooping global configuration mapped successfully", "DEBUG") + + # Handle VLAN-specific MLD Snooping settings if present + if mld_snooping_vlans: + self.log( + "Processing {0} MLD Snooping VLANs for integration".format( + len(mld_snooping_vlans) + ), + "DEBUG", + ) + + # Get the reference to the global config item to add VLAN settings + if ( + mapped_config + and "mldSnoopingGlobalConfig" in mapped_config + and "items" in mapped_config["mldSnoopingGlobalConfig"] + ): + global_config_item = mapped_config["mldSnoopingGlobalConfig"]["items"][ + 0 + ] + + # Create the VLAN settings container + global_config_item["mldSnoopingVlanSettings"] = { + "configType": "SET", + "items": [], + } + + self.log( + "Created MLD Snooping VLAN settings container structure", "DEBUG" + ) + + # Process each VLAN configuration + for vlan_index, vlan_config in enumerate(mld_snooping_vlans): + self.log( + "Processing MLD Snooping VLAN configuration at index {0}".format( + vlan_index + ), + "DEBUG", + ) + + # Create a new VLAN config item + vlan_item = { + "configType": "MLD_SNOOPING_VLAN", + "vlanId": vlan_config.get("mld_snooping_vlan_id"), + } + + # Map the standard VLAN parameters + vlan_param_mapping = { + "enable_mld_snooping": "isMldSnoopingEnabled", + "mld_snooping_enable_immediate_leave": "isImmediateLeaveEnabled", + "mld_snooping_querier": "isQuerierEnabled", + "mld_snooping_querier_address": "querierAddress", + "mld_snooping_querier_query_interval": "querierQueryInterval", + "mld_snooping_querier_version": "querierVersion", + } + + # Add each parameter that exists in the VLAN config + for user_param, api_param in vlan_param_mapping.items(): + if user_param in vlan_config: + vlan_item[api_param] = vlan_config.get(user_param) + self.log( + "Mapped VLAN parameter '{0}' to '{1}' with value: {2}".format( + user_param, api_param, vlan_config.get(user_param) + ), + "DEBUG", + ) + + # Always include mldSnoopingVlanMrouters section to match API response format + # This ensures consistency with deployed configuration structure + mrouter_ports = vlan_config.get( + "mld_snooping_mrouter_port_list", [] + ) + vlan_item["mldSnoopingVlanMrouters"] = { + "configType": "SET", + "items": [], + } + + # Add mrouter ports if they 
exist + for port_index, port in enumerate(mrouter_ports): + mrouter_item = { + "configType": "MLD_SNOOPING_VLAN_MROUTER", + "interfaceName": port, + } + vlan_item["mldSnoopingVlanMrouters"]["items"].append( + mrouter_item + ) + self.log( + "Added mrouter port '{0}' at index {1} for VLAN {2}".format( + port, + port_index, + vlan_config.get("mld_snooping_vlan_id"), + ), + "DEBUG", + ) + + # Add the VLAN config to the VLAN settings items list + global_config_item["mldSnoopingVlanSettings"]["items"].append( + vlan_item + ) + self.log( + "Successfully added MLD Snooping VLAN {0} configuration to settings list".format( + vlan_config.get("mld_snooping_vlan_id") + ), + "DEBUG", + ) + + self.log( + "Completed processing all {0} MLD Snooping VLANs".format( + len(mld_snooping_vlans) + ), + "DEBUG", + ) + else: + self.log( + "Unable to add VLAN settings - global config structure not found", + "WARNING", + ) + else: + self.log("No MLD Snooping VLANs found to process", "DEBUG") + + self.log("MLD Snooping configuration mapping completed successfully", "INFO") + self.log( + "Final mapped MLD Snooping configuration: {0}".format(mapped_config), + "DEBUG", + ) + + return mapped_config + + def _map_authentication_config(self, authentication_config): + """ + Maps Authentication configuration parameters from user format to API format. + Args: + authentication_config (dict): The Authentication configuration provided by the user. + Returns: + dict: Mapped Authentication configuration in API-compatible format. + """ + self.log( + "Mapping Authentication configuration: {0}".format(authentication_config), + "DEBUG", + ) + + # Define the mapping schema for Authentication + auth_mapping_schema = { + "output_structure": {}, + "item_config_type": "DOT1X_GLOBAL", + "param_path": ["dot1xGlobalConfig", "items"], + "param_mapping": { + "enable_dot1x_authentication": "isDot1xEnabled", + "authentication_config_mode": "authenticationConfigMode", + }, + } + + # Use the generic mapping function + return self._map_parameters(authentication_config, auth_mapping_schema) + + def _map_logical_ports_config(self, logical_ports_config): + """ + Maps logical ports (port channel) configuration parameters from user format to API format. + Args: + logical_ports_config (dict): The logical ports configuration provided by the user. + Returns: + dict: Mapped logical ports configuration in API-compatible format. 
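+        Example (illustrative sketch; assumes _map_parameters builds the
+        {"portchannelConfig": {"items": [...]}} wrapper used below; values are hypothetical):
+            user input:
+                {"port_channel_load_balancing_method": "src_dst_ip",
+                 "port_channels": [{"port_channel_name": "Port-channel10",
+                                    "port_channel_protocol": "LACP",
+                                    "port_channel_members": [
+                                        {"port_channel_interface_name": "GigabitEthernet1/0/1"}]}]}
+            mapped output (abridged):
+                {"portchannelConfig": {"items": [{
+                    "configType": "PORTCHANNEL",
+                    "loadBalancingMethod": "src_dst_ip",
+                    "portchannels": {"configType": "SET", "items": [{
+                        "configType": "LACP_PORTCHANNEL_CONFIG",
+                        "name": "Port-channel10",
+                        "memberPorts": {"configType": "SET", "items": [{
+                            "configType": "LACP_PORTCHANNEL_MEMBER_PORT_CONFIG",
+                            "interfaceName": "GigabitEthernet1/0/1"}]}}]}}]}}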
+ """ + self.log( + "Mapping logical ports configuration: {0}".format(logical_ports_config), + "DEBUG", + ) + + # Create a copy of the configuration to avoid modifying the original + ports_config = logical_ports_config.copy() + + # Extract port channels for separate handling + port_channels = ports_config.pop("port_channels", []) + + self.log( + "Extracted {0} port channels for separate processing".format( + len(port_channels) + ), + "DEBUG", + ) + + # Define the mapping schema for the main logical ports global config + ports_mapping_schema = { + "output_structure": {}, + "item_config_type": "PORTCHANNEL", + "param_path": ["portchannelConfig", "items"], + "param_mapping": { + "port_channel_auto": "isAutoEnabled", + "port_channel_load_balancing_method": "loadBalancingMethod", + "port_channel_lacp_system_priority": "lacpSystemPriority", + }, + } + + # Use the generic mapping function for the main logical ports global config + mapped_config = self._map_parameters(ports_config, ports_mapping_schema) + + self.log("Main logical ports global configuration mapped successfully", "DEBUG") + + # Handle port channels if present + if ( + port_channels + and mapped_config + and "portchannelConfig" in mapped_config + and "items" in mapped_config["portchannelConfig"] + ): + self.log( + "Processing {0} port channels for integration".format( + len(port_channels) + ), + "DEBUG", + ) + + global_config_item = mapped_config["portchannelConfig"]["items"][0] + + # Create the port channels container + global_config_item["portchannels"] = {"configType": "SET", "items": []} + + self.log("Created port channels container structure", "DEBUG") + + # Process each port channel + for channel_index, channel in enumerate(port_channels): + self.log( + "Processing port channel at index {0}".format(channel_index), + "DEBUG", + ) + + protocol = channel.get("port_channel_protocol") + + # Skip if protocol is missing + if not protocol: + self.log( + "Skipping port channel without protocol: {0}".format(channel), + "WARNING", + ) + continue + + self.log( + "Processing port channel with protocol: {0}".format(protocol), + "DEBUG", + ) + + # Create the appropriate config type based on protocol + if protocol == "LACP": + self.log("Creating LACP port channel configuration", "DEBUG") + channel_item = { + "configType": "LACP_PORTCHANNEL_CONFIG", + "name": channel.get("port_channel_name"), + } + + # Add min links if present + if "port_channel_min_links" in channel: + channel_item["minLinks"] = channel.get("port_channel_min_links") + self.log( + "Added min links parameter: {0}".format( + channel.get("port_channel_min_links") + ), + "DEBUG", + ) + + # Handle member ports + member_ports = channel.get("port_channel_members", []) + if member_ports: + self.log( + "Processing {0} LACP member ports".format( + len(member_ports) + ), + "DEBUG", + ) + channel_item["memberPorts"] = {"configType": "SET", "items": []} + + for member_index, member in enumerate(member_ports): + self.log( + "Processing LACP member port at index {0}".format( + member_index + ), + "DEBUG", + ) + member_item = { + "configType": "LACP_PORTCHANNEL_MEMBER_PORT_CONFIG", + "interfaceName": member.get( + "port_channel_interface_name" + ), + } + + # Map optional member parameters + if "port_channel_mode" in member: + member_item["mode"] = member.get("port_channel_mode") + self.log( + "Added LACP mode parameter: {0}".format( + member.get("port_channel_mode") + ), + "DEBUG", + ) + if "port_channel_port_priority" in member: + member_item["portPriority"] = member.get( + 
"port_channel_port_priority" + ) + self.log( + "Added LACP port priority: {0}".format( + member.get("port_channel_port_priority") + ), + "DEBUG", + ) + if "port_channel_rate" in member: + member_item["rate"] = member.get("port_channel_rate") + self.log( + "Added LACP rate parameter: {0}".format( + member.get("port_channel_rate") + ), + "DEBUG", + ) + + channel_item["memberPorts"]["items"].append(member_item) + + elif protocol == "PAGP": + self.log("Creating PAGP port channel configuration", "DEBUG") + channel_item = { + "configType": "PAGP_PORTCHANNEL_CONFIG", + "name": channel.get("port_channel_name"), + } + + # Add min links if present + if "port_channel_min_links" in channel: + channel_item["minLinks"] = channel.get("port_channel_min_links") + self.log( + "Added min links parameter: {0}".format( + channel.get("port_channel_min_links") + ), + "DEBUG", + ) + + # Handle member ports + member_ports = channel.get("port_channel_members", []) + if member_ports: + self.log( + "Processing {0} PAGP member ports".format( + len(member_ports) + ), + "DEBUG", + ) + channel_item["memberPorts"] = {"configType": "SET", "items": []} + + for member_index, member in enumerate(member_ports): + self.log( + "Processing PAGP member port at index {0}".format( + member_index + ), + "DEBUG", + ) + member_item = { + "configType": "PAGP_PORTCHANNEL_MEMBER_PORT_CONFIG", + "interfaceName": member.get( + "port_channel_interface_name" + ), + } + + # Map optional member parameters + if "port_channel_mode" in member: + member_item["mode"] = member.get("port_channel_mode") + self.log( + "Added PAGP mode parameter: {0}".format( + member.get("port_channel_mode") + ), + "DEBUG", + ) + if "port_channel_port_priority" in member: + member_item["portPriority"] = member.get( + "port_channel_port_priority" + ) + self.log( + "Added PAGP port priority: {0}".format( + member.get("port_channel_port_priority") + ), + "DEBUG", + ) + if "port_channel_learn_method" in member: + member_item["learnMethod"] = member.get( + "port_channel_learn_method" + ) + self.log( + "Added PAGP learn method: {0}".format( + member.get("port_channel_learn_method") + ), + "DEBUG", + ) + + channel_item["memberPorts"]["items"].append(member_item) + + elif protocol == "NONE": + self.log( + "Creating EtherChannel configuration for static aggregation", + "DEBUG", + ) + channel_item = { + "configType": "ETHERCHANNEL_CONFIG", + "name": channel.get("port_channel_name"), + } + + # Add min links if present + if "port_channel_min_links" in channel: + channel_item["minLinks"] = channel.get("port_channel_min_links") + self.log( + "Added min links parameter: {0}".format( + channel.get("port_channel_min_links") + ), + "DEBUG", + ) + + # Handle member ports + member_ports = channel.get("port_channel_members", []) + if member_ports: + self.log( + "Processing {0} EtherChannel member ports".format( + len(member_ports) + ), + "DEBUG", + ) + channel_item["memberPorts"] = {"configType": "SET", "items": []} + + for member_index, member in enumerate(member_ports): + self.log( + "Processing EtherChannel member port at index {0}".format( + member_index + ), + "DEBUG", + ) + member_item = { + "configType": "ETHERCHANNEL_MEMBER_PORT_CONFIG", + "interfaceName": member.get( + "port_channel_interface_name" + ), + } + + # Map optional member parameters + if "port_channel_mode" in member: + member_item["mode"] = member.get("port_channel_mode") + self.log( + "Added EtherChannel mode parameter: {0}".format( + member.get("port_channel_mode") + ), + "DEBUG", + ) + + 
channel_item["memberPorts"]["items"].append(member_item) + + else: + self.log( + "Unsupported port channel protocol: {0}".format(protocol), + "WARNING", + ) + continue + + # Add the port channel item directly to the items list + global_config_item["portchannels"]["items"].append(channel_item) + self.log( + "Successfully added port channel '{0}' with protocol {1}".format( + channel.get("port_channel_name"), protocol + ), + "DEBUG", + ) + + self.log( + "Completed processing all {0} port channels".format(len(port_channels)), + "DEBUG", + ) + else: + self.log( + "No port channels found to process or configuration structure incomplete", + "DEBUG", + ) + + if mapped_config and "portchannelConfig" in mapped_config: + self.log( + "Returning portchannelConfig structure for SDK 'payload' parameter", + "DEBUG", + ) + self.log("Final mapped config: {0}".format(mapped_config), "DEBUG") + + self.log("Logical ports configuration mapping completed successfully", "INFO") + + return mapped_config + + def _convert_vlan_list_to_string(self, vlan_list): + """ + Converts a list of VLAN IDs to a simple comma-separated string. + Args: + vlan_list (list): List of VLAN IDs + Returns: + str: Comma-separated string of VLAN IDs + """ + self.log("Starting VLAN list to string conversion", "DEBUG") + self.log("Input VLAN list: {0}".format(vlan_list), "DEBUG") + + # Check if the VLAN list is empty or None + if not vlan_list: + self.log( + "Empty or None VLAN list provided, returning empty string", "DEBUG" + ) + return "" + + # Convert each VLAN ID to string and join with commas + result = ",".join(map(str, vlan_list)) + + self.log("VLAN list conversion completed successfully", "DEBUG") + self.log("Converted VLAN list to string: {0}".format(result), "DEBUG") + + return result + + def _process_switchport_interface_config( + self, mapped_config, interface_name, switchport_config + ): + """ + Processes switchport interface configuration for a specific interface. 
+ Args: + mapped_config (dict): The configuration being built + interface_name (str): The name of the interface + switchport_config (dict): The switchport configuration for this interface + """ + if not switchport_config: + return + + self.log( + "Processing switchport configuration for {0}: {1}".format( + interface_name, switchport_config + ), + "DEBUG", + ) + + # Initialize the switchport interface config section if it doesn't exist + if "switchportInterfaceConfig" not in mapped_config: + mapped_config["switchportInterfaceConfig"] = {"items": []} + self.log( + "Initialized switchportInterfaceConfig section in mapped configuration", + "DEBUG", + ) + + # Create the new item for this interface + switchport_item = { + "configType": "SWITCHPORT_INTERFACE", + "interfaceName": interface_name, + } + self.log( + "Created switchport item structure for interface {0}".format( + interface_name + ), + "DEBUG", + ) + + # Map the parameters + param_mapping = { + "switchport_description": "description", + "switchport_mode": "mode", + "access_vlan": "accessVlan", + "voice_vlan": "voiceVlan", + "native_vlan_id": "nativeVlan", + } + + # Apply standard parameter mappings + for user_param, api_param in param_mapping.items(): + if user_param in switchport_config: + switchport_item[api_param] = switchport_config[user_param] + self.log( + "Mapped parameter {0} to {1} with value {2}".format( + user_param, api_param, switchport_config[user_param] + ), + "DEBUG", + ) + + # Special handling for admin_status (bool to enum) + if "admin_status" in switchport_config: + switchport_item["adminStatus"] = ( + "UP" if switchport_config["admin_status"] else "DOWN" + ) + self.log( + "Applied admin status transformation: {0} -> {1}".format( + switchport_config["admin_status"], switchport_item["adminStatus"] + ), + "DEBUG", + ) + + # Special handling for allowed_vlans (list to string) + if "allowed_vlans" in switchport_config and isinstance( + switchport_config["allowed_vlans"], list + ): + # Convert list of integers to simple comma-separated string or ranges + allowed_vlans = switchport_config["allowed_vlans"] + if allowed_vlans: + switchport_item["trunkAllowedVlans"] = ( + self._convert_vlan_list_to_string(allowed_vlans) + ) + self.log( + "Converted allowed VLANs list to string: {0}".format( + switchport_item["trunkAllowedVlans"] + ), + "DEBUG", + ) + + # Add the item to the items list + mapped_config["switchportInterfaceConfig"]["items"].append(switchport_item) + self.log( + "Successfully added switchport configuration for interface {0} to mapped config".format( + interface_name + ), + "DEBUG", + ) + + def _process_vlan_trunking_interface_config( + self, mapped_config, interface_name, trunk_config + ): + """ + Processes VLAN trunking interface configuration for a specific interface. 
+ Args: + mapped_config (dict): The configuration being built + interface_name (str): The name of the interface + trunk_config (dict): The VLAN trunking configuration for this interface + """ + if not trunk_config: + return + + self.log( + "Processing VLAN trunking configuration for {0}: {1}".format( + interface_name, trunk_config + ), + "DEBUG", + ) + + # Initialize the trunk interface config section if it doesn't exist + if "trunkInterfaceConfig" not in mapped_config: + mapped_config["trunkInterfaceConfig"] = {"items": []} + self.log( + "Initialized trunkInterfaceConfig section in mapped configuration", + "DEBUG", + ) + + # Create the new item for this interface + trunk_item = {"configType": "TRUNK_INTERFACE", "interfaceName": interface_name} + self.log( + "Created trunk item structure for interface {0}".format(interface_name), + "DEBUG", + ) + + # Map the protected parameter + if "protected" in trunk_config: + trunk_item["isProtected"] = trunk_config["protected"] + self.log( + "Mapped protected parameter: {0}".format(trunk_config["protected"]), + "DEBUG", + ) + + # Updated logic for enable_dtp_negotiation (boolean parameter) + if "enable_dtp_negotiation" in trunk_config: + trunk_item["isDtpNegotiationEnabled"] = trunk_config[ + "enable_dtp_negotiation" + ] + self.log( + "Mapped DTP negotiation parameter: {0}".format( + trunk_config["enable_dtp_negotiation"] + ), + "DEBUG", + ) + + # Handle pruning VLAN IDs + if "pruning_vlan_ids" in trunk_config and isinstance( + trunk_config["pruning_vlan_ids"], list + ): + pruning_vlans = trunk_config["pruning_vlan_ids"] + if pruning_vlans: + trunk_item["pruneEligibleVlans"] = self._convert_vlan_list_to_string( + pruning_vlans + ) + self.log( + "Converted pruning VLAN IDs list to string: {0}".format( + trunk_item["pruneEligibleVlans"] + ), + "DEBUG", + ) + else: + self.log( + "Empty pruning VLAN IDs list found, skipping conversion", "DEBUG" + ) + + # Add the item to the items list + mapped_config["trunkInterfaceConfig"]["items"].append(trunk_item) + self.log( + "Successfully added VLAN trunking configuration for interface {0} to mapped config".format( + interface_name + ), + "DEBUG", + ) + + def _process_dot1x_interface_config( + self, mapped_config, interface_name, dot1x_config + ): + """ + Processes 802.1x interface configuration for a specific interface. 
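+        Example (illustrative; parameter names follow the mappings in this method,
+        values are hypothetical):
+            dot1x_config = {"dot1x_interface_pae_type": "AUTHENTICATOR",
+                            "dot1x_interface_authentication_order": ["DOT1X", "MAB"]}
+            appends to mapped_config["dot1xInterfaceConfig"]["items"]:
+                {"configType": "DOT1X_INTERFACE",
+                 "interfaceName": "<interface_name>",
+                 "paeType": "AUTHENTICATOR",
+                 "authenticationOrder": {"configType": "ORDERED_SET",
+                                         "items": ["DOT1X", "MAB"]}}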
+ Args: + mapped_config (dict): The configuration being built + interface_name (str): The name of the interface + dot1x_config (dict): The 802.1x configuration for this interface + """ + if not dot1x_config: + return + + self.log( + "Processing 802.1x configuration for {0}: {1}".format( + interface_name, dot1x_config + ), + "DEBUG", + ) + + # Initialize the dot1x interface config section if it doesn't exist + if "dot1xInterfaceConfig" not in mapped_config: + mapped_config["dot1xInterfaceConfig"] = {"items": []} + self.log( + "Initialized dot1xInterfaceConfig section in mapped configuration", + "DEBUG", + ) + + # Create the new item for this interface + dot1x_item = {"configType": "DOT1X_INTERFACE", "interfaceName": interface_name} + self.log( + "Created dot1x item structure for interface {0}".format(interface_name), + "DEBUG", + ) + + # Map basic parameters + basic_param_mapping = { + "dot1x_interface_authentication_mode": "authenticationMode", + "dot1x_interface_pae_type": "paeType", + "dot1x_interface_control_direction": "controlDirection", + "dot1x_interface_host_mode": "hostMode", + "dot1x_interface_inactivity_timer": "inactivityTimer", + "dot1x_interface_reauthentication": "isReauthEnabled", + "dot1x_interface_port_control": "portControl", + "dot1x_interface_max_reauth_requests": "maxReauthRequests", + "dot1x_interface_reauth_timer": "reauthTimer", + "dot1x_interface_tx_period": "txPeriod", + } + + # Apply basic parameter mappings + for user_param, api_param in basic_param_mapping.items(): + if user_param in dot1x_config: + dot1x_item[api_param] = dot1x_config[user_param] + self.log( + "Mapped basic parameter '{0}' to '{1}' with value: {2}".format( + user_param, api_param, dot1x_config[user_param] + ), + "DEBUG", + ) + + # Map boolean parameters with different naming + boolean_param_mapping = { + "dot1x_interface_inactivity_timer_from_server": "isInactivityTimerFromServerEnabled", + "dot1x_interface_reauth_timer_from_server": "isReauthTimerFromServerEnabled", + } + + for user_param, api_param in boolean_param_mapping.items(): + if user_param in dot1x_config: + dot1x_item[api_param] = dot1x_config[user_param] + self.log( + "Mapped boolean parameter '{0}' to '{1}' with value: {2}".format( + user_param, api_param, dot1x_config[user_param] + ), + "DEBUG", + ) + + # Handle authentication order (list parameter) + if "dot1x_interface_authentication_order" in dot1x_config: + auth_order = dot1x_config["dot1x_interface_authentication_order"] + if auth_order: # Only add if not empty + dot1x_item["authenticationOrder"] = { + "configType": "ORDERED_SET", + "items": auth_order, + } + self.log( + "Added authentication order with {0} items: {1}".format( + len(auth_order), auth_order + ), + "DEBUG", + ) + else: + self.log("Skipping empty authentication order list", "DEBUG") + + # Handle priority (list parameter) + if "dot1x_interface_priority" in dot1x_config: + priority_order = dot1x_config["dot1x_interface_priority"] + if priority_order: # Only add if not empty + dot1x_item["priority"] = { + "configType": "ORDERED_SET", + "items": priority_order, + } + self.log( + "Added priority order with {0} items: {1}".format( + len(priority_order), priority_order + ), + "DEBUG", + ) + else: + self.log("Skipping empty priority order list", "DEBUG") + + # Add the item to the items list + mapped_config["dot1xInterfaceConfig"]["items"].append(dot1x_item) + self.log( + "Successfully added 802.1x configuration for interface {0} to mapped config".format( + interface_name + ), + "DEBUG", + ) + + def 
_process_mab_interface_config(self, mapped_config, interface_name, mab_config): + """ + Processes MAB interface configuration for a specific interface. + Args: + mapped_config (dict): The configuration being built + interface_name (str): The name of the interface + mab_config (dict): The MAB configuration for this interface + """ + if not mab_config: + return + + self.log( + "Processing MAB configuration for {0}: {1}".format( + interface_name, mab_config + ), + "DEBUG", + ) + + # Initialize the mab interface config section if it doesn't exist + if "mabInterfaceConfig" not in mapped_config: + mapped_config["mabInterfaceConfig"] = {"items": []} + self.log( + "Initialized mabInterfaceConfig section in mapped configuration", + "DEBUG", + ) + + # Create the new item for this interface + mab_item = {"configType": "MAB_INTERFACE", "interfaceName": interface_name} + self.log( + "Created MAB item structure for interface {0}".format(interface_name), + "DEBUG", + ) + + # Map the parameters + if "enable_mab" in mab_config: + mab_item["isMabEnabled"] = mab_config["enable_mab"] + self.log( + "Mapped enable_mab parameter to isMabEnabled with value: {0}".format( + mab_config["enable_mab"] + ), + "DEBUG", + ) + + # Add the item to the items list + mapped_config["mabInterfaceConfig"]["items"].append(mab_item) + self.log( + "Successfully added MAB configuration for interface {0} to mapped config".format( + interface_name + ), + "DEBUG", + ) + + def _process_stp_interface_config(self, mapped_config, interface_name, stp_config): + """ + Processes STP interface configuration for a specific interface. + Args: + mapped_config (dict): The configuration being built + interface_name (str): The name of the interface + stp_config (dict): The STP configuration for this interface + """ + if not stp_config: + return + + self.log( + "Processing STP configuration for {0}: {1}".format( + interface_name, stp_config + ), + "DEBUG", + ) + + # Initialize the STP interface config section if it doesn't exist + if "stpInterfaceConfig" not in mapped_config: + mapped_config["stpInterfaceConfig"] = {"items": []} + self.log( + "Initialized stpInterfaceConfig section in mapped configuration", + "DEBUG", + ) + + # Create the new item for this interface + stp_item = {"configType": "STP_INTERFACE", "interfaceName": interface_name} + self.log( + "Created STP item structure for interface {0}".format(interface_name), + "DEBUG", + ) + + # Map the basic parameters + param_mapping = { + "stp_interface_portfast_mode": "portFastMode", + "stp_interface_guard": "guardMode", + "stp_interface_cost": "pathCost", + "stp_interface_priority": "priority", + } + + # Apply standard parameter mappings + for user_param, api_param in param_mapping.items(): + if user_param in stp_config: + stp_item[api_param] = stp_config[user_param] + self.log( + "Mapped parameter {0} to {1} with value {2}".format( + user_param, api_param, stp_config[user_param] + ), + "DEBUG", + ) + + # Special handling for boolean to string enum conversions + if "stp_interface_bpdu_filter" in stp_config: + stp_item["bpduFilter"] = ( + "ENABLE" if stp_config["stp_interface_bpdu_filter"] else "DISABLE" + ) + self.log( + "Applied BPDU filter transformation: {0} -> {1}".format( + stp_config["stp_interface_bpdu_filter"], stp_item["bpduFilter"] + ), + "DEBUG", + ) + + if "stp_interface_bpdu_guard" in stp_config: + stp_item["bpduGuard"] = ( + "ENABLE" if stp_config["stp_interface_bpdu_guard"] else "DISABLE" + ) + self.log( + "Applied BPDU guard transformation: {0} -> {1}".format( + 
stp_config["stp_interface_bpdu_guard"], stp_item["bpduGuard"] + ), + "DEBUG", + ) + + # Handle per-VLAN cost settings + per_vlan_cost = stp_config.get("stp_interface_per_vlan_cost") + if ( + per_vlan_cost + and "priority" in per_vlan_cost + and "vlan_ids" in per_vlan_cost + ): + stp_item["portVlanCostSettings"] = { + "configType": "LIST", + "items": [ + { + "configType": "STP_INTERFACE_VLAN_COST", + "cost": per_vlan_cost["priority"], + "vlans": self._convert_vlan_list_to_string( + per_vlan_cost["vlan_ids"] + ), + } + ], + } + self.log( + "Added per-VLAN cost settings with cost {0} for VLANs: {1}".format( + per_vlan_cost["priority"], per_vlan_cost["vlan_ids"] + ), + "DEBUG", + ) + + # Handle per-VLAN priority settings + per_vlan_priority = stp_config.get("stp_interface_per_vlan_priority") + if ( + per_vlan_priority + and "priority" in per_vlan_priority + and "vlan_ids" in per_vlan_priority + ): + stp_item["portVlanPrioritySettings"] = { + "configType": "LIST", + "items": [ + { + "configType": "STP_INTERFACE_VLAN_PRIORITY", + "priority": per_vlan_priority["priority"], + "vlans": self._convert_vlan_list_to_string( + per_vlan_priority["vlan_ids"] + ), + } + ], + } + self.log( + "Added per-VLAN priority settings with priority {0} for VLANs: {1}".format( + per_vlan_priority["priority"], per_vlan_priority["vlan_ids"] + ), + "DEBUG", + ) + + # Add the item to the items list + mapped_config["stpInterfaceConfig"]["items"].append(stp_item) + self.log( + "Successfully added STP configuration for interface {0} to mapped config".format( + interface_name + ), + "DEBUG", + ) + + def _process_dhcp_snooping_interface_config( + self, mapped_config, interface_name, dhcp_config + ): + """ + Processes DHCP Snooping interface configuration for a specific interface. + Args: + mapped_config (dict): The configuration being built + interface_name (str): The name of the interface + dhcp_config (dict): The DHCP Snooping configuration for this interface + """ + if not dhcp_config: + return + + self.log( + "Processing DHCP Snooping configuration for {0}: {1}".format( + interface_name, dhcp_config + ), + "DEBUG", + ) + + # Initialize the dhcp snooping interface config section if it doesn't exist + if "dhcpSnoopingInterfaceConfig" not in mapped_config: + mapped_config["dhcpSnoopingInterfaceConfig"] = {"items": []} + self.log( + "Initialized dhcpSnoopingInterfaceConfig section in mapped configuration", + "DEBUG", + ) + + # Create the new item for this interface + dhcp_item = { + "configType": "DHCP_SNOOPING_INTERFACE", + "interfaceName": interface_name, + } + self.log( + "Created DHCP snooping item structure for interface {0}".format( + interface_name + ), + "DEBUG", + ) + + # Map the parameters + if "dhcp_snooping_interface_trust" in dhcp_config: + dhcp_item["isTrustedInterface"] = dhcp_config[ + "dhcp_snooping_interface_trust" + ] + self.log( + "Mapped dhcp_snooping_interface_trust parameter: {0}".format( + dhcp_config["dhcp_snooping_interface_trust"] + ), + "DEBUG", + ) + + if "dhcp_snooping_interface_rate" in dhcp_config: + dhcp_item["messageRateLimit"] = dhcp_config["dhcp_snooping_interface_rate"] + self.log( + "Mapped dhcp_snooping_interface_rate parameter: {0}".format( + dhcp_config["dhcp_snooping_interface_rate"] + ), + "DEBUG", + ) + + # Add the item to the items list + mapped_config["dhcpSnoopingInterfaceConfig"]["items"].append(dhcp_item) + self.log( + "Successfully added DHCP Snooping configuration for interface {0} to mapped config".format( + interface_name + ), + "DEBUG", + ) + + def 
_process_cdp_interface_config(self, mapped_config, interface_name, cdp_config): + """ + Processes CDP interface configuration for a specific interface. + Args: + mapped_config (dict): The configuration being built + interface_name (str): The name of the interface + cdp_config (dict): The CDP configuration for this interface + """ + if not cdp_config: + return + + self.log( + "Processing CDP configuration for {0}: {1}".format( + interface_name, cdp_config + ), + "DEBUG", + ) + + # Initialize the cdp interface config section if it doesn't exist + if "cdpInterfaceConfig" not in mapped_config: + mapped_config["cdpInterfaceConfig"] = {"items": []} + self.log( + "Initialized cdpInterfaceConfig section in mapped configuration", + "DEBUG", + ) + + # Create the new item for this interface + cdp_item = {"configType": "CDP_INTERFACE", "interfaceName": interface_name} + self.log( + "Created CDP item structure for interface {0}".format(interface_name), + "DEBUG", + ) + + # Map the parameters + if "cdp_interface_admin_status" in cdp_config: + cdp_item["isCdpEnabled"] = cdp_config["cdp_interface_admin_status"] + self.log( + "Mapped cdp_interface_admin_status parameter: {0}".format( + cdp_config["cdp_interface_admin_status"] + ), + "DEBUG", + ) + + if "cdp_interface_log_duplex_mismatch" in cdp_config: + cdp_item["isLogDuplexMismatchEnabled"] = cdp_config[ + "cdp_interface_log_duplex_mismatch" + ] + self.log( + "Mapped cdp_interface_log_duplex_mismatch parameter: {0}".format( + cdp_config["cdp_interface_log_duplex_mismatch"] + ), + "DEBUG", + ) + + # Add the item to the items list + mapped_config["cdpInterfaceConfig"]["items"].append(cdp_item) + self.log( + "Successfully added CDP configuration for interface {0} to mapped config".format( + interface_name + ), + "DEBUG", + ) + + def _process_lldp_interface_config( + self, mapped_config, interface_name, lldp_config + ): + """ + Processes LLDP interface configuration for a specific interface. + Args: + mapped_config (dict): The configuration being built + interface_name (str): The name of the interface + lldp_config (dict): The LLDP configuration for this interface + """ + if not lldp_config: + return + + self.log( + "Processing LLDP configuration for {0}: {1}".format( + interface_name, lldp_config + ), + "DEBUG", + ) + + # Initialize the lldp interface config section if it doesn't exist + if "lldpInterfaceConfig" not in mapped_config: + mapped_config["lldpInterfaceConfig"] = {"items": []} + self.log( + "Initialized lldpInterfaceConfig section in mapped configuration", + "DEBUG", + ) + + # Create the new item for this interface + lldp_item = {"configType": "LLDP_INTERFACE", "interfaceName": interface_name} + self.log( + "Created LLDP item structure for interface {0}".format(interface_name), + "DEBUG", + ) + + # Map the parameters - specifically the receive/transmit status + if "lldp_interface_receive_transmit" in lldp_config: + lldp_item["adminStatus"] = lldp_config["lldp_interface_receive_transmit"] + self.log( + "Mapped lldp_interface_receive_transmit parameter: {0}".format( + lldp_config["lldp_interface_receive_transmit"] + ), + "DEBUG", + ) + + # Add the item to the items list + mapped_config["lldpInterfaceConfig"]["items"].append(lldp_item) + self.log( + "Successfully added LLDP configuration for interface {0} to mapped config".format( + interface_name + ), + "DEBUG", + ) + + def _process_vtp_interface_config(self, mapped_config, interface_name, vtp_config): + """ + Processes VTP interface configuration for a specific interface. 
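+        Example (illustrative; values are hypothetical):
+            vtp_config = {"vtp_interface_admin_status": True}
+            appends to mapped_config["vtpInterfaceConfig"]["items"]:
+                {"configType": "VTP_INTERFACE",
+                 "interfaceName": "<interface_name>",
+                 "isVtpEnabled": True}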
+ Args: + mapped_config (dict): The configuration being built + interface_name (str): The name of the interface + vtp_config (dict): The VTP configuration for this interface + """ + if not vtp_config: + return + + self.log( + "Processing VTP configuration for {0}: {1}".format( + interface_name, vtp_config + ), + "DEBUG", + ) + + # Initialize the vtp interface config section if it doesn't exist + if "vtpInterfaceConfig" not in mapped_config: + mapped_config["vtpInterfaceConfig"] = {"items": []} + self.log( + "Initialized vtpInterfaceConfig section in mapped configuration", + "DEBUG", + ) + + # Create the new item for this interface + vtp_item = {"configType": "VTP_INTERFACE", "interfaceName": interface_name} + self.log( + "Created VTP item structure for interface {0}".format(interface_name), + "DEBUG", + ) + + # Map the parameters + if "vtp_interface_admin_status" in vtp_config: + vtp_item["isVtpEnabled"] = vtp_config["vtp_interface_admin_status"] + self.log( + "Mapped vtp_interface_admin_status parameter: {0}".format( + vtp_config["vtp_interface_admin_status"] + ), + "DEBUG", + ) + + # Add the item to the items list + mapped_config["vtpInterfaceConfig"]["items"].append(vtp_item) + self.log( + "Successfully added VTP configuration for interface {0} to mapped config".format( + interface_name + ), + "DEBUG", + ) + + def _map_port_configuration(self, port_configurations): + """ + Maps port configuration parameters from user format to API format. + Args: + port_configurations (list): A list of port configurations provided by the user. + Returns: + dict: Mapped port configuration in API-compatible format. + """ + self.log("Mapping port configuration: {0}".format(port_configurations), "DEBUG") + + # Initialize the output structure for mapped configurations + mapped_config = {} + + self.log( + "Processing {0} port configurations for interface mapping".format( + len(port_configurations) + ), + "DEBUG", + ) + + # Process each interface configuration sequentially + for port_index, port_config in enumerate(port_configurations): + self.log( + "Processing port configuration at index {0}".format(port_index), "DEBUG" + ) + + interface_name = port_config.get("interface_name") + + if not interface_name: + self.log( + "Skipping port configuration without interface_name", "WARNING" + ) + continue + + self.log( + "Processing interface configurations for: {0}".format(interface_name), + "DEBUG", + ) + + # Process each feature configuration for this interface in the same order as temp_spec + self._process_switchport_interface_config( + mapped_config, + interface_name, + port_config.get("switchport_interface_config"), + ) + + self._process_vlan_trunking_interface_config( + mapped_config, + interface_name, + port_config.get("vlan_trunking_interface_config"), + ) + + self._process_dot1x_interface_config( + mapped_config, interface_name, port_config.get("dot1x_interface_config") + ) + + self._process_mab_interface_config( + mapped_config, interface_name, port_config.get("mab_interface_config") + ) + + self._process_stp_interface_config( + mapped_config, interface_name, port_config.get("stp_interface_config") + ) + + self._process_dhcp_snooping_interface_config( + mapped_config, + interface_name, + port_config.get("dhcp_snooping_interface_config"), + ) + + self._process_cdp_interface_config( + mapped_config, interface_name, port_config.get("cdp_interface_config") + ) + + self._process_lldp_interface_config( + mapped_config, interface_name, port_config.get("lldp_interface_config") + ) + + 
self._process_vtp_interface_config( + mapped_config, interface_name, port_config.get("vtp_interface_config") + ) + + self.log( + "Completed processing all feature configurations for interface: {0}".format( + interface_name + ), + "DEBUG", + ) + + self.log("Port configuration mapping completed successfully", "INFO") + self.log( + "Final mapped configuration contains {0} feature sections".format( + len(mapped_config) + ), + "DEBUG", + ) + self.log("Mapped configuration structure: {0}".format(mapped_config), "DEBUG") + + return mapped_config + + def get_mapped_layer2_config_params(self, feature_name, config_data): + """ + Maps user-provided configuration parameters to API-compatible format for a specific Layer 2 feature. + Args: + feature_name (str): The name of the Layer 2 feature (Example, "vlans", "cdp", "lldp"). + config_data (dict/list): The configuration data for the feature. + Returns: + dict: The mapped configuration parameters in API-compatible format. + """ + self.log( + "Mapping configuration parameters for feature '{0}'.".format(feature_name), + "DEBUG", + ) + + # Feature-specific mapping functions + feature_mappers = { + "vlans": self._map_vlans_config, + "cdp": self._map_cdp_config, + "lldp": self._map_lldp_config, + "stp": self._map_stp_config, + "vtp": self._map_vtp_config, + "dhcp_snooping": self._map_dhcp_snooping_config, + "igmp_snooping": self._map_igmp_snooping_config, + "mld_snooping": self._map_mld_snooping_config, + "authentication": self._map_authentication_config, + "logical_ports": self._map_logical_ports_config, + "port_configuration": self._map_port_configuration, + } + + # Get the appropriate mapper for this feature + mapper = feature_mappers.get(feature_name) + + if mapper: + self.log( + "Found mapping function for feature '{0}', executing transformation".format( + feature_name + ), + "DEBUG", + ) + + # Call the specific mapper with the config data + mapped_config = mapper(config_data) + + self.log( + "Mapped configuration for '{0}': {1}".format( + feature_name, mapped_config + ), + "DEBUG", + ) + + self.log( + "Configuration mapping completed successfully for feature '{0}'".format( + feature_name + ), + "INFO", + ) + + return mapped_config + else: + # This should never happen if our feature_mappers dictionary is complete + self.msg = "No parameter mapper available for feature '{0}'.".format( + feature_name + ) + self.log( + "Configuration mapping failed - no mapper found for feature '{0}'".format( + feature_name + ), + "ERROR", + ) + self.fail_and_exit(self.msg) + + # def get_intended_layer2_feature_configuration(self, network_device_id, feature): + # """ + # Retrieves the configurations for an intended layer 2 feature on a wired device. + # Args: + # device_id (str): Network device ID of the wired device. + # feature (str): Name of the layer 2 feature to retrieve (Example, 'vlan', 'cdp', 'stp'). + # Returns: + # dict: The configuration details of the intended layer 2 feature. 
+ # """ + # self.log( + # "Retrieving intended configuration for layer 2 feature '{0}' on device {1}".format( + # feature, network_device_id + # ), + # "INFO", + # ) + # # Prepare the API parameters + # api_params = { + # "id": network_device_id, + # "feature": feature + # } + # # Execute the API call to get the intended layer 2 feature configuration + # return self.execute_get_request( + # "wired", "get_configurations_for_an_intended_layer2_feature_on_a_wired_device", api_params + # ) + + def get_intended_layer2_feature_configuration(self, network_device_id, feature): + """ + Retrieves the configurations for an intended layer 2 feature on a wired device. + """ + + self.log( + "Retrieving intended configuration for layer 2 feature '{0}' on device {1}".format( + feature, network_device_id + ), + "INFO", + ) + + # Prepare the API parameters + api_parameters = {"id": network_device_id, "feature": feature} + + try: + # Execute the API call to get the intended layer 2 feature configuration + api_function = ( + "get_configurations_for_an_intended_layer2_feature_on_a_wired_device" + ) + api_family = "wired" + + response = self.dnac._exec( + family=api_family, + function=api_function, + op_modifies=False, + params=api_parameters, + ) + + self.log( + "Response received from GET API call to Function: '{0}' from Family: '{1}' is Response: {2}".format( + api_function, api_family, str(response) + ), + "INFO", + ) + + # Check if the response is None, an empty string, or an empty dictionary + if ( + response is None + or response == "" + or (isinstance(response, dict) and not response) + ): + self.log( + "No response received from GET API call to Function: '{0}' from Family: '{1}'.".format( + api_function, api_family + ), + "WARNING", + ) + return {} + + return response + + except Exception as e: + error_str = str(e) + self.log( + "Error retrieving intended configuration for feature '{0}': {1}".format( + feature, error_str + ), + "DEBUG", + ) + + # Check if this is a 404 error (resource not found) or the SDK's TypeError bug + if ( + "404" in error_str + or "Not Found" in error_str + or "argument of type 'NoneType' is not iterable" in error_str + ): + self.log( + "No intended configuration exists for feature '{0}' - this is normal for features that haven't been configured yet".format( + feature + ), + "INFO", + ) + # Return empty dict for 404 errors and SDK TypeError bug + return {} + + # For other errors, log as WARNING but don't fail the entire operation + self.log_traceback() + self.msg = ( + "An error occurred while executing GET API call to Function: '{0}' from Family: '{1}'. " + "Parameters: {2}. Exception: {3}.".format( + api_function, api_family, api_parameters, str(e) + ) + ) + self.fail_and_exit(self.msg) + + def get_deployed_layer2_feature_configuration(self, network_device_id, feature): + """ + Retrieves the configurations for a deployed layer 2 feature on a wired device. + Args: + device_id (str): Network device ID of the wired device. + feature (str): Name of the layer 2 feature to retrieve (Example, 'vlan', 'cdp', 'stp'). + Returns: + dict: The configuration details of the deployed layer 2 feature. 
+ """ + self.log( + "Retrieving deployed configuration for layer 2 feature '{0}' on device {1}".format( + feature, network_device_id + ), + "INFO", + ) + # Prepare the API parameters + api_params = {"id": network_device_id, "feature": feature} + # Execute the API call to get the deployed layer 2 feature configuration + return self.execute_get_request( + "wired", + "get_configurations_for_a_deployed_layer2_feature_on_a_wired_device", + api_params, + ) + + def create_layer2_feature_configuration( + self, network_device_id, feature, config_params + ): + """ + Creates configurations for an intended layer 2 feature on a wired device. + Args: + device_id (str): Network device ID of the wired device to configure. + feature (str): Name of the layer 2 feature to configure (Example, 'vlan', 'cdp', 'stp'). + config_params (dict): A dictionary containing the configuration parameters for the feature. + The keys should match the expected parameter names for the feature. + Returns: + dict: The response containing the task ID for the create operation. + """ + self.log( + "Initiating creation of layer 2 feature '{0}' on device {1} with parameters: {2}".format( + feature, network_device_id, config_params + ), + "INFO", + ) + # Prepare the API parameters + api_params = { + "id": network_device_id, + "feature": feature, + "active_validation": False, + "payload": config_params, + } + # Add configuration parameters to the API parameters + api_params.update(config_params) + + self.log( + "Final API parameters for create intent operation: {0}".format(api_params), + "DEBUG", + ) + + # Execute the API call to create the layer 2 feature configuration and return the task ID + return self.get_taskid_post_api_call( + "wired", + "create_configurations_for_an_intended_layer2_feature_on_a_wired_device", + api_params, + ) + + def update_layer2_feature_configuration( + self, network_device_id, feature, config_params + ): + """ + Updates configurations for an intended layer 2 feature on a wired device. + Args: + device_id (str): Network device ID of the wired device to configure. + feature (str): Name of the layer 2 feature to update (Example, 'vlan', 'cdp', 'stp'). + config_params (dict): A dictionary containing the updated configuration parameters for the feature. + The keys should match the expected parameter names for the feature. + Returns: + dict: The response containing the task ID for the update operation. + """ + self.log( + "Initiating update of layer 2 feature '{0}' on device {1} with parameters: {2}".format( + feature, network_device_id, config_params + ), + "INFO", + ) + # Prepare the API parameters + api_params = { + "id": network_device_id, + "feature": feature, + "active_validation": False, + "payload": config_params, + } + # Add configuration parameters to the API parameters + api_params.update(config_params) + + self.log( + "Final API parameters for udpate intent operation: {0}".format(api_params), + "DEBUG", + ) + + # Execute the API call to update the layer 2 feature configuration and return the task ID + return self.get_taskid_post_api_call( + "wired", + "update_configurations_for_an_intended_layer2_feature_on_a_wired_device", + api_params, + ) + + def delete_layer2_feature_configuration(self, network_device_id, feature): + """ + Deletes configurations for an intended layer 2 feature on a wired device. + Args: + device_id (str): Network device ID of the wired device to configure. + feature (str): Name of the layer 2 feature to delete (Example, 'vlan', 'cdp', 'stp'). 
+ Returns: + dict: The response containing the task ID for the delete operation. + """ + self.log( + "Initiating deletion of layer 2 feature '{0}' on device {1}".format( + feature, network_device_id + ), + "INFO", + ) + # Prepare the API parameters + api_params = {"id": network_device_id, "feature": feature} + # Execute the API call to delete the layer 2 feature configuration and return the task ID + return self.get_taskid_post_api_call( + "wired", + "delete_configurations_for_an_intended_layer2_feature_on_a_wired_device", + api_params, + ) + + def deploy_intended_configurations(self, network_device_id): + """ + Deploys the intended configuration features on a wired device. + Args: + device_id (str): Network device ID of the wired device to provision. + Returns: + dict: The response containing the task ID for the deployment operation. + """ + self.log( + "Initiating deployment of intended configurations on device {0}".format( + network_device_id + ), + "INFO", + ) + # Prepare the API parameters + api_params = {"network_device_id": network_device_id} + # Execute the API call to deploy the intended configurations and return the task ID + return self.get_taskid_post_api_call( + "wired", + "deploy_the_intended_configuration_features_on_a_wired_device", + api_params, + ) + + def get_current_configs_for_features(self, network_device_id, features): + """ + Fetch current deployed and intended configs for specified features. + Args: + network_device_id (str): Network device ID to fetch configurations from + features (list): List of API feature names to fetch + Returns: + tuple: (deployed_configs, intended_configs) dictionaries + """ + self.log( + "Starting retrieval of current configurations for {0} features".format( + len(features) + ), + "INFO", + ) + self.log("Device ID: {0}".format(network_device_id), "DEBUG") + self.log("Features to retrieve: {0}".format(features), "DEBUG") + + deployed_configs = {} + intended_configs = {} + + for feature in features: + self.log( + "Retrieving configurations for feature: {0}".format(feature), "DEBUG" + ) + + try: + # Fetch deployed configuration + self.log( + "Fetching deployed configuration for feature: {0}".format(feature), + "DEBUG", + ) + deployed = self.get_deployed_layer2_feature_configuration( + network_device_id, feature + ) + + if deployed: + deployed_configs[feature] = deployed + self.log( + "Successfully retrieved deployed config for {0}".format( + feature + ), + "DEBUG", + ) + self.log( + "Deployed config structure for {0}: {1}".format( + feature, deployed + ), + "DEBUG", + ) + else: + self.log( + "No deployed configuration found for feature: {0}".format( + feature + ), + "DEBUG", + ) + + # Fetch intended configuration + self.log( + "Fetching intended configuration for feature: {0}".format(feature), + "DEBUG", + ) + intended = self.get_intended_layer2_feature_configuration( + network_device_id, feature + ) + + if intended: + intended_configs[feature] = intended + self.log( + "Successfully retrieved intended config for {0}".format( + feature + ), + "DEBUG", + ) + self.log( + "Intended config structure for {0}: {1}".format( + feature, intended + ), + "DEBUG", + ) + else: + self.log( + "No intended configuration found for feature: {0}".format( + feature + ), + "DEBUG", + ) + + except Exception as e: + self.log( + "Error retrieving configurations for feature {0}: {1}".format( + feature, str(e) + ), + "WARNING", + ) + # Continue with other features even if one fails + continue + + self.log( + "Successfully retrieved configurations for {0} 
features".format( + len(features) + ), + "INFO", + ) + self.log( + "Deployed configs retrieved for features: {0}".format( + list(deployed_configs.keys()) + ), + "DEBUG", + ) + self.log( + "Intended configs retrieved for features: {0}".format( + list(intended_configs.keys()) + ), + "DEBUG", + ) + + return deployed_configs, intended_configs + + def extract_layer2_feature_mappings(self, config): + """ + Extract Layer2 feature mappings from user configuration. + Args: + config (dict): User configuration from playbook + Returns: + tuple: (discovered_features_set, feature_mappings_dict) + - discovered_features_set (set): Set of API feature names that need processing + - feature_mappings_dict (dict): Maps user feature names to their API format configs + """ + self.log( + "Starting extraction of Layer2 feature mappings from user configuration", + "INFO", + ) + self.log("Input configuration structure: {0}".format(config), "DEBUG") + + discovered_features = set() + feature_mappings = {} + + # Get layer2_configuration from config + layer2_config = config.get("layer2_configuration", {}) + + if not layer2_config: + self.log( + "No layer2_configuration found in config, skipping feature mapping", + "INFO", + ) + return discovered_features, feature_mappings + + self.log( + "Found layer2_configuration with {0} features".format(len(layer2_config)), + "DEBUG", + ) + self.log( + "Layer2 features to process: {0}".format(list(layer2_config.keys())), + "DEBUG", + ) + + for feature_name, feature_config in layer2_config.items(): + self.log("Processing feature: {0}".format(feature_name), "DEBUG") + self.log("Feature configuration: {0}".format(feature_config), "DEBUG") + + try: + self.log( + "Mapping configuration parameters for feature: {0}".format( + feature_name + ), + "DEBUG", + ) + mapped_config = self.get_mapped_layer2_config_params( + feature_name, feature_config + ) + + if mapped_config: + # Track discovered API features + api_features = list(mapped_config.keys()) + discovered_features.update(api_features) + feature_mappings[feature_name] = mapped_config + + self.log( + "Successfully mapped feature '{0}' to {1} API features: {2}".format( + feature_name, len(api_features), api_features + ), + "INFO", + ) + self.log( + "Mapped configuration for '{0}': {1}".format( + feature_name, mapped_config + ), + "DEBUG", + ) + else: + self.log( + "No mapping returned for feature: {0}".format(feature_name), + "WARNING", + ) + + except Exception as e: + self.log( + "Error mapping feature '{0}': {1}".format(feature_name, str(e)), + "ERROR", + ) + # Continue with other features even if one fails + continue + + self.log("Feature mapping extraction completed successfully", "INFO") + self.log( + "Total discovered API features: {0}".format(len(discovered_features)), + "INFO", + ) + self.log( + "Discovered API feature names: {0}".format(list(discovered_features)), + "DEBUG", + ) + self.log( + "User features mapped: {0}".format(list(feature_mappings.keys())), "DEBUG" + ) + + return discovered_features, feature_mappings + + def _analyze_configuration_differences(self): + """ + Analyzes differences between want and have configurations to determine required operations. 
+ Returns: + dict: Analysis results containing features to process and their operation types + """ + self.log("Starting configuration difference analysis", "INFO") + + # Extract data from want and have states + want_feature_mappings = self.want.get("user_feature_mappings", {}) + deployed_configs = self.have.get("current_deployed_configs", {}) + intended_configs = self.have.get("current_intended_configs", {}) + network_device_id = self.have.get("network_device_id") + + self.log( + "Analyzing {0} user feature mappings".format(len(want_feature_mappings)), + "DEBUG", + ) + + # Initialize analysis results + analysis_results = { + "network_device_id": network_device_id, + "features_to_process": {}, + "summary": { + "total_features": len(want_feature_mappings), + "create_intent_operations": 0, + "update_intent_operations": 0, + "total_api_features": 0, + }, + } + + # Process each user feature mapping + for user_feature_name, user_feature_config in want_feature_mappings.items(): + self.log("Processing user feature: {0}".format(user_feature_name), "DEBUG") + + # Process each API feature within this user feature + for api_feature_name, api_feature_config in user_feature_config.items(): + self.log("Analyzing API feature: {0}".format(api_feature_name), "DEBUG") + + # Determine the operation type for this API feature + feature_operation = self._determine_feature_operation( + api_feature_name, + api_feature_config, + deployed_configs.get(api_feature_name, {}), + intended_configs.get(api_feature_name, {}), + # user_feature_name + ) + + if feature_operation: + analysis_results["features_to_process"][ + api_feature_name + ] = feature_operation + analysis_results["summary"]["total_api_features"] += 1 + + # Count operation types + if feature_operation["intent_operation"] == "create": + analysis_results["summary"]["create_intent_operations"] += 1 + elif feature_operation["intent_operation"] == "update": + analysis_results["summary"]["update_intent_operations"] += 1 + + self.log( + "Configuration analysis completed: {0}".format(analysis_results["summary"]), + "INFO", + ) + return analysis_results + + def _determine_feature_operation( + self, api_feature_name, desired_config, deployed_config, intended_config + ): + """ + Determines the required operation (create/update intent) and final configuration for an API feature. 
+ Args: + api_feature_name (str): Name of the API feature (Example, 'vlanConfig', 'cdpGlobalConfig') + desired_config (dict): Desired configuration from user input + deployed_config (dict): Current deployed configuration from device + intended_config (dict): Current intended configuration from Catalyst Center + Returns: + dict: Operation details including final config and intent operation type + """ + self.log( + "Determining operation for feature: {0}".format(api_feature_name), "DEBUG" + ) + self.log("Desired config: {0}".format(desired_config), "DEBUG") + self.log( + "Deployed config exists: {0}".format( + bool( + deployed_config.get("response", {}) + .get(api_feature_name, {}) + .get("items") + ) + ), + "DEBUG", + ) + self.log( + "Intended config exists: {0}".format( + bool( + intended_config.get("response", {}) + .get(api_feature_name, {}) + .get("items") + ) + ), + "DEBUG", + ) + + # Extract actual configurations from API response format + deployed_feature_config = deployed_config.get("response", {}).get( + api_feature_name, {} + ) + intended_feature_config = intended_config.get("response", {}).get( + api_feature_name, {} + ) + + # Determine configuration operation (create/update config) + config_operation_result = self._determine_config_operation( + api_feature_name, desired_config, deployed_feature_config + ) + + if not config_operation_result: + self.log( + "No configuration changes needed for feature: {0}".format( + api_feature_name + ), + "DEBUG", + ) + return None + + # Determine intent operation (create/update intent) - FIX: Pass all required parameters + intent_operation = self._determine_intent_operation( + intended_config, + api_feature_name, + desired_config, # Pass intended_config (full), not intended_feature_config + ) + + # Prepare final configuration for API call + final_config = self._prepare_final_config( + api_feature_name, + config_operation_result["final_config"], + intended_feature_config, + intent_operation, + ) + + operation_details = { + "api_feature_name": api_feature_name, + "config_operation": config_operation_result[ + "operation" + ], # "create" or "update" + "intent_operation": intent_operation, # "create" or "update" + "final_config": final_config, + "changes_detected": config_operation_result.get("changes_detected", True), + } + + self.log( + "Operation determined for {0}: config_op={1}, intent_op={2}".format( + api_feature_name, + operation_details["config_operation"], + operation_details["intent_operation"], + ), + "INFO", + ) + + return operation_details + + def _determine_config_operation( + self, api_feature_name, desired_config, deployed_config + ): + """ + Determines if configuration needs to be created or updated based on deployed state. 
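+        The feature is dispatched to a type-specific comparison (see below): VLAN
+        features are matched per vlanId, global features are compared as a single
+        item with nested instances handled separately, interface features are
+        matched per interfaceName, and any other feature falls back to index-based
+        comparison of its items.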
+ Args: + api_feature_name (str): Name of the API feature + desired_config (dict): Desired configuration + deployed_config (dict): Current deployed configuration + Returns: + dict: Configuration operation details or None if no changes needed + """ + deployed_items = deployed_config.get("items", []) + desired_items = desired_config.get("items", []) + + self.log( + "Config operation analysis for {0}: deployed_items={1}, desired_items={2}".format( + api_feature_name, len(deployed_items), len(desired_items) + ), + "DEBUG", + ) + + # Handle different feature types based on their specific requirements + if self._is_vlan_feature(api_feature_name): + self.log( + "Processing VLAN feature operation analysis for {0}".format( + api_feature_name + ), + "DEBUG", + ) + return self._determine_vlan_config_operation(desired_items, deployed_items) + elif self._is_global_feature(api_feature_name): + self.log( + "Processing global feature operation analysis for {0}".format( + api_feature_name + ), + "DEBUG", + ) + return self._determine_global_config_operation( + desired_items, deployed_items + ) + elif self._is_interface_feature(api_feature_name): + self.log( + "Processing interface feature operation analysis for {0}".format( + api_feature_name + ), + "DEBUG", + ) + return self._determine_interface_config_operation( + desired_items, deployed_items + ) + else: + # Default handling for other features that don't fit standard categories + self.log( + "Processing default feature operation analysis for {0}".format( + api_feature_name + ), + "DEBUG", + ) + return self._determine_default_config_operation( + desired_items, deployed_items + ) + + def _determine_vlan_config_operation(self, desired_vlans, deployed_vlans): + """ + Determines VLAN configuration operation (supports create/update of individual VLANs). + Args: + desired_vlans (list): List of desired VLAN configurations + deployed_vlans (list): List of currently deployed VLANs + Returns: + dict: VLAN operation details + """ + self.log("Analyzing VLAN configuration operation", "DEBUG") + + # Create lookup for deployed VLANs by ID + deployed_vlan_lookup = {vlan.get("vlanId"): vlan for vlan in deployed_vlans} + + # Determine VLANs that need create vs update + vlans_to_create = [] + vlans_to_update = [] + + for desired_vlan in desired_vlans: + vlan_id = desired_vlan.get("vlanId") + deployed_vlan = deployed_vlan_lookup.get(vlan_id) + + if not deployed_vlan: + # VLAN doesn't exist - needs creation + vlans_to_create.append(desired_vlan) + self.log("VLAN {0} needs creation".format(vlan_id), "DEBUG") + else: + # VLAN exists - check if update is needed + if self._config_needs_update(desired_vlan, deployed_vlan): + # Update existing VLAN with new parameters + updated_vlan = deployed_vlan.copy() + updated_vlan.update( + {k: v for k, v in desired_vlan.items() if k != "configType"} + ) + vlans_to_update.append(updated_vlan) + self.log("VLAN {0} needs update".format(vlan_id), "DEBUG") + + # Combine all VLANs for final configuration + all_vlans = vlans_to_create + vlans_to_update + + if not all_vlans: + return None # No changes needed + + return { + "operation": "create" if vlans_to_create else "update", + "final_config": {"items": all_vlans}, + "changes_detected": True, + "vlans_to_create": len(vlans_to_create), + "vlans_to_update": len(vlans_to_update), + } + + def _determine_global_config_operation(self, desired_items, deployed_items): + """ + Determines global configuration operation with support for nested instances. 
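+        Note: for the IGMP_SNOOPING_GLOBAL, MLD_SNOOPING_GLOBAL and PORTCHANNEL
+        config types the desired item is applied as-is on update, while other
+        global features are deep-merged with the deployed item so that parameters
+        not mentioned by the user are preserved.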
+ Args: + desired_items (list): List of desired configuration items + deployed_items (list): List of currently deployed configuration items + Returns: + dict: Global configuration operation details or None if no changes needed + """ + self.log( + "Analyzing global configuration operation with nested instance support", + "DEBUG", + ) + + if not desired_items: + return None + + desired_item = desired_items[0] # Global configs typically have one item + + if not deployed_items: + # No deployed config - needs creation + self.log("Global config needs creation (no deployed config)", "DEBUG") + return { + "operation": "create", + "final_config": {"items": [desired_item]}, + "changes_detected": True, + } + + deployed_item = deployed_items[0] + + # Check if this is a global feature with nested instances + if self._has_nested_instances(desired_item): + self.log( + "Detected global feature with nested instances, using specialized comparison", + "DEBUG", + ) + needs_update = self._global_config_with_instances_needs_update( + desired_item, deployed_item + ) + else: + # Standard global config comparison + needs_update = self._config_needs_update(desired_item, deployed_item) + + if needs_update: + # Determine merge strategy based on feature type + config_type = desired_item.get("configType", "") + + if config_type in [ + "IGMP_SNOOPING_GLOBAL", + "MLD_SNOOPING_GLOBAL", + "PORTCHANNEL", + ]: + self.log( + "Using desired config as-is for feature type: {0}".format( + config_type + ), + "DEBUG", + ) + updated_item = desired_item + else: + # Standard deep merge for other global features + updated_item = self._deep_merge_config(deployed_item, desired_item) + + self.log("Global config needs update", "DEBUG") + return { + "operation": "update", + "final_config": {"items": [updated_item]}, + "changes_detected": True, + } + + return None + + def _global_config_with_instances_needs_update(self, desired_item, deployed_item): + """ + Determines if a global configuration with nested instances needs updating. + This method compares global parameters and nested instances separately. 
+ Args: + desired_item (dict): Desired configuration item + deployed_item (dict): Currently deployed configuration item + Returns: + bool: True if update is needed, False otherwise + """ + self.log("Comparing global configuration with nested instances", "DEBUG") + + # Get nested instance containers for different feature types + nested_instance_keys = { + "STP_GLOBAL": ["stpInstances"], + "IGMP_SNOOPING_GLOBAL": ["igmpSnoopingVlanSettings"], + "MLD_SNOOPING_GLOBAL": ["mldSnoopingVlanSettings"], + "PORTCHANNEL": ["portchannels"], + } + + config_type = desired_item.get("configType", "") + instance_keys = nested_instance_keys.get(config_type, []) + + # First, compare global parameters (excluding nested instances) + for key, desired_value in desired_item.items(): + if key in ["configType"] + instance_keys: + continue # Skip configType and nested instance keys + + current_value = deployed_item.get(key) + if desired_value != current_value: + self.log( + "Global parameter '{0}' differs: desired='{1}', current='{2}'".format( + key, desired_value, current_value + ), + "DEBUG", + ) + return True + + # Then, compare nested instances if present + for instance_key in instance_keys: + if instance_key in desired_item: + desired_instances = desired_item[instance_key].get("items", []) + current_instances = deployed_item.get(instance_key, {}).get("items", []) + + if self._nested_instances_need_update( + desired_instances, current_instances, instance_key + ): + self.log( + "Nested instances '{0}' differ - update needed".format( + instance_key + ), + "DEBUG", + ) + return True + + self.log( + "Global configuration with instances matches - no update needed", "DEBUG" + ) + return False + + def _deep_merge_config(self, base_config, new_config): + """ + Performs deep merge of configuration objects, preserving nested structures. + Only updates values when they actually differ from the current values. 
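+        Illustrative example (hypothetical keys and values):
+            base_config = {"timers": {"helloTime": 2, "maxAge": 20}, "mode": "rapid-pvst"}
+            new_config  = {"timers": {"maxAge": 30}}
+            merged      = {"timers": {"helloTime": 2, "maxAge": 30}, "mode": "rapid-pvst"}
+        Only "maxAge" is counted as a change; keys named "configType" are skipped
+        during the merge.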
+ Args: + base_config (dict): Base configuration (existing deployed/intended) + new_config (dict): New configuration with updates + Returns: + dict: Merged configuration + """ + self.log("Starting deep merge of configuration objects", "DEBUG") + self.log( + "Base configuration keys: {0}".format(list(base_config.keys())), "DEBUG" + ) + self.log("New configuration keys: {0}".format(list(new_config.keys())), "DEBUG") + + # Create a copy of the base configuration to avoid modifying the original + merged = base_config.copy() + self.log("Created copy of base configuration for merging", "DEBUG") + + # Track if any actual changes were made + changes_made = 0 + + # Iterate through each key-value pair in the new configuration + for key, value in new_config.items(): + self.log("Processing configuration key: {0}".format(key), "DEBUG") + + # Skip configType as it should not be merged + if key == "configType": + self.log("Skipping configType key during merge", "DEBUG") + continue + + # Check if this key exists in merged config and both values are dictionaries + if ( + key in merged + and isinstance(merged[key], dict) + and isinstance(value, dict) + ): + self.log( + "Found nested dictionary for key '{0}', performing recursive merge".format( + key + ), + "DEBUG", + ) + # Recursively merge nested dictionaries + original_nested = merged[key] + merged_nested = self._deep_merge_config(merged[key], value) + + # Only update if the recursive merge actually changed something + if original_nested != merged_nested: + merged[key] = merged_nested + changes_made += 1 + self.log( + "Updated nested dictionary for key '{0}' due to detected changes".format( + key + ), + "DEBUG", + ) + else: + self.log( + "No changes detected in nested dictionary for key '{0}' - keeping original".format( + key + ), + "DEBUG", + ) + else: + # For non-dict values or new keys, check if value actually differs + if key in merged: + # Key exists - check if values are different + current_value = merged[key] + if current_value != value: + merged[key] = value + changes_made += 1 + self.log( + "Updated parameter '{0}': changed from '{1}' to '{2}' (values differ)".format( + key, current_value, value + ), + "DEBUG", + ) + else: + self.log( + "Parameter '{0}' already has desired value '{1}' - no update needed".format( + key, value + ), + "DEBUG", + ) + else: + # New key - add it + merged[key] = value + changes_made += 1 + self.log( + "Added new key '{0}' to merged configuration with value '{1}'".format( + key, value + ), + "DEBUG", + ) + + self.log("Deep merge completed successfully", "DEBUG") + self.log("Total configuration changes made: {0}".format(changes_made), "DEBUG") + self.log("Merged configuration contains {0} keys".format(len(merged)), "DEBUG") + + return merged + + def _has_nested_instances(self, config_item): + """ + Enhanced check for configuration items with nested instances that need special handling. + Now includes deeper nested structures like memberPorts and mrouters. 
+ Args: + config_item (dict): Configuration item to check + Returns: + bool: True if the item has nested instances, False otherwise + """ + # Define the nested instance containers for each feature type + nested_instance_keys = { + "STP_GLOBAL": ["stpInstances"], + "IGMP_SNOOPING_GLOBAL": ["igmpSnoopingVlanSettings"], + "MLD_SNOOPING_GLOBAL": ["mldSnoopingVlanSettings"], + "PORTCHANNEL": ["portchannels"], + } + + config_type = config_item.get("configType", "") + instance_keys = nested_instance_keys.get(config_type, []) + + # Check if any of the nested instance keys exist in the config + for key in instance_keys: + if ( + key in config_item + and isinstance(config_item[key], dict) + and config_item[key].get("items") + ): + self.log( + "Found nested instances in key '{0}' for config type '{1}'".format( + key, config_type + ), + "DEBUG", + ) + return True + + return False + + def _nested_instances_need_update( + self, desired_instances, current_instances, instance_type + ): + """ + Enhanced comparison of nested instances with support for deeply nested structures. + Returns True at the first mismatch found. + Args: + desired_instances (list): List of desired instance configurations + current_instances (list): List of current instance configurations + instance_type (str): Type of instances being compared + Returns: + bool: True if update is needed, False otherwise + """ + self.log( + "Comparing {0} nested instances: desired={1}, current={2}".format( + instance_type, len(desired_instances), len(current_instances) + ), + "DEBUG", + ) + + # Use different comparison strategies based on instance type + if instance_type in [ + "stpInstances", + "igmpSnoopingVlanSettings", + "mldSnoopingVlanSettings", + ]: + return self._compare_vlan_based_instances( + desired_instances, current_instances, instance_type + ) + elif instance_type == "portchannels": + return self._compare_portchannel_instances( + desired_instances, current_instances + ) + else: + # Fallback to generic comparison + return self._compare_generic_instances(desired_instances, current_instances) + + def _deep_compare_instances(self, desired_instance, current_instance): + """ + Performs deep comparison of two instances, returning True if they differ. 
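+        Illustrative example (hypothetical keys and values):
+            desired_instance = {"vlanId": 10, "igmpSnoopingEnabled": True}
+            current_instance = {"vlanId": 10, "igmpSnoopingEnabled": False}
+        returns True because "igmpSnoopingEnabled" differs; the "configType" key,
+        if present, is never compared.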
+ Args: + desired_instance (dict): Desired instance configuration + current_instance (dict): Current instance configuration + Returns: + bool: True if instances differ, False if they match + """ + self.log("Performing deep instance comparison", "DEBUG") + + # Compare all parameters except configType + for key, desired_value in desired_instance.items(): + if key == "configType": + continue + + current_value = current_instance.get(key) + + if isinstance(desired_value, dict) and isinstance(current_value, dict): + # Recursive comparison for nested dictionaries + if self._deep_compare_nested_dict(desired_value, current_value): + self.log("Nested dictionary '{0}' differs".format(key), "DEBUG") + return True + elif isinstance(desired_value, list) and isinstance(current_value, list): + # Comparison for nested lists + if self._deep_compare_nested_list(desired_value, current_value): + self.log("Nested list '{0}' differs".format(key), "DEBUG") + return True + else: + # Direct comparison for simple values + if desired_value != current_value: + self.log( + "Parameter '{0}' differs: desired='{1}', current='{2}'".format( + key, desired_value, current_value + ), + "DEBUG", + ) + return True + + return False + + def _compare_portchannel_instances(self, desired_instances, current_instances): + """ + Enhanced comparison of port channel instances by name with support for memberPorts. + Args: + desired_instances (list): Desired port channel instances + current_instances (list): Current port channel instances + Returns: + bool: True if update is needed, False otherwise + """ + self.log( + "Performing enhanced port channel instance comparison with memberPorts support", + "DEBUG", + ) + + # Create lookup map for current instances by name + current_by_name = {} + for instance in current_instances: + name = instance.get("name") + if name: + current_by_name[name] = instance + + # Check each desired instance + for desired_instance in desired_instances: + name = desired_instance.get("name") + if not name: + continue + + current_instance = current_by_name.get(name) + + if not current_instance: + self.log( + "Port channel '{0}' not found in current instances - update needed".format( + name + ), + "DEBUG", + ) + return True + + # Enhanced comparison that handles memberPorts specifically + if self._deep_compare_portchannel_instance( + desired_instance, current_instance + ): + self.log( + "Port channel '{0}' instance differs - update needed".format(name), + "DEBUG", + ) + return True + + return False + + def _deep_compare_portchannel_instance(self, desired_instance, current_instance): + """ + Performs deep comparison of port channel instances with special handling for memberPorts. 
+ Args: + desired_instance (dict): Desired port channel configuration + current_instance (dict): Current port channel configuration + Returns: + bool: True if instances differ, False if they match + """ + self.log( + "Performing deep port channel instance comparison with memberPorts support", + "DEBUG", + ) + + # Compare all parameters except configType and memberPorts + for key, desired_value in desired_instance.items(): + if key in ["configType", "memberPorts"]: + continue + + current_value = current_instance.get(key) + if desired_value != current_value: + self.log( + "Port channel parameter '{0}' differs: desired='{1}', current='{2}'".format( + key, desired_value, current_value + ), + "DEBUG", + ) + return True + + # Special handling for memberPorts + if "memberPorts" in desired_instance: + desired_member_ports = desired_instance["memberPorts"].get("items", []) + current_member_ports = current_instance.get("memberPorts", {}).get( + "items", [] + ) + + if self._compare_member_ports(desired_member_ports, current_member_ports): + self.log("Port channel memberPorts differ", "DEBUG") + return True + + return False + + def _compare_member_ports(self, desired_members, current_members): + """ + Compares member ports within a port channel by interfaceName. + Args: + desired_members (list): Desired member port configurations + current_members (list): Current member port configurations + Returns: + bool: True if member ports differ, False if they match + """ + self.log( + "Comparing member ports: desired={0}, current={1}".format( + len(desired_members), len(current_members) + ), + "DEBUG", + ) + + # Create lookup map for current members by interfaceName + current_by_interface = {} + for member in current_members: + interface_name = member.get("interfaceName") + if interface_name: + current_by_interface[interface_name] = member + + # Check each desired member + for desired_member in desired_members: + interface_name = desired_member.get("interfaceName") + if not interface_name: + continue + + current_member = current_by_interface.get(interface_name) + + if not current_member: + self.log( + "Member port '{0}' not found in current members - update needed".format( + interface_name + ), + "DEBUG", + ) + return True + + # Compare member port parameters + if self._deep_compare_instances(desired_member, current_member): + self.log( + "Member port '{0}' differs - update needed".format(interface_name), + "DEBUG", + ) + return True + + return False + + def _compare_vlan_based_instances( + self, desired_instances, current_instances, instance_type + ): + """ + Enhanced comparison of VLAN-based instances with support for nested mrouters. 
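+        Behaviour sketch (hypothetical data): if the desired items contain an
+        instance for vlanId 20 and no current instance carries vlanId 20, True is
+        returned immediately; otherwise each matched pair is deep-compared with
+        type-specific handling (STP timers, IGMP/MLD mrouter ports).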
+ Args: + desired_instances (list): Desired VLAN-based instances + current_instances (list): Current VLAN-based instances + instance_type (str): Type of instances (stpInstances, igmpSnoopingVlanSettings, mldSnoopingVlanSettings) + Returns: + bool: True if update is needed, False otherwise + """ + self.log( + "Performing enhanced VLAN-based instance comparison for {0}".format( + instance_type + ), + "DEBUG", + ) + + # Create lookup map for current instances by VLAN ID + current_by_vlan = {} + for instance in current_instances: + vlan_id = instance.get("vlanId") + if vlan_id is not None: + current_by_vlan[vlan_id] = instance + + # Check each desired instance + for desired_instance in desired_instances: + vlan_id = desired_instance.get("vlanId") + if vlan_id is None: + continue + + current_instance = current_by_vlan.get(vlan_id) + + if not current_instance: + self.log( + "VLAN {0} not found in current instances - update needed".format( + vlan_id + ), + "DEBUG", + ) + return True + + # Enhanced comparison based on instance type + if instance_type == "stpInstances": + if self._deep_compare_stp_instance(desired_instance, current_instance): + self.log( + "STP VLAN {0} instance differs - update needed".format(vlan_id), + "DEBUG", + ) + return True + elif instance_type in [ + "igmpSnoopingVlanSettings", + "mldSnoopingVlanSettings", + ]: + if self._deep_compare_snooping_vlan_instance( + desired_instance, current_instance, instance_type + ): + self.log( + "{0} VLAN {1} instance differs - update needed".format( + instance_type, vlan_id + ), + "DEBUG", + ) + return True + else: + # Fallback to standard deep comparison + if self._deep_compare_instances(desired_instance, current_instance): + self.log( + "VLAN {0} instance differs - update needed".format(vlan_id), + "DEBUG", + ) + return True + + return False + + def _deep_compare_stp_instance(self, desired_instance, current_instance): + """ + Performs deep comparison of STP instances with special handling for timers. + Args: + desired_instance (dict): Desired STP instance configuration + current_instance (dict): Current STP instance configuration + Returns: + bool: True if instances differ, False if they match + """ + self.log("Performing deep STP instance comparison with timers support", "DEBUG") + + # Compare all parameters except configType and timers + for key, desired_value in desired_instance.items(): + if key in ["configType", "timers"]: + continue + + current_value = current_instance.get(key) + if desired_value != current_value: + self.log( + "STP parameter '{0}' differs: desired='{1}', current='{2}'".format( + key, desired_value, current_value + ), + "DEBUG", + ) + return True + + # Special handling for timers + if "timers" in desired_instance: + desired_timers = desired_instance["timers"] + current_timers = current_instance.get("timers", {}) + + if self._deep_compare_nested_dict(desired_timers, current_timers): + self.log("STP timers differ", "DEBUG") + return True + + return False + + def _deep_compare_snooping_vlan_instance( + self, desired_instance, current_instance, instance_type + ): + """ + Performs deep comparison of IGMP/MLD snooping VLAN instances with special handling for mrouters. 
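+        The mrouter container key is derived from the instance type
+        ("igmpSnoopingVlanMrouters" for IGMP, "mldSnoopingVlanMrouters" for MLD);
+        all other parameters are compared directly, and mrouter ports are matched
+        by interfaceName.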
+ Args: + desired_instance (dict): Desired snooping VLAN instance configuration + current_instance (dict): Current snooping VLAN instance configuration + instance_type (str): Type of snooping (igmpSnoopingVlanSettings or mldSnoopingVlanSettings) + Returns: + bool: True if instances differ, False if they match + """ + self.log( + "Performing deep {0} instance comparison with mrouters support".format( + instance_type + ), + "DEBUG", + ) + + # Determine the mrouter key based on instance type + mrouter_key = ( + "igmpSnoopingVlanMrouters" + if "igmp" in instance_type.lower() + else "mldSnoopingVlanMrouters" + ) + + # Compare all parameters except configType and mrouters + for key, desired_value in desired_instance.items(): + if key in ["configType", mrouter_key]: + continue + + current_value = current_instance.get(key) + if desired_value != current_value: + self.log( + "Snooping parameter '{0}' differs: desired='{1}', current='{2}'".format( + key, desired_value, current_value + ), + "DEBUG", + ) + return True + + # Special handling for mrouters + if mrouter_key in desired_instance: + desired_mrouters = desired_instance[mrouter_key].get("items", []) + current_mrouters = current_instance.get(mrouter_key, {}).get("items", []) + + if self._compare_mrouter_ports(desired_mrouters, current_mrouters): + self.log("Snooping mrouters differ", "DEBUG") + return True + + return False + + def _compare_mrouter_ports(self, desired_mrouters, current_mrouters): + """ + Compares mrouter port configurations by interfaceName. + Args: + desired_mrouters (list): Desired mrouter port configurations + current_mrouters (list): Current mrouter port configurations + Returns: + bool: True if mrouter ports differ, False if they match + """ + self.log( + "Comparing mrouter ports: desired={0}, current={1}".format( + len(desired_mrouters), len(current_mrouters) + ), + "DEBUG", + ) + + # Create lookup map for current mrouters by interfaceName + current_by_interface = {} + for mrouter in current_mrouters: + interface_name = mrouter.get("interfaceName") + if interface_name: + current_by_interface[interface_name] = mrouter + + # Check each desired mrouter + for desired_mrouter in desired_mrouters: + interface_name = desired_mrouter.get("interfaceName") + if not interface_name: + continue + + current_mrouter = current_by_interface.get(interface_name) + + if not current_mrouter: + self.log( + "Mrouter port '{0}' not found in current mrouters - update needed".format( + interface_name + ), + "DEBUG", + ) + return True + + # Compare mrouter parameters + if self._deep_compare_instances(desired_mrouter, current_mrouter): + self.log( + "Mrouter port '{0}' differs - update needed".format(interface_name), + "DEBUG", + ) + return True + + return False + + def _deep_compare_nested_list(self, desired_list, current_list): + """ + Enhanced comparison of nested lists with improved identifier detection. 
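+        Identifier priority when the list items are dictionaries: "interfaceName",
+        then "vlanId", then "name"; lists whose items carry none of these fall back
+        to index-by-index comparison, and lists of different lengths always count
+        as differing.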
+ Args: + desired_list (list): Desired nested list + current_list (list): Current nested list + Returns: + bool: True if lists differ, False if they match + """ + if len(desired_list) != len(current_list): + return True + + # For lists containing dictionaries with identifiers, try to match by identifier + if desired_list and isinstance(desired_list[0], dict): + # Check for various identifier types in priority order + if "interfaceName" in desired_list[0]: + return self._compare_interface_based_list(desired_list, current_list) + elif "vlanId" in desired_list[0]: + return self._compare_vlan_based_list(desired_list, current_list) + elif "name" in desired_list[0]: + return self._compare_name_based_list(desired_list, current_list) + else: + # Fallback to index-based comparison + return self._compare_index_based_list(desired_list, current_list) + else: + # Simple list comparison for non-dictionary items + for i, desired_item in enumerate(desired_list): + if i >= len(current_list) or desired_item != current_list[i]: + return True + + return False + + def _compare_name_based_list(self, desired_list, current_list): + """ + Compares lists of items that have name as identifier (Example, port channels). + Args: + desired_list (list): Desired list with name-based items + current_list (list): Current list with name-based items + Returns: + bool: True if lists differ, False if they match + """ + current_by_name = { + item.get("name"): item for item in current_list if item.get("name") + } + + for desired_item in desired_list: + name = desired_item.get("name") + if not name: + continue + + current_item = current_by_name.get(name) + if not current_item: + return True + + if self._deep_compare_instances(desired_item, current_item): + return True + + return False + + def _determine_interface_config_operation(self, desired_items, deployed_items): + """ + Determines interface configuration operation. 
+ Args: + desired_items (list): Desired interface configurations + deployed_items (list): Currently deployed interface configurations + Returns: + dict: Interface config operation details + """ + self.log("Analyzing interface configuration operation", "DEBUG") + + if not desired_items: + return None + + # Create lookup for deployed interfaces by name + deployed_interface_lookup = { + item.get("interfaceName"): item for item in deployed_items + } + + interfaces_to_process = [] + operation_type = "create" + + for desired_interface in desired_items: + interface_name = desired_interface.get("interfaceName") + deployed_interface = deployed_interface_lookup.get(interface_name) + + if not deployed_interface: + # Interface config doesn't exist - needs creation + interfaces_to_process.append(desired_interface) + self.log( + "Interface {0} config needs creation".format(interface_name), + "DEBUG", + ) + else: + # Interface config exists - check if update is needed + if self._config_needs_update(desired_interface, deployed_interface): + # Update existing interface with new parameters + updated_interface = deployed_interface.copy() + updated_interface.update( + { + k: v + for k, v in desired_interface.items() + if k not in ["configType", "interfaceName"] + } + ) + interfaces_to_process.append(updated_interface) + operation_type = "update" + self.log( + "Interface {0} config needs update".format(interface_name), + "DEBUG", + ) + + if not interfaces_to_process: + return None # No changes needed + + return { + "operation": operation_type, + "final_config": {"items": interfaces_to_process}, + "changes_detected": True, + } + + def _determine_default_config_operation(self, desired_items, deployed_items): + """ + Default configuration operation determination for other feature types. 
+ Args: + desired_items (list): Desired configuration items + deployed_items (list): Currently deployed items + Returns: + dict: Default config operation details + """ + self.log("Analyzing default configuration operation", "DEBUG") + + if not desired_items: + return None + + if not deployed_items: + # No deployed config - needs creation + self.log( + "No deployed configuration found - operation needs creation", "DEBUG" + ) + return { + "operation": "create", + "final_config": {"items": desired_items}, + "changes_detected": True, + } + + # Check if any changes are needed + changes_needed = False + updated_items = [] + + # Compare each desired item with corresponding deployed item + for i, desired_item in enumerate(desired_items): + if i < len(deployed_items): + deployed_item = deployed_items[i] + self.log( + "Comparing desired and deployed configurations for item at index {0}".format(i), + "DEBUG", + ) + + if self._config_needs_update(desired_item, deployed_item): + # Update existing item with new parameters + updated_item = deployed_item.copy() + updated_item.update( + {k: v for k, v in desired_item.items() if k != "configType"} + ) + updated_items.append(updated_item) + changes_needed = True + self.log( + "Item {0} requires update - parameters differ from deployed state".format( + i + ), + "DEBUG", + ) + else: + # Keep existing item as no changes needed + updated_items.append(deployed_item) + self.log( + "Item {0} matches deployed state - no changes needed".format(i), + "DEBUG", + ) + else: + # Add new item that doesn't exist in deployed config + updated_items.append(desired_item) + changes_needed = True + self.log( + "Item {0} is new - will be added to configuration".format(i), + "DEBUG", + ) + + if not changes_needed: + self.log( + "No configuration changes detected - operation not needed", "DEBUG" + ) + return None + + self.log("Configuration changes detected - update operation required", "DEBUG") + return { + "operation": "update", + "final_config": {"items": updated_items}, + "changes_detected": True, + } + + def _determine_intent_operation( + self, intended_config, api_feature_name=None, desired_config=None + ): + """ + Determines whether to create or update intent configuration. 
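+        Decision rule as implemented below: "create" is returned only when the
+        intended configuration holds no items for the feature; in every other case,
+        including interfaces missing from the existing intended config, the intent
+        is updated rather than re-created.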
+ Args: + intended_config (dict): Current intended configuration + api_feature_name (str): Name of the API feature + desired_config (dict): Desired configuration + Returns: + str: "create" or "update" + """ + if not intended_config or not intended_config.get("response", {}).get( + api_feature_name, {} + ).get("items"): + self.log("No intended config exists - intent needs creation", "DEBUG") + return "create" + + # For interface-based features, check if specific interfaces exist + if api_feature_name and desired_config and "items" in desired_config: + existing_intended = ( + intended_config.get("response", {}) + .get(api_feature_name, {}) + .get("items", []) + ) + desired_interfaces = [ + item.get("interfaceName") + for item in desired_config["items"] + if item.get("interfaceName") + ] + existing_interfaces = [ + item.get("interfaceName") + for item in existing_intended + if item.get("interfaceName") + ] + + # If any desired interface doesn't exist in intended config, we need update (not create) + if any( + interface not in existing_interfaces for interface in desired_interfaces + ): + self.log( + "Some interfaces not in intended config - intent needs update", + "DEBUG", + ) + return "update" + + self.log("Intended config exists - intent needs update", "DEBUG") + return "update" + + # def _determine_intent_operation(self, intended_config): + # """ + # Determines if intent needs to be created or updated. + + # Args: + # intended_config (dict): Current intended configuration + + # Returns: + # str: "create" or "update" + # """ + # intended_items = intended_config.get("items", []) + + # if not intended_items: + # self.log("No intended config exists - intent needs creation", "DEBUG") + # return "create" + # else: + # self.log("Intended config exists - intent needs update", "DEBUG") + # return "update" + + def _is_interface_feature(self, api_feature_name): + """ + Determines if the given API feature is an interface-specific feature. + Args: + api_feature_name (str): Name of the API feature + Returns: + bool: True if it's an interface feature, False otherwise + """ + self.log( + "Checking if API feature '{0}' is an interface-specific feature".format( + api_feature_name + ), + "DEBUG", + ) + + # Define the list of interface-specific features that configure individual interfaces + interface_features = [ + "switchportInterfaceConfig", + "vlanTrunkingInterfaceConfig", + "dot1xInterfaceConfig", + "mabInterfaceConfig", + "stpInterfaceConfig", + "dhcpSnoopingInterfaceConfig", + "cdpInterfaceConfig", + "lldpInterfaceConfig", + "vtpInterfaceConfig", + ] + + # Check if the feature is in the interface features list + is_interface = api_feature_name in interface_features + + self.log( + "Feature '{0}' classification result: {1}".format( + api_feature_name, + "interface feature" if is_interface else "not an interface feature", + ), + "DEBUG", + ) + + return is_interface + + def _prepare_final_config( + self, + api_feature_name, + config_to_apply, + intended_config, + intent_operation, + deployed_config=None, + ): + """ + Prepares the final configuration for API calls, merging with existing intended config when needed. 
+ Args: + api_feature_name (str): Name of the API feature + config_to_apply (dict): Configuration that needs to be applied + intended_config (dict): Current intended configuration + intent_operation (str): "create" or "update" + deployed_config (dict, optional): Current deployed configuration + Returns: + dict: Final configuration ready for API call + """ + self.log("Preparing final config for {0}".format(api_feature_name), "DEBUG") + + if intent_operation == "create": + # For create operations, use config as-is + final_config = config_to_apply.copy() + self.log("Using create operation - applying configuration as-is", "DEBUG") + else: + # For update operations, merge with existing intended config + self.log( + "Using update operation - merging with existing intended configuration", + "DEBUG", + ) + + # Special handling for IGMP and MLD snooping which have nested VLAN configurations + if api_feature_name == "igmpSnoopingGlobalConfig": + self.log( + "Merging IGMP snooping configuration with special VLAN handling", + "DEBUG", + ) + final_config = self._merge_igmp_snooping_config( + config_to_apply, intended_config + ) + elif api_feature_name == "mldSnoopingGlobalConfig": + self.log( + "Merging MLD snooping configuration with special VLAN handling", + "DEBUG", + ) + final_config = self._merge_mld_snooping_config( + config_to_apply, intended_config + ) + elif api_feature_name == "portchannelConfig": + self.log( + "Merging port channel configuration with special handling", "DEBUG" + ) + final_config = self._merge_port_channel_config( + config_to_apply, intended_config + ) + elif self._is_interface_feature(api_feature_name): + self.log("Merging interface feature configuration", "DEBUG") + final_config = self._merge_interface_configs( + api_feature_name, config_to_apply, intended_config + ) + elif self._is_vlan_feature(api_feature_name): + self.log("Merging VLAN feature configuration", "DEBUG") + final_config = self._merge_vlan_configs( + api_feature_name, config_to_apply, intended_config + ) + elif api_feature_name == "stpGlobalConfig": + # Special handling for STP to merge instances properly + self.log( + "Merging STP global configuration with special instance handling", + "DEBUG", + ) + final_config = self._merge_stp_global_config( + config_to_apply, intended_config + ) + else: + # For global features, replace the configuration + self.log("Replacing global feature configuration", "DEBUG") + final_config = config_to_apply.copy() + + self.log( + "Final config prepared for {0}: {1}".format(api_feature_name, final_config), + "DEBUG", + ) + + # Prepare the full API payload structure + final_merged_config = {api_feature_name: final_config} + self.log( + "Final merged config for {0}: {1}".format( + api_feature_name, final_merged_config + ), + "DEBUG", + ) + + return final_merged_config + + def _merge_stp_global_config(self, new_config, existing_intended): + """ + Merges new STP global configuration with existing intended STP configuration, with special handling for STP instances. 
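+        Global parameters from the new item overwrite the matching keys of the
+        existing intended item, while "stpInstances" is merged separately via
+        _merge_stp_instances so that previously intended VLAN instances are kept.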
+ Args: + new_config (dict): New STP configuration to apply + existing_intended (dict): Current intended STP configuration + Returns: + dict: Merged STP configuration + """ + self.log( + "Merging STP global configuration with special instance handling", "DEBUG" + ) + + # Get existing intended items + existing_items = existing_intended.get("items", []) + new_items = new_config.get("items", []) + + if not existing_items: + # No existing config, use new config as-is + self.log( + "No existing STP configuration found, using new configuration as-is", + "DEBUG", + ) + return new_config + + if not new_items: + # No new config, return existing + self.log( + "No new STP configuration provided, returning existing configuration", + "DEBUG", + ) + return {"items": existing_items} + + # Take the first item (STP global configs typically have one item) + existing_item = existing_items[0].copy() + new_item = new_items[0] + + self.log( + "Processing STP global configuration merge for single configuration item", + "DEBUG", + ) + + # Update global parameters (everything except stpInstances) + for key, value in new_item.items(): + if key != "stpInstances": + existing_item[key] = value + self.log( + "Updated STP global parameter '{0}' with value: {1}".format( + key, value + ), + "DEBUG", + ) + + # Handle STP instances merging specially + if "stpInstances" in new_item: + self.log("Processing STP instances for special merge handling", "DEBUG") + + existing_instances = existing_item.get("stpInstances", {}) + new_instances = new_item["stpInstances"] + + merged_instances = self._merge_stp_instances( + existing_instances, new_instances + ) + existing_item["stpInstances"] = merged_instances + + self.log( + "Merged STP instances: {0} total instances".format( + len(merged_instances.get("items", [])) + ), + "DEBUG", + ) + else: + self.log("No STP instances found in new configuration to merge", "DEBUG") + + self.log("STP global configuration merge completed successfully", "DEBUG") + + return {"items": [existing_item]} + + def _merge_stp_instances(self, current_instances, desired_instances): + """ + Merge STP instances - add new instances while preserving existing ones. 
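+        Illustrative example (hypothetical VLAN IDs and keys):
+            current items : [{"vlanId": 1, "priority": 32768}]
+            desired items : [{"vlanId": 1, "priority": 4096}, {"vlanId": 10, "priority": 8192}]
+            merged items  : [{"vlanId": 1, "priority": 4096}, {"vlanId": 10, "priority": 8192}]
+        Existing instances are updated in place, new VLAN IDs are appended, and the
+        result is sorted by vlanId.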
+ Args: + current_instances (dict): Current STP instances configuration + desired_instances (dict): Desired STP instances configuration to merge + Returns: + dict: Merged STP instances configuration with combined items + """ + try: + self.log("Starting merge of STP instances", "DEBUG") + + # Initialize the merged structure + merged_instances = {"configType": "LIST", "items": []} + + # Get current instances + current_items = ( + current_instances.get("items", []) if current_instances else [] + ) + desired_items = ( + desired_instances.get("items", []) if desired_instances else [] + ) + + self.log("Current instances count: {0}".format(len(current_items)), "DEBUG") + self.log("Desired instances count: {0}".format(len(desired_items)), "DEBUG") + + # Create a dict of current instances by VLAN ID for easy lookup + current_by_vlan = {} + for item in current_items: + vlan_id = item.get("vlanId") + if vlan_id: + current_by_vlan[vlan_id] = item + self.log( + "Added current instance for VLAN {0} to lookup".format(vlan_id), + "DEBUG", + ) + + # Start with all current instances + merged_by_vlan = current_by_vlan.copy() + self.log( + "Initialized merged instances with {0} current instances".format( + len(merged_by_vlan) + ), + "DEBUG", + ) + + # Add or update with desired instances + for desired_item in desired_items: + vlan_id = desired_item.get("vlanId") + if vlan_id: + if vlan_id in merged_by_vlan: + # Update existing instance + merged_by_vlan[vlan_id].update(desired_item) + self.log( + "Updated STP instance for VLAN {0}".format(vlan_id), "DEBUG" + ) + else: + # Add new instance + merged_by_vlan[vlan_id] = desired_item + self.log( + "Added new STP instance for VLAN {0}".format(vlan_id), + "DEBUG", + ) + + # Convert back to list and sort by VLAN ID for consistency + merged_instances["items"] = sorted( + merged_by_vlan.values(), key=lambda x: x.get("vlanId", 0) + ) + + self.log( + "Merged STP instances: {0} total instances".format( + len(merged_instances["items"]) + ), + "DEBUG", + ) + self.log("STP instances merge completed successfully", "DEBUG") + + return merged_instances + + except Exception as e: + self.log("Error merging STP instances: {0}".format(str(e)), "ERROR") + self.log("Returning desired instances as fallback", "DEBUG") + return desired_instances # Fallback to desired instances + + def _merge_interface_configs(self, api_feature_name, new_config, existing_intended): + """ + Merges new interface configuration with existing intended configuration. 
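+        Illustrative example (hypothetical interface names): if the intended config
+        already holds entries for "GigabitEthernet1/0/1" and "GigabitEthernet1/0/2"
+        and the new config carries only "GigabitEthernet1/0/2", the merge keeps
+        1/0/1 untouched and replaces 1/0/2 with the new entry, so intent for
+        interfaces not named in the playbook is not lost.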
+ Args: + api_feature_name (str): Name of the API feature + new_config (dict): New configuration to apply + existing_intended (dict): Current intended configuration + Returns: + dict: Merged configuration + """ + self.log("Merging interface configs for {0}".format(api_feature_name), "DEBUG") + self.log("New config to merge: {0}".format(new_config), "DEBUG") + self.log("Existing intended config: {0}".format(existing_intended), "DEBUG") + + # Start with existing intended configuration + existing_items = existing_intended.get("items", []) + new_items = new_config.get("items", []) + + # Create a lookup of existing items by interface name + existing_items_by_interface = {} + for item in existing_items: + interface_name = item.get("interfaceName") + if interface_name: + existing_items_by_interface[interface_name] = item + + self.log( + "Found {0} existing interfaces in intended config".format( + len(existing_items_by_interface) + ), + "DEBUG", + ) + + # Process new items + for new_item in new_items: + interface_name = new_item.get("interfaceName") + if interface_name: + # Replace existing config for this interface with new config + existing_items_by_interface[interface_name] = new_item + self.log( + "Updated/added config for interface: {0}".format(interface_name), + "DEBUG", + ) + + # Convert back to list format + merged_items = list(existing_items_by_interface.values()) + + merged_config = {"items": merged_items} + + self.log( + "Merged config contains {0} total interfaces".format(len(merged_items)), + "DEBUG", + ) + self.log("Final merged interface config: {0}".format(merged_config), "DEBUG") + + return merged_config + + def _merge_vlan_configs(self, api_feature_name, new_config, existing_intended): + """ + Merges new VLAN configuration with existing intended VLANs. + Args: + api_feature_name (str): API feature name + new_config (dict): New VLAN configuration + existing_intended (dict): Existing intended configuration (not list) + Returns: + dict: Merged VLAN configuration + """ + new_vlans = new_config.get("items", []) + + # Create lookup for new VLANs by ID + new_vlan_lookup = {vlan.get("vlanId"): vlan for vlan in new_vlans} + + # Extract existing intended VLAN items - FIX: Handle the nested structure + existing_items = existing_intended.get("items", []) + + # Start with existing intended VLANs + merged_vlans = [] + for existing_vlan in existing_items: + vlan_id = existing_vlan.get("vlanId") + if vlan_id in new_vlan_lookup: + # Use the new configuration for this VLAN + merged_vlans.append(new_vlan_lookup[vlan_id]) + del new_vlan_lookup[vlan_id] # Remove from lookup to avoid duplicates + else: + # Keep the existing VLAN + merged_vlans.append(existing_vlan) + + # Add any remaining new VLANs + merged_vlans.extend(new_vlan_lookup.values()) + + return {"items": merged_vlans} + + # def _merge_vlan_configs(self, api_feature_name, new_config, existing_intended): + # """ + # Merges new VLAN configuration with existing intended VLANs. 
+ + # Args: + # api_feature_name (str): API feature name + # new_config (dict): New VLAN configuration + # existing_intended (list): Existing intended VLAN items + + # Returns: + # dict: Merged VLAN configuration + # """ + # new_vlans = new_config.get("items", []) + + # # Create lookup for new VLANs by ID + # new_vlan_lookup = {vlan.get("vlanId"): vlan for vlan in new_vlans} + + # # Start with existing intended VLANs + # merged_vlans = [] + # for existing_vlan in existing_intended: + # vlan_id = existing_vlan.get("vlanId") + # if vlan_id in new_vlan_lookup: + # # Use the new configuration for this VLAN + # merged_vlans.append(new_vlan_lookup[vlan_id]) + # del new_vlan_lookup[vlan_id] # Remove from lookup to avoid duplicates + # else: + # # Keep the existing VLAN + # merged_vlans.append(existing_vlan) + + # # Add any remaining new VLANs + # merged_vlans.extend(new_vlan_lookup.values()) + + # return {api_feature_name: {"items": merged_vlans}} + + # def _merge_igmp_snooping_config(self, existing_config, desired_config): + # """ + # Merges IGMP snooping configurations with special handling for VLANs. + # Args: + # existing_config (dict): The existing IGMP snooping configuration from intended config. + # desired_config (dict): The desired IGMP snooping configuration from user input. + # Returns: + # dict: The merged configuration with updated global parameters and preserved VLAN settings. + # """ + # self.log("Starting IGMP snooping configuration merge", "DEBUG") + + # # Get the items from both configs + # existing_items = existing_config.get("items", []) + # self.log("Existing IGMP items: {0}".format(existing_items), "DEBUG") + + # desired_items = desired_config.get("items", []) + # self.log("Desired IGMP items: {0}".format(desired_items), "DEBUG") + + # if not existing_items and not desired_items: + # self.log("No existing or desired IGMP items found, returning desired config", "DEBUG") + # return desired_config + + # # If no existing config, return desired + # if not existing_items: + # return desired_config + + # # If no desired config, return existing + # if not desired_items: + # return existing_config + + # # Merge the configurations + # merged_items = [] + + # for existing_item in existing_items: + # if existing_item.get("configType") == "IGMP_SNOOPING_GLOBAL": + # # Start with existing global config as the final intended config + # final_intended_item = copy.deepcopy(existing_item) + # self.log("Final intended IGMP item initialized from existing: {0}".format(final_intended_item), "DEBUG") + + # # Find corresponding desired item + # desired_item = None + # for d_item in desired_items: + # if d_item.get("configType") == "IGMP_SNOOPING_GLOBAL": + # desired_item = d_item + # break + + # if desired_item: + # # Merge global parameters (non-VLAN settings) + # for key, value in desired_item.items(): + # if key != "igmpSnoopingVlanSettings" and key != "configType": + # self.log("Updating global parameter '{0}' from '{1}' to '{2}'".format( + # key, final_intended_item.get(key), value), "DEBUG") + # final_intended_item[key] = value + + # # Handle VLAN settings merge with correct three-category approach + # current_vlan_settings = existing_item.get("igmpSnoopingVlanSettings", {}) + # desired_vlan_settings = desired_item.get("igmpSnoopingVlanSettings", {}) + + # if desired_vlan_settings: + # self.log("Merging IGMP VLAN settings - current intended has {0} VLANs, user desires {1} VLANs".format( + # len(current_vlan_settings.get("items", [])), + # len(desired_vlan_settings.get("items", []))), "DEBUG") 
+ + # # Apply the three-category logic: current -> desired -> final + # merged_vlan_settings = self._merge_igmp_vlan_settings(current_vlan_settings, desired_vlan_settings) + # final_intended_item["igmpSnoopingVlanSettings"] = merged_vlan_settings + # self.log("IGMP VLAN settings merge completed", "DEBUG") + # else: + # self.log("No desired VLAN settings found to merge", "DEBUG") + + # merged_items.append(final_intended_item) + + # return {"items": merged_items} + + def _merge_igmp_snooping_config(self, desired_config, existing_config): + """ + Merges IGMP snooping configurations with special handling for VLANs. + Args: + existing_config (dict): The existing IGMP snooping configuration from intended config. + desired_config (dict): The desired IGMP snooping configuration from user input. + Returns: + dict: The merged configuration with updated global parameters and preserved VLAN settings. + """ + self.log("Starting IGMP snooping configuration merge", "DEBUG") + self.log("Function entry parameters validation", "DEBUG") + self.log( + "Existing config type: {0}, keys: {1}".format( + type(existing_config).__name__, + list(existing_config.keys()) if existing_config else "None", + ), + "DEBUG", + ) + self.log( + "Desired config type: {0}, keys: {1}".format( + type(desired_config).__name__, + list(desired_config.keys()) if desired_config else "None", + ), + "DEBUG", + ) + + # Get the items from both configs + existing_items = existing_config.get("items", []) + self.log("Existing IGMP items: {0}".format(existing_items), "DEBUG") + self.log( + "Extracted {0} existing items from configuration".format( + len(existing_items) + ), + "DEBUG", + ) + desired_items = desired_config.get("items", []) + self.log("Desired IGMP items: {0}".format(desired_items), "DEBUG") + self.log( + "Extracted {0} desired items from configuration".format(len(desired_items)), + "DEBUG", + ) + # Early exit conditions with detailed logging + if not existing_items and not desired_items: + self.log( + "No existing or desired IGMP items found, returning desired config", + "DEBUG", + ) + self.log("Early exit: Both configurations are empty", "DEBUG") + return desired_config + + # If no existing config, return desired + if not existing_items: + self.log( + "No existing configuration found, returning desired configuration as-is", + "DEBUG", + ) + self.log( + "Early exit: Only desired configuration exists with {0} items".format( + len(desired_items) + ), + "DEBUG", + ) + return desired_config + + # If no desired config, return existing + if not desired_items: + self.log( + "No desired configuration found, returning existing configuration as-is", + "DEBUG", + ) + self.log( + "Early exit: Only existing configuration exists with {0} items".format( + len(existing_items) + ), + "DEBUG", + ) + return existing_config + + # Start main merge process + self.log("Both configurations exist - proceeding with merge operation", "DEBUG") + self.log( + "Merge operation will process {0} existing items and {1} desired items".format( + len(existing_items), len(desired_items) + ), + "DEBUG", + ) + + # Merge the configurations + merged_items = [] + self.log("Initialized merged_items list for storing merge results", "DEBUG") + + # Process each existing item + for existing_item_index, existing_item in enumerate(existing_items): + self.log( + "Processing existing item {0} of {1}".format( + existing_item_index + 1, len(existing_items) + ), + "DEBUG", + ) + self.log("Existing item structure: {0}".format(existing_item), "DEBUG") + + existing_config_type = 
existing_item.get("configType") + self.log( + "Existing item configType: {0}".format(existing_config_type), "DEBUG" + ) + + if existing_config_type == "IGMP_SNOOPING_GLOBAL": + self.log("Found IGMP_SNOOPING_GLOBAL item for processing", "DEBUG") + + # Start with existing global config as the final intended config + final_intended_item = copy.deepcopy(existing_item) + self.log( + "Final intended IGMP item initialized from existing: {0}".format( + final_intended_item + ), + "DEBUG", + ) + self.log( + "Created deep copy of existing item with {0} keys".format( + len(final_intended_item.keys()) + ), + "DEBUG", + ) + # Find corresponding desired item + self.log( + "Searching for corresponding desired item with IGMP_SNOOPING_GLOBAL configType", + "DEBUG", + ) + desired_item = None + desired_item_found_index = None + + for d_item_index, d_item in enumerate(desired_items): + d_item_config_type = d_item.get("configType") + self.log( + "Checking desired item {0}: configType = {1}".format( + d_item_index, d_item_config_type + ), + "DEBUG", + ) + + if d_item_config_type == "IGMP_SNOOPING_GLOBAL": + desired_item = d_item + desired_item_found_index = d_item_index + self.log( + "Found matching desired item at index {0}".format( + d_item_index + ), + "DEBUG", + ) + break + + if desired_item: + self.log( + "Successfully found corresponding desired item for merge", + "DEBUG", + ) + self.log( + "Desired item structure: {0}".format(desired_item), "DEBUG" + ) + self.log( + "Desired item contains {0} keys for processing".format( + len(desired_item.keys()) + ), + "DEBUG", + ) + + # Process global parameters (non-VLAN settings) + self.log( + "Starting global parameters merge (excluding VLAN settings)", + "DEBUG", + ) + global_params_updated = 0 + + for key, value in desired_item.items(): + if key != "igmpSnoopingVlanSettings" and key != "configType": + old_value = final_intended_item.get(key) + self.log( + "Updating global parameter '{0}' from '{1}' to '{2}'".format( + key, old_value, value + ), + "DEBUG", + ) + final_intended_item[key] = value + global_params_updated += 1 + else: + self.log( + "Skipping parameter '{0}' (will be handled separately)".format( + key + ), + "DEBUG", + ) + + self.log( + "Global parameters merge completed: {0} parameters updated".format( + global_params_updated + ), + "DEBUG", + ) + + # Handle VLAN settings merge with correct three-category approach + self.log( + "Starting VLAN settings extraction and validation", "DEBUG" + ) + current_vlan_settings = existing_item.get( + "igmpSnoopingVlanSettings", {} + ) + desired_vlan_settings = desired_item.get( + "igmpSnoopingVlanSettings", {} + ) + + self.log( + "Current VLAN settings type: {0}".format( + type(current_vlan_settings).__name__ + ), + "DEBUG", + ) + self.log( + "Desired VLAN settings type: {0}".format( + type(desired_vlan_settings).__name__ + ), + "DEBUG", + ) + + current_vlan_items = current_vlan_settings.get("items", []) + desired_vlan_items = desired_vlan_settings.get("items", []) + + self.log( + "Current VLAN settings structure: {0}".format( + current_vlan_settings + ), + "DEBUG", + ) + self.log( + "Desired VLAN settings structure: {0}".format( + desired_vlan_settings + ), + "DEBUG", + ) + if desired_vlan_settings: + self.log( + "VLAN settings found in desired configuration - proceeding with VLAN merge", + "DEBUG", + ) + self.log( + "Merging IGMP VLAN settings - current intended has {0} VLANs, user desires {1} VLANs".format( + len(current_vlan_items), len(desired_vlan_items) + ), + "DEBUG", + ) + + self.log( + "Calling 
_merge_igmp_vlan_settings with parameters:", + "DEBUG", + ) + self.log( + " - Current VLAN settings: {0} items".format( + len(current_vlan_items) + ), + "DEBUG", + ) + self.log( + " - Desired VLAN settings: {0} items".format( + len(desired_vlan_items) + ), + "DEBUG", + ) + + # Apply the three-category logic: current -> desired -> final + merged_vlan_settings = self._merge_igmp_vlan_settings( + current_vlan_settings, desired_vlan_settings + ) + + self.log( + "VLAN settings merge function returned successfully", + "DEBUG", + ) + self.log( + "Merged VLAN settings structure: {0}".format( + merged_vlan_settings + ), + "DEBUG", + ) + + merged_vlan_items = merged_vlan_settings.get("items", []) + self.log( + "Final merged VLAN settings contain {0} items".format( + len(merged_vlan_items) + ), + "DEBUG", + ) + + final_intended_item["igmpSnoopingVlanSettings"] = ( + merged_vlan_settings + ) + self.log( + "Successfully applied merged VLAN settings to final intended item", + "DEBUG", + ) + self.log("IGMP VLAN settings merge completed", "DEBUG") + else: + self.log( + "No desired VLAN settings found in user configuration", + "DEBUG", + ) + self.log( + "VLAN settings will remain unchanged from current intended configuration", + "DEBUG", + ) + self.log( + "Current VLAN settings preserved: {0} items".format( + len(current_vlan_items) + ), + "DEBUG", + ) + + self.log("Global item merge completed successfully", "DEBUG") + self.log( + "Final intended item contains {0} keys".format( + len(final_intended_item.keys()) + ), + "DEBUG", + ) + + else: + self.log( + "No corresponding desired item found with IGMP_SNOOPING_GLOBAL configType", + "DEBUG", + ) + self.log("Existing item will be preserved without changes", "DEBUG") + + # Add the processed item to merged results + merged_items.append(final_intended_item) + self.log( + "Added final intended item to merged results (item {0})".format( + len(merged_items) + ), + "DEBUG", + ) + + else: + self.log( + "Existing item has non-global configType '{0}', preserving as-is".format( + existing_config_type + ), + "DEBUG", + ) + merged_items.append(existing_item) + self.log( + "Added non-global item to merged results (item {0})".format( + len(merged_items) + ), + "DEBUG", + ) + + # Final result preparation + self.log("Merge operation completed successfully", "DEBUG") + self.log("Total merged items: {0}".format(len(merged_items)), "DEBUG") + + final_result = {"items": merged_items} + self.log( + "Final merged configuration structure: {0}".format(final_result), "DEBUG" + ) + self.log( + "Returning merged configuration with {0} items".format(len(merged_items)), + "DEBUG", + ) + + return final_result + + def _merge_igmp_vlan_settings(self, current_vlan_settings, desired_vlan_settings): + """ + Merge IGMP VLAN settings using the three-category approach: + 1. current = current intended config (ALL VLANs) - this becomes our base + 2. desired = user provided config (ONLY USER-SPECIFIED VLANs) - these are the changes + 3. final = copy current, then apply user's desired changes for specified VLANs + + Args: + current_vlan_settings (dict): Current intended VLAN settings (ALL VLANs). + desired_vlan_settings (dict): User desired VLAN settings (ONLY USER-SPECIFIED VLANs). + Returns: + dict: Final VLAN settings with user's desired values applied to current intended config. 
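+        Example (illustrative values only):
+            current intended: VLAN 10 (snooping enabled), VLAN 20 (snooping enabled)
+            user desired:     VLAN 10 (snooping disabled), VLAN 30 (new)
+            final result:     VLAN 10 updated, VLAN 20 preserved unchanged, VLAN 30 added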
+ """ + self.log( + "Starting IGMP VLAN settings merge using three-category approach", "DEBUG" + ) + + # Category 1: current = current intended config (ALL VLANs) + current_vlans = current_vlan_settings.get("items", []) + self.log("Current intended VLANs: {0}".format(current_vlans), "DEBUG") + # Category 2: desired = user provided config (ONLY USER-SPECIFIED VLANs) + desired_vlans = desired_vlan_settings.get("items", []) + self.log("User desired VLANs: {0}".format(desired_vlans), "DEBUG") + self.log( + "Current intended VLANs count: {0}".format(len(current_vlans)), "DEBUG" + ) + self.log("User desired VLANs count: {0}".format(len(desired_vlans)), "DEBUG") + + # Category 3: final = copy current intended config as base + final_vlan_dict = {} + + # Initialize the parameters_updated counter + parameters_updated = 0 + + # Step 1: Copy ALL current intended VLANs into final config + for current_vlan in current_vlans: + vlan_id = current_vlan.get("vlanId") + if vlan_id: + final_vlan_dict[vlan_id] = copy.deepcopy(current_vlan) + self.log( + "Copied current intended VLAN {0} to final config".format(vlan_id), + "DEBUG", + ) + + self.log( + "Copied {0} current intended VLANs to final config".format( + len(final_vlan_dict) + ), + "DEBUG", + ) + # Step 2: Apply user's desired changes ONLY for user-specified VLANs + for desired_vlan in desired_vlans: + vlan_id = desired_vlan.get("vlanId") + if vlan_id: + self.log("Processing user-specified VLAN {0}".format(vlan_id), "DEBUG") + + if vlan_id in final_vlan_dict: + # VLAN exists in current intended config - UPDATE with user's desired parameters + final_vlan = final_vlan_dict[vlan_id] + self.log( + "VLAN {0} exists in current intended config - updating with user's desired values".format( + vlan_id + ), + "DEBUG", + ) + + # Update ONLY the parameters provided by the user + igmp_vlan_params = [ + "isIgmpSnoopingEnabled", + "isImmediateLeaveEnabled", + "isQuerierEnabled", + "querierAddress", + "querierQueryInterval", + "querierVersion", + ] + + for param in igmp_vlan_params: + if param in desired_vlan: + old_value = final_vlan.get(param) + new_value = desired_vlan[param] + # FIX: Only update if values are different + if old_value != new_value: + final_vlan[param] = new_value + parameters_updated += 1 + self.log( + "VLAN {0}: Updated parameter '{1}' from current '{2}' to user's desired '{3}' (values differ)".format( + vlan_id, param, old_value, new_value + ), + "DEBUG", + ) + else: + self.log( + "VLAN {0}: Parameter '{1}' already matches desired value '{2}' - no update needed".format( + vlan_id, param, new_value + ), + "DEBUG", + ) + + # Handle mrouter configuration if provided by user + if "igmpSnoopingVlanMrouters" in desired_vlan: + final_vlan["igmpSnoopingVlanMrouters"] = copy.deepcopy( + desired_vlan["igmpSnoopingVlanMrouters"] + ) + self.log( + "VLAN {0}: Applied user's mrouter configuration".format( + vlan_id + ), + "DEBUG", + ) + elif "igmpSnoopingVlanMrouters" not in final_vlan: + final_vlan["igmpSnoopingVlanMrouters"] = { + "configType": "SET", + "items": [], + } + self.log( + "VLAN {0}: Added default mrouter structure".format(vlan_id), + "DEBUG", + ) + + else: + # VLAN doesn't exist in current intended config - ADD new VLAN + self.log( + "VLAN {0} does not exist in current intended config - adding new VLAN".format( + vlan_id + ), + "DEBUG", + ) + new_vlan_config = copy.deepcopy(desired_vlan) + + # Ensure required structure for new VLAN + if "configType" not in new_vlan_config: + new_vlan_config["configType"] = "IGMP_SNOOPING_VLAN" + + if 
"igmpSnoopingVlanMrouters" not in new_vlan_config: + new_vlan_config["igmpSnoopingVlanMrouters"] = { + "configType": "SET", + "items": [], + } + + final_vlan_dict[vlan_id] = new_vlan_config + self.log( + "Added new VLAN {0} to final intended config".format(vlan_id), + "DEBUG", + ) + + # Convert final result back to list format sorted by VLAN ID + final_vlans = sorted(final_vlan_dict.values(), key=lambda x: x.get("vlanId", 0)) + + self.log( + "IGMP VLAN settings merge completed with {0} total VLANs in final config".format( + len(final_vlans) + ), + "DEBUG", + ) + self.log("Total parameters updated: {0}".format(parameters_updated), "DEBUG") + + return {"configType": "SET", "items": final_vlans} + + def _merge_mld_snooping_config(self, desired_config, existing_config): + """ + Merges MLD snooping configurations with special handling for VLANs. + Args: + existing_config (dict): The existing MLD snooping configuration from intended config. + desired_config (dict): The desired MLD snooping configuration from user input. + Returns: + dict: The merged configuration with updated global parameters and preserved VLAN settings. + """ + self.log("Starting MLD snooping configuration merge", "DEBUG") + self.log("Function entry parameters validation", "DEBUG") + self.log( + "Existing config type: {0}, keys: {1}".format( + type(existing_config).__name__, + list(existing_config.keys()) if existing_config else "None", + ), + "DEBUG", + ) + self.log( + "Desired config type: {0}, keys: {1}".format( + type(desired_config).__name__, + list(desired_config.keys()) if desired_config else "None", + ), + "DEBUG", + ) + + # Get the items from both configs + existing_items = existing_config.get("items", []) + self.log("Existing MLD items: {0}".format(existing_items), "DEBUG") + self.log( + "Extracted {0} existing items from configuration".format( + len(existing_items) + ), + "DEBUG", + ) + desired_items = desired_config.get("items", []) + self.log("Desired MLD items: {0}".format(desired_items), "DEBUG") + self.log( + "Extracted {0} desired items from configuration".format(len(desired_items)), + "DEBUG", + ) + + # Early exit conditions with detailed logging + if not existing_items and not desired_items: + self.log( + "No existing or desired MLD items found, returning desired config", + "DEBUG", + ) + self.log("Early exit: Both configurations are empty", "DEBUG") + return desired_config + + # If no existing config, return desired + if not existing_items: + self.log( + "No existing configuration found, returning desired configuration as-is", + "DEBUG", + ) + self.log( + "Early exit: Only desired configuration exists with {0} items".format( + len(desired_items) + ), + "DEBUG", + ) + return desired_config + + # If no desired config, return existing + if not desired_items: + self.log( + "No desired configuration found, returning existing configuration as-is", + "DEBUG", + ) + self.log( + "Early exit: Only existing configuration exists with {0} items".format( + len(existing_items) + ), + "DEBUG", + ) + return existing_config + + # Start main merge process + self.log("Both configurations exist - proceeding with merge operation", "DEBUG") + self.log( + "Merge operation will process {0} existing items and {1} desired items".format( + len(existing_items), len(desired_items) + ), + "DEBUG", + ) + + # Merge the configurations + merged_items = [] + self.log("Initialized merged_items list for storing merge results", "DEBUG") + + # Process each existing item + for existing_item_index, existing_item in enumerate(existing_items): + 
self.log( + "Processing existing item {0} of {1}".format( + existing_item_index + 1, len(existing_items) + ), + "DEBUG", + ) + self.log("Existing item structure: {0}".format(existing_item), "DEBUG") + + existing_config_type = existing_item.get("configType") + self.log( + "Existing item configType: {0}".format(existing_config_type), "DEBUG" + ) + + if existing_config_type == "MLD_SNOOPING_GLOBAL": + self.log("Found MLD_SNOOPING_GLOBAL item for processing", "DEBUG") + + # Start with existing global config as the final intended config + final_intended_item = copy.deepcopy(existing_item) + self.log( + "Final intended MLD item initialized from existing: {0}".format( + final_intended_item + ), + "DEBUG", + ) + self.log( + "Created deep copy of existing item with {0} keys".format( + len(final_intended_item.keys()) + ), + "DEBUG", + ) + + # Find corresponding desired item + self.log( + "Searching for corresponding desired item with MLD_SNOOPING_GLOBAL configType", + "DEBUG", + ) + desired_item = None + desired_item_found_index = None + + for d_item_index, d_item in enumerate(desired_items): + d_item_config_type = d_item.get("configType") + self.log( + "Checking desired item {0}: configType = {1}".format( + d_item_index, d_item_config_type + ), + "DEBUG", + ) + + if d_item_config_type == "MLD_SNOOPING_GLOBAL": + desired_item = d_item + desired_item_found_index = d_item_index + self.log( + "Found matching desired item at index {0}".format( + d_item_index + ), + "DEBUG", + ) + break + + if desired_item: + self.log( + "Successfully found corresponding desired item for merge", + "DEBUG", + ) + self.log( + "Desired item structure: {0}".format(desired_item), "DEBUG" + ) + self.log( + "Desired item contains {0} keys for processing".format( + len(desired_item.keys()) + ), + "DEBUG", + ) + + # Process global parameters (non-VLAN settings) + self.log( + "Starting global parameters merge (excluding VLAN settings)", + "DEBUG", + ) + global_params_updated = 0 + + for key, value in desired_item.items(): + if key != "mldSnoopingVlanSettings" and key != "configType": + old_value = final_intended_item.get(key) + self.log( + "Updating global parameter '{0}' from '{1}' to '{2}'".format( + key, old_value, value + ), + "DEBUG", + ) + final_intended_item[key] = value + global_params_updated += 1 + else: + self.log( + "Skipping parameter '{0}' (will be handled separately)".format( + key + ), + "DEBUG", + ) + + self.log( + "Global parameters merge completed: {0} parameters updated".format( + global_params_updated + ), + "DEBUG", + ) + + # Handle VLAN settings merge with correct three-category approach + self.log( + "Starting VLAN settings extraction and validation", "DEBUG" + ) + current_vlan_settings = existing_item.get( + "mldSnoopingVlanSettings", {} + ) + desired_vlan_settings = desired_item.get( + "mldSnoopingVlanSettings", {} + ) + + self.log( + "Current VLAN settings type: {0}".format( + type(current_vlan_settings).__name__ + ), + "DEBUG", + ) + self.log( + "Desired VLAN settings type: {0}".format( + type(desired_vlan_settings).__name__ + ), + "DEBUG", + ) + + current_vlan_items = current_vlan_settings.get("items", []) + desired_vlan_items = desired_vlan_settings.get("items", []) + + self.log( + "Current VLAN settings structure: {0}".format( + current_vlan_settings + ), + "DEBUG", + ) + self.log( + "Desired VLAN settings structure: {0}".format( + desired_vlan_settings + ), + "DEBUG", + ) + + if desired_vlan_settings: + self.log( + "VLAN settings found in desired configuration - proceeding with VLAN merge", + 
"DEBUG", + ) + self.log( + "Merging MLD VLAN settings - current intended has {0} VLANs, user desires {1} VLANs".format( + len(current_vlan_items), len(desired_vlan_items) + ), + "DEBUG", + ) + + self.log( + "Calling _merge_mld_vlan_settings with parameters:", "DEBUG" + ) + self.log( + " - Current VLAN settings: {0} items".format( + len(current_vlan_items) + ), + "DEBUG", + ) + self.log( + " - Desired VLAN settings: {0} items".format( + len(desired_vlan_items) + ), + "DEBUG", + ) + + # Apply the three-category logic: current -> desired -> final + merged_vlan_settings = self._merge_mld_vlan_settings( + current_vlan_settings, desired_vlan_settings + ) + + self.log( + "VLAN settings merge function returned successfully", + "DEBUG", + ) + self.log( + "Merged VLAN settings structure: {0}".format( + merged_vlan_settings + ), + "DEBUG", + ) + + merged_vlan_items = merged_vlan_settings.get("items", []) + self.log( + "Final merged VLAN settings contain {0} items".format( + len(merged_vlan_items) + ), + "DEBUG", + ) + + final_intended_item["mldSnoopingVlanSettings"] = ( + merged_vlan_settings + ) + self.log( + "Successfully applied merged VLAN settings to final intended item", + "DEBUG", + ) + self.log("MLD VLAN settings merge completed", "DEBUG") + else: + self.log( + "No desired VLAN settings found in user configuration", + "DEBUG", + ) + self.log( + "VLAN settings will remain unchanged from current intended configuration", + "DEBUG", + ) + self.log( + "Current VLAN settings preserved: {0} items".format( + len(current_vlan_items) + ), + "DEBUG", + ) + + self.log("Global item merge completed successfully", "DEBUG") + self.log( + "Final intended item contains {0} keys".format( + len(final_intended_item.keys()) + ), + "DEBUG", + ) + + else: + self.log( + "No corresponding desired item found with MLD_SNOOPING_GLOBAL configType", + "DEBUG", + ) + self.log("Existing item will be preserved without changes", "DEBUG") + + # Add the processed item to merged results + merged_items.append(final_intended_item) + self.log( + "Added final intended item to merged results (item {0})".format( + len(merged_items) + ), + "DEBUG", + ) + + else: + self.log( + "Existing item has non-global configType '{0}', preserving as-is".format( + existing_config_type + ), + "DEBUG", + ) + merged_items.append(existing_item) + self.log( + "Added non-global item to merged results (item {0})".format( + len(merged_items) + ), + "DEBUG", + ) + + # Final result preparation + self.log("Merge operation completed successfully", "DEBUG") + self.log("Total merged items: {0}".format(len(merged_items)), "DEBUG") + + final_result = {"items": merged_items} + self.log( + "Final merged configuration structure: {0}".format(final_result), "DEBUG" + ) + self.log( + "Returning merged configuration with {0} items".format(len(merged_items)), + "DEBUG", + ) + + return final_result + + def _merge_mld_vlan_settings(self, current_vlan_settings, desired_vlan_settings): + """ + Merge MLD VLAN settings using the three-category approach: + 1. current = current intended config (ALL VLANs) - this becomes our base + 2. desired = user provided config (ONLY USER-SPECIFIED VLANs) - these are the changes + 3. final = copy current, then apply user's desired changes for specified VLANs + + Args: + current_vlan_settings (dict): Current intended VLAN settings (ALL VLANs). + desired_vlan_settings (dict): User desired VLAN settings (ONLY USER-SPECIFIED VLANs). + Returns: + dict: Final VLAN settings with user's desired values applied to current intended config. 
+ """ + self.log( + "Starting MLD VLAN settings merge using three-category approach", "DEBUG" + ) + + # Category 1: current = current intended config (ALL VLANs) + current_vlans = current_vlan_settings.get("items", []) + self.log("Current intended VLANs: {0}".format(current_vlans), "DEBUG") + # Category 2: desired = user provided config (ONLY USER-SPECIFIED VLANs) + desired_vlans = desired_vlan_settings.get("items", []) + self.log("User desired VLANs: {0}".format(desired_vlans), "DEBUG") + self.log( + "Current intended VLANs count: {0}".format(len(current_vlans)), "DEBUG" + ) + self.log("User desired VLANs count: {0}".format(len(desired_vlans)), "DEBUG") + + # Category 3: final = copy current intended config as base + final_vlan_dict = {} + + # Initialize the parameters_updated counter + parameters_updated = 0 + + # Step 1: Copy ALL current intended VLANs into final config + for current_vlan in current_vlans: + vlan_id = current_vlan.get("vlanId") + if vlan_id: + final_vlan_dict[vlan_id] = copy.deepcopy(current_vlan) + self.log( + "Copied current intended VLAN {0} to final config".format(vlan_id), + "DEBUG", + ) + + self.log( + "Copied {0} current intended VLANs to final config".format( + len(final_vlan_dict) + ), + "DEBUG", + ) + + # Step 2: Apply user's desired changes ONLY for user-specified VLANs + for desired_vlan in desired_vlans: + vlan_id = desired_vlan.get("vlanId") + if vlan_id: + self.log("Processing user-specified VLAN {0}".format(vlan_id), "DEBUG") + + if vlan_id in final_vlan_dict: + # VLAN exists in current intended config - UPDATE with user's desired parameters + final_vlan = final_vlan_dict[vlan_id] + self.log( + "VLAN {0} exists in current intended config - updating with user's desired values".format( + vlan_id + ), + "DEBUG", + ) + + # Update ONLY the parameters provided by the user + mld_vlan_params = [ + "isMldSnoopingEnabled", + "isImmediateLeaveEnabled", + "isQuerierEnabled", + "querierAddress", + "querierQueryInterval", + "querierVersion", + ] + + for param in mld_vlan_params: + if param in desired_vlan: + old_value = final_vlan.get(param) + new_value = desired_vlan[param] + + # FIX: Skip empty querierAddress when querier is disabled + if ( + param == "querierAddress" + and not new_value + and not desired_vlan.get("isQuerierEnabled", False) + ): + # Remove empty querierAddress when querier is disabled + if param in final_vlan: + del final_vlan[param] + self.log( + "Removed empty querierAddress for VLAN {0} (querier disabled)".format( + vlan_id + ), + "DEBUG", + ) + continue + + # Only update if values are different + if old_value != new_value: + final_vlan[param] = new_value + parameters_updated += 1 + self.log( + "VLAN {0}: Updated parameter '{1}' from current '{2}' to user's desired '{3}' (values differ)".format( + vlan_id, param, old_value, new_value + ), + "DEBUG", + ) + else: + self.log( + "VLAN {0}: Parameter '{1}' already matches desired value '{2}' - no update needed".format( + vlan_id, param, new_value + ), + "DEBUG", + ) + + # Handle mrouter configuration if provided by user + if "mldSnoopingVlanMrouters" in desired_vlan: + final_vlan["mldSnoopingVlanMrouters"] = copy.deepcopy( + desired_vlan["mldSnoopingVlanMrouters"] + ) + self.log( + "VLAN {0}: Applied user's mrouter configuration".format( + vlan_id + ), + "DEBUG", + ) + elif "mldSnoopingVlanMrouters" not in final_vlan: + final_vlan["mldSnoopingVlanMrouters"] = { + "configType": "SET", + "items": [], + } + self.log( + "VLAN {0}: Added default mrouter structure".format(vlan_id), + "DEBUG", + ) + + else: 
+ # VLAN doesn't exist in current intended config - ADD new VLAN + self.log( + "VLAN {0} does not exist in current intended config - adding new VLAN".format( + vlan_id + ), + "DEBUG", + ) + new_vlan_config = copy.deepcopy(desired_vlan) + + # Ensure required structure for new VLAN + if "configType" not in new_vlan_config: + new_vlan_config["configType"] = "MLD_SNOOPING_VLAN" + + # FIX: Remove empty querierAddress if querier is disabled for new VLANs + if ( + "querierAddress" in new_vlan_config + and not new_vlan_config["querierAddress"] + and not new_vlan_config.get("isQuerierEnabled", False) + ): + del new_vlan_config["querierAddress"] + self.log( + "Removed empty querierAddress from new VLAN {0} (querier disabled)".format( + vlan_id + ), + "DEBUG", + ) + + if "mldSnoopingVlanMrouters" not in new_vlan_config: + new_vlan_config["mldSnoopingVlanMrouters"] = { + "configType": "SET", + "items": [], + } + + final_vlan_dict[vlan_id] = new_vlan_config + self.log( + "Added new VLAN {0} to final intended config".format(vlan_id), + "DEBUG", + ) + + # Convert final result back to list format sorted by VLAN ID + final_vlans = sorted(final_vlan_dict.values(), key=lambda x: x.get("vlanId", 0)) + + self.log( + "MLD VLAN settings merge completed with {0} total VLANs in final config".format( + len(final_vlans) + ), + "DEBUG", + ) + self.log("Total parameters updated: {0}".format(parameters_updated), "DEBUG") + + return {"configType": "SET", "items": final_vlans} + + def _merge_port_channel_config(self, desired_config, existing_config): + """ + Merges port channel configurations by port channel name. + Args: + desired_config (dict): The desired port channel configuration from user input. + existing_config (dict): The existing port channel configuration from intended config. + Returns: + dict: The merged configuration with updated global parameters and preserved port channels. 
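+        Example (illustrative names only):
+            existing: Port-channel1, Port-channel2
+            desired:  Port-channel2 (changed members), Port-channel5
+            merged:   Port-channel1 preserved, Port-channel2 updated, Port-channel5 added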
+ """ + self.log("Starting port channel configuration merge", "DEBUG") + + # Create a deep copy of existing config to avoid modifying original + merged_config = copy.deepcopy(existing_config) + + # Get the first item from both configs (port channel configs have single item) + existing_items = existing_config.get("items", []) + desired_items = desired_config.get("items", []) + + if not existing_items or not desired_items: + self.log( + "Missing items in existing or desired config, returning desired config", + "DEBUG", + ) + return desired_config + + existing_item = existing_items[0] + desired_item = desired_items[0] + + # Start with existing item as base + merged_item = copy.deepcopy(existing_item) + + # Update global parameters (excluding portchannels) + for key, value in desired_item.items(): + if key not in ["portchannels", "configType"]: + merged_item[key] = value + self.log( + "Updated global parameter '{0}': {1}".format(key, value), "DEBUG" + ) + + # Extract current and desired port channels + existing_portchannels = existing_item.get("portchannels", {}).get("items", []) + desired_portchannels = desired_item.get("portchannels", {}).get("items", []) + + self.log( + "Current intended port channels count: {0}".format( + len(existing_portchannels) + ), + "DEBUG", + ) + self.log( + "User desired port channels count: {0}".format(len(desired_portchannels)), + "DEBUG", + ) + + # Create a mapping of existing port channels by name + existing_pc_map = {} + for pc in existing_portchannels: + pc_name = pc.get("name") + if pc_name: + existing_pc_map[pc_name] = pc + self.log("Found existing port channel: {0}".format(pc_name), "DEBUG") + + # Process desired port channels + updated_portchannels = [] + + # First, add all existing port channels that are not being updated + for pc_name, pc_config in existing_pc_map.items(): + # Check if this port channel is being updated + is_being_updated = any( + desired_pc.get("name") == pc_name for desired_pc in desired_portchannels + ) + + if not is_being_updated: + updated_portchannels.append(pc_config) + self.log( + "Preserving existing port channel: {0}".format(pc_name), "DEBUG" + ) + + # Then, add/update port channels from desired config + for desired_pc in desired_portchannels: + pc_name = desired_pc.get("name") + if not pc_name: + self.log("Skipping port channel without name", "WARNING") + continue + + if pc_name in existing_pc_map: + self.log("Updating existing port channel: {0}".format(pc_name), "DEBUG") + # Merge the port channel configuration + merged_pc = self._merge_single_port_channel( + existing_pc_map[pc_name], desired_pc + ) + updated_portchannels.append(merged_pc) + else: + self.log("Adding new port channel: {0}".format(pc_name), "DEBUG") + updated_portchannels.append(desired_pc) + + # Update the portchannels in the merged item (not at root level) + if "portchannels" not in merged_item: + merged_item["portchannels"] = {"configType": "SET", "items": []} + + merged_item["portchannels"]["items"] = updated_portchannels + + # Update the merged config with the single merged item + merged_config["items"] = [merged_item] + + self.log( + "Port channel merge completed. Final count: {0}".format( + len(updated_portchannels) + ), + "DEBUG", + ) + return merged_config + + def _merge_single_port_channel(self, current_pc, desired_pc): + """ + Merges a single port channel configuration. + Args: + current_pc (dict): Current port channel configuration. + desired_pc (dict): Desired port channel configuration. + Returns: + dict: Merged port channel configuration. 
+ """ + self.log( + "Merging port channel: {0}".format(current_pc.get("name", "Unknown")), + "DEBUG", + ) + + # Start with current configuration + merged_pc = copy.deepcopy(current_pc) + + # Update with desired parameters (excluding member ports for now) + for key, value in desired_pc.items(): + if key not in ["memberPorts", "configType"]: + merged_pc[key] = value + self.log( + "Updated port channel parameter '{0}': {1}".format(key, value), + "DEBUG", + ) + + # Handle member ports merge + current_members = current_pc.get("memberPorts", {}).get("items", []) + desired_members = desired_pc.get("memberPorts", {}).get("items", []) + + # Create mapping of current members by interface name + current_member_map = {} + for member in current_members: + interface_name = member.get("interfaceName") + if interface_name: + current_member_map[interface_name] = member + + # Process desired members + updated_members = [] + + # Add existing members that are not being updated + for interface_name, member_config in current_member_map.items(): + is_being_updated = any( + desired_member.get("interfaceName") == interface_name + for desired_member in desired_members + ) + + if not is_being_updated: + updated_members.append(member_config) + self.log( + "Preserving existing member: {0}".format(interface_name), "DEBUG" + ) + + # Add/update members from desired config + for desired_member in desired_members: + interface_name = desired_member.get("interfaceName") + if not interface_name: + continue + + if interface_name in current_member_map: + self.log( + "Updating existing member: {0}".format(interface_name), "DEBUG" + ) + # Merge member configuration + merged_member = copy.deepcopy(current_member_map[interface_name]) + for key, value in desired_member.items(): + if key != "configType": + merged_member[key] = value + updated_members.append(merged_member) + else: + self.log("Adding new member: {0}".format(interface_name), "DEBUG") + updated_members.append(desired_member) + + # Update member ports in merged config + if updated_members: + merged_pc["memberPorts"] = {"configType": "SET", "items": updated_members} + + self.log( + "Port channel merge completed. Members: {0}".format(len(updated_members)), + "DEBUG", + ) + return merged_pc + + def _config_needs_update(self, desired_config, current_config): + """ + Compares desired configuration with current configuration to determine if update is needed. 
+ Args: + desired_config (dict): Desired configuration + current_config (dict): Current configuration + Returns: + bool: True if update is needed, False otherwise + """ + self.log( + "Starting configuration comparison to determine update necessity", "DEBUG" + ) + self.log( + "Desired configuration keys: {0}".format(list(desired_config.keys())), + "DEBUG", + ) + self.log( + "Current configuration keys: {0}".format(list(current_config.keys())), + "DEBUG", + ) + + # Compare all parameters except configType + for key, desired_value in desired_config.items(): + if key == "configType": + self.log("Skipping configType parameter during comparison", "DEBUG") + continue + + current_value = current_config.get(key) + self.log( + "Comparing parameter '{0}': desired='{1}', current='{2}'".format( + key, desired_value, current_value + ), + "DEBUG", + ) + + if current_value != desired_value: + self.log( + "Parameter {0} differs: desired={1}, current={2}".format( + key, desired_value, current_value + ), + "DEBUG", + ) + self.log( + "Configuration update is required due to parameter differences", + "DEBUG", + ) + return True + else: + self.log( + "Parameter '{0}' matches between desired and current configuration".format( + key + ), + "DEBUG", + ) + + self.log( + "All parameters match between desired and current configuration", "DEBUG" + ) + self.log("No configuration update is required", "DEBUG") + + return False + + def _is_vlan_feature(self, api_feature_name): + """ + Check if the feature is VLAN-related. + Args: + api_feature_name (str): Name of the API feature to check + Returns: + bool: True if the feature is VLAN-related, False otherwise + """ + self.log( + "Checking if API feature '{0}' is VLAN-related".format(api_feature_name), + "DEBUG", + ) + + # Check if the feature name matches the VLAN configuration identifier + is_vlan = api_feature_name == "vlanConfig" + + self.log( + "VLAN feature check result for '{0}': {1}".format( + api_feature_name, is_vlan + ), + "DEBUG", + ) + + return is_vlan + + def _is_global_feature(self, api_feature_name): + """ + Check if the feature is a global configuration. + Args: + api_feature_name (str): Name of the API feature to check + Returns: + bool: True if the feature is a global configuration, False otherwise + """ + self.log( + "Checking if API feature '{0}' is a global configuration feature".format( + api_feature_name + ), + "DEBUG", + ) + + # Define the list of global configuration features that configure device-wide settings + global_features = [ + "cdpGlobalConfig", + "lldpGlobalConfig", + "stpGlobalConfig", + "vtpGlobalConfig", + "dhcpSnoopingGlobalConfig", + "igmpSnoopingGlobalConfig", + "mldSnoopingGlobalConfig", + "dot1xGlobalConfig", + "portchannelConfig", + "udldGlobalConfig", + ] + + # Check if the feature is in the global features list + is_global = api_feature_name in global_features + + self.log( + "Global feature check result for '{0}': {1}".format( + api_feature_name, + "global feature" if is_global else "not a global feature", + ), + "DEBUG", + ) + + return is_global + + def _is_interface_feature(self, api_feature_name): + """ + Check if the feature is an interface configuration. 
+ Args: + api_feature_name (str): Name of the API feature to check + Returns: + bool: True if the feature is an interface configuration, False otherwise + """ + self.log( + "Checking if API feature '{0}' is an interface configuration feature".format( + api_feature_name + ), + "DEBUG", + ) + + # Define the list of interface-specific features that configure individual interfaces + interface_features = [ + "switchportInterfaceConfig", + "trunkInterfaceConfig", + "cdpInterfaceConfig", + "lldpInterfaceConfig", + "stpInterfaceConfig", + "dhcpSnoopingInterfaceConfig", + "dot1xInterfaceConfig", + "mabInterfaceConfig", + "vtpInterfaceConfig", + ] + + # Check if the feature is in the interface features list + is_interface = api_feature_name in interface_features + + self.log( + "Interface feature check result for '{0}': {1}".format( + api_feature_name, + ( + "interface configuration feature" + if is_interface + else "not an interface configuration feature" + ), + ), + "DEBUG", + ) + + return is_interface + + def _execute_api_operations(self, diff_analysis): + """ + Executes the API operations for all features that require changes. + Description: + This is the main orchestration function that: + 1. Checks if any operations are needed (exits with success if none) + 2. Executes intent operations (create/update) for each feature + 3. Fails immediately if any intent operation fails + 4. Attempts deployment only if all intent operations succeed + 5. Sets appropriate operation results and messages + Args: + diff_analysis (dict): Analysis results from _analyze_configuration_differences + Returns: + dict: Results of all API operations executed + """ + self.log("Starting execution of API operations", "INFO") + + network_device_id = diff_analysis.get("network_device_id") + features_to_process = diff_analysis.get("features_to_process", {}) + device_identifier = self.want.get("device_identifier") + + if not features_to_process: + self.msg = "No Layer 2 configuration changes required for device {0}. 
Current configuration is already up-to-date.".format( + device_identifier + ) + self.set_operation_result("success", False, self.msg, "INFO") + return { + "executed_operations": {}, + "deployment_result": None, + "summary": { + "total_operations": 0, + "successful_operations": 0, + "failed_operations": 0, + "deployment_attempted": False, + "deployment_successful": False, + }, + } + + self.log( + "Executing API operations for {0} features".format( + len(features_to_process) + ), + "INFO", + ) + + executed_operations = {} + successful_operations = 0 + failed_operations = 0 + failed_features = [] + + # Execute intent operations for each feature + for api_feature_name, operation_details in features_to_process.items(): + user_feature_name = self._get_user_feature_name(api_feature_name) + self.log( + "Executing operation for feature: {0} (user: {1})".format( + api_feature_name, user_feature_name + ), + "INFO", + ) + + try: + operation_result = self._execute_single_feature_operation( + network_device_id, + api_feature_name, + operation_details, + user_feature_name, + ) + + executed_operations[api_feature_name] = operation_result + + if operation_result.get("status") == "success": + successful_operations += 1 + self.log( + "Successfully executed operation for feature: {0}".format( + user_feature_name + ), + "INFO", + ) + else: + failed_operations += 1 + failed_features.append( + { + "feature": user_feature_name, + "operation": operation_details.get("intent_operation"), + "error": operation_result.get("error", "Unknown error"), + "api_feature": api_feature_name, + } + ) + self.log( + "Failed to execute operation for feature: {0}".format( + user_feature_name + ), + "ERROR", + ) + + except Exception as e: + error_msg = "Exception during operation for feature {0}: {1}".format( + user_feature_name, str(e) + ) + self.log(error_msg, "ERROR") + + executed_operations[api_feature_name] = { + "status": "failed", + "error": error_msg, + "exception": str(e), + } + failed_operations += 1 + failed_features.append( + { + "feature": user_feature_name, + "operation": operation_details.get("intent_operation"), + "error": error_msg, + "api_feature": api_feature_name, + } + ) + + # If any intent operations failed, fail immediately without attempting deployment + if failed_operations > 0: + failure_details = self._build_detailed_failure_message( + failed_features, device_identifier, "intent configuration" + ) + self.msg = failure_details + self.set_operation_result("failed", True, self.msg, "ERROR") + self.fail_and_exit(self.msg) + + # Attempt deployment if all intent operations were successful + deployment_result = None + deployment_successful = False + + if successful_operations > 0: + self.log( + "All intent operations successful. Attempting to deploy configurations to device", + "INFO", + ) + try: + deployment_result = self._deploy_configurations( + network_device_id, device_identifier + ) + deployment_successful = deployment_result.get("status") == "success" + + if deployment_successful: + success_msg = "Successfully deployed Wired Campus Automation configuration for device {0}.".format( + device_identifier + ) + self.msg = success_msg + self.set_operation_result("success", True, self.msg, "INFO") + self.log(success_msg, "INFO") + else: + deployment_error = deployment_result.get( + "error", "Unknown deployment error" + ) + failure_msg = ( + "Failed to deploy Wired Campus Automation configuration for device {0}. 
" + "Intent configuration was successful for {1} features, but deployment failed: {2}" + ).format(device_identifier, successful_operations, deployment_error) + self.msg = failure_msg + self.set_operation_result("failed", True, self.msg, "ERROR") + self.fail_and_exit(self.msg) + + except Exception as e: + error_msg = "Exception during deployment for device {0}: {1}".format( + device_identifier, str(e) + ) + self.log(error_msg, "ERROR") + self.msg = error_msg + self.set_operation_result("failed", True, self.msg, "ERROR") + self.fail_and_exit(self.msg) + + operation_results = { + "executed_operations": executed_operations, + "deployment_result": deployment_result, + "summary": { + "total_operations": len(features_to_process), + "successful_operations": successful_operations, + "failed_operations": failed_operations, + "deployment_attempted": successful_operations > 0, + "deployment_successful": deployment_successful, + }, + } + + self.log( + "API operations execution completed: {0}".format( + operation_results["summary"] + ), + "INFO", + ) + return operation_results + + def _execute_single_feature_operation( + self, network_device_id, api_feature_name, operation_details, user_feature_name + ): + """ + Executes the API operation for a single Layer 2 feature (create or update intent). + Description: + This function: + 1. Determines the intent operation type (create/update) + 2. Calls the appropriate API function with the final configuration + 3. Extracts the task ID from the response + 4. Monitors task completion using get_task_status_from_tasks_by_id + 5. Returns success/failure status with detailed information + Args: + network_device_id (str): Network device ID + api_feature_name (str): API feature name (Example, 'vlanConfig') + operation_details (dict): Operation details including intent operation and final config + user_feature_name (str): User-friendly feature name for logging + Returns: + dict: Result of the API operation with status, task details, and error info + """ + intent_operation = operation_details.get("intent_operation") + final_config = operation_details.get("final_config") + + self.log( + "Executing {0} intent operation for feature {1}".format( + intent_operation, user_feature_name + ), + "DEBUG", + ) + self.log("Final configuration to apply: {0}".format(final_config), "DEBUG") + + try: + if intent_operation == "create": + task_response = self.create_layer2_feature_configuration( + network_device_id, api_feature_name, final_config + ) + task_name = "Create {0} Intent Configuration".format(user_feature_name) + success_msg = "Successfully created {0} intent configuration".format( + user_feature_name + ) + elif intent_operation == "update": + task_response = self.update_layer2_feature_configuration( + network_device_id, api_feature_name, final_config + ) + task_name = "Update {0} Intent Configuration".format(user_feature_name) + success_msg = "Successfully updated {0} intent configuration".format( + user_feature_name + ) + else: + raise ValueError( + "Invalid intent operation: {0}".format(intent_operation) + ) + + # Debug: Log the task response structure and type + self.log( + "DEBUG: task_response type: {0}".format(type(task_response)), "DEBUG" + ) + self.log("DEBUG: task_response content: {0}".format(task_response), "DEBUG") + + # Process the task response - get_taskid_post_api_call returns the task ID directly as a string + if task_response and isinstance(task_response, str): + task_id = task_response + self.log( + "Task initiated for {0} operation on {1}, task ID: 
{2}".format( + intent_operation, user_feature_name, task_id + ), + "INFO", + ) + + # Monitor task completion using the same pattern as wireless design module + self.get_task_status_from_tasks_by_id( + task_id, task_name, success_msg + ).check_return_status() + + # Check the final status + if self.status == "success": + return { + "status": "success", + "intent_operation": intent_operation, + "task_id": task_id, + "task_name": task_name, + "final_config": final_config, + "message": success_msg, + } + else: + return { + "status": "failed", + "error": self.msg, + "intent_operation": intent_operation, + "task_id": task_id, + "task_name": task_name, + } + else: + error_msg = "Invalid task response format for {0} operation on {1}. Expected string task ID, got: {2} (type: {3})".format( + intent_operation, + user_feature_name, + task_response, + type(task_response).__name__, + ) + self.log(error_msg, "ERROR") + return { + "status": "failed", + "error": error_msg, + "intent_operation": intent_operation, + "response": task_response, + } + + except Exception as e: + error_msg = "Failed to execute {0} operation for {1}: {2}".format( + intent_operation, user_feature_name, str(e) + ) + self.log(error_msg, "ERROR") + self.log("Exception details: {0}".format(str(e)), "DEBUG") + + # DEBUG: Add full traceback + import traceback + + self.log( + "DEBUG: Full exception traceback: {0}".format(traceback.format_exc()), + "DEBUG", + ) + + return { + "status": "failed", + "error": error_msg, + "exception": str(e), + "intent_operation": intent_operation, + } + + def _deploy_configurations(self, network_device_id, device_identifier): + """ + Deploys the intended configurations to the device. + Description: + This function: + 1. Calls the deployment API to deploy all intended configurations + 2. Extracts the task ID from the response + 3. Monitors deployment task completion using get_task_status_from_tasks_by_id + 4. 
Returns success/failure status with detailed information + Args: + network_device_id (str): Network device ID + device_identifier (str): Device identifier for user messages + Returns: + dict: Result of the deployment operation with status and task details + """ + self.log( + "Initiating deployment of configurations to device {0}".format( + device_identifier + ), + "INFO", + ) + + try: + deploy_response = self.deploy_intended_configurations(network_device_id) + + # Debug: Log the deploy response structure and type + self.log( + "DEBUG: deploy_response type: {0}".format(type(deploy_response)), + "DEBUG", + ) + self.log( + "DEBUG: deploy_response content: {0}".format(deploy_response), "DEBUG" + ) + + # Process the deploy response - deploy_intended_configurations returns the task ID directly as a string + if deploy_response and isinstance(deploy_response, str): + task_id = deploy_response + task_name = "Deploy Wired Campus Automation Configuration" + success_msg = "Successfully deployed Wired Campus Automation configuration to device {0}".format( + device_identifier + ) + self.log( + "Deployment task initiated, task ID: {0}".format(task_id), "INFO" + ) + + # Monitor deployment task completion using existing DnacBase function + self.get_task_status_from_tasks_by_id( + task_id, task_name, success_msg + ).check_return_status() + + # Check the final status + if self.status == "success": + return { + "status": "success", + "task_id": task_id, + "task_name": task_name, + } + else: + return { + "status": "failed", + "error": self.msg, + "task_id": task_id, + "task_name": task_name, + } + else: + error_msg = "Invalid deploy response format for deployment operation on device {0}. Expected string task ID, got: {1} (type: {2})".format( + device_identifier, deploy_response, type(deploy_response).__name__ + ) + self.log(error_msg, "ERROR") + return { + "status": "failed", + "error": error_msg, + "response": deploy_response, + } + + except Exception as e: + error_msg = "Failed to deploy configurations to device {0}: {1}".format( + device_identifier, str(e) + ) + self.log(error_msg, "ERROR") + return {"status": "failed", "error": error_msg, "exception": str(e)} + + def _get_user_feature_name(self, api_feature_name): + """ + Maps API feature names to user-friendly feature names for better error messages. + Description: + Converts technical API feature names to human-readable names that users + can understand in error messages and logs. This ensures that failure + messages use terminology familiar to network administrators. 
+ Args: + api_feature_name (str): API feature name (Example, 'vlanConfig') + Returns: + str: User-friendly feature name (Example, 'VLAN Configuration') + """ + feature_mapping = { + "vlanConfig": "VLAN Configuration", + "cdpGlobalConfig": "CDP Configuration", + "lldpGlobalConfig": "LLDP Configuration", + "stpGlobalConfig": "STP Configuration", + "vtpGlobalConfig": "VTP Configuration", + "dhcpSnoopingGlobalConfig": "DHCP Snooping Configuration", + "igmpSnoopingGlobalConfig": "IGMP Snooping Configuration", + "mldSnoopingGlobalConfig": "MLD Snooping Configuration", + "dot1xGlobalConfig": "802.1X Authentication Configuration", + "portchannelConfig": "Port Channel Configuration", + "switchportInterfaceConfig": "Switchport Interface Configuration", + "trunkInterfaceConfig": "Trunk Interface Configuration", + "cdpInterfaceConfig": "CDP Interface Configuration", + "lldpInterfaceConfig": "LLDP Interface Configuration", + "stpInterfaceConfig": "STP Interface Configuration", + "dhcpSnoopingInterfaceConfig": "DHCP Snooping Interface Configuration", + "dot1xInterfaceConfig": "802.1X Interface Configuration", + "mabInterfaceConfig": "MAB Interface Configuration", + "vtpInterfaceConfig": "VTP Interface Configuration", + } + + return feature_mapping.get(api_feature_name, api_feature_name) + + def _build_detailed_failure_message( + self, failed_features, device_identifier, operation_type + ): + """ + Builds a detailed failure message for failed operations using user-friendly feature names. + Args: + failed_features (list): List of failed feature details with user-friendly names + device_identifier (str): Device identifier for the error message + operation_type (str): Type of operation (Example, "intent configuration", "deployment") + Returns: + str: Detailed failure message with enumerated failure details + """ + if not failed_features: + return "Unknown failure occurred during {0} for device {1}".format( + operation_type, device_identifier + ) + + failure_msg = "Failed to configure Wired Campus Automation for device {0} during {1}. Failures occurred in the following features:\n".format( + device_identifier, operation_type + ) + + # Iterate through each failed feature and build detailed error description + for i, failure in enumerate(failed_features, 1): + feature_name = failure.get("feature", "Unknown Feature") + operation = failure.get("operation", "unknown operation") + error = failure.get("error", "Unknown error") + + failure_msg += "{0}. {1} ({2} operation): {3}\n".format( + i, feature_name, operation, error + ) + self.log( + "Processing failure {0}: Feature '{1}', Operation '{2}', Error '{3}'".format( + i, feature_name, operation, error + ), + "DEBUG", + ) + + self.log( + "Built detailed failure message for {0} failed features during {1}".format( + len(failed_features), operation_type + ), + "DEBUG", + ) + + return failure_msg.rstrip() + + def _log_configuration_state( + self, state_label, want_feature_mappings, deployed_configs + ): + """ + Logs the configuration state in a structured format. 
+ Args: + state_label (str): Label for the state (Example, "Pre-operation", "Post-operation") + want_feature_mappings (dict): User feature mappings for context + deployed_configs (dict): Deployed configurations to log + """ + for user_feature_name, user_feature_config in want_feature_mappings.items(): + self.log("User Feature: {0}".format(user_feature_name), "INFO") + + for api_feature_name in user_feature_config.keys(): + user_friendly_name = self._get_user_feature_name(api_feature_name) + config = deployed_configs.get(api_feature_name, {}) + + if config.get("response", {}).get(api_feature_name, {}).get("items"): + items_count = len(config["response"][api_feature_name]["items"]) + self.log( + " - {0}: {1} items configured".format( + user_friendly_name, items_count + ), + "INFO", + ) + self.log( + " {0} config structure: {1}".format( + state_label, config["response"][api_feature_name] + ), + "DEBUG", + ) + else: + self.log( + " - {0}: No configuration found".format(user_friendly_name), + "INFO", + ) + + def _perform_detailed_verification( + self, + want_feature_mappings, + original_deployed_configs, + post_operation_deployed_configs, + ): + """ + Performs detailed verification of configuration changes by comparing desired vs actual state. + Args: + want_feature_mappings (dict): Desired user feature configurations + original_deployed_configs (dict): Pre-operation deployed configurations + post_operation_deployed_configs (dict): Post-operation deployed configurations + Returns: + dict: Detailed verification results and summary + """ + self.log("Starting detailed verification analysis", "DEBUG") + + verification_summary = { + "total_features_verified": 0, + "features_successfully_applied": 0, + "features_failed_verification": 0, + "features_not_found": 0, + "verification_failed": False, + "success_message": "", + "failure_message": "", + "detailed_results": {}, + } + + failed_verifications = [] + successful_verifications = [] + + # Process each user feature mapping + for user_feature_name, user_feature_config in want_feature_mappings.items(): + self.log("Verifying user feature: {0}".format(user_feature_name), "INFO") + + for api_feature_name, desired_config in user_feature_config.items(): + verification_summary["total_features_verified"] += 1 + user_friendly_name = self._get_user_feature_name(api_feature_name) + + self.log( + "Verifying API feature: {0} ({1})".format( + api_feature_name, user_friendly_name + ), + "INFO", + ) + + # Get configurations for comparison + original_config = original_deployed_configs.get(api_feature_name, {}) + post_config = post_operation_deployed_configs.get(api_feature_name, {}) + + # Perform feature-specific verification + feature_verification = self._verify_single_feature( + api_feature_name, + user_friendly_name, + desired_config, + original_config, + post_config, + ) + + verification_summary["detailed_results"][ + api_feature_name + ] = feature_verification + + if feature_verification["status"] == "success": + verification_summary["features_successfully_applied"] += 1 + successful_verifications.append(feature_verification["message"]) + self.log("✓ {0}".format(feature_verification["message"]), "INFO") + elif feature_verification["status"] == "failed": + verification_summary["features_failed_verification"] += 1 + failed_verifications.append(feature_verification["message"]) + self.log("✗ {0}".format(feature_verification["message"]), "ERROR") + else: # not_found + verification_summary["features_not_found"] += 1 + 
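+                    # A feature that is not found after deployment is also added to
+                    # failed_verifications, so it contributes to an overall verification failure.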
failed_verifications.append(feature_verification["message"]) + self.log("⚠ {0}".format(feature_verification["message"]), "WARNING") + + # Determine overall verification result + if failed_verifications: + verification_summary["verification_failed"] = True + verification_summary["failure_message"] = ( + "Configuration verification failed for device {0}. " + "Successfully verified: {1}, Failed: {2}, Not found: {3}. " + "Failures: {4}".format( + self.want.get("device_identifier"), + verification_summary["features_successfully_applied"], + verification_summary["features_failed_verification"], + verification_summary["features_not_found"], + "; ".join(failed_verifications), + ) + ) + else: + verification_summary["success_message"] = ( + "Configuration verification successful for device {0}. " + "All {1} Layer 2 features have been successfully deployed and verified.".format( + self.want.get("device_identifier"), + verification_summary["features_successfully_applied"], + ) + ) + + return verification_summary + + def _verify_single_feature( + self, + api_feature_name, + user_friendly_name, + desired_config, + original_config, + post_config, + ): + """ + Verifies a single feature configuration by comparing desired vs deployed state. + Args: + api_feature_name (str): API feature name + user_friendly_name (str): User-friendly feature name + desired_config (dict): Desired configuration for this feature + original_config (dict): Original deployed configuration + post_config (dict): Current deployed configuration + Returns: + dict: Verification result for this feature + """ + self.log( + "Performing detailed verification for feature: {0}".format( + user_friendly_name + ), + "DEBUG", + ) + + # Extract actual configuration items + original_items = ( + original_config.get("response", {}) + .get(api_feature_name, {}) + .get("items", []) + ) + post_items = ( + post_config.get("response", {}).get(api_feature_name, {}).get("items", []) + ) + desired_items = desired_config.get("items", []) + + self.log("Original items count: {0}".format(len(original_items)), "DEBUG") + self.log("Post-operation items count: {0}".format(len(post_items)), "DEBUG") + self.log("Desired items count: {0}".format(len(desired_items)), "DEBUG") + + # Check if configuration exists post-operation + if not post_items: + return { + "status": "not_found", + "message": "{0} configuration not found after deployment".format( + user_friendly_name + ), + "details": "Expected configuration items but found none in deployed state", + } + + # Perform feature-type specific verification + if self._is_vlan_feature(api_feature_name): + return self._verify_vlan_configuration( + user_friendly_name, desired_items, post_items, original_items + ) + elif self._is_global_feature(api_feature_name): + return self._verify_global_configuration( + user_friendly_name, desired_items, post_items, original_items + ) + elif self._is_interface_feature(api_feature_name): + return self._verify_interface_configuration( + user_friendly_name, desired_items, post_items, original_items + ) + else: + return self._verify_default_configuration( + user_friendly_name, desired_items, post_items, original_items + ) + + def _verify_vlan_configuration( + self, user_friendly_name, desired_items, post_items, original_items + ): + """ + Verifies VLAN configuration by checking individual VLAN parameters. 
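+        Deployed VLANs are matched to desired VLANs by vlanId; a desired VLAN with no
+        matching deployed entry is reported as a failure, and matched VLANs are compared
+        parameter by parameter via _verify_vlan_parameters.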
+ Args: + user_friendly_name (str): User-friendly name for the VLAN feature + desired_items (list): List of desired VLAN configuration items + post_items (list): List of post-operation deployed VLAN items + original_items (list): List of original VLAN configuration items + Returns: + dict: Verification result containing status, message, and details + """ + self.log("Verifying VLAN configuration details", "DEBUG") + + # Create lookup for deployed VLANs by ID + post_vlan_lookup = {item.get("vlanId"): item for item in post_items} + + verified_vlans = [] + failed_vlans = [] + + for desired_vlan in desired_items: + vlan_id = desired_vlan.get("vlanId") + post_vlan = post_vlan_lookup.get(vlan_id) + + if not post_vlan: + failed_vlans.append( + "VLAN {0} not found in deployed configuration".format(vlan_id) + ) + continue + + # Verify VLAN parameters + vlan_verification = self._verify_vlan_parameters(desired_vlan, post_vlan) + + if vlan_verification["success"]: + verified_vlans.append("VLAN {0}".format(vlan_id)) + self.log( + "VLAN {0} verification successful: {1}".format( + vlan_id, vlan_verification["details"] + ), + "DEBUG", + ) + else: + failed_vlans.append( + "VLAN {0}: {1}".format(vlan_id, vlan_verification["details"]) + ) + self.log( + "VLAN {0} verification failed: {1}".format( + vlan_id, vlan_verification["details"] + ), + "DEBUG", + ) + + if failed_vlans: + return { + "status": "failed", + "message": "{0} verification failed for VLANs: {1}".format( + user_friendly_name, ", ".join(failed_vlans) + ), + "details": "Successfully verified: {0}, Failed: {1}".format( + len(verified_vlans), len(failed_vlans) + ), + } + else: + return { + "status": "success", + "message": "{0} successfully verified for {1} VLANs".format( + user_friendly_name, len(verified_vlans) + ), + "details": "All requested VLAN configurations match deployed state", + } + + def _verify_global_configuration( + self, user_friendly_name, desired_items, post_items, original_items + ): + """ + Verifies global configuration (single item configurations like CDP, LLDP, etc.). 
+ Args: + user_friendly_name (str): User-friendly name for the global feature + desired_items (list): List of desired global configuration items + post_items (list): List of post-operation deployed global items + original_items (list): List of original global configuration items + Returns: + dict: Verification result containing status, message, and details + """ + self.log( + "Verifying global configuration details for {0}".format(user_friendly_name), + "DEBUG", + ) + + if not desired_items or not post_items: + return { + "status": "failed", + "message": "{0} verification failed - missing configuration items".format( + user_friendly_name + ), + "details": "Expected configuration items but found incomplete data", + } + + desired_item = desired_items[0] + post_item = post_items[0] + + # Verify configuration parameters + verification_result = self._verify_configuration_parameters( + desired_item, post_item + ) + + if verification_result["success"]: + return { + "status": "success", + "message": "{0} successfully verified and deployed".format( + user_friendly_name + ), + "details": verification_result["details"], + } + else: + return { + "status": "failed", + "message": "{0} verification failed: {1}".format( + user_friendly_name, verification_result["details"] + ), + "details": "Configuration parameters do not match expected values", + } + + def _verify_interface_configuration( + self, user_friendly_name, desired_items, post_items, original_items + ): + """ + Verifies interface configuration by checking individual interface parameters. + Args: + user_friendly_name (str): User-friendly name for the interface feature + desired_items (list): List of desired interface configuration items + post_items (list): List of post-operation deployed interface items + original_items (list): List of original interface configuration items + Returns: + dict: Verification result containing status, message, and details + """ + self.log( + "Verifying interface configuration details for {0}".format( + user_friendly_name + ), + "DEBUG", + ) + + # Create lookup for deployed interfaces by name + post_interface_lookup = {item.get("interfaceName"): item for item in post_items} + + verified_interfaces = [] + failed_interfaces = [] + + for desired_interface in desired_items: + interface_name = desired_interface.get("interfaceName") + post_interface = post_interface_lookup.get(interface_name) + + if not post_interface: + failed_interfaces.append( + "Interface {0} not found in deployed configuration".format( + interface_name + ) + ) + continue + + # Verify interface parameters + interface_verification = self._verify_configuration_parameters( + desired_interface, post_interface + ) + + if interface_verification["success"]: + verified_interfaces.append("Interface {0}".format(interface_name)) + self.log( + "Interface {0} verification successful: {1}".format( + interface_name, interface_verification["details"] + ), + "DEBUG", + ) + else: + failed_interfaces.append( + "Interface {0}: {1}".format( + interface_name, interface_verification["details"] + ) + ) + self.log( + "Interface {0} verification failed: {1}".format( + interface_name, interface_verification["details"] + ), + "DEBUG", + ) + + if failed_interfaces: + return { + "status": "failed", + "message": "{0} verification failed for interfaces: {1}".format( + user_friendly_name, ", ".join(failed_interfaces) + ), + "details": "Successfully verified: {0}, Failed: {1}".format( + len(verified_interfaces), len(failed_interfaces) + ), + } + else: + return { + "status": "success", 
+ "message": "{0} successfully verified for {1} interfaces".format( + user_friendly_name, len(verified_interfaces) + ), + "details": "All requested interface configurations match deployed state", + } + + def _verify_default_configuration( + self, user_friendly_name, desired_items, post_items, original_items + ): + """ + Default verification for other configuration types. + Args: + user_friendly_name (str): User-friendly name for the configuration feature + desired_items (list): List of desired configuration items + post_items (list): List of post-operation deployed items + original_items (list): List of original configuration items + Returns: + dict: Verification result containing status, message, and details + """ + self.log( + "Verifying default configuration for {0}".format(user_friendly_name), + "DEBUG", + ) + + verified_items = 0 + failed_items = 0 + + for i, desired_item in enumerate(desired_items): + if i < len(post_items): + post_item = post_items[i] + + verification_result = self._verify_configuration_parameters( + desired_item, post_item + ) + + if verification_result["success"]: + verified_items += 1 + else: + failed_items += 1 + self.log( + "Item {0} verification failed: {1}".format( + i, verification_result["details"] + ), + "DEBUG", + ) + else: + failed_items += 1 + self.log( + "Item {0} not found in deployed configuration".format(i), "DEBUG" + ) + + if failed_items > 0: + return { + "status": "failed", + "message": "{0} verification failed for {1} items".format( + user_friendly_name, failed_items + ), + "details": "Successfully verified: {0}, Failed: {1}".format( + verified_items, failed_items + ), + } + else: + return { + "status": "success", + "message": "{0} successfully verified for all {1} items".format( + user_friendly_name, verified_items + ), + "details": "All configuration items match deployed state", + } + + def _verify_vlan_parameters(self, desired_vlan, post_vlan): + """ + Verifies individual VLAN parameters. 
+ Args: + desired_vlan (dict): Desired VLAN configuration parameters + post_vlan (dict): Post-operation deployed VLAN configuration + Returns: + dict: Verification result with success status and details + """ + verification_details = [] + verification_success = True + + # Check VLAN name + if "name" in desired_vlan: + desired_name = desired_vlan["name"] + post_name = post_vlan.get("name", "") + if desired_name != post_name: + verification_success = False + verification_details.append( + "Name mismatch: expected '{0}', found '{1}'".format( + desired_name, post_name + ) + ) + self.log( + "VLAN name verification failed: expected '{0}', found '{1}'".format( + desired_name, post_name + ), + "DEBUG", + ) + else: + verification_details.append("Name verified: '{0}'".format(desired_name)) + self.log( + "VLAN name verification successful: '{0}'".format(desired_name), + "DEBUG", + ) + + # Check VLAN admin status + if "isVlanEnabled" in desired_vlan: + desired_status = desired_vlan["isVlanEnabled"] + post_status = post_vlan.get("isVlanEnabled", True) + if desired_status != post_status: + verification_success = False + verification_details.append( + "Admin status mismatch: expected {0}, found {1}".format( + desired_status, post_status + ) + ) + self.log( + "VLAN admin status verification failed: expected {0}, found {1}".format( + desired_status, post_status + ), + "DEBUG", + ) + else: + verification_details.append( + "Admin status verified: {0}".format(desired_status) + ) + self.log( + "VLAN admin status verification successful: {0}".format( + desired_status + ), + "DEBUG", + ) + + self.log( + "VLAN parameter verification completed: success={0}, details={1}".format( + verification_success, len(verification_details) + ), + "DEBUG", + ) + + return { + "success": verification_success, + "details": ( + "; ".join(verification_details) + if verification_details + else "All parameters verified" + ), + } + + def _verify_configuration_parameters(self, desired_config, post_config): + """ + Verifies configuration parameters by comparing desired vs deployed values. 
+ Args: + desired_config (dict): Desired configuration parameters + post_config (dict): Deployed configuration parameters + Returns: + dict: Verification result with success status and parameter details + """ + verification_details = [] + verification_success = True + verified_params = 0 + + # Compare all parameters except configType + for param, desired_value in desired_config.items(): + if param == "configType": + continue + + post_value = post_config.get(param) + + # Handle different types of comparisons + if self._values_match(desired_value, post_value): + verified_params += 1 + verification_details.append("✓ {0}: verified".format(param)) + self.log( + "Parameter '{0}' verified successfully: {1}".format( + param, desired_value + ), + "DEBUG", + ) + else: + verification_success = False + verification_details.append( + "✗ {0}: expected '{1}', found '{2}'".format( + param, desired_value, post_value + ) + ) + self.log( + "Parameter '{0}' verification failed: expected '{1}', found '{2}'".format( + param, desired_value, post_value + ), + "DEBUG", + ) + + summary = "Verified {0} parameters".format(verified_params) + if not verification_success: + failed_count = len([d for d in verification_details if d.startswith("✗")]) + summary += ", {0} failed".format(failed_count) + + self.log( + "Configuration parameter verification completed: success={0}, verified={1}".format( + verification_success, verified_params + ), + "DEBUG", + ) + + return { + "success": verification_success, + "details": summary, + "parameter_details": verification_details, + } + + def _values_match(self, desired, current): + """ + Compare two values for equality, handling different data types appropriately. + Args: + desired: The desired value to compare + current: The current value to compare against + Returns: + bool: True if values match, False otherwise + """ + if not isinstance(desired, type(current)) and not isinstance(current, type(desired)): + self.log( + "Type mismatch detected: desired={0}, current={1}".format( + type(desired).__name__, type(current).__name__ + ), + "DEBUG", + ) + return False + + if isinstance(desired, list): + self.log( + "Comparing list values with lengths: desired={0}, current={1}".format( + len(desired), len(current) + ), + "DEBUG", + ) + if len(desired) != len(current): + self.log( + "List length mismatch: desired={0}, current={1}".format( + len(desired), len(current) + ), + "DEBUG", + ) + return False + + # For lists containing dictionaries, we need to handle comparison differently + if desired and isinstance(desired[0], dict): + self.log( + "Found list of dictionaries, using specialized comparison method", + "DEBUG", + ) + return self._compare_dict_lists(desired, current) + else: + # For lists of simple types, sort and compare + try: + sorted_comparison = all( + self._values_match(d, c) + for d, c in zip(sorted(desired), sorted(current)) + ) + self.log("Sorted list comparison completed successfully", "DEBUG") + return sorted_comparison + except TypeError: + # If sorting fails, compare without sorting (order matters) + self.log( + "Sorting failed, comparing lists in original order", "DEBUG" + ) + return all( + self._values_match(d, c) for d, c in zip(desired, current) + ) + + elif isinstance(desired, dict): + self.log( + "Comparing dictionary values with keys: desired={0}, current={1}".format( + len(desired), len(current) + ), + "DEBUG", + ) + if set(desired.keys()) != set(current.keys()): + self.log( + "Dictionary key sets differ between desired and current values", + "DEBUG", + ) + return 
False + dict_comparison = all( + self._values_match(desired[key], current[key]) for key in desired.keys() + ) + self.log( + "Dictionary comparison completed: {0}".format(dict_comparison), "DEBUG" + ) + return dict_comparison + + else: + # Direct comparison for simple types + simple_comparison = desired == current + self.log( + "Simple value comparison result: {0} (desired='{1}', current='{2}')".format( + simple_comparison, desired, current + ), + "DEBUG", + ) + return simple_comparison + + def _compare_dict_lists(self, desired_list, current_list): + """ + Compare two lists of dictionaries by finding matching items based on key fields. + Args: + desired_list (list): List of desired dictionary items to compare + current_list (list): List of current dictionary items to compare against + Returns: + bool: True if lists match, False otherwise + """ + if len(desired_list) != len(current_list): + self.log( + "List length mismatch: desired={0}, current={1}".format( + len(desired_list), len(current_list) + ), + "DEBUG", + ) + return False + + # For STP instances, use vlanId as the key for matching + if desired_list and "vlanId" in desired_list[0]: + self.log( + "Using vlanId-based matching for STP instances comparison", "DEBUG" + ) + + desired_by_vlan = {item["vlanId"]: item for item in desired_list} + current_by_vlan = {item["vlanId"]: item for item in current_list} + + self.log( + "Created VLAN lookup tables: desired={0} VLANs, current={1} VLANs".format( + len(desired_by_vlan), len(current_by_vlan) + ), + "DEBUG", + ) + + if set(desired_by_vlan.keys()) != set(current_by_vlan.keys()): + self.log( + "VLAN ID sets differ between desired and current lists", "DEBUG" + ) + return False + + # Compare each VLAN's configuration + for vlan_id in desired_by_vlan.keys(): + if not self._values_match( + desired_by_vlan[vlan_id], current_by_vlan[vlan_id] + ): + self.log( + "VLAN {0} configuration mismatch found".format(vlan_id), "DEBUG" + ) + return False + self.log("VLAN {0} configuration matches".format(vlan_id), "DEBUG") + + self.log("All STP instances match successfully", "DEBUG") + return True + + # For other types of dict lists, try to match by content + self.log("Using content-based matching for generic dictionary lists", "DEBUG") + current_list_copy = current_list.copy() + + for i, desired_item in enumerate(desired_list): + self.log("Looking for match for desired item {0}".format(i), "DEBUG") + found_match = False + + for j, current_item in enumerate(current_list_copy): + if self._values_match(desired_item, current_item): + current_list_copy.pop(j) + found_match = True + self.log( + "Found match for desired item {0} at current position {1}".format( + i, j + ), + "DEBUG", + ) + break + + if not found_match: + self.log("No match found for desired item {0}".format(i), "DEBUG") + return False + + remaining_items = len(current_list_copy) + self.log( + "Content-based comparison completed: remaining unmatched items={0}".format( + remaining_items + ), + "DEBUG", + ) + + return remaining_items == 0 + + def _analyze_deletion_requirements(self, want_feature_mappings): + """ + Analyzes deletion requirements and categorizes features by deletion type. 
+ Args: + want_feature_mappings (dict): User feature mappings for deletion + Returns: + dict: Analysis results containing deletion requirements by type + """ + self.log("Starting deletion requirements analysis", "INFO") + + # Extract data from have state + deployed_configs = self.have.get("current_deployed_configs", {}) + intended_configs = self.have.get("current_intended_configs", {}) + network_device_id = self.have.get("network_device_id") + + self.log("Extracted configuration data for deletion analysis", "DEBUG") + self.log("Network device ID: {0}".format(network_device_id), "DEBUG") + self.log( + "Available deployed configs: {0}".format(list(deployed_configs.keys())), + "DEBUG", + ) + self.log( + "Available intended configs: {0}".format(list(intended_configs.keys())), + "DEBUG", + ) + + # Initialize deletion analysis structure with all feature type categories + deletion_analysis = { + "network_device_id": network_device_id, + "type1_global_resets": {}, # cdp, lldp, vtp, dhcp_snooping, authentication + "type2_vlan_deletions": {}, # vlans + "type3_hybrid_features": {}, # stp, igmp_snooping, mld_snooping, logical_ports + "type4_port_configurations": {}, # port_configuration + "summary": { + "total_features": len(want_feature_mappings), + "type1_features": 0, + "type2_features": 0, + "type3_features": 0, + "type4_features": 0, + }, + } + + self.log( + "Initialized deletion analysis structure for {0} features".format( + len(want_feature_mappings) + ), + "DEBUG", + ) + + # Process each user feature mapping to determine deletion type and requirements + for user_feature_name, user_feature_config in want_feature_mappings.items(): + self.log( + "Analyzing deletion for user feature: {0}".format(user_feature_name), + "DEBUG", + ) + self.log( + "User feature config structure: {0}".format(user_feature_config), + "DEBUG", + ) + + # Determine deletion type and process accordingly + deletion_type = self._determine_deletion_type( + user_feature_name, user_feature_config + ) + self.log( + "Determined deletion type '{0}' for feature: {1}".format( + deletion_type, user_feature_name + ), + "DEBUG", + ) + + if deletion_type == "type1": + self.log( + "Processing Type 1 deletion (global reset) for feature: {0}".format( + user_feature_name + ), + "DEBUG", + ) + self._analyze_type1_deletion( + user_feature_name, + user_feature_config, + deployed_configs, + intended_configs, + deletion_analysis, + ) + deletion_analysis["summary"]["type1_features"] += 1 + + elif deletion_type == "type2": + self.log( + "Processing Type 2 deletion (VLAN deletion) for feature: {0}".format( + user_feature_name + ), + "DEBUG", + ) + self._analyze_type2_deletion( + user_feature_name, + user_feature_config, + deployed_configs, + intended_configs, + deletion_analysis, + ) + deletion_analysis["summary"]["type2_features"] += 1 + + elif deletion_type == "type3": + self.log( + "Processing Type 3 deletion (hybrid feature) for feature: {0}".format( + user_feature_name + ), + "DEBUG", + ) + self._analyze_type3_deletion( + user_feature_name, + user_feature_config, + deployed_configs, + intended_configs, + deletion_analysis, + ) + deletion_analysis["summary"]["type3_features"] += 1 + + elif deletion_type == "type4": + self.log( + "Processing Type 4 deletion (port configuration) for feature: {0}".format( + user_feature_name + ), + "DEBUG", + ) + self._analyze_type4_deletion( + user_feature_name, + user_feature_config, + deployed_configs, + intended_configs, + deletion_analysis, + ) + deletion_analysis["summary"]["type4_features"] += 1 + else: + 
self.log( + "Unknown deletion type '{0}' for feature: {1}".format( + deletion_type, user_feature_name + ), + "WARNING", + ) + + self.log("Completed deletion analysis for all features", "INFO") + self.log( + "Deletion analysis completed: {0}".format(deletion_analysis["summary"]), + "INFO", + ) + self.log( + "Type 1 features: {0}".format( + deletion_analysis["summary"]["type1_features"] + ), + "DEBUG", + ) + self.log( + "Type 2 features: {0}".format( + deletion_analysis["summary"]["type2_features"] + ), + "DEBUG", + ) + self.log( + "Type 3 features: {0}".format( + deletion_analysis["summary"]["type3_features"] + ), + "DEBUG", + ) + self.log( + "Type 4 features: {0}".format( + deletion_analysis["summary"]["type4_features"] + ), + "DEBUG", + ) + + return deletion_analysis + + def _determine_deletion_type(self, user_feature_name, user_feature_config): + """ + Determines the deletion type for a given feature. + Args: + user_feature_name (str): Name of the user feature + user_feature_config (dict): User feature configuration + Returns: + str: Deletion type (type1, type2, type3, type4) + """ + self.log( + "Starting deletion type determination for feature: {0}".format( + user_feature_name + ), + "DEBUG", + ) + self.log( + "Analyzing feature configuration for deletion type classification", "DEBUG" + ) + + # Type 1: Global configs that support only resetting to default settings + type1_features = ["cdp", "lldp", "vtp", "dhcp_snooping", "authentication"] + + # Type 2: VLANs (Delete vlans using update intent API) + type2_features = ["vlans"] + + # Type 3: Configs with global parameters plus components + type3_features = ["stp", "igmp_snooping", "mld_snooping", "logical_ports"] + + # Type 4: Port configurations + type4_features = ["port_configuration"] + + self.log( + "Checking feature '{0}' against Type 1 features (global reset): {1}".format( + user_feature_name, type1_features + ), + "DEBUG", + ) + if user_feature_name in type1_features: + self.log( + "Feature '{0}' classified as Type 1 deletion (global reset)".format( + user_feature_name + ), + "DEBUG", + ) + return "type1" + + self.log( + "Checking feature '{0}' against Type 2 features (VLAN deletion): {1}".format( + user_feature_name, type2_features + ), + "DEBUG", + ) + if user_feature_name in type2_features: + self.log( + "Feature '{0}' classified as Type 2 deletion (VLAN deletion)".format( + user_feature_name + ), + "DEBUG", + ) + return "type2" + + self.log( + "Checking feature '{0}' against Type 3 features (hybrid features): {1}".format( + user_feature_name, type3_features + ), + "DEBUG", + ) + if user_feature_name in type3_features: + self.log( + "Feature '{0}' classified as Type 3 deletion (hybrid feature)".format( + user_feature_name + ), + "DEBUG", + ) + return "type3" + + self.log( + "Checking feature '{0}' against Type 4 features (port configurations): {1}".format( + user_feature_name, type4_features + ), + "DEBUG", + ) + if user_feature_name in type4_features: + self.log( + "Feature '{0}' classified as Type 4 deletion (port configuration)".format( + user_feature_name + ), + "DEBUG", + ) + return "type4" + + self.log( + "Unknown feature type for deletion: {0}".format(user_feature_name), + "WARNING", + ) + self.log( + "Feature '{0}' does not match any known deletion type patterns".format( + user_feature_name + ), + "DEBUG", + ) + return "unknown" + + def _analyze_type1_deletion( + self, + user_feature_name, + user_feature_config, + deployed_configs, + intended_configs, + deletion_analysis, + ): + """ + Analyzes Type 1 deletion 
requirements (global config resets). + Args: + user_feature_name (str): Name of the user feature + user_feature_config (dict): User feature configuration + deployed_configs (dict): Current deployed configurations + intended_configs (dict): Current intended configurations + deletion_analysis (dict): Analysis results to populate + """ + self.log( + "Analyzing Type 1 deletion for feature: {0}".format(user_feature_name), + "DEBUG", + ) + + # Get the API feature name + api_feature_name = list(user_feature_config.keys())[0] + + # Check if deployed configuration exists + deployed_config = deployed_configs.get(api_feature_name, {}) + intended_config = intended_configs.get(api_feature_name, {}) + + deployed_items = ( + deployed_config.get("response", {}) + .get(api_feature_name, {}) + .get("items", []) + ) + intended_items = ( + intended_config.get("response", {}) + .get(api_feature_name, {}) + .get("items", []) + ) + + if not deployed_items: + self.log( + "No deployed configuration found for {0}, skipping deletion".format( + user_feature_name + ), + "INFO", + ) + return + + deletion_analysis["type1_global_resets"][user_feature_name] = { + "api_feature_name": api_feature_name, + "has_deployed_config": bool(deployed_items), + "has_intended_config": bool(intended_items), + "deployed_config": deployed_config, + "intended_config": intended_config, + "operation_sequence": self._determine_type1_operation_sequence( + bool(deployed_items), bool(intended_items) + ), + } + + def _analyze_type2_deletion( + self, + user_feature_name, + user_feature_config, + deployed_configs, + intended_configs, + deletion_analysis, + ): + """ + Analyzes Type 2 deletion requirements (VLAN deletions). + Args: + user_feature_name (str): Name of the user feature + user_feature_config (dict): User feature configuration + deployed_configs (dict): Current deployed configurations + intended_configs (dict): Current intended configurations + deletion_analysis (dict): Analysis results to populate + """ + self.log( + "Analyzing Type 2 deletion for feature: {0}".format(user_feature_name), + "DEBUG", + ) + + # Get the API feature name + api_feature_name = list(user_feature_config.keys())[0] + desired_vlans = user_feature_config[api_feature_name].get("items", []) + + # Get current configurations + deployed_config = deployed_configs.get(api_feature_name, {}) + intended_config = intended_configs.get(api_feature_name, {}) + + deployed_items = ( + deployed_config.get("response", {}) + .get(api_feature_name, {}) + .get("items", []) + ) + intended_items = ( + intended_config.get("response", {}) + .get(api_feature_name, {}) + .get("items", []) + ) + + # Create lookup for deployed VLANs by ID + deployed_vlan_lookup = {vlan.get("vlanId"): vlan for vlan in deployed_items} + intended_vlan_lookup = {vlan.get("vlanId"): vlan for vlan in intended_items} + + # Analyze which VLANs can be deleted + vlans_to_delete = [] + vlans_to_skip = [] + + for desired_vlan in desired_vlans: + vlan_id = desired_vlan.get("vlanId") + + if vlan_id in deployed_vlan_lookup: + vlans_to_delete.append( + { + "vlan_id": vlan_id, + "deployed_config": deployed_vlan_lookup[vlan_id], + "in_intended": vlan_id in intended_vlan_lookup, + "intended_config": intended_vlan_lookup.get(vlan_id), + } + ) + else: + vlans_to_skip.append(vlan_id) + self.log( + "VLAN {0} not found in deployed config, skipping deletion".format( + vlan_id + ), + "INFO", + ) + + if vlans_to_delete or vlans_to_skip: + deletion_analysis["type2_vlan_deletions"][user_feature_name] = { + "api_feature_name": 
api_feature_name, + "vlans_to_delete": vlans_to_delete, + "vlans_to_skip": vlans_to_skip, + "has_intended_config": bool(intended_items), + "deployed_config": deployed_config, + "intended_config": intended_config, + "operation_sequence": self._determine_type2_operation_sequence( + vlans_to_delete, bool(intended_items) + ), + } + + def _analyze_type3_deletion( + self, + user_feature_name, + user_feature_config, + deployed_configs, + intended_configs, + deletion_analysis, + ): + """ + Analyzes Type 3 deletion requirements (hybrid features - not supported due to beta APIs). + Args: + user_feature_name (str): Name of the user feature + user_feature_config (dict): User feature configuration + deployed_configs (dict): Current deployed configurations + intended_configs (dict): Current intended configurations + deletion_analysis (dict): Analysis results to populate + """ + self.log( + "Type 3 deletion requested for feature: {0}".format(user_feature_name), + "INFO", + ) + self.log( + "DELETION NOT SUPPORTED: Feature '{0}' deletion is not implemented due to beta API limitations".format( + user_feature_name + ), + "INFO", + ) + + # Get the API feature name for logging purposes + api_feature_name = ( + list(user_feature_config.keys())[0] if user_feature_config else "unknown" + ) + + deletion_analysis["type3_hybrid_features"][user_feature_name] = { + "api_feature_name": api_feature_name, + "status": "not_supported", + "reason": "Feature deletion not implemented - underlying APIs are in beta", + "message": "Deletion for {0} is not supported due to beta API limitations".format( + user_feature_name + ), + } + + def _analyze_type4_deletion( + self, + user_feature_name, + user_feature_config, + deployed_configs, + intended_configs, + deletion_analysis, + ): + """ + Analyzes Type 4 deletion requirements (port configurations - not supported due to beta APIs). + Args: + user_feature_name (str): Name of the user feature + user_feature_config (dict): User feature configuration + deployed_configs (dict): Current deployed configurations + intended_configs (dict): Current intended configurations + deletion_analysis (dict): Analysis results to populate + """ + self.log( + "Type 4 deletion requested for feature: {0}".format(user_feature_name), + "INFO", + ) + self.log( + "DELETION NOT SUPPORTED: Feature '{0}' deletion is not implemented due to beta API limitations".format( + user_feature_name + ), + "INFO", + ) + + # Get the API feature name for logging purposes + api_feature_name = ( + list(user_feature_config.keys())[0] if user_feature_config else "unknown" + ) + + deletion_analysis["type4_port_configurations"][user_feature_name] = { + "api_feature_name": api_feature_name, + "status": "not_supported", + "reason": "Feature deletion not implemented - underlying APIs are in beta", + "message": "Deletion for {0} is not supported due to beta API limitations".format( + user_feature_name + ), + } + + def _determine_type1_operation_sequence(self, has_deployed, has_intended): + """ + Determines operation sequence for Type 1 deletions. 
+ Args: + has_deployed (bool): Whether deployed configuration exists + has_intended (bool): Whether intended configuration exists + Returns: + str: Operation sequence identifier for Type 1 deletion + """ + self.log( + "Starting operation sequence determination for Type 1 deletion", "DEBUG" + ) + self.log( + "Configuration state analysis: deployed={0}, intended={1}".format( + has_deployed, has_intended + ), + "DEBUG", + ) + + if not has_deployed: + self.log( + "No deployed configuration found - skipping deletion operation", "DEBUG" + ) + self.log("Operation sequence determined: skip_no_deployed", "DEBUG") + return "skip_no_deployed" + else: + self.log( + "Deployed configuration exists - proceeding with deletion and deployment", + "DEBUG", + ) + self.log("Operation sequence determined: delete_intent_and_deploy", "DEBUG") + # Simplified - the delete operation will handle intent creation if needed + return "delete_intent_and_deploy" + + def _determine_type2_operation_sequence(self, vlans_to_delete, has_intended): + """ + Determines operation sequence for Type 2 deletions. + Args: + vlans_to_delete (list): List of VLANs to delete + has_intended (bool): Whether intended config exists + Returns: + str: Operation sequence identifier + """ + self.log( + "Starting operation sequence determination for Type 2 deletion", "DEBUG" + ) + self.log("VLANs to delete count: {0}".format(len(vlans_to_delete)), "DEBUG") + self.log("Intended configuration exists: {0}".format(has_intended), "DEBUG") + + if not vlans_to_delete: + self.log("No VLANs found for deletion - skipping operation", "DEBUG") + self.log("Operation sequence determined: skip_no_vlans", "DEBUG") + return "skip_no_vlans" + elif has_intended: + self.log( + "Intended configuration exists - updating existing intent to remove VLANs", + "DEBUG", + ) + self.log( + "Operation sequence determined: update_intent_remove_vlans_and_deploy", + "DEBUG", + ) + return "update_intent_remove_vlans_and_deploy" + else: + self.log( + "No intended configuration found - creating intent, updating, and deploying", + "DEBUG", + ) + self.log( + "Operation sequence determined: create_intent_update_remove_and_deploy", + "DEBUG", + ) + return "create_intent_update_remove_and_deploy" + + def _execute_deletion_operations( + self, deletion_analysis, network_device_id, device_identifier + ): + """ + Executes all deletion operations based on the analysis. 
+ Args: + deletion_analysis (dict): Analysis results from _analyze_deletion_requirements + network_device_id (str): Network device ID + device_identifier (str): Device identifier for logging + Returns: + dict: Results of all deletion operations + """ + self.log( + "Starting execution of deletion operations for device {0}".format( + device_identifier + ), + "INFO", + ) + + deletion_results = { + "executed_operations": {}, + "summary": { + "total_operations": 0, + "successful_operations": 0, + "failed_operations": 0, + "skipped_operations": 0, + }, + "deployment_results": [], + } + + failed_operations = [] + + # Execute Type 1 deletions (global resets) + if deletion_analysis.get("type1_global_resets"): + self.log("Executing Type 1 global reset operations", "INFO") + for feature_name, analysis in deletion_analysis[ + "type1_global_resets" + ].items(): + try: + result = self._execute_type1_deletion( + feature_name, analysis, network_device_id + ) + deletion_results["executed_operations"][feature_name] = result + deletion_results["summary"]["total_operations"] += 1 + + if result.get("status") == "success": + deletion_results["summary"]["successful_operations"] += 1 + else: + deletion_results["summary"]["failed_operations"] += 1 + failed_operations.append( + {"feature": feature_name, "error": result.get("error")} + ) + + except Exception as e: + error_msg = "Exception during Type 1 deletion for {0}: {1}".format( + feature_name, str(e) + ) + self.log(error_msg, "ERROR") + failed_operations.append( + {"feature": feature_name, "error": error_msg} + ) + deletion_results["summary"]["failed_operations"] += 1 + + # Execute Type 2 deletions (VLAN deletions) + if deletion_analysis.get("type2_vlan_deletions"): + self.log("Executing Type 2 VLAN deletion operations", "INFO") + for feature_name, analysis in deletion_analysis[ + "type2_vlan_deletions" + ].items(): + try: + result = self._execute_type2_deletion( + feature_name, analysis, network_device_id + ) + deletion_results["executed_operations"][feature_name] = result + deletion_results["summary"]["total_operations"] += 1 + + if result.get("status") == "success": + deletion_results["summary"]["successful_operations"] += 1 + elif result.get("status") == "skipped": + deletion_results["summary"]["skipped_operations"] += 1 + else: + deletion_results["summary"]["failed_operations"] += 1 + failed_operations.append( + {"feature": feature_name, "error": result.get("error")} + ) + + except Exception as e: + error_msg = "Exception during Type 2 deletion for {0}: {1}".format( + feature_name, str(e) + ) + self.log(error_msg, "ERROR") + failed_operations.append( + {"feature": feature_name, "error": error_msg} + ) + deletion_results["summary"]["failed_operations"] += 1 + + # Execute Type 3 deletions (hybrid features) + if deletion_analysis.get("type3_hybrid_features"): + self.log("Processing Type 3 hybrid feature deletion requests", "INFO") + for feature_name, analysis in deletion_analysis[ + "type3_hybrid_features" + ].items(): + try: + result = self._execute_type3_deletion( + feature_name, analysis, network_device_id + ) + deletion_results["executed_operations"][feature_name] = result + deletion_results["summary"]["total_operations"] += 1 + + if result.get("status") == "success": + deletion_results["summary"]["successful_operations"] += 1 + elif result.get("status") in ["skipped", "not_supported"]: + deletion_results["summary"]["skipped_operations"] += 1 + else: + deletion_results["summary"]["failed_operations"] += 1 + failed_operations.append( + {"feature": 
feature_name, "error": result.get("error")} + ) + + except Exception as e: + error_msg = "Exception during Type 3 deletion for {0}: {1}".format( + feature_name, str(e) + ) + self.log(error_msg, "ERROR") + failed_operations.append( + {"feature": feature_name, "error": error_msg} + ) + deletion_results["summary"]["failed_operations"] += 1 + + # Execute Type 4 deletions (port configurations) + if deletion_analysis.get("type4_port_configurations"): + self.log("Processing Type 4 port configuration deletion requests", "INFO") + for feature_name, analysis in deletion_analysis[ + "type4_port_configurations" + ].items(): + try: + result = self._execute_type4_deletion( + feature_name, analysis, network_device_id + ) + deletion_results["executed_operations"][feature_name] = result + deletion_results["summary"]["total_operations"] += 1 + + if result.get("status") == "success": + deletion_results["summary"]["successful_operations"] += 1 + elif result.get("status") in ["skipped", "not_supported"]: + deletion_results["summary"]["skipped_operations"] += 1 + else: + deletion_results["summary"]["failed_operations"] += 1 + failed_operations.append( + {"feature": feature_name, "error": result.get("error")} + ) + + except Exception as e: + error_msg = "Exception during Type 4 deletion for {0}: {1}".format( + feature_name, str(e) + ) + self.log(error_msg, "ERROR") + failed_operations.append( + {"feature": feature_name, "error": error_msg} + ) + deletion_results["summary"]["failed_operations"] += 1 + + # Check for failures + if failed_operations: + failure_msg = "Failed to delete Wired Campus Automation configurations for device {0}. Failures: {1}".format( + device_identifier, "; ".join([f["error"] for f in failed_operations]) + ) + self.msg = failure_msg + self.set_operation_result("failed", True, self.msg, "ERROR") + self.fail_and_exit(self.msg) + + # Set appropriate message based on what actually happened + if deletion_results["summary"]["successful_operations"] > 0: + success_msg = "Successfully deleted Wired Campus Automation configurations for device {0}.".format( + device_identifier + ) + self.msg = success_msg + self.set_operation_result("success", True, self.msg, "INFO") + elif deletion_results["summary"]["skipped_operations"] > 0: + # When all operations were skipped (like in this VLAN 4001 case) + no_op_msg = ( + "No Wired Campus Automation configuration changes required for device {0}. " + "Requested configurations were not found or already in desired state." + ).format(device_identifier) + self.msg = no_op_msg + self.set_operation_result("success", False, self.msg, "INFO") + else: + no_op_msg = "No Wired Campus Automation configurations required deletion for device {0}.".format( + device_identifier + ) + self.msg = no_op_msg + self.set_operation_result("success", False, self.msg, "INFO") + + return deletion_results + + def _execute_type1_deletion(self, feature_name, analysis, network_device_id): + """ + Executes Type 1 deletion (global config reset) with proper intent handling. 
+ Args: + feature_name (str): Name of the feature to delete + analysis (dict): Analysis results containing deletion requirements + network_device_id (str): Network device ID for the deletion operation + Returns: + dict: Result of the deletion operation with status and operation details + """ + self.log( + "Executing Type 1 deletion for feature: {0}".format(feature_name), "INFO" + ) + + api_feature_name = analysis["api_feature_name"] + operation_sequence = analysis["operation_sequence"] + + self.log( + "Processing deletion with API feature name: {0}".format(api_feature_name), + "DEBUG", + ) + self.log( + "Operation sequence determined: {0}".format(operation_sequence), "DEBUG" + ) + + try: + if operation_sequence == "skip_no_deployed": + self.log( + "Skipping deletion operation - no deployed configuration exists", + "INFO", + ) + return { + "status": "success", + "message": "No deployed configuration found for {0}, skipping deletion".format( + feature_name + ), + "operations_performed": [], + } + + elif operation_sequence == "delete_intent_and_deploy": + self.log( + "Deleting intent for {0} (with automatic intent creation if needed)".format( + feature_name + ), + "INFO", + ) + + # Execute the delete intent operation + delete_result = self._execute_delete_intent_operation( + network_device_id, api_feature_name, feature_name + ) + + if delete_result["status"] != "success": + self.log( + "Delete intent operation failed for feature: {0}".format( + feature_name + ), + "ERROR", + ) + return delete_result + + self.log( + "Delete intent operation completed successfully for feature: {0}".format( + feature_name + ), + "DEBUG", + ) + + # Deploy changes if not already deployed during intent creation + if not delete_result.get("skipped"): + self.log("Proceeding with deployment of deletion changes", "INFO") + deploy_result = self._execute_deployment_operation( + network_device_id + ) + + if deploy_result["status"] == "success": + self.log("Deployment operation completed successfully", "DEBUG") + success_message = "Successfully reset {0} configuration".format( + feature_name + ) + self.log(success_message, "INFO") + + return { + "status": "success", + "message": success_message, + "operations_performed": ["delete_intent", "deploy"], + "delete_result": delete_result, + "deploy_result": deploy_result, + } + else: + self.log( + "Deployment operation failed for feature: {0}".format( + feature_name + ), + "ERROR", + ) + failure_message = "Failed to deploy {0} deletion".format( + feature_name + ) + + return { + "status": "failed", + "message": failure_message, + "operations_performed": ["delete_intent", "deploy"], + "delete_result": delete_result, + "deploy_result": deploy_result, + } + else: + self.log( + "Deployment was skipped during delete intent operation", "DEBUG" + ) + return delete_result + else: + # Handle unexpected operation sequence + error_msg = "Unknown operation sequence '{0}' for Type 1 deletion of feature: {1}".format( + operation_sequence, feature_name + ) + self.log(error_msg, "ERROR") + return {"status": "failed", "error": error_msg} + + except Exception as e: + error_msg = "Exception during Type 1 deletion for {0}: {1}".format( + feature_name, str(e) + ) + self.log(error_msg, "ERROR") + self.log( + "Exception details for Type 1 deletion: {0}".format(str(e)), "DEBUG" + ) + + return {"status": "failed", "error": error_msg, "exception": str(e)} + + def _execute_type2_deletion(self, feature_name, analysis, network_device_id): + """ + Executes Type 2 deletion (VLAN deletion via update). 
+ Args: + feature_name (str): Feature name + analysis (dict): Deletion analysis for this feature + network_device_id (str): Network device ID + Returns: + dict: Operation result + """ + self.log( + "Executing Type 2 deletion for feature: {0}".format(feature_name), "INFO" + ) + + api_feature_name = analysis["api_feature_name"] + vlans_to_delete = analysis["vlans_to_delete"] + operation_sequence = analysis["operation_sequence"] + + if not vlans_to_delete: + return { + "status": "skipped", + "message": "No VLANs found for deletion in {0}".format(feature_name), + "operations_performed": [], + } + + try: + if operation_sequence == "update_intent_remove_vlans_and_deploy": + # Get current intended config and remove VLANs + intended_config = analysis["intended_config"] + updated_config = self._remove_vlans_from_intent_config( + intended_config, vlans_to_delete, api_feature_name + ) + + self.log( + "Updating intent to remove VLANs for {0}".format(feature_name), + "INFO", + ) + update_result = self._execute_update_intent_operation( + network_device_id, api_feature_name, updated_config, feature_name + ) + + if update_result["status"] != "success": + return update_result + + # Deploy changes + deploy_result = self._execute_deployment_operation(network_device_id) + + return { + "status": ( + "success" if deploy_result["status"] == "success" else "failed" + ), + "message": ( + "Successfully deleted {0} VLANs from {1}".format( + len(vlans_to_delete), feature_name + ) + if deploy_result["status"] == "success" + else "Failed to deploy VLAN deletion for {0}".format( + feature_name + ) + ), + "operations_performed": ["update_intent", "deploy"], + "vlans_deleted": [v["vlan_id"] for v in vlans_to_delete], + "update_result": update_result, + "deploy_result": deploy_result, + } + + elif operation_sequence == "create_intent_update_remove_and_deploy": + # Create intent from deployed config + deployed_config = analysis["deployed_config"] + mapped_config = self._map_deployed_to_intent_config( + api_feature_name, deployed_config + ) + + self.log( + "Creating intent for {0} from deployed config".format(feature_name), + "INFO", + ) + create_result = self._execute_create_intent_operation( + network_device_id, api_feature_name, mapped_config, feature_name + ) + + if create_result["status"] != "success": + return create_result + + # Deploy intent + deploy_result1 = self._execute_deployment_operation(network_device_id) + if deploy_result1["status"] != "success": + return { + "status": "failed", + "error": "Failed to deploy intent creation for {0}: {1}".format( + feature_name, deploy_result1.get("error") + ), + "operations_performed": ["create_intent"], + } + + # Remove VLANs and update + updated_config = self._remove_vlans_from_intent_config( + {"response": {api_feature_name: mapped_config[api_feature_name]}}, + vlans_to_delete, + api_feature_name, + ) + + self.log( + "Updating intent to remove VLANs for {0}".format(feature_name), + "INFO", + ) + update_result = self._execute_update_intent_operation( + network_device_id, api_feature_name, updated_config, feature_name + ) + + if update_result["status"] != "success": + return update_result + + # Deploy deletion + deploy_result2 = self._execute_deployment_operation(network_device_id) + + return { + "status": ( + "success" if deploy_result2["status"] == "success" else "failed" + ), + "message": ( + "Successfully deleted {0} VLANs from {1}".format( + len(vlans_to_delete), feature_name + ) + if deploy_result2["status"] == "success" + else "Failed to deploy VLAN deletion for 
{0}".format( + feature_name + ) + ), + "operations_performed": [ + "create_intent", + "deploy", + "update_intent", + "deploy", + ], + "vlans_deleted": [v["vlan_id"] for v in vlans_to_delete], + "create_result": create_result, + "deploy_result1": deploy_result1, + "update_result": update_result, + "deploy_result2": deploy_result2, + } + + except Exception as e: + error_msg = "Exception during Type 2 deletion for {0}: {1}".format( + feature_name, str(e) + ) + self.log(error_msg, "ERROR") + return {"status": "failed", "error": error_msg, "exception": str(e)} + + def _execute_type3_deletion(self, feature_name, analysis, network_device_id): + """ + Handles Type 3 deletion (not supported due to beta APIs). + Args: + feature_name (str): Feature name + analysis (dict): Deletion analysis for this feature + network_device_id (str): Network device ID + Returns: + dict: Operation result indicating not supported + """ + self.log( + "Type 3 deletion execution for feature: {0}".format(feature_name), "INFO" + ) + self.log( + "DELETION NOT SUPPORTED: {0}".format( + analysis.get("message", "Feature deletion not supported") + ), + "INFO", + ) + + return { + "status": "not_supported", + "message": analysis.get( + "message", "Deletion not supported due to beta API limitations" + ), + "reason": analysis.get("reason", "Underlying APIs are in beta"), + "operations_performed": [], + } + + def _execute_type4_deletion(self, feature_name, analysis, network_device_id): + """ + Handles Type 4 deletion (not supported due to beta APIs). + Args: + feature_name (str): Feature name + analysis (dict): Deletion analysis for this feature + network_device_id (str): Network device ID + Returns: + dict: Operation result indicating not supported + """ + self.log( + "Type 4 deletion execution for feature: {0}".format(feature_name), "INFO" + ) + self.log( + "DELETION NOT SUPPORTED: {0}".format( + analysis.get("message", "Feature deletion not supported") + ), + "INFO", + ) + + return { + "status": "not_supported", + "message": analysis.get( + "message", "Deletion not supported due to beta API limitations" + ), + "reason": analysis.get("reason", "Underlying APIs are in beta"), + "operations_performed": [], + } + + def _execute_delete_intent_operation( + self, network_device_id, api_feature_name, feature_name + ): + """ + Executes delete intent operation with proper intent existence checking. + Uses already-fetched intended configuration from self.have to avoid additional API calls. 
+ Args: + network_device_id (str): Network device ID + api_feature_name (str): API feature name + feature_name (str): User-friendly feature name + Returns: + dict: Delete operation result + """ + self.log( + "Checking if intent exists before deletion for {0}".format(feature_name), + "DEBUG", + ) + + # Use already-fetched intended configuration from self.have + current_intended_configs = self.have.get("current_intended_configs", {}) + intended_config = current_intended_configs.get(api_feature_name, {}) + intent_exists = bool( + intended_config.get("response", {}).get(api_feature_name, {}).get("items") + ) + + self.log( + "Intent existence check for {0}: {1}".format(feature_name, intent_exists), + "DEBUG", + ) + + if not intent_exists: + self.log( + "No intent exists for {0}, creating intent from deployed config before deletion".format( + feature_name + ), + "INFO", + ) + + # Use already-fetched deployed configuration from self.have + current_deployed_configs = self.have.get("current_deployed_configs", {}) + deployed_config = current_deployed_configs.get(api_feature_name, {}) + + if ( + not deployed_config.get("response", {}) + .get(api_feature_name, {}) + .get("items") + ): + self.log( + "No deployed configuration found for {0}, skipping deletion".format( + feature_name + ), + "INFO", + ) + return { + "status": "success", + "message": "No configuration found to delete for {0}".format( + feature_name + ), + "skipped": True, + } + + # Map deployed config to intent format + mapped_config = self._map_deployed_to_intent_config( + api_feature_name, deployed_config + ) + + # Create intent from deployed config + create_result = self._execute_create_intent_operation( + network_device_id, api_feature_name, mapped_config, feature_name + ) + + if create_result["status"] != "success": + return { + "status": "failed", + "error": "Failed to create intent before deletion for {0}: {1}".format( + feature_name, create_result.get("error") + ), + "create_result": create_result, + } + + # Deploy the created intent + deploy_result = self._execute_deployment_operation(network_device_id) + + if deploy_result["status"] != "success": + return { + "status": "failed", + "error": "Failed to deploy intent before deletion for {0}: {1}".format( + feature_name, deploy_result.get("error") + ), + "deploy_result": deploy_result, + } + + self.log( + "Successfully created and deployed intent for {0}, proceeding with deletion".format( + feature_name + ), + "INFO", + ) + + # Now proceed with the actual deletion + try: + task_response = self.delete_layer2_feature_configuration( + network_device_id, api_feature_name + ) + + if task_response and isinstance(task_response, str): + task_id = task_response + task_name = "Delete {0} Intent Configuration".format(feature_name) + success_msg = "Successfully deleted {0} intent configuration".format( + feature_name + ) + + self.log( + "Delete task initiated for {0}, task ID: {1}".format( + feature_name, task_id + ), + "INFO", + ) + + # Monitor task completion + self.get_task_status_from_tasks_by_id( + task_id, task_name, success_msg + ).check_return_status() + + if self.status == "success": + return { + "status": "success", + "task_id": task_id, + "message": success_msg, + } + else: + return {"status": "failed", "error": self.msg, "task_id": task_id} + else: + return { + "status": "failed", + "error": "Invalid delete response format for {0}".format( + feature_name + ), + "response": task_response, + } + + except Exception as e: + error_msg = "Failed to execute delete operation for {0}: 
{1}".format( + feature_name, str(e) + ) + self.log(error_msg, "ERROR") + return {"status": "failed", "error": error_msg, "exception": str(e)} + + def _execute_create_intent_operation( + self, network_device_id, api_feature_name, config, feature_name + ): + """ + Executes create intent operation. + Args: + network_device_id (str): Network device ID + api_feature_name (str): API feature name + config (dict): Configuration to create + feature_name (str): User-friendly feature name + Returns: + dict: Create operation result + """ + try: + task_response = self.create_layer2_feature_configuration( + network_device_id, api_feature_name, config + ) + + if task_response and isinstance(task_response, str): + task_id = task_response + task_name = "Create {0} Intent Configuration".format(feature_name) + success_msg = "Successfully created {0} intent configuration".format( + feature_name + ) + + self.log( + "Create task initiated for {0}, task ID: {1}".format( + feature_name, task_id + ), + "INFO", + ) + + # Monitor task completion + self.get_task_status_from_tasks_by_id( + task_id, task_name, success_msg + ).check_return_status() + + if self.status == "success": + return { + "status": "success", + "task_id": task_id, + "message": success_msg, + } + else: + return {"status": "failed", "error": self.msg, "task_id": task_id} + else: + return { + "status": "failed", + "error": "Invalid create response format for {0}".format( + feature_name + ), + "response": task_response, + } + + except Exception as e: + error_msg = "Failed to execute create operation for {0}: {1}".format( + feature_name, str(e) + ) + self.log(error_msg, "ERROR") + return {"status": "failed", "error": error_msg, "exception": str(e)} + + def _execute_update_intent_operation( + self, network_device_id, api_feature_name, config, feature_name + ): + """ + Executes update intent operation using existing infrastructure. + Args: + network_device_id (str): Network device ID + api_feature_name (str): API feature name + config (dict): Configuration to update + feature_name (str): User-friendly feature name + Returns: + dict: Update operation result + """ + try: + task_response = self.update_layer2_feature_configuration( + network_device_id, api_feature_name, config + ) + + if task_response and isinstance(task_response, str): + task_id = task_response + task_name = "Update {0} Intent Configuration".format(feature_name) + success_msg = "Successfully updated {0} intent configuration".format( + feature_name + ) + + self.log( + "Update task initiated for {0}, task ID: {1}".format( + feature_name, task_id + ), + "INFO", + ) + + # Monitor task completion using existing infrastructure + self.get_task_status_from_tasks_by_id( + task_id, task_name, success_msg + ).check_return_status() + + if self.status == "success": + return { + "status": "success", + "task_id": task_id, + "message": success_msg, + } + else: + return {"status": "failed", "error": self.msg, "task_id": task_id} + else: + return { + "status": "failed", + "error": "Invalid update response format for {0}".format( + feature_name + ), + "response": task_response, + } + + except Exception as e: + error_msg = "Failed to execute update operation for {0}: {1}".format( + feature_name, str(e) + ) + self.log(error_msg, "ERROR") + return {"status": "failed", "error": error_msg, "exception": str(e)} + + def _execute_deployment_operation(self, network_device_id): + """ + Executes deployment operation using existing infrastructure. 
+ Args: + network_device_id (str): Network device ID + Returns: + dict: Deploy operation result + """ + try: + task_response = self.deploy_intended_configurations(network_device_id) + + if task_response and isinstance(task_response, str): + task_id = task_response + task_name = "Deploy Wired Campus Automation Configuration" + success_msg = ( + "Successfully deployed Wired Campus Automation configuration" + ) + + self.log( + "Deployment task initiated, task ID: {0}".format(task_id), "INFO" + ) + + # Monitor task completion using existing infrastructure + self.get_task_status_from_tasks_by_id( + task_id, task_name, success_msg + ).check_return_status() + + if self.status == "success": + return { + "status": "success", + "task_id": task_id, + "message": success_msg, + } + else: + return {"status": "failed", "error": self.msg, "task_id": task_id} + else: + return { + "status": "failed", + "error": "Invalid deployment response format", + "response": task_response, + } + + except Exception as e: + error_msg = "Failed to execute deployment operation: {0}".format(str(e)) + self.log(error_msg, "ERROR") + return {"status": "failed", "error": error_msg, "exception": str(e)} + + def _map_deployed_to_intent_config(self, api_feature_name, deployed_config): + """ + Maps deployed configuration to intent configuration format. + Args: + api_feature_name (str): API feature name + deployed_config (dict): Deployed configuration + Returns: + dict: Intent configuration format + """ + self.log( + "Starting mapping of deployed configuration to intent format for feature: {0}".format( + api_feature_name + ), + "DEBUG", + ) + self.log( + "Input deployed configuration structure: {0}".format(deployed_config), + "DEBUG", + ) + + deployed_items = ( + deployed_config.get("response", {}) + .get(api_feature_name, {}) + .get("items", []) + ) + + self.log( + "Extracted {0} deployed items from configuration".format( + len(deployed_items) + ), + "DEBUG", + ) + + if not deployed_items: + self.log( + "No deployed items found, returning empty intent configuration", "DEBUG" + ) + return {api_feature_name: {"items": []}} + + # Map deployed items to intent format + intent_items = [] + for item in deployed_items: + self.log("Processing deployed item for intent mapping", "DEBUG") + # Remove any deployment-specific fields and keep configuration fields + intent_item = self._clean_deployed_item_for_intent(item) + intent_items.append(intent_item) + self.log("Successfully mapped deployed item to intent format", "DEBUG") + + self.log( + "Successfully mapped {0} deployed items to intent format".format( + len(intent_items) + ), + "DEBUG", + ) + + intent_config = {api_feature_name: {"items": intent_items}} + + self.log( + "Final intent configuration structure: {0}".format(intent_config), "DEBUG" + ) + self.log( + "Deployed to intent mapping completed successfully for feature: {0}".format( + api_feature_name + ), + "INFO", + ) + + return intent_config + + def _clean_deployed_item_for_intent(self, deployed_item): + """ + Cleans deployed item to make it suitable for intent configuration. 
+ Args: + deployed_item (dict): Deployed configuration item + Returns: + dict: Cleaned item for intent + """ + self.log( + "Starting cleanup of deployed item for intent configuration compatibility", + "DEBUG", + ) + self.log("Input deployed item structure: {0}".format(deployed_item), "DEBUG") + + # Create a copy of the item + intent_item = deployed_item.copy() + self.log("Created copy of deployed item for cleaning process", "DEBUG") + + # Remove deployment-specific fields that shouldn't be in intent + fields_to_remove = [ + "id", + "deviceId", + "deviceName", + "lastUpdated", + "status", + "deploymentId", + ] + + self.log( + "Removing {0} deployment-specific fields from item".format( + len(fields_to_remove) + ), + "DEBUG", + ) + + for field in fields_to_remove: + if field in intent_item: + del intent_item[field] + self.log( + "Removed deployment field '{0}' from intent item".format(field), + "DEBUG", + ) + else: + self.log( + "Field '{0}' not present in deployed item, skipping removal".format( + field + ), + "DEBUG", + ) + + self.log("Deployed item cleanup completed successfully", "DEBUG") + self.log("Cleaned intent item structure: {0}".format(intent_item), "DEBUG") + + return intent_item + + def _remove_vlans_from_intent_config( + self, intended_config, vlans_to_delete, api_feature_name + ): + """ + Removes specified VLANs from intent configuration. + Args: + intended_config (dict): Current intended configuration + vlans_to_delete (list): List of VLANs to delete + api_feature_name (str): API feature name + Returns: + dict: Updated configuration with VLANs removed + """ + self.log("Removing VLANs from intent configuration", "DEBUG") + + # Get current intended items + intended_items = ( + intended_config.get("response", {}) + .get(api_feature_name, {}) + .get("items", []) + ) + + # Create set of VLAN IDs to delete for efficient lookup + vlan_ids_to_delete = {vlan["vlan_id"] for vlan in vlans_to_delete} + + # Filter out VLANs that need to be deleted + remaining_items = [] + for item in intended_items: + vlan_id = item.get("vlanId") + if vlan_id not in vlan_ids_to_delete: + remaining_items.append(item) + else: + self.log( + "Removing VLAN {0} from intent configuration".format(vlan_id), + "DEBUG", + ) + + self.log( + "Remaining VLANs after deletion: {0}".format(len(remaining_items)), "DEBUG" + ) + + return {api_feature_name: {"items": remaining_items}} + + def get_want(self, config, state): + """ + Validates input parameters, extracts Layer2 feature mappings, and prepares the desired state. 
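+ Note: A minimal illustrative config (values hypothetical) is {"ip_address": "204.1.2.1", "device_collection_status_check": True, "config_verification_wait_time": 10, ...Layer 2 feature settings...}; either "ip_address" or "hostname" identifies the target device.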
+ Args: + config (dict): The configuration details from the playbook + state (str): The desired state of the configuration (Example, "merged", "deleted") + Returns: + self: Returns the instance with the updated "want" attribute containing desired state + """ + self.log("Starting 'get_want' operation with state: {0}".format(state), "INFO") + self.log("Input configuration: {0}".format(config), "DEBUG") + + # Validate the parameters first + self.log("Validating input parameters", "DEBUG") + self.validate_params(config, state) + self.log("Parameter validation completed successfully", "DEBUG") + + # Extract device identification information + ip_address = config.get("ip_address") + hostname = config.get("hostname") + device_collection_status_check = config.get( + "device_collection_status_check", True + ) + config_verification_wait_time = config.get("config_verification_wait_time", 10) + + self.log( + "Device identifiers - IP: {0}, Hostname: {1}".format(ip_address, hostname), + "DEBUG", + ) + self.log( + "Verification wait time: {0} seconds".format(config_verification_wait_time), + "DEBUG", + ) + + # Extract Layer2 feature mappings from user configuration + self.log("Extracting Layer2 feature mappings from user configuration", "INFO") + discovered_features, feature_mappings = self.extract_layer2_feature_mappings( + config + ) + + self.log("Feature mapping extraction completed", "DEBUG") + self.log( + "Discovered {0} API features for processing".format( + len(discovered_features) + ), + "INFO", + ) + + # Build the comprehensive 'want' state + want = { + # Device identification information + "device_identifier": ip_address or hostname, + "ip_address": ip_address, + "hostname": hostname, + "device_collection_status_check": device_collection_status_check, + "config_verification_wait_time": config_verification_wait_time, + # Feature discovery and mapping results + "discovered_api_features": list(discovered_features), + "user_feature_mappings": feature_mappings, + "total_discovered_features": len(discovered_features), + } + + # Store the want state + self.want = want + + self.log("Desired State (want) assembly completed successfully", "INFO") + self.log("Device identifier: {0}".format(want["device_identifier"]), "INFO") + self.log( + "Features to be processed: {0}".format(want["total_discovered_features"]), + "INFO", + ) + self.log( + "API features discovered: {0}".format(want["discovered_api_features"]), + "DEBUG", + ) + self.log("Complete 'want' state structure: {0}".format(str(self.want)), "DEBUG") + + self.msg = "Successfully collected all parameters from the playbook for Wired Campus Automation Operations." + self.status = "success" + return self + + def get_have(self, config, state): + """ + Gathers the current state of the network device based on the desired state from get_want. + Retrieves network device ID and fetches current deployed/intended configurations for all features identified in the want state. + Args: + config (dict): The configuration details (for compatibility, but want is used) + state (str): The desired state (for compatibility, but want is used) + Returns: + self: Returns the instance with the updated "have" attribute containing current state + """ + self.log("Starting 'get_have' operation", "INFO") + self.log( + "Initiating current state gathering for network device configuration analysis", + "DEBUG", + ) + + # Ensure want state exists + if not hasattr(self, "want") or not self.want: + self.msg = ( + "No 'want' state found. get_want() must be called before get_have()." 
+ ) + self.log( + "Want state validation failed - no want state available for processing", + "ERROR", + ) + self.fail_and_exit(self.msg) + + # Extract information from want state + ip_address = self.want.get("ip_address") + hostname = self.want.get("hostname") + discovered_features = self.want.get("discovered_api_features", []) + device_identifier = self.want.get("device_identifier") + + self.log("Processing device: {0}".format(device_identifier), "INFO") + self.log("Extracted device identification parameters from want state", "DEBUG") + self.log("Features to retrieve: {0}".format(discovered_features), "DEBUG") + self.log( + "Total discovered features count: {0}".format(len(discovered_features)), + "DEBUG", + ) + + # Retrieve the network device ID based on the provided IP address or hostname + self.log("Retrieving network device ID for device identification", "DEBUG") + self.log( + "Initiating device ID resolution using IP address or hostname", "DEBUG" + ) + mgmt_ip_to_instance_id_map = self.get_network_device_id(ip_address, hostname) + + if not mgmt_ip_to_instance_id_map: + self.msg = "Failed to retrieve network device ID for device: {0}".format( + device_identifier + ) + self.log( + "Device ID resolution failed for device: {0}".format(device_identifier), + "ERROR", + ) + self.fail_and_exit(self.msg) + + network_device_id = list(mgmt_ip_to_instance_id_map.values())[0] + resolved_ip_address = list(mgmt_ip_to_instance_id_map.keys())[0] + + self.log( + "Device resolution successful - Network Device ID: {0}".format( + network_device_id + ), + "INFO", + ) + self.log("Successfully resolved device identification parameters", "DEBUG") + self.log("Resolved IP Address: {0}".format(resolved_ip_address), "DEBUG") + self.log("Device ID mapping completed successfully", "DEBUG") + + # Initialize configurations dictionaries + deployed_configs = {} + intended_configs = {} + + self.log("Initialized configuration storage dictionaries", "DEBUG") + + # Fetch current configurations only if we have features to process + if discovered_features: + self.log("Fetching current configurations for discovered features", "INFO") + self.log( + "Starting configuration retrieval for {0} discovered features".format( + len(discovered_features) + ), + "DEBUG", + ) + deployed_configs, intended_configs = self.get_current_configs_for_features( + network_device_id, discovered_features + ) + self.log("Configuration retrieval completed", "INFO") + self.log("Configuration fetching operation completed successfully", "DEBUG") + self.log( + "Retrieved deployed configs for {0} features".format( + len(deployed_configs) + ), + "DEBUG", + ) + self.log( + "Retrieved intended configs for {0} features".format( + len(intended_configs) + ), + "DEBUG", + ) + self.log( + "Configuration data successfully populated from Catalyst Center", + "DEBUG", + ) + else: + self.log( + "No features discovered from user configuration, skipping config retrieval", + "INFO", + ) + self.log( + "Configuration retrieval bypassed due to empty feature list", "DEBUG" + ) + + # Build the comprehensive 'have' state + have = { + # Device identification and resolution + "device_identifier": device_identifier, + "network_device_id": network_device_id, + "resolved_ip_address": resolved_ip_address, + # Current configurations from Catalyst Center + "current_deployed_configs": deployed_configs, + "current_intended_configs": intended_configs, + # Summary information + "configs_retrieved_for_features": len(deployed_configs), + "total_features_processed": len(discovered_features), 
+ } + + # Store the have state + self.have = have + + self.log("Have state structure assembled successfully", "DEBUG") + + # Log comprehensive summary + self.log("Current State (have) assembly completed successfully", "INFO") + self.log( + "Device: {0} (ID: {1})".format( + have["device_identifier"], have["network_device_id"] + ), + "INFO", + ) + self.log( + "Deployed configs retrieved for: {0} features".format( + have["configs_retrieved_for_features"] + ), + "INFO", + ) + self.log( + "Total features processed: {0}".format(have["total_features_processed"]), + "INFO", + ) + self.log( + "Have state processing completed with comprehensive device and configuration data", + "DEBUG", + ) + + # Debug logging of complete have structure + self.log("Complete 'have' state structure: {0}".format(str(self.have)), "DEBUG") + self.log( + "Have state contains all required components for difference analysis", + "DEBUG", + ) + + return self + + def get_diff_merged(self): + """ + Main entry point for the merged state operation. + Description: + This is the primary function called when the module is run with state='merged'. + It orchestrates the entire process: + 1. Analyzes configuration differences between current and desired state + 2. Executes required API operations (create/update intent, then deploy) + 3. Sets final operation results and handles success/failure scenarios + 4. Stores results for potential verification steps + Returns: + self: Returns the instance with updated diff and result attributes + """ + self.log("Starting 'get_diff_merged' operation.", "INFO") + + # Analyze configuration differences and determine operations + self.log( + "Analyzing configuration differences and determining required operations", + "INFO", + ) + diff_analysis = self._analyze_configuration_differences() + + # Store the diff analysis results + self.diff = diff_analysis + + # Execute API operations based on the analysis + operation_results = self._execute_api_operations(diff_analysis) + + # Store operation results for potential use in verification + self.result["operation_results"] = operation_results + + self.log( + "Configuration difference analysis and API operations completed", "INFO" + ) + return self + + def get_diff_deleted(self): + """ + Handles the deletion state operation for Layer 2 configurations. + Description: + This method processes deletion requests for Layer 2 features by: + 1. Analyzing current configurations to determine what can be deleted + 2. Executing delete operations for intended configurations + 3. Setting appropriate operation results + Note: This method only deletes intended configurations, not deployed ones. 
+ Returns: + self: Returns the instance with updated diff and result attributes + """ + self.log( + "Starting 'get_diff_deleted' operation for Layer 2 configuration deletion", + "INFO", + ) + + # Extract deletion requirements from want state + want_feature_mappings = self.want.get("user_feature_mappings", {}) + network_device_id = self.have.get("network_device_id") + device_identifier = self.want.get("device_identifier") + + self.log("Extracted deletion requirements from want and have states", "DEBUG") + self.log("Network device ID: {0}".format(network_device_id), "DEBUG") + self.log("Device identifier: {0}".format(device_identifier), "DEBUG") + self.log( + "Feature mappings to process for deletion: {0}".format( + len(want_feature_mappings) + ), + "DEBUG", + ) + + if not want_feature_mappings: + self.log("No feature mappings found for deletion operation", "INFO") + self.msg = "No Layer 2 configurations specified for deletion from device {0}".format( + device_identifier + ) + self.set_operation_result("success", False, self.msg, "INFO") + self.log( + "Deletion operation completed - no configurations to delete", "INFO" + ) + return self + + self.log( + "Processing deletion for {0} Layer 2 features on device {1}".format( + len(want_feature_mappings), device_identifier + ), + "INFO", + ) + + # Analyze what exists and can be deleted + self.log("Starting deletion requirements analysis", "DEBUG") + deletion_analysis = self._analyze_deletion_requirements(want_feature_mappings) + self.log("Deletion requirements analysis completed successfully", "DEBUG") + self.log( + "Deletion analysis summary: {0}".format( + deletion_analysis.get("summary", {}) + ), + "DEBUG", + ) + + # Execute deletion operations + self.log("Starting execution of deletion operations", "INFO") + deletion_results = self._execute_deletion_operations( + deletion_analysis, network_device_id, device_identifier + ) + self.log("Deletion operations execution completed", "INFO") + self.log( + "Deletion results summary: {0}".format(deletion_results.get("summary", {})), + "DEBUG", + ) + + # Store results for verification + self.result["deletion_results"] = deletion_results + self.log("Stored deletion results for potential verification use", "DEBUG") + + self.log( + "Deletion operations completed for device {0}".format(device_identifier), + "INFO", + ) + self.log("Get diff deleted operation completed successfully", "DEBUG") + + return self + + def verify_diff_merged(self): + """ + Verifies that the configuration changes were successfully applied by comparing the current deployed state with the desired configuration. + Description: + This function performs post-deployment verification by: + 1. Retrieving the current deployed configurations for all features + 2. Comparing them with the originally desired configurations + 3. Logging detailed pre and post operation states + 4. 
Logging final verification results without setting operation status + Returns: + self: Returns the instance after completing the verification process + """ + self.log( + "Starting 'verify_diff_merged' operation for configuration verification", + "INFO", + ) + + # Extract verification data from previous operations + want_feature_mappings = self.want.get("user_feature_mappings", {}) + network_device_id = self.have.get("network_device_id") + device_identifier = self.want.get("device_identifier") + discovered_features = self.want.get("discovered_api_features", []) + + self.log("Extracted verification parameters from want and have states", "DEBUG") + self.log("Network device ID: {0}".format(network_device_id), "DEBUG") + self.log("Device identifier: {0}".format(device_identifier), "DEBUG") + self.log( + "Want feature mappings count: {0}".format(len(want_feature_mappings)), + "DEBUG", + ) + self.log( + "Discovered features count: {0}".format(len(discovered_features)), "DEBUG" + ) + + # Get pre-operation state from self.have + pre_operation_deployed_configs = self.have.get("current_deployed_configs", {}) + + self.log( + "Retrieved pre-operation deployed configurations from have state", "DEBUG" + ) + self.log( + "Pre-operation deployed configs available for {0} features".format( + len(pre_operation_deployed_configs) + ), + "DEBUG", + ) + + if not want_feature_mappings: + self.log("No desired configurations found for verification", "INFO") + self.log( + "VERIFICATION RESULT: No configuration changes were requested for verification", + "INFO", + ) + return self + + if not network_device_id: + self.log( + "Network device ID not found, cannot perform verification", "ERROR" + ) + self.log( + "VERIFICATION RESULT: Unable to verify configuration - network device ID not available", + "ERROR", + ) + return self + + # Add configurable wait time before verification + config_verification_wait_time = self.want.get( + "config_verification_wait_time", 10 + ) # Default 10 seconds + self.log( + "Waiting {0} seconds for configuration changes to propagate before verification".format( + config_verification_wait_time + ), + "INFO", + ) + + # Import time module for sleep functionality + import time + + time.sleep(config_verification_wait_time) + + self.log("Configuration propagation wait period completed", "DEBUG") + + self.log( + "CONFIGURATION VERIFICATION - PRE-OPERATION vs POST-OPERATION ANALYSIS", + "INFO", + ) + self.log( + "Device being verified: {0} (ID: {1})".format( + device_identifier, network_device_id + ), + "INFO", + ) + self.log( + "Total user features to verify: {0}".format(len(want_feature_mappings)), + "INFO", + ) + + # Log original pre-operation state + self.log("PRE-OPERATION DEPLOYED CONFIGURATION STATE:", "INFO") + self.log( + "Starting pre-operation state logging for verification baseline", "DEBUG" + ) + self._log_configuration_state( + "Pre-operation", want_feature_mappings, pre_operation_deployed_configs + ) + self.log("Completed pre-operation state logging", "DEBUG") + + # Fetch current post-operation deployed configurations using existing function + self.log( + "Retrieving current post-operation deployed configurations for verification", + "INFO", + ) + + try: + self.log( + "Initiating post-operation configuration retrieval from Catalyst Center", + "DEBUG", + ) + post_operation_deployed_configs, post_operation_intended_configs = self.get_current_configs_for_features( + network_device_id, discovered_features + ) + + self.log( + "Successfully retrieved post-operation configurations for {0} 
features".format( + len(post_operation_deployed_configs) + ), + "INFO", + ) + self.log( + "Post-operation configuration retrieval completed successfully", "DEBUG" + ) + + except Exception as e: + error_msg = "Failed to retrieve post-operation configurations for verification: {0}".format( + str(e) + ) + self.log(error_msg, "ERROR") + self.log( + "Post-operation configuration retrieval failed with exception: {0}".format( + str(e) + ), + "DEBUG", + ) + self.log("VERIFICATION RESULT: {0}".format(error_msg), "ERROR") + return self + + # Log post-operation state + self.log("POST-OPERATION DEPLOYED CONFIGURATION STATE:", "INFO") + self.log( + "Starting post-operation state logging for verification comparison", "DEBUG" + ) + self._log_configuration_state( + "Post-operation", want_feature_mappings, post_operation_deployed_configs + ) + self.log("Completed post-operation state logging", "DEBUG") + + # Perform detailed verification analysis + self.log("DETAILED VERIFICATION ANALYSIS:", "INFO") + self.log("Initiating detailed configuration verification analysis", "DEBUG") + + verification_results = self._perform_detailed_verification( + want_feature_mappings, + pre_operation_deployed_configs, + post_operation_deployed_configs, + ) + + self.log("Detailed verification analysis completed successfully", "DEBUG") + self.log( + "Verification results structure populated with {0} feature results".format( + len(verification_results.get("detailed_results", {})) + ), + "DEBUG", + ) + + # Log verification summary + self.log("VERIFICATION SUMMARY:", "INFO") + self.log( + "Total features verified: {0}".format( + verification_results["total_features_verified"] + ), + "INFO", + ) + self.log( + "Features successfully applied: {0}".format( + verification_results["features_successfully_applied"] + ), + "INFO", + ) + self.log( + "Features with verification failures: {0}".format( + verification_results["features_failed_verification"] + ), + "INFO", + ) + self.log( + "Features not found post-operation: {0}".format( + verification_results["features_not_found"] + ), + "INFO", + ) + + self.log("Verification summary logging completed", "DEBUG") + + # Log final verification result without setting operation status + if verification_results["verification_failed"]: + self.log( + "VERIFICATION RESULT: {0}".format( + verification_results["failure_message"] + ), + "ERROR", + ) + self.log( + "Configuration verification failed - some features did not meet expected state", + "DEBUG", + ) + else: + self.log( + "VERIFICATION RESULT: {0}".format( + verification_results["success_message"] + ), + "INFO", + ) + self.log( + "Configuration verification succeeded - all features verified successfully", + "DEBUG", + ) + + self.log("Completed 'verify_diff_merged' operation", "INFO") + self.log("Verification operation completed and returning instance", "DEBUG") + return self + + def verify_diff_deleted(self): + """ + Placeholder for deletion verification - not implemented due to beta API limitations. + Description: + Deletion verification is not implemented because the underlying APIs are in beta. + This ensures stability and prevents issues with verification logic on unstable endpoints. 
+ """ + device_identifier = self.want.get("device_identifier", "unknown") + + self.log( + "DELETION VERIFICATION NOTICE: Verification for deleted configurations is not implemented", + "INFO", + ) + self.log( + "Reason: The underlying Layer 2 configuration APIs are in beta and verification logic", + "INFO", + ) + self.log( + "may not be reliable with beta API responses. Verification will be added when", + "INFO", + ) + self.log("APIs reach general availability (GA) status.", "INFO") + self.log( + "VERIFICATION RESULT: Deletion verification skipped for device {0} due to beta API limitations".format( + device_identifier + ), + "INFO", + ) + self.log("Completed 'verify_diff_deleted' operation (skipped)", "INFO") + + return self + + +def main(): + """main entry point for module execution""" + # Define the specification for the module"s arguments + element_spec = { + "dnac_host": {"required": True, "type": "str"}, + "dnac_port": {"type": "str", "default": "443"}, + "dnac_username": {"type": "str", "default": "admin", "aliases": ["user"]}, + "dnac_password": {"type": "str", "no_log": True}, + "dnac_verify": {"type": "bool", "default": "True"}, + "dnac_version": {"type": "str", "default": "2.2.3.3"}, + "dnac_debug": {"type": "bool", "default": False}, + "dnac_log_level": {"type": "str", "default": "WARNING"}, + "dnac_log_file_path": {"type": "str", "default": "dnac.log"}, + "dnac_log_append": {"type": "bool", "default": True}, + "dnac_log": {"type": "bool", "default": False}, + "validate_response_schema": {"type": "bool", "default": True}, + "config_verify": {"type": "bool", "default": False}, + "dnac_api_task_timeout": {"type": "int", "default": 1200}, + "dnac_task_poll_interval": {"type": "int", "default": 2}, + "config": {"required": True, "type": "list", "elements": "dict"}, + "state": {"default": "merged", "choices": ["merged", "deleted"]}, + } + + # Initialize the Ansible module with the provided argument specifications + module = AnsibleModule(argument_spec=element_spec, supports_check_mode=False) + + # Initialize the NetworkCompliance object with the module + ccc_wired_campus_automation = WiredCampusAutomation(module) + if ( + ccc_wired_campus_automation.compare_dnac_versions( + ccc_wired_campus_automation.get_ccc_version(), "3.1.3.0" + ) + < 0 + ): + ccc_wired_campus_automation.msg = ( + "The specified version '{0}' does not support the Wired Campus Automation Operations. Supported versions start " + " from '3.1.3.0' onwards. 
Version '3.1.3.0' introduces APIs for performing Wired Campus Automation Operations".format( + ccc_wired_campus_automation.get_ccc_version() + ) + ) + ccc_wired_campus_automation.set_operation_result( + "failed", False, ccc_wired_campus_automation.msg, "ERROR" + ).check_return_status() + + # Get the state parameter from the provided parameters + state = ccc_wired_campus_automation.params.get("state") + + # Check if the state is valid + if state not in ccc_wired_campus_automation.supported_states: + ccc_wired_campus_automation.status = "invalid" + ccc_wired_campus_automation.msg = "State {0} is invalid".format(state) + ccc_wired_campus_automation.check_return_status() + + # Validate the input parameters and check the return status + ccc_wired_campus_automation.validate_input().check_return_status() + + # Get the config_verify parameter from the provided parameters + config_verify = ccc_wired_campus_automation.params.get("config_verify") + + # Iterate over the validated configuration parameters + for config in ccc_wired_campus_automation.validated_config: + ccc_wired_campus_automation.reset_values() + ccc_wired_campus_automation.get_want(config, state).check_return_status() + ccc_wired_campus_automation.get_have(config, state).check_return_status() + ccc_wired_campus_automation.get_diff_state_apply[state]().check_return_status() + if config_verify: + ccc_wired_campus_automation.verify_diff_state_apply[ + state + ]().check_return_status() + + module.exit_json(**ccc_wired_campus_automation.result) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/wireless_design_workflow_manager.py b/plugins/modules/wireless_design_workflow_manager.py index 11e0fd0409..679bab4da2 100644 --- a/plugins/modules/wireless_design_workflow_manager.py +++ b/plugins/modules/wireless_design_workflow_manager.py @@ -13245,9 +13245,7 @@ def recursive_update(self, existing, updates): ) existing[key] = value - def verify_create_update_access_point_profiles_requirement( - self, access_point_profiles - ): + def verify_create_update_access_point_profiles_requirement(self, access_point_profiles): """ Determines whether access point profiles need to be created, updated, or require no updates. Args: @@ -13255,6 +13253,7 @@ def verify_create_update_access_point_profiles_requirement( Returns: tuple: Three lists containing access point profiles to be created, updated, and not updated. 
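+ For example (illustrative, profile names are hypothetical): ([{"apProfileName": "ap_profile_new", ...}], [{"apProfileName": "ap_profile_changed", ...}], [{"apProfileName": "ap_profile_unchanged", ...}]).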
""" + # Update requested profiles with default values where needed updated_access_point_profiles = self.map_access_point_profiles_params( access_point_profiles @@ -13268,129 +13267,115 @@ def verify_create_update_access_point_profiles_requirement( self.log("Retrieved existing access point profiles from the system.", "DEBUG") self.log( - "Existing Access Point Profiles: {0}".format( - existing_access_point_profiles - ), + "Existing Access Point Profiles: {0}".format(existing_access_point_profiles), "DEBUG", ) self.log( - "Requested Access Point Profiles: {0}".format( - updated_access_point_profiles - ), + "Requested Access Point Profiles: {0}".format(updated_access_point_profiles), "DEBUG", ) - # Initialize lists to store profiles that need to be created, updated, or not changed create_profiles = [] update_profiles = [] no_update_profiles = [] - # Create a dictionary of existing profiles for quick lookup using the profile name existing_profiles_dict = { - profile["apProfileName"]: profile - for profile in existing_access_point_profiles + profile["apProfileName"]: profile for profile in existing_access_point_profiles } self.log( "Converted existing profiles to a dictionary for quick lookup.", "DEBUG" ) - # Iterate over the updated requested access point profiles for requested_profile in updated_access_point_profiles: profile_name = requested_profile["apProfileName"] self.log("Checking profile: {0}".format(profile_name), "DEBUG") update_needed = False - # Check if the profile already exists + tz = requested_profile.get("timeZone") + if tz in ("NOT CONFIGURED", "CONTROLLER"): + requested_profile["timeZoneOffsetHour"] = 0 + requested_profile["timeZoneOffsetMinutes"] = 0 + self.log( + "For profile '{0}', 'timeZone' is set to '{1}'. Setting 'timeZoneOffsetHour' and 'timeZoneOffsetMinutes' to 0.".format( + profile_name, tz + ), + "DEBUG", + ) + + if "calendarPowerProfiles" in requested_profile and isinstance( + requested_profile["calendarPowerProfiles"], list + ): + self.log( + "Normalizing calendarPowerProfiles for profile '{0}'.".format( + profile_name + ), + "DEBUG", + ) + for calendar_profile in requested_profile["calendarPowerProfiles"]: + if "duration" not in calendar_profile or calendar_profile["duration"] is None: + calendar_profile["duration"] = {} + + scheduler_type = calendar_profile.get("schedulerType") + if scheduler_type == "DAILY": + calendar_profile["duration"]["schedulerDay"] = None + calendar_profile["duration"]["schedulerDate"] = None + self.log( + "Set 'schedulerDay' and 'schedulerDate' to None for schedulerType 'DAILY' in profile '{0}'.".format( + profile_name + ), + "DEBUG", + ) + elif scheduler_type == "WEEKLY": + calendar_profile["duration"]["schedulerDate"] = None + self.log( + "Set 'schedulerDate' to None for schedulerType 'WEEKLY' in profile '{0}'.".format( + profile_name + ), + "DEBUG", + ) + elif scheduler_type == "MONTHLY": + calendar_profile["duration"]["schedulerDay"] = None + self.log( + "Set 'schedulerDay' to None for schedulerType 'MONTHLY' in profile '{0}'.".format( + profile_name + ), + "DEBUG", + ) + else: + self.log( + "Unknown schedulerType '{0}' in calendarPowerProfiles for profile '{1}'.".format( + scheduler_type, profile_name + ), + "DEBUG", + ) + if profile_name in existing_profiles_dict: existing_profile = existing_profiles_dict[profile_name] self.log( "Profile '{0}' exists in the system.".format(profile_name), "DEBUG" ) - # Iterate over each parameter in the requested profile for key, requested_value in requested_profile.items(): if key in 
existing_profile: existing_value = existing_profile[key] - - # Compare requested and existing values if not self.compare_values(requested_value, existing_value): update_needed = True self.log( - "Mismatch found in parameter '{0}' for profile '{1}'. " - "Requested value: {2}, Existing value: {3}".format( + "Mismatch found in parameter '{0}' for profile '{1}'. Requested: {2}, Existing: {3}".format( key, profile_name, requested_value, existing_value ), "DEBUG", ) - - # Handle the specific case for time_zone - if key == "timeZone" and requested_value in [ - "NOT CONFIGURED", - "CONTROLLER", - ]: - # Ensure timeZoneOffsetHour and timeZoneOffsetMinutes are set to zero - requested_profile["timeZoneOffsetHour"] = 0 - requested_profile["timeZoneOffsetMinutes"] = 0 - self.log( - "For profile '{0}', 'timeZone' is set to '{1}'. Setting 'timeZoneOffsetHour' and 'timeZoneOffsetMinutes' to 0.".format( - profile_name, requested_value - ), - "DEBUG", - ) - - # Handle the specific case for calendarPowerProfiles - if key == "calendarPowerProfiles": - self.log( - "Updating calendarPowerProfiles for profile '{0}'.".format( - profile_name - ), - "DEBUG", - ) - for calendar_profile in requested_value: - # Ensure the 'duration' field exists - if "duration" not in calendar_profile: - calendar_profile["duration"] = {} - - # Update fields based on the schedulerType - scheduler_type = calendar_profile.get( - "schedulerType" - ) - if scheduler_type == "DAILY": - calendar_profile["duration"][ - "schedulerDay" - ] = None - calendar_profile["duration"][ - "schedulerDate" - ] = None - self.log( - "Set 'schedulerDay' to None and 'schedulerDate' to None for schedulerType 'DAILY' " - "in profile '{0}'.".format(profile_name), - "DEBUG", - ) - elif scheduler_type == "WEEKLY": - calendar_profile["duration"][ - "schedulerDate" - ] = None - self.log( - "Set 'schedulerDate' to None for schedulerType 'WEEKLY' in profile '{0}'.".format( - profile_name - ), - "DEBUG", - ) - elif scheduler_type == "MONTHLY": - calendar_profile["duration"][ - "schedulerDay" - ] = None - self.log( - "Unknown schedulerType '{0}' in calendarPowerProfiles for profile '{1}'.".format( - scheduler_type, profile_name - ), - "DEBUG", - ) - break + else: + update_needed = True + self.log( + "Requested key '{0}' not present in existing profile '{1}', marking for update.".format( + key, profile_name + ), + "DEBUG", + ) if update_needed: - # Copy the existing profile and update it with the requested values updated_profile = existing_profile.copy() self.recursive_update(updated_profile, requested_profile) update_profiles.append(updated_profile) @@ -13401,8 +13386,7 @@ def verify_create_update_access_point_profiles_requirement( # No changes needed for this profile no_update_profiles.append(existing_profile) self.log( - "Profile '{0}' requires no updates.".format(profile_name), - "DEBUG", + "Profile '{0}' requires no updates.".format(profile_name), "DEBUG" ) else: @@ -13431,10 +13415,10 @@ def verify_create_update_access_point_profiles_requirement( "DEBUG", ) - # Validate that the total number of processed profiles matches the number of requested profiles total_profiles_processed = ( len(create_profiles) + len(update_profiles) + len(no_update_profiles) ) + if total_profiles_processed == len(updated_access_point_profiles): self.log( "Match in total counts: Processed={0}, Requested={1}.".format( @@ -13450,7 +13434,6 @@ def verify_create_update_access_point_profiles_requirement( "ERROR", ) - # Return the categorized profiles return create_profiles, update_profiles, 
no_update_profiles def verify_delete_access_point_profiles_requirement(self, access_point_profiles): diff --git a/tests/integration/ccc_backup_and_restore_workflow_manager/defaults/main.yml b/tests/integration/ccc_backup_and_restore_workflow_manager/defaults/main.yml new file mode 100644 index 0000000000..5f709c5aac --- /dev/null +++ b/tests/integration/ccc_backup_and_restore_workflow_manager/defaults/main.yml @@ -0,0 +1,2 @@ +--- +testcase: "*" diff --git a/tests/integration/ccc_backup_and_restore_workflow_manager/meta/main.yml b/tests/integration/ccc_backup_and_restore_workflow_manager/meta/main.yml new file mode 100644 index 0000000000..32cf5dda7e --- /dev/null +++ b/tests/integration/ccc_backup_and_restore_workflow_manager/meta/main.yml @@ -0,0 +1 @@ +dependencies: [] diff --git a/tests/integration/ccc_backup_and_restore_workflow_manager/tasks/main.yml b/tests/integration/ccc_backup_and_restore_workflow_manager/tasks/main.yml new file mode 100644 index 0000000000..d842bc1747 --- /dev/null +++ b/tests/integration/ccc_backup_and_restore_workflow_manager/tasks/main.yml @@ -0,0 +1,34 @@ +--- +- name: collect ccc test cases + find: + paths: "{{ role_path }}/tests" + patterns: "{{ testcase }}.yml" + connection: local + register: ccc_cases + tags: sanity + +- debug: + msg: "CCC Cases: {{ ccc_cases }}" + +- set_fact: + test_cases: + files: "{{ ccc_cases.files }}" + tags: sanity + +- debug: + msg: "Test Cases: {{ test_cases }}" + +- name: set test_items + set_fact: + test_items: "{{ test_cases.files | map(attribute='path') | list }}" + tags: sanity + +- debug: + msg: "Test Items: {{ test_items }}" + +- name: run test cases (connection=httpapi) + include_tasks: "{{ test_case_to_run }}" + loop: "{{ test_items }}" + loop_control: + loop_var: test_case_to_run + tags: sanity diff --git a/tests/integration/ccc_backup_and_restore_workflow_manager/tests/test_backup_and_restore_workflow_manager.yml b/tests/integration/ccc_backup_and_restore_workflow_manager/tests/test_backup_and_restore_workflow_manager.yml new file mode 100644 index 0000000000..a7d63a8b7b --- /dev/null +++ b/tests/integration/ccc_backup_and_restore_workflow_manager/tests/test_backup_and_restore_workflow_manager.yml @@ -0,0 +1,162 @@ +--- +- debug: msg="Starting backup and restore workflow manager test" +- debug: msg="Role Path {{ role_path }}" + +- block: + - name: Load vars and declare dnac vars + include_vars: + file: "{{ role_path }}/vars/vars_backup_and_restore_workflow_manager.yml" + name: vars_map + vars: + dnac_login: &dnac_login + dnac_host: "172.23.9.219" + dnac_username: "admin" + dnac_password: "P@ssword9" + dnac_verify: "false" + dnac_port: "443" + dnac_version: "3.1.3.0" + dnac_debug: "false" + dnac_log: true + dnac_log_level: DEBUG + + # - debug: + # msg: "{{ vars_map. }}" + # - debug: + # msg: "{{ vars_map. }}" + # - debug: + # msg: "{{ vars_map. 
}}" + +#################################################### +# CREATE NFS CONFIGURATION # +#################################################### + + - name: Create NFS Configuration + cisco.dnac.backup_and_restore_workflow_manager: + <<: *dnac_login + state: merged + config: + - "{{ item }}" + loop: "{{ vars_map.create_nfs_configuration }}" + register: result_create_nfs_configuration + + # - name: Debug item + # debug: + # var: item + # loop: "{{ result_create_application.results }}" + # when: result_create_application is defined + + - name: Assert Create NFS Configuration + assert: + that: + - item.changed == true + - "'created successfully' in item.response" + loop: "{{ result_create_nfs_configuration.results }}" + when: result_create_nfs_configuration is defined + +#################################################### +# UPDATE BACKUP CONFIGURATION # +#################################################### + + - name: Update Backup Configuration + cisco.dnac.backup_and_restore_workflow_manager: + <<: *dnac_login + state: merged + config: + - "{{ item }}" + loop: "{{ vars_map.update_backup_configuration }}" + register: result_update_backup_configuration + + # - name: Debug item + # debug: + # var: item + # loop: "{{ result_create_application.results }}" + # when: result_create_application is defined + + - name: Assert Update Backup Configuration + assert: + that: + - item.changed == true + - "'updated successfully' in item.response" + loop: "{{ result_update_backup_configuration.results }}" + when: result_update_backup_configuration is defined + +#################################################### +# CREATE BACKUP # +#################################################### + + - name: Create Backup + cisco.dnac.backup_and_restore_workflow_manager: + <<: *dnac_login + state: merged + config: + - "{{ item }}" + loop: "{{ vars_map.create_backup }}" + register: result_create_backup + + # - name: Debug item + # debug: + # var: item + # loop: "{{ result_create_application.results }}" + # when: result_create_application is defined + + - name: Assert Create Backup + assert: + that: + - item.changed == true + - "'created successfully' in item.response" + loop: "{{ result_create_backup.results }}" + when: result_create_backup is defined + +#################################################### +# DELETE NFS CONFIGURATION # +#################################################### + + - name: Delete NFS Configuration + cisco.dnac.backup_and_restore_workflow_manager: + <<: *dnac_login + state: deleted + config: + - "{{ item }}" + loop: "{{ vars_map.delete_nfs_configuration }}" + register: result_delete_nfs_configuration + + # - name: Debug item + # debug: + # var: item + # loop: "{{ result_create_application.results }}" + # when: result_create_application is defined + + - name: Assert Delete NFS Configuration + assert: + that: + - item.changed == true + - "'deleted successfully' in item.response" + loop: "{{ result_delete_nfs_configuration.results }}" + when: result_delete_nfs_configuration is defined + +#################################################### +# DELETE BACKUP # +#################################################### + + - name: Delete Backup + cisco.dnac.backup_and_restore_workflow_manager: + <<: *dnac_login + state: deleted + config: + - "{{ item }}" + loop: "{{ vars_map.delete_backup }}" + register: result_delete_backup + + # - name: Debug item + # debug: + # var: item + # loop: "{{ result_create_application.results }}" + # when: result_create_application is defined + + - name: Assert Delete Backup 
+ assert: + that: + - item.changed == true + - "'deleted successfully' in item.response" + loop: "{{ result_delete_backup.results }}" + when: result_delete_backup is defined diff --git a/tests/integration/ccc_backup_and_restore_workflow_manager/vars/vars_backup_and_restore_workflow_manager.yml b/tests/integration/ccc_backup_and_restore_workflow_manager/vars/vars_backup_and_restore_workflow_manager.yml new file mode 100644 index 0000000000..0df7c67401 --- /dev/null +++ b/tests/integration/ccc_backup_and_restore_workflow_manager/vars/vars_backup_and_restore_workflow_manager.yml @@ -0,0 +1,39 @@ +--- +create_nfs_configuration: + - nfs_configuration: + - server_ip: 172.27.17.90 + source_path: /home/nfsshare/backups/TB18 + nfs_port: 2049 + nfs_version: nfs4 + nfs_portmapper_port: 111 + - server_ip: 172.27.17.90 + source_path: /home/nfsshare/backups/TB19 + nfs_port: 2049 + nfs_version: nfs4 + nfs_portmapper_port: 111 + +create_backup: + - backup: + - name: BACKUP30_07 + scope: CISCO_DNA_DATA_WITHOUT_ASSURANCE + +update_backup_configuration: + - backup_storage_configuration: + - server_type: NFS + nfs_details: + server_ip: 172.27.17.90 + source_path: /home/nfsshare/backups/TB17 + nfs_port: 2049 + nfs_version: nfs4 + nfs_portmapper_port: 111 + data_retention_period: 21 + encryption_passphrase: Karthick@zigzag333 + +delete_backup: + - backup: + - name: BACKUP29_07 + +delete_nfs_configuration: + - nfs_configuration: + - server_ip: 172.27.17.90 + source_path: /home/nfsshare/backups/TB19 diff --git a/tests/integration/ccc_fabric_devices_info_workflow_manager/defaults/main.yml b/tests/integration/ccc_fabric_devices_info_workflow_manager/defaults/main.yml new file mode 100644 index 0000000000..5f709c5aac --- /dev/null +++ b/tests/integration/ccc_fabric_devices_info_workflow_manager/defaults/main.yml @@ -0,0 +1,2 @@ +--- +testcase: "*" diff --git a/tests/integration/ccc_fabric_devices_info_workflow_manager/meta/main.yml b/tests/integration/ccc_fabric_devices_info_workflow_manager/meta/main.yml new file mode 100644 index 0000000000..32cf5dda7e --- /dev/null +++ b/tests/integration/ccc_fabric_devices_info_workflow_manager/meta/main.yml @@ -0,0 +1 @@ +dependencies: [] diff --git a/tests/integration/ccc_fabric_devices_info_workflow_manager/tasks/main.yml b/tests/integration/ccc_fabric_devices_info_workflow_manager/tasks/main.yml new file mode 100644 index 0000000000..d842bc1747 --- /dev/null +++ b/tests/integration/ccc_fabric_devices_info_workflow_manager/tasks/main.yml @@ -0,0 +1,34 @@ +--- +- name: collect ccc test cases + find: + paths: "{{ role_path }}/tests" + patterns: "{{ testcase }}.yml" + connection: local + register: ccc_cases + tags: sanity + +- debug: + msg: "CCC Cases: {{ ccc_cases }}" + +- set_fact: + test_cases: + files: "{{ ccc_cases.files }}" + tags: sanity + +- debug: + msg: "Test Cases: {{ test_cases }}" + +- name: set test_items + set_fact: + test_items: "{{ test_cases.files | map(attribute='path') | list }}" + tags: sanity + +- debug: + msg: "Test Items: {{ test_items }}" + +- name: run test cases (connection=httpapi) + include_tasks: "{{ test_case_to_run }}" + loop: "{{ test_items }}" + loop_control: + loop_var: test_case_to_run + tags: sanity diff --git a/tests/integration/ccc_fabric_devices_info_workflow_manager/tests/test_fabric_devices_info_workflow_manager.yml b/tests/integration/ccc_fabric_devices_info_workflow_manager/tests/test_fabric_devices_info_workflow_manager.yml new file mode 100644 index 0000000000..bffdb17331 --- /dev/null +++ 
b/tests/integration/ccc_fabric_devices_info_workflow_manager/tests/test_fabric_devices_info_workflow_manager.yml @@ -0,0 +1,41 @@ +--- +- debug: msg="Starting fabric devices info workflow manager test" +- debug: msg="Role Path {{ role_path }}" + +- block: + - name: Load vars and declare dnac vars + include_vars: + file: "{{ role_path }}/vars/vars_fabric_devices_info_workflow_manager.yml" + name: vars_map + vars: + dnac_login: &dnac_login + dnac_host: "10.31.102.124" + dnac_username: "admin" + dnac_password: "Ciscu!123" + dnac_verify: "false" + dnac_port: "443" + dnac_version: "3.1.3.0" + dnac_debug: "false" + dnac_log: true + dnac_log_level: DEBUG + +#################################################### +# FABRIC DEVICE INFO # +#################################################### + + - name: Fabric Devices Info + cisco.dnac.fabric_devices_info_workflow_manager: + <<: *dnac_login + state: gathered + config: + - "{{ item }}" + loop: "{{ vars_map.fabric_devices_info }}" + register: result_fabric_devices_info + + - name: Assert Fabric Devices Info + assert: + that: + - item.changed == false + - "'The fabric devices filtered' in item.response[0]" + loop: "{{ result_fabric_devices_info.results }}" + when: result_fabric_devices_info is defined diff --git a/tests/integration/ccc_fabric_devices_info_workflow_manager/vars/vars_fabric_devices_info_workflow_manager.yml b/tests/integration/ccc_fabric_devices_info_workflow_manager/vars/vars_fabric_devices_info_workflow_manager.yml new file mode 100644 index 0000000000..b3bd500ea1 --- /dev/null +++ b/tests/integration/ccc_fabric_devices_info_workflow_manager/vars/vars_fabric_devices_info_workflow_manager.yml @@ -0,0 +1,22 @@ +--- +fabric_devices_info: + - fabric_devices: + - fabric_site_hierarchy: "Global/rishipat_area/Fabric-area-1" # Mandatory parameter + fabric_device_role: "EDGE_NODE" + device_identifier: + - ip_address: ["192.168.200.69"] + timeout: 30 + retries: 3 + interval: 10 + requested_info: + - fabric_info + - handoff_info + - onboarding_info + - connected_devices_info + - device_health_info + - device_issues_info + output_file_info: + file_path: /Users/priyadharshini/Downloads/fabric_device_info + file_format: yaml + file_mode: w + timestamp: false diff --git a/tests/integration/ccc_network_devices_info_workflow_manager/defaults/main.yml b/tests/integration/ccc_network_devices_info_workflow_manager/defaults/main.yml new file mode 100644 index 0000000000..5f709c5aac --- /dev/null +++ b/tests/integration/ccc_network_devices_info_workflow_manager/defaults/main.yml @@ -0,0 +1,2 @@ +--- +testcase: "*" diff --git a/tests/integration/ccc_network_devices_info_workflow_manager/meta/main.yml b/tests/integration/ccc_network_devices_info_workflow_manager/meta/main.yml new file mode 100644 index 0000000000..32cf5dda7e --- /dev/null +++ b/tests/integration/ccc_network_devices_info_workflow_manager/meta/main.yml @@ -0,0 +1 @@ +dependencies: [] diff --git a/tests/integration/ccc_network_devices_info_workflow_manager/tasks/main.yml b/tests/integration/ccc_network_devices_info_workflow_manager/tasks/main.yml new file mode 100644 index 0000000000..d842bc1747 --- /dev/null +++ b/tests/integration/ccc_network_devices_info_workflow_manager/tasks/main.yml @@ -0,0 +1,34 @@ +--- +- name: collect ccc test cases + find: + paths: "{{ role_path }}/tests" + patterns: "{{ testcase }}.yml" + connection: local + register: ccc_cases + tags: sanity + +- debug: + msg: "CCC Cases: {{ ccc_cases }}" + +- set_fact: + test_cases: + files: "{{ ccc_cases.files }}" + tags: sanity + +- 
debug: + msg: "Test Cases: {{ test_cases }}" + +- name: set test_items + set_fact: + test_items: "{{ test_cases.files | map(attribute='path') | list }}" + tags: sanity + +- debug: + msg: "Test Items: {{ test_items }}" + +- name: run test cases (connection=httpapi) + include_tasks: "{{ test_case_to_run }}" + loop: "{{ test_items }}" + loop_control: + loop_var: test_case_to_run + tags: sanity diff --git a/tests/integration/ccc_network_devices_info_workflow_manager/tests/test_network_devices_info_workflow_manager.yml b/tests/integration/ccc_network_devices_info_workflow_manager/tests/test_network_devices_info_workflow_manager.yml new file mode 100644 index 0000000000..86dab3383a --- /dev/null +++ b/tests/integration/ccc_network_devices_info_workflow_manager/tests/test_network_devices_info_workflow_manager.yml @@ -0,0 +1,54 @@ +--- +- debug: msg="Starting network devices info workflow manager test" +- debug: msg="Role Path {{ role_path }}" + +- block: + - name: Load vars and declare dnac vars + include_vars: + file: "{{ role_path }}/vars/vars_network_devices_info_workflow_manager.yml" + name: vars_map + vars: + dnac_login: &dnac_login + dnac_host: "10.22.40.214" + dnac_username: "admin" + dnac_password: "M@glev123" + dnac_verify: false + dnac_port: 443 + dnac_version: "3.1.3.0" + dnac_debug: false + dnac_log: true + dnac_log_level: DEBUG + + # - debug: + # msg: "{{ vars_map. }}" + # - debug: + # msg: "{{ vars_map. }}" + # - debug: + # msg: "{{ vars_map. }}" + +#################################################### +# NETWORK DEVICES INFO RETRIEVAL # +#################################################### + + - name: Get Network Devices Info + cisco.dnac.network_devices_info_workflow_manager: + <<: *dnac_login + state: gathered + config: + - "{{ item }}" + loop: "{{ vars_map.get_devices_info }}" + register: result_get_devices_info + + # - name: Debug item + # debug: + # var: item + # loop: "{{ result_create_application.results }}" + # when: result_create_application is defined + + - name: Assert Network Devices Info + assert: + that: + - item.changed == false + - "'The network devices filtered' in item.response[0]" + loop: "{{ result_get_devices_info.results }}" + when: result_get_devices_info is defined \ No newline at end of file diff --git a/tests/integration/ccc_network_devices_info_workflow_manager/vars/vars_network_devices_info_workflow_manager.yml b/tests/integration/ccc_network_devices_info_workflow_manager/vars/vars_network_devices_info_workflow_manager.yml new file mode 100644 index 0000000000..81e4c89773 --- /dev/null +++ b/tests/integration/ccc_network_devices_info_workflow_manager/vars/vars_network_devices_info_workflow_manager.yml @@ -0,0 +1,37 @@ +--- +get_devices_info: + - network_devices: + - site_hierarchy: Global/USA/SAN JOSE + device_type: "Cisco Catalyst 9300 Switch" + device_role: "ACCESS" + device_family: "Switches and Hubs" + software_version: "17.12.1" + os_type: "IOS-XE" + device_identifier: + - ip_address: ["204.1.2.1"] + - serial_number: ["FCW2137L0SB"] + - hostname: ["SJ-BN-9300.cisco.local"] + - mac_address: ["90:88:55:90:26:00"] + timeout: 60 + retries: 3 + interval: 10 + requested_info: + - device_info + - interface_info + - interface_vlan_info + - line_card_info + - supervisor_card_info + - poe_info + - module_count_info + - connected_device_info + - device_interfaces_by_range_info + - device_config_info + - device_summary_info + - device_polling_interval_info + - device_stack_info + - device_link_mismatch_info + output_file_info: + file_path: 
/Users/priyadharshini/Downloads/info + file_format: json + file_mode: w + timestamp: true diff --git a/tests/integration/ccc_network_profile_switching_workflow_management/tests/test_network_profile_switching_workflow_manager.yml b/tests/integration/ccc_network_profile_switching_workflow_management/tests/test_network_profile_switching_workflow_manager.yml index c6afacf85b..09506b737b 100644 --- a/tests/integration/ccc_network_profile_switching_workflow_management/tests/test_network_profile_switching_workflow_manager.yml +++ b/tests/integration/ccc_network_profile_switching_workflow_management/tests/test_network_profile_switching_workflow_manager.yml @@ -63,7 +63,7 @@ assert: that: - item.changed == true - - "'Profile created/updated are verified successfully for '['test_sw_1']'.' in item.msg" + - "'Profile created/updated are verified successfully for '['DataCenter_Core_Switch']'.' in item.msg" loop: "{{ result_create_switch_profile.results }}" when: result_create_switch_profile is defined @@ -91,7 +91,7 @@ assert: that: - item.changed == true - - "'Profile created/updated are verified successfully for '['test_sw_1']'.' in item.msg" + - "'Profile created/updated are verified successfully for '['DataCenter_Core_Switch']'.' in item.msg" loop: "{{ result_update_switch_profile.results }}" when: result_update_switch_profile is defined @@ -119,7 +119,7 @@ assert: that: - item.changed == true - - "'Switch profile(s) deleted and verified successfully for '['test_sw_1']'.' in item.msg" + - "'Switch profile(s) deleted/unassigned and verified successfully for '['DataCenter_Core_Switch']'.' in item.msg" loop: "{{ result_delete_switch_profile.results }}" when: result_delete_switch_profile is defined diff --git a/tests/integration/ccc_network_profile_switching_workflow_management/vars/vars_network_profile_switching_workflow_manager.yml b/tests/integration/ccc_network_profile_switching_workflow_management/vars/vars_network_profile_switching_workflow_manager.yml index eb6d7a17f3..3b93e5cf52 100644 --- a/tests/integration/ccc_network_profile_switching_workflow_management/vars/vars_network_profile_switching_workflow_manager.yml +++ b/tests/integration/ccc_network_profile_switching_workflow_management/vars/vars_network_profile_switching_workflow_manager.yml @@ -1,8 +1,6 @@ --- create_switch_profile: - profile_name: "DataCenter_Core_Switch" - onboarding_templates: - - "Ansible_PNP_Switch" day_n_templates: - "Template Provisioning To Device" site_names: @@ -11,8 +9,6 @@ create_switch_profile: update_switch_profile: - profile_name: "DataCenter_Core_Switch" - onboarding_templates: - - "Ansible_PNP_Switch" day_n_templates: - "Template Provisioning To Device" site_names: @@ -21,10 +17,3 @@ update_switch_profile: delete_switch_profile: - profile_name: "DataCenter_Core_Switch" - onboarding_templates: - - "Ansible_PNP_Switch" - day_n_templates: - - "Template Provisioning To Device" - site_names: - - "Global/APO" - - "Global/Abc" diff --git a/tests/integration/ccc_network_profile_wireless_workflow_management/tests/test_network_profile_wireless_workflow_management.yml b/tests/integration/ccc_network_profile_wireless_workflow_management/tests/test_network_profile_wireless_workflow_management.yml index fc728b9fb8..922a60e878 100644 --- a/tests/integration/ccc_network_profile_wireless_workflow_management/tests/test_network_profile_wireless_workflow_management.yml +++ b/tests/integration/ccc_network_profile_wireless_workflow_management/tests/test_network_profile_wireless_workflow_management.yml @@ -41,7 +41,7 @@ # register: 
result_delete_wireless_profile ########################################### - # CREATE SWITCH PROFILE # + # CREATE WIRELESS PROFILE # ########################################### - name: Create network wireless profile @@ -63,13 +63,13 @@ assert: that: - item.changed == true - - "'Profile created/updated are verified successfully for '['test_wireless_1']'.' in item.msg" + - "'Profile created/updated are verified successfully for '['Corporate_Wireless_Profile']'.' in item.msg" loop: "{{ result_create_wireless_profile.results }}" when: result_create_wireless_profile is defined ############################################# -# UPDATE SWITCH PROFILE # +# UPDATE WIRELESS PROFILE # ############################################# - name: Update network wireless profile @@ -91,13 +91,13 @@ assert: that: - item.changed == true - - "'Profile created/updated are verified successfully for '['test_wireless_1']'.' in item.msg" + - "'Profile created/updated are verified successfully for '['Corporate_Wireless_Profile']'.' in item.msg" loop: "{{ result_update_wireless_profile.results }}" when: result_update_wireless_profile is defined - - ############################################# -# DELETE SWITCH PROFILE # + +############################################# +# DELETE WIRELESS PROFILE # ############################################# - name: Delete network wireless profile diff --git a/tests/integration/ccc_network_profile_wireless_workflow_management/vars/vars_network_profile_wireless_workflow_manager.yml b/tests/integration/ccc_network_profile_wireless_workflow_management/vars/vars_network_profile_wireless_workflow_manager.yml index 8b90584edc..dedd48ff98 100644 --- a/tests/integration/ccc_network_profile_wireless_workflow_management/vars/vars_network_profile_wireless_workflow_manager.yml +++ b/tests/integration/ccc_network_profile_wireless_workflow_management/vars/vars_network_profile_wireless_workflow_manager.yml @@ -1,94 +1,69 @@ --- create_wireless_profile: - - profile_name: "test_wireless_1" + - profile_name: "Corporate_Wireless_Profile" site_names: - - "Global/Chennai/LTTS/FLOOR11" + - "Global/Chennai/LTTS/FLOOR1" - "Global/Madurai/LTTS/FLOOR1" ssid_details: - ssid: "GuestLTTS" enable_fabric: false - wlan_profile_name: "GuestLTTS_profile" - policy_profile_name: "GuestLTTS_profile" - vlan_group_name: "test_vlan_group_1" + dot11be_profile_name: "Corporate_VLAN" + vlan_group_name: "Corporate_VLAN_Group" - ssid: "NY_SSID" enable_fabric: false - wlan_profile_name: "NY_SSID_Profile" - policy_profile_name: "NY_SSID_Profile" - interface_name: "test_interface_1" - local_to_vlan: 2001 + dot11be_profile_name: "Corporate_VLAN" + interface_name: "guest_network" + local_to_vlan: 3002 ap_zones: - ap_zone_name: "AP_Zone_North" rf_profile_name: "LOW" ssids: - "GuestLTTS" additional_interfaces: - - interface_name: "test_interface_1" + - interface_name: "Corp_Interface_1" vlan_id: 20 - - interface_name: "test_interface_2" + - interface_name: "Corp_Interface_1" vlan_id: 22 - onboarding_templates: - - "Ansible_PNP_Switch" day_n_templates: - "WLC Template" + feature_template_designs: + - design_type: Advanced SSID Configuration + feature_templates: + - Default Advanced SSID Design + applicability_ssids: + - HQ_WiFi + - Branch_Secure update_wireless_profile: - - profile_name: "test_wireless_1" + - profile_name: "Corporate_Wireless_Profile" site_names: - "Global/Chennai/LTTS/FLOOR11" ssid_details: - ssid: "GuestLTTS" enable_fabric: false - wlan_profile_name: "GuestLTTS_profile" - policy_profile_name: "GuestLTTS_profile" - 
vlan_group_name: "test_vlan_group_1" + dot11be_profile_name: "Corporate_VLAN" + vlan_group_name: "Corporate_VLAN_Group" - ssid: "NY_SSID" enable_fabric: false - wlan_profile_name: "NY_SSID_Profile" - policy_profile_name: "NY_SSID_Profile" - interface_name: "test_interface_1" - local_to_vlan: 2001 + dot11be_profile_name: "Corporate_VLAN" + interface_name: "guest_network" + local_to_vlan: 3002 ap_zones: - ap_zone_name: "AP_Zone_North" rf_profile_name: "LOW" ssids: - "GuestLTTS" additional_interfaces: - - interface_name: "test_interface_1" + - interface_name: "Corp_Interface_1" vlan_id: 20 - - interface_name: "test_interface_2" + - interface_name: "Corp_Interface_1" vlan_id: 22 - onboarding_templates: - - "Ansible_PNP_Switch" day_n_templates: - "WLC Template" + feature_template_designs: + - design_type: AAA_RADIUS_ATTRIBUTES_CONFIGURATION + feature_templates: + - Radius Feature templates delete_wireless_profile: - - profile_name: "test_wireless_1" - site_names: - - "Global/Chennai/LTTS/FLOOR11" - ssid_details: - - ssid: "GuestLTTS" - enable_fabric: false - wlan_profile_name: "GuestLTTS_profile" - policy_profile_name: "GuestLTTS_profile" - vlan_group_name: "test_vlan_group_1" - - ssid: "NY_SSID" - enable_fabric: false - wlan_profile_name: "NY_SSID_Profile" - policy_profile_name: "NY_SSID_Profile" - interface_name: "test_interface_1" - local_to_vlan: 2001 - ap_zones: - - ap_zone_name: "AP_Zone_North" - rf_profile_name: "LOW" - ssids: - - "GuestLTTS" - additional_interfaces: - - interface_name: "test_interface_1" - vlan_id: 20 - - interface_name: "test_interface_2" - vlan_id: 22 - onboarding_templates: - - "Ansible_PNP_Switch" - day_n_templates: - - "WLC Template" + - profile_name: "Corporate_Wireless_Profile" diff --git a/tests/integration/ccc_reports_workflow_management/defaults/main.yml b/tests/integration/ccc_reports_workflow_management/defaults/main.yml new file mode 100644 index 0000000000..55a93fc23d --- /dev/null +++ b/tests/integration/ccc_reports_workflow_management/defaults/main.yml @@ -0,0 +1,2 @@ +--- +testcase: "*" \ No newline at end of file diff --git a/tests/integration/ccc_reports_workflow_management/meta/main.yml b/tests/integration/ccc_reports_workflow_management/meta/main.yml new file mode 100644 index 0000000000..5514b6a40c --- /dev/null +++ b/tests/integration/ccc_reports_workflow_management/meta/main.yml @@ -0,0 +1 @@ +dependencies: [] \ No newline at end of file diff --git a/tests/integration/ccc_reports_workflow_management/tasks/main.yml b/tests/integration/ccc_reports_workflow_management/tasks/main.yml new file mode 100644 index 0000000000..09e0832ca2 --- /dev/null +++ b/tests/integration/ccc_reports_workflow_management/tasks/main.yml @@ -0,0 +1,34 @@ +--- +- name: collect ccc test cases + find: + paths: "{{ role_path }}/tests" + patterns: "{{ testcase }}.yml" + connection: local + register: ccc_cases + tags: sanity + +- debug: + msg: "CCC Cases: {{ ccc_cases }}" + +- set_fact: + test_cases: + files: "{{ ccc_cases.files }}" + tags: sanity + +- debug: + msg: "Test Cases: {{ test_cases }}" + +- name: set test_items + set_fact: + test_items: "{{ test_cases.files | map(attribute='path') | list }}" + tags: sanity + +- debug: + msg: "Test Items: {{ test_items }}" + +- name: run test cases (connection=httpapi) + include_tasks: "{{ test_case_to_run }}" + loop: "{{ test_items }}" + loop_control: + loop_var: test_case_to_run + tags: sanity \ No newline at end of file diff --git a/tests/integration/ccc_reports_workflow_management/tests/test_reports_management.yml 
b/tests/integration/ccc_reports_workflow_management/tests/test_reports_management.yml
new file mode 100644
index 0000000000..062b6f5262
--- /dev/null
+++ b/tests/integration/ccc_reports_workflow_management/tests/test_reports_management.yml
@@ -0,0 +1,88 @@
+---
+- debug: msg="Starting reports management test"
+- debug: msg="Role Path {{ role_path }}"
+
+- block:
+    - name: Load vars and declare dnac vars
+      include_vars:
+        file: "{{ role_path }}/vars/vars_reports_management.yml"
+        name: vars_map
+      vars:
+        dnac_login: &dnac_login
+          dnac_host: "{{ dnac_host }}"
+          dnac_username: "{{ dnac_username }}"
+          dnac_password: "{{ dnac_password }}"
+          dnac_verify: "{{ dnac_verify }}"
+          dnac_port: "{{ dnac_port }}"
+          dnac_version: "{{ dnac_version }}"
+          dnac_debug: "{{ dnac_debug }}"
+          dnac_log: true
+          dnac_log_level: DEBUG
+          config_verify: true
+
+
+#############################################
+#                 Clean Up                  #
+#############################################
+
+    - name: Clean up before test
+      cisco.dnac.reports_workflow_manager:
+        <<: *dnac_login
+        state: deleted
+        config:
+          - "{{ item }}"
+      loop: "{{ vars_map.report_config }}"
+
+#############################################
+#              CREATE REPORTS               #
+#############################################
+
+    - name: Create Compliance Report
+      cisco.dnac.reports_workflow_manager:
+        <<: *dnac_login
+        state: merged
+        config:
+          - "{{ item }}"
+      loop: "{{ vars_map.report_config }}"
+      register: result_create
+
+    - name: Debug item
+      debug:
+        var: item.response
+      loop: "{{ result_create.results }}"
+      when: result_create is defined
+
+    - name: Assert report creation
+      assert:
+        that:
+          - item.changed == true
+          - "'report created Successfully' in item.response[0]['create_report']['msg']"
+      loop: "{{ result_create.results }}"
+      when: result_create is defined
+
+#############################################
+#              DELETE REPORTS               #
+#############################################
+
+    - name: Delete Compliance Report
+      cisco.dnac.reports_workflow_manager:
+        <<: *dnac_login
+        state: deleted
+        config:
+          - "{{ item }}"
+      loop: "{{ vars_map.report_config }}"
+      register: result_delete_report
+
+    - name: Debug item
+      debug:
+        var: item.response
+      loop: "{{ result_delete_report.results }}"
+      when: result_delete_report is defined
+
+    - name: Assert report deletion
+      assert:
+        that:
+          - item.changed == true
+          - "'has been successfully deleted.' 
in item.response[0]['delete_report']['msg']" + loop: "{{ result_delete_report.results }}" + when: result_delete_report is defined diff --git a/tests/integration/ccc_reports_workflow_management/vars/vars_reports_management.yml b/tests/integration/ccc_reports_workflow_management/vars/vars_reports_management.yml new file mode 100644 index 0000000000..9813abebc8 --- /dev/null +++ b/tests/integration/ccc_reports_workflow_management/vars/vars_reports_management.yml @@ -0,0 +1,23 @@ +--- +report_config: + - generate_report: + - name: "compliance_report" + view_group_name: "Compliance" + deliveries: + - type: "DOWNLOAD" + file_path: "/Users/mekandar/Desktop" + schedule: + type: "SCHEDULE_NOW" + time_zone: "Asia/Calcutta" + view: + view_name: "Network Device Compliance" + field_groups: + - name: "inventoryAllData" + display_name: "All Data" + format: + format_type: "CSV" + filters: + - name: "Location" + display_name: "Location" + type: "MULTI_SELECT_TREE" + tags: [] diff --git a/tests/integration/ccc_wired_campus_automation_management/defaults/main.yml b/tests/integration/ccc_wired_campus_automation_management/defaults/main.yml new file mode 100644 index 0000000000..55a93fc23d --- /dev/null +++ b/tests/integration/ccc_wired_campus_automation_management/defaults/main.yml @@ -0,0 +1,2 @@ +--- +testcase: "*" \ No newline at end of file diff --git a/tests/integration/ccc_wired_campus_automation_management/meta/main.yml b/tests/integration/ccc_wired_campus_automation_management/meta/main.yml new file mode 100644 index 0000000000..5514b6a40c --- /dev/null +++ b/tests/integration/ccc_wired_campus_automation_management/meta/main.yml @@ -0,0 +1 @@ +dependencies: [] \ No newline at end of file diff --git a/tests/integration/ccc_wired_campus_automation_management/tasks/main.yml b/tests/integration/ccc_wired_campus_automation_management/tasks/main.yml new file mode 100644 index 0000000000..09e0832ca2 --- /dev/null +++ b/tests/integration/ccc_wired_campus_automation_management/tasks/main.yml @@ -0,0 +1,34 @@ +--- +- name: collect ccc test cases + find: + paths: "{{ role_path }}/tests" + patterns: "{{ testcase }}.yml" + connection: local + register: ccc_cases + tags: sanity + +- debug: + msg: "CCC Cases: {{ ccc_cases }}" + +- set_fact: + test_cases: + files: "{{ ccc_cases.files }}" + tags: sanity + +- debug: + msg: "Test Cases: {{ test_cases }}" + +- name: set test_items + set_fact: + test_items: "{{ test_cases.files | map(attribute='path') | list }}" + tags: sanity + +- debug: + msg: "Test Items: {{ test_items }}" + +- name: run test cases (connection=httpapi) + include_tasks: "{{ test_case_to_run }}" + loop: "{{ test_items }}" + loop_control: + loop_var: test_case_to_run + tags: sanity \ No newline at end of file diff --git a/tests/integration/ccc_wired_campus_automation_management/tests/test_wca_feature_authentication.yml b/tests/integration/ccc_wired_campus_automation_management/tests/test_wca_feature_authentication.yml new file mode 100644 index 0000000000..56daa81d1a --- /dev/null +++ b/tests/integration/ccc_wired_campus_automation_management/tests/test_wca_feature_authentication.yml @@ -0,0 +1,514 @@ +# --- +# # =================================================================================================== +# # AUTHENTICATION FEATURE TESTS SUMMARY +# # =================================================================================================== +# # +# # This test suite validates 802.1X Authentication configuration functionality for Wired Campus Automation. 
+# # +# # TEST CATEGORIES: +# # +# # 1. NEGATIVE VALIDATION TESTS (Lines 90-450) +# # - Authentication Enable validation (string/integer/list instead of boolean) +# # - Authentication Config Mode validation (integer/boolean/list instead of string, invalid choices) +# # - Case sensitivity validation (lowercase/mixed case mode values) +# # - Partial match validation (abbreviated mode names) +# # - Data type and structure validation (invalid dict/list types) +# # +# # 2. POSITIVE VALIDATION TESTS (Lines 450-1200) +# # a) Authentication Creation Tests (Merged State) +# # - Enable/disable only configuration +# # - Mode only configuration (LEGACY/NEW_STYLE) +# # - Combined enable/disable with mode configurations +# # - Minimal configuration testing +# # +# # b) Authentication Update Tests (Merged State) +# # - Enable/disable authentication updates +# # - Mode preservation during updates +# # - Combined parameter updates +# # +# # c) Authentication Reset Tests (Deleted State) +# # - Empty configuration reset (resets to defaults) +# # - Configuration with parameters reset +# # +# # d) Mode Change Restriction Tests +# # - Mode change validation (once set, mode cannot be changed) +# # - API restriction enforcement testing +# # +# # e) Special Configuration Tests +# # - Minimal configurations (enable/disable only) +# # - Mode-only configurations +# # - Case sensitivity validation +# # - Edge case parameter combinations +# # +# # f) Advanced Test Scenarios +# # - Idempotency tests (same config applied twice) +# # - Cleanup operations +# # +# # VALIDATION RANGES: +# # - Authentication Enable: boolean (true/false) +# # - Authentication Config Mode: string choices ["LEGACY", "NEW_STYLE"] +# # - Mode values are case-sensitive and must match exactly +# # - Once mode is set, it cannot be changed via API +# # +# # AUTHENTICATION FUNCTIONALITY: +# # - 802.1X Authentication provides network access control +# # - LEGACY mode uses traditional 802.1X authentication methods +# # - NEW_STYLE mode uses enhanced authentication with additional features +# # - Enable/disable controls whether authentication is active on the device +# # - Mode determines the authentication method and capabilities +# # - Authentication settings apply globally to the managed device +# # +# # EXPECTED BEHAVIORS: +# # - Negative tests should fail with appropriate error messages +# # - Positive tests should succeed with 'Successfully' in response +# # - Idempotency: First run changes=true, second run changes=false +# # - All tests include proper pause times for configuration settlement +# # - Deleted state resets authentication to default configuration +# # - Mode change restrictions are enforced by the API +# # - Case sensitivity is strictly enforced for mode values +# # +# # SPECIAL CONSIDERATIONS: +# # - Mode cannot be changed once initially set (API restriction) +# # - Mode values are case-sensitive ("LEGACY" not "legacy") +# # - Authentication configuration affects device security posture +# # - Proper reset between mode tests to avoid conflicts +# # - Extended pause times for authentication configuration changes +# # - Mode change tests may succeed with no changes or fail with restrictions +# # +# # =================================================================================================== + +# - debug: msg="Starting Authentication feature tests for Wired Campus Automation" +# - debug: msg="Role Path {{ role_path }}" + +# - block: +# - name: Load Authentication test variables +# include_vars: +# file: "{{ role_path 
}}/vars/vars_wca_test_feature_authentication.yml" +# name: authentication_vars_map +# vars: +# dnac_login: &dnac_login +# dnac_host: "{{ dnac_host }}" +# dnac_username: "{{ dnac_username }}" +# dnac_password: "{{ dnac_password }}" +# dnac_verify: "{{ dnac_verify }}" +# dnac_port: "{{ dnac_port }}" +# dnac_version: "{{ dnac_version }}" +# dnac_debug: "{{ dnac_debug }}" +# dnac_log: true +# dnac_log_level: DEBUG +# config_verify: true +# dnac_api_task_timeout: "{{ dnac_api_task_timeout }}" +# dnac_log_append: false + +# # ############################################# +# # Cleanup Operations +# # ############################################# + +# - name: Cleanup - Reset authentication to default configuration +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: deleted +# config: +# - "{{ authentication_vars_map.test_authentication_reset_empty }}" +# register: result_auth_cleanup +# tags: [positive, authentication, cleanup] + +# # ============================================================================= +# # NEGATIVE VALIDATION TESTS FOR AUTHENTICATION +# # ============================================================================= + +# # ############################################# +# # Authentication Enable Tests +# # ############################################# + +# - name: Test authentication enable validation - string value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ authentication_vars_map.test_authentication_enable_string }}" +# register: result_auth_enable_string +# ignore_errors: true +# tags: [negative, authentication, enable] + +# - name: Assert authentication enable string validation failed +# assert: +# that: +# - result_auth_enable_string.failed == true +# - "'must be of type boolean' in result_auth_enable_string.msg" +# fail_msg: "Authentication enable string validation should have failed" +# success_msg: "Authentication enable string validation correctly failed" +# tags: [negative, authentication, enable] + +# - name: Test authentication enable validation - integer value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ authentication_vars_map.test_authentication_enable_integer }}" +# register: result_auth_enable_integer +# ignore_errors: true +# tags: [negative, authentication, enable] + +# - name: Assert authentication enable integer validation failed +# assert: +# that: +# - result_auth_enable_integer.failed == true +# - "'must be of type boolean' in result_auth_enable_integer.msg" +# fail_msg: "Authentication enable integer validation should have failed" +# success_msg: "Authentication enable integer validation correctly failed" +# tags: [negative, authentication, enable] + +# - name: Test authentication enable validation - list value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ authentication_vars_map.test_authentication_enable_list }}" +# register: result_auth_enable_list +# ignore_errors: true +# tags: [negative, authentication, enable] + +# - name: Assert authentication enable list validation failed +# assert: +# that: +# - result_auth_enable_list.failed == true +# - "'must be of type boolean' in result_auth_enable_list.msg" +# fail_msg: "Authentication enable list validation should have failed" +# success_msg: "Authentication enable list validation correctly failed" +# tags: [negative, 
authentication, enable] + +# # ############################################# +# # Authentication Mode Tests +# # ############################################# + +# - name: Test authentication config mode validation - integer value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ authentication_vars_map.test_authentication_config_mode_integer }}" +# register: result_auth_mode_integer +# ignore_errors: true +# tags: [negative, authentication, config_mode] + +# - name: Assert authentication config mode integer validation failed +# assert: +# that: +# - result_auth_mode_integer.failed == true +# - "'must be of type string' in result_auth_mode_integer.msg" +# fail_msg: "Authentication config mode integer validation should have failed" +# success_msg: "Authentication config mode integer validation correctly failed" +# tags: [negative, authentication, config_mode] + +# - name: Test authentication config mode validation - boolean value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ authentication_vars_map.test_authentication_config_mode_boolean }}" +# register: result_auth_mode_boolean +# ignore_errors: true +# tags: [negative, authentication, config_mode] + +# - name: Assert authentication config mode boolean validation failed +# assert: +# that: +# - result_auth_mode_boolean.failed == true +# - "'must be of type string' in result_auth_mode_boolean.msg" +# fail_msg: "Authentication config mode boolean validation should have failed" +# success_msg: "Authentication config mode boolean validation correctly failed" +# tags: [negative, authentication, config_mode] + +# - name: Test authentication config mode validation - list value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ authentication_vars_map.test_authentication_config_mode_list }}" +# register: result_auth_mode_list +# ignore_errors: true +# tags: [negative, authentication, config_mode] + +# - name: Assert authentication config mode list validation failed +# assert: +# that: +# - result_auth_mode_list.failed == true +# - "'must be of type string' in result_auth_mode_list.msg" +# fail_msg: "Authentication config mode list validation should have failed" +# success_msg: "Authentication config mode list validation correctly failed" +# tags: [negative, authentication, config_mode] + +# - name: Test authentication config mode validation - invalid choice should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ authentication_vars_map.test_authentication_config_mode_invalid_choice }}" +# register: result_auth_mode_invalid_choice +# ignore_errors: true +# tags: [negative, authentication, config_mode, choices] + +# - name: Assert authentication config mode invalid choice validation failed +# assert: +# that: +# - result_auth_mode_invalid_choice.failed == true +# - "'must be one of' in result_auth_mode_invalid_choice.msg" +# fail_msg: "Authentication config mode invalid choice validation should have failed" +# success_msg: "Authentication config mode invalid choice validation correctly failed" +# tags: [negative, authentication, config_mode, choices] + +# - name: Test authentication config mode validation - partial match should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ 
authentication_vars_map.test_authentication_config_mode_partial }}" +# register: result_auth_mode_partial +# ignore_errors: true +# tags: [negative, authentication, config_mode, partial] + +# - name: Assert authentication config mode partial match validation failed +# assert: +# that: +# - result_auth_mode_partial.failed == true +# - "'must be one of' in result_auth_mode_partial.msg" +# fail_msg: "Authentication config mode partial match validation should have failed" +# success_msg: "Authentication config mode partial match validation correctly failed" +# tags: [negative, authentication, config_mode, partial] + +# # ############################################# +# # Data Type and Structure Tests +# # ############################################# + +# - name: Test authentication structure validation - string instead of dict should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ authentication_vars_map.test_authentication_invalid_dict_type }}" +# register: result_auth_invalid_dict_type +# ignore_errors: true +# tags: [negative, authentication, structure] + +# - name: Assert authentication invalid dict type validation failed +# assert: +# that: +# - result_auth_invalid_dict_type.failed == true +# - "'must be of type dictionary' in result_auth_invalid_dict_type.msg" +# fail_msg: "Authentication invalid dict type validation should have failed" +# success_msg: "Authentication invalid dict type validation correctly failed" +# tags: [negative, authentication, structure] + +# - name: Test authentication structure validation - list instead of dict should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ authentication_vars_map.test_authentication_invalid_list_type }}" +# register: result_auth_invalid_list_type +# ignore_errors: true +# tags: [negative, authentication, structure] + +# - name: Assert authentication invalid list type validation failed +# assert: +# that: +# - result_auth_invalid_list_type.failed == true +# - "'must be of type dictionary' in result_auth_invalid_list_type.msg" +# fail_msg: "Authentication invalid list type validation should have failed" +# success_msg: "Authentication invalid list type validation correctly failed" +# tags: [negative, authentication, structure] + +# - name: Test authentication structure validation - integer instead of dict should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ authentication_vars_map.test_authentication_invalid_integer_type }}" +# register: result_auth_invalid_integer_type +# ignore_errors: true +# tags: [negative, authentication, structure] + +# - name: Assert authentication invalid integer type validation failed +# assert: +# that: +# - result_auth_invalid_integer_type.failed == true +# - "'must be of type dictionary' in result_auth_invalid_integer_type.msg" +# fail_msg: "Authentication invalid integer type validation should have failed" +# success_msg: "Authentication invalid integer type validation correctly failed" +# tags: [negative, authentication, structure] + +# # ============================================================================= +# # POSITIVE VALIDATION TESTS FOR AUTHENTICATION +# # ============================================================================= + +# # ############################################# +# # POSITIVE TEST CASES - CREATE +# # ############################################# + +# - name: Test 
authentication configuration with enable only +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ authentication_vars_map.test_authentication_create_enable_only }}" +# register: result_auth_create_enable_only +# tags: [positive, authentication, create, enable] + +# - name: Assert authentication enable only configuration succeeded +# assert: +# that: +# - result_auth_create_enable_only.failed == false +# - result_auth_create_enable_only.changed == true +# - "'Successfully' in result_auth_create_enable_only.response" +# fail_msg: "Authentication enable only configuration should have succeeded" +# success_msg: "Authentication enable only configuration succeeded" +# tags: [positive, authentication, create, enable] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, authentication, create] + +# - name: Test authentication configuration with disable only +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ authentication_vars_map.test_authentication_create_disable_only }}" +# register: result_auth_create_disable_only +# tags: [positive, authentication, create, disable] + +# - name: Assert authentication disable only configuration succeeded +# assert: +# that: +# - result_auth_create_disable_only.failed == false +# - result_auth_create_disable_only.changed == true +# - "'Successfully' in result_auth_create_disable_only.response" +# fail_msg: "Authentication disable only configuration should have succeeded" +# success_msg: "Authentication disable only configuration succeeded" +# tags: [positive, authentication, create, disable] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, authentication, create] + +# # IMPORTANT: Reset authentication to test NEW_STYLE mode independently +# - name: Reset authentication before NEW_STYLE mode test +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: deleted +# config: +# - "{{ authentication_vars_map.test_authentication_reset_empty }}" +# register: result_auth_reset_before_new_style +# tags: [positive, authentication, create, reset_before_new_style] + +# - name: Pause after reset before NEW_STYLE test +# pause: +# seconds: 15 +# tags: [positive, authentication, create] + +# # ############################################# +# # POSITIVE TEST CASES - UPDATE +# # ############################################# + +# - name: Test authentication update - enable authentication (preserving existing mode) +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ authentication_vars_map.test_authentication_update_enable }}" +# register: result_auth_update_enable +# tags: [positive, authentication, update, enable] + +# - name: Assert authentication enable update succeeded +# assert: +# that: +# - result_auth_update_enable.failed == false +# - result_auth_update_enable.changed == true +# - "'Successfully' in result_auth_update_enable.response" +# fail_msg: "Authentication enable update should have succeeded" +# success_msg: "Authentication enable update succeeded" +# tags: [positive, authentication, update, enable] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, authentication, update] + +# - name: Test authentication update - disable authentication (preserving existing mode) +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: 
*dnac_login +# state: merged +# config: +# - "{{ authentication_vars_map.test_authentication_update_disable }}" +# register: result_auth_update_disable +# tags: [positive, authentication, update, disable] + +# - name: Assert authentication disable update succeeded +# assert: +# that: +# - result_auth_update_disable.failed == false +# - result_auth_update_disable.changed == true +# - "'Successfully' in result_auth_update_disable.response" +# fail_msg: "Authentication disable update should have succeeded" +# success_msg: "Authentication disable update succeeded" +# tags: [positive, authentication, update, disable] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, authentication, update] + +# # ############################################# +# # POSITIVE TEST CASES - RESET +# # ############################################# + +# - name: Test authentication reset - empty configuration (deleted state resets to defaults) +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: deleted +# config: +# - "{{ authentication_vars_map.test_authentication_reset_empty }}" +# register: result_auth_reset_empty +# tags: [positive, authentication, reset, deleted] + +# - name: Assert authentication empty reset succeeded +# assert: +# that: +# - result_auth_reset_empty.failed == false +# - result_auth_reset_empty.changed == true +# - "'Successfully' in result_auth_reset_empty.response" +# fail_msg: "Authentication empty reset should have succeeded" +# success_msg: "Authentication empty reset succeeded" +# tags: [positive, authentication, reset, deleted] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 15 +# tags: [positive, authentication, reset] + +# - name: Test authentication reset - with existing parameters (deleted state resets to defaults) +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: deleted +# config: +# - "{{ authentication_vars_map.test_authentication_reset_with_params }}" +# register: result_auth_reset_with_params +# tags: [positive, authentication, reset, deleted] + +# - name: Assert authentication reset with params succeeded +# assert: +# that: +# - result_auth_reset_with_params.failed == false +# - result_auth_reset_with_params.changed == true +# - "'Successfully' in result_auth_reset_with_params.response" +# fail_msg: "Authentication reset with params should have succeeded" +# success_msg: "Authentication reset with params succeeded" +# tags: [positive, authentication, reset, deleted] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 15 +# tags: [positive, authentication, reset] diff --git a/tests/integration/ccc_wired_campus_automation_management/tests/test_wca_feature_cdp.yml b/tests/integration/ccc_wired_campus_automation_management/tests/test_wca_feature_cdp.yml new file mode 100644 index 0000000000..7aa578d0a1 --- /dev/null +++ b/tests/integration/ccc_wired_campus_automation_management/tests/test_wca_feature_cdp.yml @@ -0,0 +1,893 @@ +# --- +# # =================================================================================================== +# # CDP FEATURE TESTS SUMMARY +# # =================================================================================================== +# # +# # This test suite validates CDP (Cisco Discovery Protocol) configuration functionality for Wired Campus Automation. +# # +# # TEST CATEGORIES: +# # +# # 1. 
NEGATIVE VALIDATION TESTS (Lines 75-350) +# # - CDP Admin Status validation (string/integer instead of boolean) +# # - CDP Hold Time validation (below min, above max, negative, string, float) +# # - CDP Timer validation (below min, above max, negative, string, float) +# # - CDP Advertise V2 validation (string/integer instead of boolean) +# # - CDP Log Duplex Mismatch validation (string/integer instead of boolean) +# # - Data type and structure validation (invalid dict/list types) +# # +# # 2. POSITIVE VALIDATION TESTS (Lines 350-800) +# # a) CDP Creation Tests +# # - Admin status only configuration +# # - All parameters configuration +# # - Custom timers configuration +# # - Disabled CDP configuration +# # +# # b) CDP Update Tests (Merged State) +# # - Enable CDP updates +# # - Timer modifications +# # - Feature disable/enable +# # - All parameters updates +# # +# # c) CDP Deletion Tests (Deleted State) +# # - Empty configuration deletion/reset +# # - Configuration with parameters deletion +# # +# # d) Boundary Value Tests +# # - Minimum valid values (hold_time: 10, timer: 5) +# # - Maximum valid values (hold_time: 255, timer: 254) +# # - Recommended values testing +# # +# # e) Advanced Test Scenarios +# # - Idempotency tests (same config applied twice) +# # - Cleanup operations +# # +# # VALIDATION RANGES: +# # - CDP Admin Status: boolean (true/false) +# # - CDP Hold Time: integer (10-255 seconds) +# # - CDP Timer: integer (5-254 seconds) +# # - CDP Advertise V2: boolean (true/false) +# # - CDP Log Duplex Mismatch: boolean (true/false) +# # +# # EXPECTED BEHAVIORS: +# # - Negative tests should fail with appropriate error messages +# # - Positive tests should succeed with 'Successfully' in response +# # - Idempotency: First run changes=true, second run changes=false +# # - All tests include proper pause times for configuration settlement +# # +# # =================================================================================================== + +# - debug: msg="Starting CDP feature tests for Wired Campus Automation" +# - debug: msg="Role Path {{ role_path }}" + +# - block: +# - name: Load CDP test variables +# include_vars: +# file: "{{ role_path }}/vars/vars_wca_test_feature_cdp.yml" +# name: cdp_vars_map +# vars: +# dnac_login: &dnac_login +# dnac_host: "{{ dnac_host }}" +# dnac_username: "{{ dnac_username }}" +# dnac_password: "{{ dnac_password }}" +# dnac_verify: "{{ dnac_verify }}" +# dnac_port: "{{ dnac_port }}" +# dnac_version: "{{ dnac_version }}" +# dnac_debug: "{{ dnac_debug }}" +# dnac_log: true +# dnac_log_level: "DEBUG" +# config_verify: true +# dnac_api_task_timeout: "{{ dnac_api_task_timeout }}" +# dnac_log_append: false + +# # ============================================================================= +# # Negative Validation Tests for CDP +# # ============================================================================= + +# # ############################################# +# # CDP Admin Status Tests +# # ############################################# + +# - name: Test CDP admin status validation - string value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ cdp_vars_map.test_cdp_admin_status_string }}" +# register: result_cdp_admin_status_string +# ignore_errors: true +# tags: [negative, cdp, admin_status] + +# - name: Assert CDP admin status string validation failed +# assert: +# that: +# - result_cdp_admin_status_string.failed == true +# - "'must be of type boolean' in 
result_cdp_admin_status_string.msg" +# fail_msg: "CDP admin status string validation should have failed" +# success_msg: "CDP admin status string validation correctly failed" +# tags: [negative, cdp, admin_status] + +# - name: Test CDP admin status validation - integer value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ cdp_vars_map.test_cdp_admin_status_integer }}" +# register: result_cdp_admin_status_integer +# ignore_errors: true +# tags: [negative, cdp, admin_status] + +# - name: Assert CDP admin status integer validation failed +# assert: +# that: +# - result_cdp_admin_status_integer.failed == true +# - "'must be of type boolean' in result_cdp_admin_status_integer.msg" +# fail_msg: "CDP admin status integer validation should have failed" +# success_msg: "CDP admin status integer validation correctly failed" +# tags: [negative, cdp, admin_status] + +# # ############################################# +# # CDP Hold Time Tests +# # ############################################# + +# - name: Test CDP hold time validation - below minimum should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ cdp_vars_map.test_cdp_hold_time_below_min }}" +# register: result_cdp_hold_time_below_min +# ignore_errors: true +# tags: [negative, cdp, hold_time] + +# - name: Assert CDP hold time below minimum validation failed +# assert: +# that: +# - result_cdp_hold_time_below_min.failed == true +# - "'must be within the range' in result_cdp_hold_time_below_min.msg" +# fail_msg: "CDP hold time below minimum validation should have failed" +# success_msg: "CDP hold time below minimum validation correctly failed" +# tags: [negative, cdp, hold_time] + +# - name: Test CDP hold time validation - above maximum should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ cdp_vars_map.test_cdp_hold_time_above_max }}" +# register: result_cdp_hold_time_above_max +# ignore_errors: true +# tags: [negative, cdp, hold_time] + +# - name: Assert CDP hold time above maximum validation failed +# assert: +# that: +# - result_cdp_hold_time_above_max.failed == true +# - "'must be within the range' in result_cdp_hold_time_above_max.msg" +# fail_msg: "CDP hold time above maximum validation should have failed" +# success_msg: "CDP hold time above maximum validation correctly failed" +# tags: [negative, cdp, hold_time] + +# - name: Test CDP hold time validation - string value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ cdp_vars_map.test_cdp_hold_time_string }}" +# register: result_cdp_hold_time_string +# ignore_errors: true +# tags: [negative, cdp, hold_time] + +# - name: Assert CDP hold time string validation failed +# assert: +# that: +# - result_cdp_hold_time_string.failed == true +# - "'must be of type integer' in result_cdp_hold_time_string.msg" +# fail_msg: "CDP hold time string validation should have failed" +# success_msg: "CDP hold time string validation correctly failed" +# tags: [negative, cdp, hold_time] + +# - name: Test CDP hold time validation - float value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ cdp_vars_map.test_cdp_hold_time_float }}" +# register: result_cdp_hold_time_float +# ignore_errors: true +# tags: [negative, cdp, hold_time] + +# - name: Assert CDP 
hold time float validation failed +# assert: +# that: +# - result_cdp_hold_time_float.failed == true +# - "'must be of type integer' in result_cdp_hold_time_float.msg" +# fail_msg: "CDP hold time float validation should have failed" +# success_msg: "CDP hold time float validation correctly failed" +# tags: [negative, cdp, hold_time] + +# - name: Test CDP hold time validation - negative value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ cdp_vars_map.test_cdp_hold_time_negative }}" +# register: result_cdp_hold_time_negative +# ignore_errors: true +# tags: [negative, cdp, hold_time] + +# - name: Assert CDP hold time negative validation failed +# assert: +# that: +# - result_cdp_hold_time_negative.failed == true +# - "'must be within the range' in result_cdp_hold_time_negative.msg" +# fail_msg: "CDP hold time negative validation should have failed" +# success_msg: "CDP hold time negative validation correctly failed" +# tags: [negative, cdp, hold_time] + +# # ############################################# +# # CDP Timer Tests +# # ############################################# + +# - name: Test CDP timer validation - below minimum should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ cdp_vars_map.test_cdp_timer_below_min }}" +# register: result_cdp_timer_below_min +# ignore_errors: true +# tags: [negative, cdp, timer] + +# - name: Assert CDP timer below minimum validation failed +# assert: +# that: +# - result_cdp_timer_below_min.failed == true +# - "'must be within the range' in result_cdp_timer_below_min.msg" +# fail_msg: "CDP timer below minimum validation should have failed" +# success_msg: "CDP timer below minimum validation correctly failed" +# tags: [negative, cdp, timer] + +# - name: Test CDP timer validation - above maximum should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ cdp_vars_map.test_cdp_timer_above_max }}" +# register: result_cdp_timer_above_max +# ignore_errors: true +# tags: [negative, cdp, timer] + +# - name: Assert CDP timer above maximum validation failed +# assert: +# that: +# - result_cdp_timer_above_max.failed == true +# - "'must be within the range' in result_cdp_timer_above_max.msg" +# fail_msg: "CDP timer above maximum validation should have failed" +# success_msg: "CDP timer above maximum validation correctly failed" +# tags: [negative, cdp, timer] + +# - name: Test CDP timer validation - string value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ cdp_vars_map.test_cdp_timer_string }}" +# register: result_cdp_timer_string +# ignore_errors: true +# tags: [negative, cdp, timer] + +# - name: Assert CDP timer string validation failed +# assert: +# that: +# - result_cdp_timer_string.failed == true +# - "'must be of type integer' in result_cdp_timer_string.msg" +# fail_msg: "CDP timer string validation should have failed" +# success_msg: "CDP timer string validation correctly failed" +# tags: [negative, cdp, timer] + +# - name: Test CDP timer validation - float value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ cdp_vars_map.test_cdp_timer_float }}" +# register: result_cdp_timer_float +# ignore_errors: true +# tags: [negative, cdp, timer] + +# - name: Assert CDP timer float validation failed +# assert: 
+# that: +# - result_cdp_timer_float.failed == true +# - "'must be of type integer' in result_cdp_timer_float.msg" +# fail_msg: "CDP timer float validation should have failed" +# success_msg: "CDP timer float validation correctly failed" +# tags: [negative, cdp, timer] + +# - name: Test CDP timer validation - negative value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ cdp_vars_map.test_cdp_timer_negative }}" +# register: result_cdp_timer_negative +# ignore_errors: true +# tags: [negative, cdp, timer] + +# - name: Assert CDP timer negative validation failed +# assert: +# that: +# - result_cdp_timer_negative.failed == true +# - "'must be within the range' in result_cdp_timer_negative.msg" +# fail_msg: "CDP timer negative validation should have failed" +# success_msg: "CDP timer negative validation correctly failed" +# tags: [negative, cdp, timer] + +# # ############################################# +# # CDP Advertise V2 Tests +# # ############################################# + +# - name: Test CDP advertise v2 validation - string value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ cdp_vars_map.test_cdp_advertise_v2_string }}" +# register: result_cdp_advertise_v2_string +# ignore_errors: true +# tags: [negative, cdp, advertise_v2] + +# - name: Assert CDP advertise v2 string validation failed +# assert: +# that: +# - result_cdp_advertise_v2_string.failed == true +# - "'must be of type boolean' in result_cdp_advertise_v2_string.msg" +# fail_msg: "CDP advertise v2 string validation should have failed" +# success_msg: "CDP advertise v2 string validation correctly failed" +# tags: [negative, cdp, advertise_v2] + +# - name: Test CDP advertise v2 validation - integer value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ cdp_vars_map.test_cdp_advertise_v2_integer }}" +# register: result_cdp_advertise_v2_integer +# ignore_errors: true +# tags: [negative, cdp, advertise_v2] + +# - name: Assert CDP advertise v2 integer validation failed +# assert: +# that: +# - result_cdp_advertise_v2_integer.failed == true +# - "'must be of type boolean' in result_cdp_advertise_v2_integer.msg" +# fail_msg: "CDP advertise v2 integer validation should have failed" +# success_msg: "CDP advertise v2 integer validation correctly failed" +# tags: [negative, cdp, advertise_v2] + +# # ############################################# +# # CDP Log Duplex Mismatch Tests +# # ############################################# + +# - name: Test CDP log duplex mismatch validation - string value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ cdp_vars_map.test_cdp_log_duplex_mismatch_string }}" +# register: result_cdp_log_duplex_mismatch_string +# ignore_errors: true +# tags: [negative, cdp, log_duplex] + +# - name: Assert CDP log duplex mismatch string validation failed +# assert: +# that: +# - result_cdp_log_duplex_mismatch_string.failed == true +# - "'must be of type boolean' in result_cdp_log_duplex_mismatch_string.msg" +# fail_msg: "CDP log duplex mismatch string validation should have failed" +# success_msg: "CDP log duplex mismatch string validation correctly failed" +# tags: [negative, cdp, log_duplex] + +# - name: Test CDP log duplex mismatch validation - integer value should fail +# 
cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ cdp_vars_map.test_cdp_log_duplex_mismatch_integer }}" +# register: result_cdp_log_duplex_mismatch_integer +# ignore_errors: true +# tags: [negative, cdp, log_duplex] + +# - name: Assert CDP log duplex mismatch integer validation failed +# assert: +# that: +# - result_cdp_log_duplex_mismatch_integer.failed == true +# - "'must be of type boolean' in result_cdp_log_duplex_mismatch_integer.msg" +# fail_msg: "CDP log duplex mismatch integer validation should have failed" +# success_msg: "CDP log duplex mismatch integer validation correctly failed" +# tags: [negative, cdp, log_duplex] + +# # ############################################# +# # Data Type and Structure Tests +# # ############################################# + +# - name: Test CDP structure validation - string instead of dict should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ cdp_vars_map.test_cdp_invalid_dict_type }}" +# register: result_cdp_invalid_dict_type +# ignore_errors: true +# tags: [negative, cdp, structure] + +# - name: Assert CDP invalid dict type validation failed +# assert: +# that: +# - result_cdp_invalid_dict_type.failed == true +# - "'must be of type dictionary' in result_cdp_invalid_dict_type.msg" +# fail_msg: "CDP invalid dict type validation should have failed" +# success_msg: "CDP invalid dict type validation correctly failed" +# tags: [negative, cdp, structure] + +# - name: Test CDP structure validation - list instead of dict should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ cdp_vars_map.test_cdp_invalid_list_type }}" +# register: result_cdp_invalid_list_type +# ignore_errors: true +# tags: [negative, cdp, structure] + +# - name: Assert CDP invalid list type validation failed +# assert: +# that: +# - result_cdp_invalid_list_type.failed == true +# - "'must be of type dictionary' in result_cdp_invalid_list_type.msg" +# fail_msg: "CDP invalid list type validation should have failed" +# success_msg: "CDP invalid list type validation correctly failed" +# tags: [negative, cdp, structure] + +# - name: Display negative test summary +# debug: +# msg: "CDP negative validation tests completed successfully" +# tags: [negative, cdp] + +# # ============================================================================= +# # POSITIVE TEST CASES - CDP CREATION (MERGED STATE) +# # ============================================================================= + +# # ############################################# +# # Single CDP Parameter Tests +# # ############################################# + +# - name: Test CDP configuration with admin status only +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ cdp_vars_map.test_cdp_create_admin_status_only }}" +# register: result_cdp_create_admin_status_only +# tags: [positive, cdp, create, admin_status] + +# - name: Assert CDP admin status only configuration succeeded +# assert: +# that: +# - result_cdp_create_admin_status_only.failed == false +# - result_cdp_create_admin_status_only.changed == true +# - "'Successfully' in result_cdp_create_admin_status_only.response" +# fail_msg: "CDP admin status only configuration should have succeeded" +# success_msg: "CDP admin status only configuration succeeded" +# tags: [positive, cdp, create, admin_status] + +# - name: Pause to allow 
configuration to settle +# pause: +# seconds: 10 +# tags: [positive, cdp, create] + +# - name: Test CDP configuration with all parameters +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ cdp_vars_map.test_cdp_create_all_params }}" +# register: result_cdp_create_all_params +# tags: [positive, cdp, create, all_params] + +# - name: Assert CDP all parameters configuration succeeded +# assert: +# that: +# - result_cdp_create_all_params.failed == false +# - result_cdp_create_all_params.changed == true +# - "'Successfully' in result_cdp_create_all_params.response" +# fail_msg: "CDP all parameters configuration should have succeeded" +# success_msg: "CDP all parameters configuration succeeded" +# tags: [positive, cdp, create, all_params] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, cdp, create] + +# - name: Test CDP configuration with custom timers +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ cdp_vars_map.test_cdp_create_custom_timers }}" +# register: result_cdp_create_custom_timers +# tags: [positive, cdp, create, timers] + +# - name: Assert CDP custom timers configuration succeeded +# assert: +# that: +# - result_cdp_create_custom_timers.failed == false +# - result_cdp_create_custom_timers.changed == true +# - "'Successfully' in result_cdp_create_custom_timers.response" +# fail_msg: "CDP custom timers configuration should have succeeded" +# success_msg: "CDP custom timers configuration succeeded" +# tags: [positive, cdp, create, timers] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, cdp, create] + +# - name: Test CDP disabled configuration +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ cdp_vars_map.test_cdp_create_disabled }}" +# register: result_cdp_create_disabled +# tags: [positive, cdp, create, disabled] + +# - name: Assert CDP disabled configuration succeeded +# assert: +# that: +# - result_cdp_create_disabled.failed == false +# - result_cdp_create_disabled.changed == true +# - "'Successfully' in result_cdp_create_disabled.response" +# fail_msg: "CDP disabled configuration should have succeeded" +# success_msg: "CDP disabled configuration succeeded" +# tags: [positive, cdp, create, disabled] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, cdp, create] + +# # ============================================================================= +# # POSITIVE TEST CASES - CDP UPDATE (MERGED STATE) +# # ============================================================================= + +# # ############################################# +# # Single CDP Update Tests +# # ############################################# + +# - name: Test CDP update - enable CDP +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ cdp_vars_map.test_cdp_update_enable }}" +# register: result_cdp_update_enable +# tags: [positive, cdp, update, enable] + +# - name: Assert CDP enable update succeeded +# assert: +# that: +# - result_cdp_update_enable.failed == false +# - result_cdp_update_enable.changed == true +# - "'Successfully' in result_cdp_update_enable.response" +# fail_msg: "CDP enable update should have succeeded" +# success_msg: "CDP enable update succeeded" +# tags: [positive, cdp, update, enable] + +# - name: Pause to allow 
configuration to settle +# pause: +# seconds: 10 +# tags: [positive, cdp, update] + +# - name: Test CDP update - modify timers +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ cdp_vars_map.test_cdp_update_timers }}" +# register: result_cdp_update_timers +# tags: [positive, cdp, update, timers] + +# - name: Assert CDP timers update succeeded +# assert: +# that: +# - result_cdp_update_timers.failed == false +# - result_cdp_update_timers.changed == true +# - "'Successfully' in result_cdp_update_timers.response" +# fail_msg: "CDP timers update should have succeeded" +# success_msg: "CDP timers update succeeded" +# tags: [positive, cdp, update, timers] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, cdp, update] + +# - name: Test CDP update - disable features +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ cdp_vars_map.test_cdp_update_disable_features }}" +# register: result_cdp_update_disable_features +# tags: [positive, cdp, update, disable] + +# - name: Assert CDP disable features update succeeded +# assert: +# that: +# - result_cdp_update_disable_features.failed == false +# - result_cdp_update_disable_features.changed == true +# - "'Successfully' in result_cdp_update_disable_features.response" +# fail_msg: "CDP disable features update should have succeeded" +# success_msg: "CDP disable features update succeeded" +# tags: [positive, cdp, update, disable] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, cdp, update] + +# - name: Test CDP update - all parameters +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ cdp_vars_map.test_cdp_update_all_params }}" +# register: result_cdp_update_all_params +# tags: [positive, cdp, update, all_params] + +# - name: Assert CDP all parameters update succeeded +# assert: +# that: +# - result_cdp_update_all_params.failed == false +# - result_cdp_update_all_params.changed == true +# - "'Successfully' in result_cdp_update_all_params.response" +# fail_msg: "CDP all parameters update should have succeeded" +# success_msg: "CDP all parameters update succeeded" +# tags: [positive, cdp, update, all_params] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, cdp, update] + +# # ============================================================================= +# # POSITIVE TEST CASES - CDP DELETION (DELETED STATE) +# # ============================================================================= + +# # ############################################# +# # CDP Deletion Tests +# # ############################################# + +# - name: Test CDP deletion/reset - empty configuration +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: deleted +# config: +# - "{{ cdp_vars_map.test_cdp_delete_empty }}" +# register: result_cdp_delete_empty +# tags: [positive, cdp, delete, empty] + +# - name: Assert CDP empty deletion succeeded +# assert: +# that: +# - result_cdp_delete_empty.failed == false +# - result_cdp_delete_empty.changed == true +# - "'Successfully' in result_cdp_delete_empty.response" +# fail_msg: "CDP empty deletion should have succeeded" +# success_msg: "CDP empty deletion succeeded" +# tags: [positive, cdp, delete, empty] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: 
[positive, cdp, delete] + +# - name: Test CDP update - all parameters +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ cdp_vars_map.test_cdp_update_all_params }}" +# register: result_cdp_update_all_params +# tags: [positive, cdp, update, all_params] + +# - name: Assert CDP all parameters update succeeded +# assert: +# that: +# - result_cdp_update_all_params.failed == false +# - result_cdp_update_all_params.changed == true +# - "'Successfully' in result_cdp_update_all_params.response" +# fail_msg: "CDP all parameters update should have succeeded" +# success_msg: "CDP all parameters update succeeded" +# tags: [positive, cdp, update, all_params] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, cdp, delete] + +# - name: Test CDP deletion/reset - with existing parameters +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: deleted +# config: +# - "{{ cdp_vars_map.test_cdp_delete_with_params }}" +# register: result_cdp_delete_with_params +# tags: [positive, cdp, delete, params] + +# - name: Assert CDP deletion with params succeeded +# assert: +# that: +# - result_cdp_delete_with_params.failed == false +# - result_cdp_delete_with_params.changed == true +# - "'Successfully' in result_cdp_delete_with_params.response" +# fail_msg: "CDP deletion with params should have succeeded" +# success_msg: "CDP deletion with params succeeded" +# tags: [positive, cdp, delete, params] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, cdp, delete] + +# # ============================================================================= +# # BOUNDARY VALUE TESTS +# # ============================================================================= + +# # ############################################# +# # Boundary Value Tests +# # ############################################# + +# - name: Test CDP with minimum valid values +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ cdp_vars_map.test_cdp_boundary_min_values }}" +# register: result_cdp_boundary_min_values +# tags: [positive, cdp, boundary, min] + +# - name: Assert CDP minimum values configuration succeeded +# assert: +# that: +# - result_cdp_boundary_min_values.failed == false +# - result_cdp_boundary_min_values.changed == true +# - "'Successfully' in result_cdp_boundary_min_values.response" +# fail_msg: "CDP minimum values configuration should have succeeded" +# success_msg: "CDP minimum values configuration succeeded" +# tags: [positive, cdp, boundary, min] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, cdp, boundary] + +# - name: Test CDP with maximum valid values +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ cdp_vars_map.test_cdp_boundary_max_values }}" +# register: result_cdp_boundary_max_values +# tags: [positive, cdp, boundary, max] + +# - name: Assert CDP maximum values configuration succeeded +# assert: +# that: +# - result_cdp_boundary_max_values.failed == false +# - result_cdp_boundary_max_values.changed == true +# - "'Successfully' in result_cdp_boundary_max_values.response" +# fail_msg: "CDP maximum values configuration should have succeeded" +# success_msg: "CDP maximum values configuration succeeded" +# tags: [positive, cdp, boundary, max] + +# - name: Pause to allow configuration to settle +# 
pause: +# seconds: 10 +# tags: [positive, cdp, boundary] + +# - name: Test CDP with recommended values +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ cdp_vars_map.test_cdp_recommended_values }}" +# register: result_cdp_recommended_values +# tags: [positive, cdp, boundary, recommended] + +# - name: Assert CDP recommended values configuration succeeded +# assert: +# that: +# - result_cdp_recommended_values.failed == false +# - result_cdp_recommended_values.changed == true +# - "'Successfully' in result_cdp_recommended_values.response" +# fail_msg: "CDP recommended values configuration should have succeeded" +# success_msg: "CDP recommended values configuration succeeded" +# tags: [positive, cdp, boundary, recommended] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, cdp, boundary] + +# # ============================================================================= +# # ADVANCED POSITIVE TEST SCENARIOS +# # ============================================================================= + +# # ############################################# +# # Idempotency Tests +# # ############################################# + +# - name: Test CDP Idempotency - Configure same CDP settings twice +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ cdp_vars_map.test_cdp_create_all_params }}" +# register: result_cdp_idempotency_first +# tags: [positive, cdp, idempotency] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 40 +# tags: [positive, cdp, idempotency] + +# # should be idempotent +# - name: Test CDP Idempotency - Configure same CDP settings again +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ cdp_vars_map.test_cdp_create_all_params }}" +# register: result_cdp_idempotency_second +# tags: [positive, cdp, idempotency] + +# - name: Assert CDP Idempotency - Second configuration should not change +# assert: +# that: +# - result_cdp_idempotency_second.failed == false +# - result_cdp_idempotency_second.changed == false +# fail_msg: "CDP idempotency test failed" +# success_msg: "CDP idempotency test succeeded" +# tags: [positive, cdp, idempotency] + +# # ############################################# +# # Cleanup Test Configurations +# # ############################################# + +# - name: Cleanup - Reset CDP to default configuration +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: deleted +# config: +# - ip_address: "204.1.2.3" +# device_collection_status_check: false +# layer2_configuration: +# cdp: {} +# register: result_cdp_cleanup +# ignore_errors: true +# tags: [cleanup, cdp] + +# always: +# - name: Display positive test summary +# debug: +# msg: "CDP positive validation tests completed successfully" +# tags: [positive, cdp] + +# - name: Display final cleanup status +# debug: +# var: result_cdp_cleanup +# when: result_cdp_cleanup is defined +# tags: [cleanup, cdp] diff --git a/tests/integration/ccc_wired_campus_automation_management/tests/test_wca_feature_dhcp_snooping.yml b/tests/integration/ccc_wired_campus_automation_management/tests/test_wca_feature_dhcp_snooping.yml new file mode 100644 index 0000000000..0232a48fc0 --- /dev/null +++ b/tests/integration/ccc_wired_campus_automation_management/tests/test_wca_feature_dhcp_snooping.yml @@ -0,0 +1,1237 @@ +# --- +# # 
=================================================================================================== +# # DHCP SNOOPING FEATURE TESTS SUMMARY +# # =================================================================================================== +# # +# # This test suite validates DHCP Snooping configuration functionality for Wired Campus Automation. +# # +# # TEST CATEGORIES: +# # +# # 1. NEGATIVE VALIDATION TESTS (Lines 110-590) +# # - DHCP Admin Status validation (string/integer instead of boolean) +# # - DHCP Snooping VLANs validation (data type, range, elements validation) +# # - DHCP Snooping Glean validation (string/integer instead of boolean) +# # - DHCP Database Agent URL validation (data type, length validation) +# # - DHCP Database Timeout validation (data type, range validation) +# # - DHCP Database Write Delay validation (data type, range validation) +# # - DHCP Proxy Bridge VLANs validation (data type, range validation) +# # - Data type and structure validation (invalid dict/list types) +# # +# # 2. POSITIVE VALIDATION TESTS (Lines 590-1513) +# # a) DHCP Snooping Creation Tests (Merged State) +# # - Admin status only configuration +# # - VLANs only configuration +# # - Glean only configuration +# # - Database URL only configuration +# # - Database timeout only configuration +# # - Database write delay only configuration +# # - Proxy bridge VLANs only configuration +# # - All parameters comprehensive configuration +# # +# # b) DHCP Snooping Update Tests (Merged State) +# # - Enable/disable admin status +# # - Update VLANs list +# # - Enable/disable glean +# # - Update database agent URL +# # - Update database timeout +# # - Update database write delay +# # - Update proxy bridge VLANs +# # - Comprehensive updates +# # +# # c) DHCP Snooping Reset Tests (Deleted State) +# # - Empty configuration reset (resets to defaults) +# # - Configuration with parameters reset +# # +# # d) Boundary Value Tests +# # - Minimum/maximum database timeout (0-86400 seconds) +# # - Minimum/maximum database write delay (15-86400 seconds) +# # - Minimum/maximum database agent URL length (5-227 characters) +# # - Minimum/maximum VLAN IDs (1-4094) +# # - Single VLAN vs large VLAN list configurations +# # +# # e) Special Configuration Tests +# # - Database configuration only +# # - VLAN configuration only +# # - Minimal configurations +# # - Overlapping VLANs (same VLAN in both lists) +# # - Different URL protocols (FTP, HTTP) +# # +# # f) Advanced Test Scenarios +# # - Idempotency tests (same config applied twice) +# # - Cleanup operations +# # +# # VALIDATION RANGES: +# # - Admin Status: boolean (true/false) +# # - Snooping VLANs: list of integers (1-4094) +# # - Glean: boolean (true/false) +# # - Database Agent URL: string (5-227 characters) +# # - Database Timeout: integer (0-86400 seconds) +# # - Database Write Delay: integer (15-86400 seconds) +# # - Proxy Bridge VLANs: list of integers (1-4094) +# # +# # DHCP SNOOPING FUNCTIONALITY: +# # - DHCP Snooping provides security by filtering untrusted DHCP messages +# # - Builds and maintains DHCP snooping binding database +# # - Validates DHCP packets received from untrusted sources +# # - Supports database persistence via external URLs (FTP, HTTP, TFTP) +# # - Glean feature extracts IP address bindings from existing traffic +# # - Admin status enables/disables the entire DHCP snooping feature +# # +# # EXPECTED BEHAVIORS: +# # - Negative tests should fail with appropriate error messages +# # - Positive tests should succeed with 'Successfully' in response 
+# # - Idempotency: First run changes=true, second run changes=false +# # - All tests include proper pause times for configuration settlement +# # - Deleted state resets DHCP snooping to default configuration +# # - Empty VLAN lists may trigger API bugs (special test cases included) +# # - URL validation accepts various protocols (FTP, HTTP, TFTP) +# # - Database timeout 0 means infinite timeout +# # - Write delay controls frequency of database updates +# # +# # SPECIAL CONSIDERATIONS: +# # - Empty VLAN lists are tested for potential API bugs +# # - Overlapping VLANs between snooping and proxy bridge lists are allowed +# # - Database URL must be properly formatted with protocol prefix +# # - Timeout and write delay values are in seconds +# # - VLAN ID 1 is typically reserved but can be configured +# # - Configuration changes may require time to propagate to the device +# # +# # =================================================================================================== + +# - debug: msg="Starting DHCP Snooping feature tests for Wired Campus Automation" +# - debug: msg="Role Path {{ role_path }}" + +# - block: +# - name: Load DHCP Snooping test variables +# include_vars: +# file: "{{ role_path }}/vars/vars_wca_test_feature_dhcp_snooping.yml" +# name: dhcp_snooping_vars_map +# vars: +# dnac_login: &dnac_login +# dnac_host: "{{ dnac_host }}" +# dnac_username: "{{ dnac_username }}" +# dnac_password: "{{ dnac_password }}" +# dnac_verify: "{{ dnac_verify }}" +# dnac_port: "{{ dnac_port }}" +# dnac_version: "{{ dnac_version }}" +# dnac_debug: "{{ dnac_debug }}" +# dnac_log: true +# dnac_log_level: "DEBUG" +# config_verify: true +# dnac_api_task_timeout: "{{ dnac_api_task_timeout }}" +# dnac_log_append: false + +# # ============================================================================= +# # NEGATIVE VALIDATION TESTS FOR DHCP SNOOPING +# # ============================================================================= + +# # ############################################# +# # DHCP Admin Status Tests +# # ############################################# + +# - name: Test DHCP admin status validation - string value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ dhcp_snooping_vars_map.test_dhcp_snooping_admin_status_string }}" +# register: result_dhcp_admin_status_string +# ignore_errors: true +# tags: [negative, dhcp_snooping, admin_status] + +# - name: Assert DHCP admin status string validation failed +# assert: +# that: +# - result_dhcp_admin_status_string.failed == true +# - "'must be of type boolean' in result_dhcp_admin_status_string.msg" +# fail_msg: "DHCP admin status string validation should have failed" +# success_msg: "DHCP admin status string validation correctly failed" +# tags: [negative, dhcp_snooping, admin_status] + +# - name: Test DHCP admin status validation - integer value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ dhcp_snooping_vars_map.test_dhcp_snooping_admin_status_integer }}" +# register: result_dhcp_admin_status_integer +# ignore_errors: true +# tags: [negative, dhcp_snooping, admin_status] + +# - name: Assert DHCP admin status integer validation failed +# assert: +# that: +# - result_dhcp_admin_status_integer.failed == true +# - "'must be of type boolean' in result_dhcp_admin_status_integer.msg" +# fail_msg: "DHCP admin status integer validation should have failed" +# success_msg: "DHCP admin status integer 
validation correctly failed" +# tags: [negative, dhcp_snooping, admin_status] + +# # ############################################# +# # DHCP Snooping VLANs Tests +# # ############################################# + +# - name: Test DHCP snooping VLANs validation - string value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ dhcp_snooping_vars_map.test_dhcp_snooping_vlans_string }}" +# register: result_dhcp_vlans_string +# ignore_errors: true +# tags: [negative, dhcp_snooping, vlans] + +# - name: Assert DHCP snooping VLANs string validation failed +# assert: +# that: +# - result_dhcp_vlans_string.failed == true +# - "'must be a list' in result_dhcp_vlans_string.msg" +# fail_msg: "DHCP snooping VLANs string validation should have failed" +# success_msg: "DHCP snooping VLANs string validation correctly failed" +# tags: [negative, dhcp_snooping, vlans] + +# - name: Test DHCP snooping VLANs validation - integer value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ dhcp_snooping_vars_map.test_dhcp_snooping_vlans_integer }}" +# register: result_dhcp_vlans_integer +# ignore_errors: true +# tags: [negative, dhcp_snooping, vlans] + +# - name: Assert DHCP snooping VLANs integer validation failed +# assert: +# that: +# - result_dhcp_vlans_integer.failed == true +# - "'must be a list' in result_dhcp_vlans_integer.msg" +# fail_msg: "DHCP snooping VLANs integer validation should have failed" +# success_msg: "DHCP snooping VLANs integer validation correctly failed" +# tags: [negative, dhcp_snooping, vlans] + +# - name: Test DHCP snooping VLANs validation - boolean value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ dhcp_snooping_vars_map.test_dhcp_snooping_vlans_boolean }}" +# register: result_dhcp_vlans_boolean +# ignore_errors: true +# tags: [negative, dhcp_snooping, vlans] + +# - name: Assert DHCP snooping VLANs boolean validation failed +# assert: +# that: +# - result_dhcp_vlans_boolean.failed == true +# - "'must be a list' in result_dhcp_vlans_boolean.msg" +# fail_msg: "DHCP snooping VLANs boolean validation should have failed" +# success_msg: "DHCP snooping VLANs boolean validation correctly failed" +# tags: [negative, dhcp_snooping, vlans] + +# - name: Test DHCP snooping VLANs validation - VLAN ID out of range (too low) should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ dhcp_snooping_vars_map.test_dhcp_snooping_vlans_range_low }}" +# register: result_dhcp_vlans_range_low +# ignore_errors: true +# tags: [negative, dhcp_snooping, vlans, range] + +# - name: Assert DHCP snooping VLANs range low validation failed +# assert: +# that: +# - result_dhcp_vlans_range_low.failed == true +# - "'must be between 1 and 4094' in result_dhcp_vlans_range_low.msg" +# fail_msg: "DHCP snooping VLANs range low validation should have failed" +# success_msg: "DHCP snooping VLANs range low validation correctly failed" +# tags: [negative, dhcp_snooping, vlans, range] + +# - name: Test DHCP snooping VLANs validation - VLAN ID out of range (too high) should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ dhcp_snooping_vars_map.test_dhcp_snooping_vlans_range_high }}" +# register: result_dhcp_vlans_range_high +# ignore_errors: true +# tags: [negative, 
dhcp_snooping, vlans, range] + +# - name: Assert DHCP snooping VLANs range high validation failed +# assert: +# that: +# - result_dhcp_vlans_range_high.failed == true +# - "'must be between 1 and 4094' in result_dhcp_vlans_range_high.msg" +# fail_msg: "DHCP snooping VLANs range high validation should have failed" +# success_msg: "DHCP snooping VLANs range high validation correctly failed" +# tags: [negative, dhcp_snooping, vlans, range] + +# - name: Test DHCP snooping VLANs validation - string elements should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ dhcp_snooping_vars_map.test_dhcp_snooping_vlans_string_elements }}" +# register: result_dhcp_vlans_string_elements +# ignore_errors: true +# tags: [negative, dhcp_snooping, vlans, elements] + +# - name: Assert DHCP snooping VLANs string elements validation failed +# assert: +# that: +# - result_dhcp_vlans_string_elements.failed == true +# - "' must be integers' in result_dhcp_vlans_string_elements.msg" +# fail_msg: "DHCP snooping VLANs string elements validation should have failed" +# success_msg: "DHCP snooping VLANs string elements validation correctly failed" +# tags: [negative, dhcp_snooping, vlans, elements] + +# - name: Test DHCP snooping VLANs - empty list (check for API bug) +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ dhcp_snooping_vars_map.test_dhcp_snooping_vlans_empty_list }}" +# register: result_dhcp_vlans_empty_list +# ignore_errors: true +# tags: [negative, dhcp_snooping, vlans, empty_list, api_bug_test] + +# - name: Display empty list test result (potential API bug) +# debug: +# msg: | +# Empty VLAN list test result: +# Failed: {{ result_dhcp_vlans_empty_list.failed }} +# Message: {{ result_dhcp_vlans_empty_list.msg | default('No error message') }} +# This test checks for potential API bug with empty VLAN lists +# tags: [negative, dhcp_snooping, vlans, empty_list, api_bug_test] + +# # ############################################# +# # DHCP Snooping Glean Tests +# # ############################################# + +# - name: Test DHCP snooping glean validation - string value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ dhcp_snooping_vars_map.test_dhcp_snooping_glean_string }}" +# register: result_dhcp_glean_string +# ignore_errors: true +# tags: [negative, dhcp_snooping, glean] + +# - name: Assert DHCP snooping glean string validation failed +# assert: +# that: +# - result_dhcp_glean_string.failed == true +# - "'must be of type boolean' in result_dhcp_glean_string.msg" +# fail_msg: "DHCP snooping glean string validation should have failed" +# success_msg: "DHCP snooping glean string validation correctly failed" +# tags: [negative, dhcp_snooping, glean] + +# - name: Test DHCP snooping glean validation - integer value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ dhcp_snooping_vars_map.test_dhcp_snooping_glean_integer }}" +# register: result_dhcp_glean_integer +# ignore_errors: true +# tags: [negative, dhcp_snooping, glean] + +# - name: Assert DHCP snooping glean integer validation failed +# assert: +# that: +# - result_dhcp_glean_integer.failed == true +# - "'must be of type boolean' in result_dhcp_glean_integer.msg" +# fail_msg: "DHCP snooping glean integer validation should have failed" +# success_msg: "DHCP snooping glean integer 
validation correctly failed" +# tags: [negative, dhcp_snooping, glean] + +# # ############################################# +# # DHCP Database Agent URL Tests +# # ############################################# + +# - name: Test DHCP database agent URL validation - integer value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ dhcp_snooping_vars_map.test_dhcp_snooping_database_agent_url_integer }}" +# register: result_dhcp_url_integer +# ignore_errors: true +# tags: [negative, dhcp_snooping, database_url] + +# - name: Assert DHCP database agent URL integer validation failed +# assert: +# that: +# - result_dhcp_url_integer.failed == true +# - "'must be of type string' in result_dhcp_url_integer.msg" +# fail_msg: "DHCP database agent URL integer validation should have failed" +# success_msg: "DHCP database agent URL integer validation correctly failed" +# tags: [negative, dhcp_snooping, database_url] + +# - name: Test DHCP database agent URL validation - boolean value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ dhcp_snooping_vars_map.test_dhcp_snooping_database_agent_url_boolean }}" +# register: result_dhcp_url_boolean +# ignore_errors: true +# tags: [negative, dhcp_snooping, database_url] + +# - name: Assert DHCP database agent URL boolean validation failed +# assert: +# that: +# - result_dhcp_url_boolean.failed == true +# - "'must be of type string' in result_dhcp_url_boolean.msg" +# fail_msg: "DHCP database agent URL boolean validation should have failed" +# success_msg: "DHCP database agent URL boolean validation correctly failed" +# tags: [negative, dhcp_snooping, database_url] + +# - name: Test DHCP database agent URL validation - exceeds maximum length should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ dhcp_snooping_vars_map.test_dhcp_snooping_database_agent_url_max_length }}" +# register: result_dhcp_url_max_length +# ignore_errors: true +# tags: [negative, dhcp_snooping, database_url, length] + +# - name: Assert DHCP database agent URL max length validation failed +# assert: +# that: +# - result_dhcp_url_max_length.failed == true +# - "'exceeds maximum length' in result_dhcp_url_max_length.msg" +# fail_msg: "DHCP database agent URL max length validation should have failed" +# success_msg: "DHCP database agent URL max length validation correctly failed" +# tags: [negative, dhcp_snooping, database_url, length] + +# # ############################################# +# # DHCP Database Timeout Tests +# # ############################################# + +# - name: Test DHCP database timeout validation - string value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ dhcp_snooping_vars_map.test_dhcp_snooping_database_timeout_string }}" +# register: result_dhcp_timeout_string +# ignore_errors: true +# tags: [negative, dhcp_snooping, database_timeout] + +# - name: Assert DHCP database timeout string validation failed +# assert: +# that: +# - result_dhcp_timeout_string.failed == true +# - "'must be of type integer' in result_dhcp_timeout_string.msg" +# fail_msg: "DHCP database timeout string validation should have failed" +# success_msg: "DHCP database timeout string validation correctly failed" +# tags: [negative, dhcp_snooping, database_timeout] + +# - name: Test DHCP database timeout validation - 
boolean value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ dhcp_snooping_vars_map.test_dhcp_snooping_database_timeout_boolean }}" +# register: result_dhcp_timeout_boolean +# ignore_errors: true +# tags: [negative, dhcp_snooping, database_timeout] + +# - name: Assert DHCP database timeout boolean validation failed +# assert: +# that: +# - result_dhcp_timeout_boolean.failed == true +# - "'must be of type integer' in result_dhcp_timeout_boolean.msg" +# fail_msg: "DHCP database timeout boolean validation should have failed" +# success_msg: "DHCP database timeout boolean validation correctly failed" +# tags: [negative, dhcp_snooping, database_timeout] + +# - name: Test DHCP database timeout validation - negative value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ dhcp_snooping_vars_map.test_dhcp_snooping_database_timeout_negative }}" +# register: result_dhcp_timeout_negative +# ignore_errors: true +# tags: [negative, dhcp_snooping, database_timeout, range] + +# - name: Assert DHCP database timeout negative validation failed +# assert: +# that: +# - result_dhcp_timeout_negative.failed == true +# - "'must be within the range' in result_dhcp_timeout_negative.msg" +# fail_msg: "DHCP database timeout negative validation should have failed" +# success_msg: "DHCP database timeout negative validation correctly failed" +# tags: [negative, dhcp_snooping, database_timeout, range] + +# - name: Test DHCP database timeout validation - exceeds maximum value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ dhcp_snooping_vars_map.test_dhcp_snooping_database_timeout_max }}" +# register: result_dhcp_timeout_max +# ignore_errors: true +# tags: [negative, dhcp_snooping, database_timeout, range] + +# - name: Assert DHCP database timeout max validation failed +# assert: +# that: +# - result_dhcp_timeout_max.failed == true +# - "'must be within the range' in result_dhcp_timeout_max.msg" +# fail_msg: "DHCP database timeout max validation should have failed" +# success_msg: "DHCP database timeout max validation correctly failed" +# tags: [negative, dhcp_snooping, database_timeout, range] + +# # ############################################# +# # DHCP Database Write Delay Tests +# # ############################################# + +# - name: Test DHCP database write delay validation - string value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ dhcp_snooping_vars_map.test_dhcp_snooping_database_write_delay_string }}" +# register: result_dhcp_write_delay_string +# ignore_errors: true +# tags: [negative, dhcp_snooping, database_write_delay] + +# - name: Assert DHCP database write delay string validation failed +# assert: +# that: +# - result_dhcp_write_delay_string.failed == true +# - "'must be of type integer' in result_dhcp_write_delay_string.msg" +# fail_msg: "DHCP database write delay string validation should have failed" +# success_msg: "DHCP database write delay string validation correctly failed" +# tags: [negative, dhcp_snooping, database_write_delay] + +# - name: Test DHCP database write delay validation - boolean value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ dhcp_snooping_vars_map.test_dhcp_snooping_database_write_delay_boolean 
}}" +# register: result_dhcp_write_delay_boolean +# ignore_errors: true +# tags: [negative, dhcp_snooping, database_write_delay] + +# - name: Assert DHCP database write delay boolean validation failed +# assert: +# that: +# - result_dhcp_write_delay_boolean.failed == true +# - "'must be of type integer' in result_dhcp_write_delay_boolean.msg" +# fail_msg: "DHCP database write delay boolean validation should have failed" +# success_msg: "DHCP database write delay boolean validation correctly failed" +# tags: [negative, dhcp_snooping, database_write_delay] + +# - name: Test DHCP database write delay validation - below minimum should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ dhcp_snooping_vars_map.test_dhcp_snooping_database_write_delay_min }}" +# register: result_dhcp_write_delay_min +# ignore_errors: true +# tags: [negative, dhcp_snooping, database_write_delay, range] + +# - name: Assert DHCP database write delay min validation failed +# assert: +# that: +# - result_dhcp_write_delay_min.failed == true +# - "'must be within the range' in result_dhcp_write_delay_min.msg" +# fail_msg: "DHCP database write delay min validation should have failed" +# success_msg: "DHCP database write delay min validation correctly failed" +# tags: [negative, dhcp_snooping, database_write_delay, range] + +# - name: Test DHCP database write delay validation - exceeds maximum should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ dhcp_snooping_vars_map.test_dhcp_snooping_database_write_delay_max }}" +# register: result_dhcp_write_delay_max +# ignore_errors: true +# tags: [negative, dhcp_snooping, database_write_delay, range] + +# - name: Assert DHCP database write delay max validation failed +# assert: +# that: +# - result_dhcp_write_delay_max.failed == true +# - "'must be within the range' in result_dhcp_write_delay_max.msg" +# fail_msg: "DHCP database write delay max validation should have failed" +# success_msg: "DHCP database write delay max validation correctly failed" +# tags: [negative, dhcp_snooping, database_write_delay, range] + +# # ############################################# +# # DHCP Proxy Bridge VLANs Tests +# # ############################################# + +# - name: Test DHCP proxy bridge VLANs validation - string value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ dhcp_snooping_vars_map.test_dhcp_snooping_proxy_bridge_vlans_string }}" +# register: result_dhcp_proxy_vlans_string +# ignore_errors: true +# tags: [negative, dhcp_snooping, proxy_bridge_vlans] + +# - name: Assert DHCP proxy bridge VLANs string validation failed +# assert: +# that: +# - result_dhcp_proxy_vlans_string.failed == true +# - "'must be a list' in result_dhcp_proxy_vlans_string.msg" +# fail_msg: "DHCP proxy bridge VLANs string validation should have failed" +# success_msg: "DHCP proxy bridge VLANs string validation correctly failed" +# tags: [negative, dhcp_snooping, proxy_bridge_vlans] + +# - name: Test DHCP proxy bridge VLANs validation - integer value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ dhcp_snooping_vars_map.test_dhcp_snooping_proxy_bridge_vlans_integer }}" +# register: result_dhcp_proxy_vlans_integer +# ignore_errors: true +# tags: [negative, dhcp_snooping, proxy_bridge_vlans] + +# - name: Assert DHCP proxy bridge 
VLANs integer validation failed +# assert: +# that: +# - result_dhcp_proxy_vlans_integer.failed == true +# - "'must be a list' in result_dhcp_proxy_vlans_integer.msg" +# fail_msg: "DHCP proxy bridge VLANs integer validation should have failed" +# success_msg: "DHCP proxy bridge VLANs integer validation correctly failed" +# tags: [negative, dhcp_snooping, proxy_bridge_vlans] + +# - name: Test DHCP proxy bridge VLANs validation - VLAN ID out of range (too low) should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ dhcp_snooping_vars_map.test_dhcp_snooping_proxy_bridge_vlans_range_low }}" +# register: result_dhcp_proxy_vlans_range_low +# ignore_errors: true +# tags: [negative, dhcp_snooping, proxy_bridge_vlans, range] + +# - name: Assert DHCP proxy bridge VLANs range low validation failed +# assert: +# that: +# - result_dhcp_proxy_vlans_range_low.failed == true +# - "'must be between 1 and 4094' in result_dhcp_proxy_vlans_range_low.msg" +# fail_msg: "DHCP proxy bridge VLANs range low validation should have failed" +# success_msg: "DHCP proxy bridge VLANs range low validation correctly failed" +# tags: [negative, dhcp_snooping, proxy_bridge_vlans, range] + +# - name: Test DHCP proxy bridge VLANs validation - VLAN ID out of range (too high) should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ dhcp_snooping_vars_map.test_dhcp_snooping_proxy_bridge_vlans_range_high }}" +# register: result_dhcp_proxy_vlans_range_high +# ignore_errors: true +# tags: [negative, dhcp_snooping, proxy_bridge_vlans, range] + +# - name: Assert DHCP proxy bridge VLANs range high validation failed +# assert: +# that: +# - result_dhcp_proxy_vlans_range_high.failed == true +# - "'must be between 1 and 4094' in result_dhcp_proxy_vlans_range_high.msg" +# fail_msg: "DHCP proxy bridge VLANs range high validation should have failed" +# success_msg: "DHCP proxy bridge VLANs range high validation correctly failed" +# tags: [negative, dhcp_snooping, proxy_bridge_vlans, range] + +# - name: Test DHCP proxy bridge VLANs - empty list (check for API bug) +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ dhcp_snooping_vars_map.test_dhcp_snooping_proxy_bridge_vlans_empty_list }}" +# register: result_dhcp_proxy_vlans_empty_list +# ignore_errors: true +# tags: +# [negative, dhcp_snooping, proxy_bridge_vlans, empty_list, api_bug_test] + +# - name: Display empty proxy bridge VLANs list test result (potential API bug) +# debug: +# msg: | +# Empty proxy bridge VLAN list test result: +# Failed: {{ result_dhcp_proxy_vlans_empty_list.failed }} +# Message: {{ result_dhcp_proxy_vlans_empty_list.msg | default('No error message') }} +# This test checks for potential API bug with empty proxy bridge VLAN lists +# tags: +# [negative, dhcp_snooping, proxy_bridge_vlans, empty_list, api_bug_test] + +# # ############################################# +# # Data Type and Structure Tests +# # ############################################# + +# - name: Test DHCP snooping structure validation - string instead of dict should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ dhcp_snooping_vars_map.test_dhcp_snooping_invalid_dict_type }}" +# register: result_dhcp_invalid_dict_type +# ignore_errors: true +# tags: [negative, dhcp_snooping, structure] + +# - name: Assert DHCP snooping invalid dict type 
validation failed +# assert: +# that: +# - result_dhcp_invalid_dict_type.failed == true +# - "'must be of type dictionary' in result_dhcp_invalid_dict_type.msg" +# fail_msg: "DHCP snooping invalid dict type validation should have failed" +# success_msg: "DHCP snooping invalid dict type validation correctly failed" +# tags: [negative, dhcp_snooping, structure] + +# - name: Test DHCP snooping structure validation - list instead of dict should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ dhcp_snooping_vars_map.test_dhcp_snooping_invalid_list_type }}" +# register: result_dhcp_invalid_list_type +# ignore_errors: true +# tags: [negative, dhcp_snooping, structure] + +# - name: Assert DHCP snooping invalid list type validation failed +# assert: +# that: +# - result_dhcp_invalid_list_type.failed == true +# - "'must be of type dictionary' in result_dhcp_invalid_list_type.msg" +# fail_msg: "DHCP snooping invalid list type validation should have failed" +# success_msg: "DHCP snooping invalid list type validation correctly failed" +# tags: [negative, dhcp_snooping, structure] + +# - name: Display negative test summary +# debug: +# msg: "DHCP Snooping negative validation tests completed successfully" +# tags: [negative, dhcp_snooping] + +# # ============================================================================= +# # POSITIVE VALIDATION TESTS FOR DHCP SNOOPING +# # ============================================================================= + +# # ############################################# +# # DHCP Snooping Creation Tests +# # ############################################# + +# - name: Test DHCP snooping configuration with admin status only +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ dhcp_snooping_vars_map.test_dhcp_snooping_create_admin_status_only }}" +# register: result_dhcp_create_admin_status_only +# tags: [positive, dhcp_snooping, create, admin_status] + +# - name: Assert DHCP snooping admin status only configuration succeeded +# assert: +# that: +# - result_dhcp_create_admin_status_only.failed == false +# - result_dhcp_create_admin_status_only.changed == true +# - "'Successfully' in result_dhcp_create_admin_status_only.response" +# fail_msg: "DHCP snooping admin status only configuration should have succeeded" +# success_msg: "DHCP snooping admin status only configuration succeeded" +# tags: [positive, dhcp_snooping, create, admin_status] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, dhcp_snooping, create] + +# - name: Test DHCP snooping configuration with VLANs only +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ dhcp_snooping_vars_map.test_dhcp_snooping_create_vlans_only }}" +# register: result_dhcp_create_vlans_only +# tags: [positive, dhcp_snooping, create, vlans] + +# - name: Assert DHCP snooping VLANs only configuration succeeded +# assert: +# that: +# - result_dhcp_create_vlans_only.failed == false +# - result_dhcp_create_vlans_only.changed == true +# - "'Successfully' in result_dhcp_create_vlans_only.response" +# fail_msg: "DHCP snooping VLANs only configuration should have succeeded" +# success_msg: "DHCP snooping VLANs only configuration succeeded" +# tags: [positive, dhcp_snooping, create, vlans] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, dhcp_snooping, create] + +# - 
name: Test DHCP snooping configuration with glean only +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ dhcp_snooping_vars_map.test_dhcp_snooping_create_glean_only }}" +# register: result_dhcp_create_glean_only +# tags: [positive, dhcp_snooping, create, glean] + +# - name: Assert DHCP snooping glean only configuration succeeded +# assert: +# that: +# - result_dhcp_create_glean_only.failed == false +# - result_dhcp_create_glean_only.changed == true +# - "'Successfully' in result_dhcp_create_glean_only.response" +# fail_msg: "DHCP snooping glean only configuration should have succeeded" +# success_msg: "DHCP snooping glean only configuration succeeded" +# tags: [positive, dhcp_snooping, create, glean] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, dhcp_snooping, create] + +# - name: Test DHCP snooping configuration with database agent URL only +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ dhcp_snooping_vars_map.test_dhcp_snooping_create_database_url_only }}" +# register: result_dhcp_create_database_url_only +# tags: [positive, dhcp_snooping, create, database_url] + +# - name: Assert DHCP snooping database URL only configuration succeeded +# assert: +# that: +# - result_dhcp_create_database_url_only.failed == false +# - result_dhcp_create_database_url_only.changed == true +# - "'Successfully' in result_dhcp_create_database_url_only.response" +# fail_msg: "DHCP snooping database URL only configuration should have succeeded" +# success_msg: "DHCP snooping database URL only configuration succeeded" +# tags: [positive, dhcp_snooping, create, database_url] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, dhcp_snooping, create] + +# - name: Test DHCP snooping configuration with database timeout only +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ dhcp_snooping_vars_map.test_dhcp_snooping_create_database_timeout_only }}" +# register: result_dhcp_create_database_timeout_only +# tags: [positive, dhcp_snooping, create, database_timeout] + +# - name: Assert DHCP snooping database timeout only configuration succeeded +# assert: +# that: +# - result_dhcp_create_database_timeout_only.failed == false +# - result_dhcp_create_database_timeout_only.changed == true +# - "'Successfully' in result_dhcp_create_database_timeout_only.response" +# fail_msg: "DHCP snooping database timeout only configuration should have succeeded" +# success_msg: "DHCP snooping database timeout only configuration succeeded" +# tags: [positive, dhcp_snooping, create, database_timeout] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, dhcp_snooping, create] + +# - name: Test DHCP snooping configuration with database write delay only +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ dhcp_snooping_vars_map.test_dhcp_snooping_create_database_write_delay_only }}" +# register: result_dhcp_create_database_write_delay_only +# tags: [positive, dhcp_snooping, create, database_write_delay] + +# - name: Assert DHCP snooping database write delay only configuration succeeded +# assert: +# that: +# - result_dhcp_create_database_write_delay_only.failed == false +# - result_dhcp_create_database_write_delay_only.changed == true +# - "'Successfully' in 
result_dhcp_create_database_write_delay_only.response" +# fail_msg: "DHCP snooping database write delay only configuration should have succeeded" +# success_msg: "DHCP snooping database write delay only configuration succeeded" +# tags: [positive, dhcp_snooping, create, database_write_delay] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, dhcp_snooping, create] + +# - name: Test DHCP snooping configuration with proxy bridge VLANs only +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ dhcp_snooping_vars_map.test_dhcp_snooping_create_proxy_bridge_vlans_only }}" +# register: result_dhcp_create_proxy_bridge_vlans_only +# tags: [positive, dhcp_snooping, create, proxy_bridge_vlans] + +# - name: Assert DHCP snooping proxy bridge VLANs only configuration succeeded +# assert: +# that: +# - result_dhcp_create_proxy_bridge_vlans_only.failed == false +# - result_dhcp_create_proxy_bridge_vlans_only.changed == true +# - "'Successfully' in result_dhcp_create_proxy_bridge_vlans_only.response" +# fail_msg: "DHCP snooping proxy bridge VLANs only configuration should have succeeded" +# success_msg: "DHCP snooping proxy bridge VLANs only configuration succeeded" +# tags: [positive, dhcp_snooping, create, proxy_bridge_vlans] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, dhcp_snooping, create] + +# - name: Test DHCP snooping configuration with all parameters +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ dhcp_snooping_vars_map.test_dhcp_snooping_create_all_params }}" +# register: result_dhcp_create_all_params +# tags: [positive, dhcp_snooping, create, comprehensive] + +# - name: Assert DHCP snooping all parameters configuration succeeded +# assert: +# that: +# - result_dhcp_create_all_params.failed == false +# - result_dhcp_create_all_params.changed == true +# - "'Successfully' in result_dhcp_create_all_params.response" +# fail_msg: "DHCP snooping all parameters configuration should have succeeded" +# success_msg: "DHCP snooping all parameters configuration succeeded" +# tags: [positive, dhcp_snooping, create, comprehensive] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 15 +# tags: [positive, dhcp_snooping, create] + +# # ############################################# +# # DHCP Snooping Update Tests +# # ############################################# + +# - name: Test DHCP snooping update - disable admin status +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ dhcp_snooping_vars_map.test_dhcp_snooping_update_disable_admin }}" +# register: result_dhcp_update_disable_admin +# tags: [positive, dhcp_snooping, update, admin_status] + +# - name: Assert DHCP snooping disable admin status update succeeded +# assert: +# that: +# - result_dhcp_update_disable_admin.failed == false +# - result_dhcp_update_disable_admin.changed == true +# - "'Successfully' in result_dhcp_update_disable_admin.response" +# fail_msg: "DHCP snooping disable admin status update should have succeeded" +# success_msg: "DHCP snooping disable admin status update succeeded" +# tags: [positive, dhcp_snooping, update, admin_status] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, dhcp_snooping, update] + +# - name: Test DHCP snooping update - update VLANs list +# 
cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ dhcp_snooping_vars_map.test_dhcp_snooping_update_vlans }}" +# register: result_dhcp_update_vlans +# tags: [positive, dhcp_snooping, update, vlans] +# ignore_errors: true + +# - name: Assert DHCP snooping VLANs update failed as expected +# assert: +# that: +# - result_dhcp_update_vlans.failed == true +# - result_dhcp_update_vlans.changed == false +# - "'Must match ASCII character' in result_dhcp_update_vlans.response" +# fail_msg: "DHCP snooping VLANs update should have failed with the 'Must match ASCII character' error" +# success_msg: "DHCP snooping VLANs update correctly failed with the expected error" +# tags: [positive, dhcp_snooping, update, vlans] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, dhcp_snooping, update] + +# - name: Test DHCP snooping update - disable glean +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ dhcp_snooping_vars_map.test_dhcp_snooping_update_disable_glean }}" +# register: result_dhcp_update_disable_glean +# tags: [positive, dhcp_snooping, update, glean] + +# - name: Assert DHCP snooping disable glean update succeeded +# assert: +# that: +# - result_dhcp_update_disable_glean.failed == false +# - result_dhcp_update_disable_glean.changed == true +# - "'Successfully' in result_dhcp_update_disable_glean.response" +# fail_msg: "DHCP snooping disable glean update should have succeeded" +# success_msg: "DHCP snooping disable glean update succeeded" +# tags: [positive, dhcp_snooping, update, glean] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, dhcp_snooping, update] + +# - name: Test DHCP snooping update - update database agent URL +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ dhcp_snooping_vars_map.test_dhcp_snooping_update_database_url }}" +# register: result_dhcp_update_database_url +# tags: [positive, dhcp_snooping, update, database_url] + +# - name: Assert DHCP snooping database URL update succeeded +# assert: +# that: +# - result_dhcp_update_database_url.failed == false +# - result_dhcp_update_database_url.changed == true +# - "'Successfully' in result_dhcp_update_database_url.response" +# fail_msg: "DHCP snooping database URL update should have succeeded" +# success_msg: "DHCP snooping database URL update succeeded" +# tags: [positive, dhcp_snooping, update, database_url] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, dhcp_snooping, update] + +# - name: Test DHCP snooping update - update database timeout +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ dhcp_snooping_vars_map.test_dhcp_snooping_update_database_timeout }}" +# register: result_dhcp_update_database_timeout +# tags: [positive, dhcp_snooping, update, database_timeout] + +# - name: Assert DHCP snooping database timeout update succeeded +# assert: +# that: +# - result_dhcp_update_database_timeout.failed == false +# - result_dhcp_update_database_timeout.changed == true +# - "'Successfully' in result_dhcp_update_database_timeout.response" +# fail_msg: "DHCP snooping database timeout update should have succeeded" +# success_msg: "DHCP snooping database timeout update succeeded" +# tags: [positive, dhcp_snooping, update, database_timeout] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, 
dhcp_snooping, update] + +# - name: Test DHCP snooping update - update database write delay +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ dhcp_snooping_vars_map.test_dhcp_snooping_update_database_write_delay }}" +# register: result_dhcp_update_database_write_delay +# tags: [positive, dhcp_snooping, update, database_write_delay] + +# - name: Assert DHCP snooping database write delay update succeeded +# assert: +# that: +# - result_dhcp_update_database_write_delay.failed == false +# - result_dhcp_update_database_write_delay.changed == true +# - "'Successfully' in result_dhcp_update_database_write_delay.response" +# fail_msg: "DHCP snooping database write delay update should have succeeded" +# success_msg: "DHCP snooping database write delay update succeeded" +# tags: [positive, dhcp_snooping, update, database_write_delay] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, dhcp_snooping, update] + +# - name: Test DHCP snooping update - update proxy bridge VLANs +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ dhcp_snooping_vars_map.test_dhcp_snooping_update_proxy_bridge_vlans }}" +# register: result_dhcp_update_proxy_bridge_vlans +# tags: [positive, dhcp_snooping, update, proxy_bridge_vlans] + +# - name: Assert DHCP snooping proxy bridge VLANs update succeeded +# assert: +# that: +# - result_dhcp_update_proxy_bridge_vlans.failed == false +# - result_dhcp_update_proxy_bridge_vlans.changed == true +# - "'Successfully' in result_dhcp_update_proxy_bridge_vlans.response" +# fail_msg: "DHCP snooping proxy bridge VLANs update should have succeeded" +# success_msg: "DHCP snooping proxy bridge VLANs update succeeded" +# tags: [positive, dhcp_snooping, update, proxy_bridge_vlans] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, dhcp_snooping, update] + +# - name: Test DHCP snooping update - comprehensive update +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ dhcp_snooping_vars_map.test_dhcp_snooping_update_comprehensive }}" +# register: result_dhcp_update_comprehensive +# tags: [positive, dhcp_snooping, update, comprehensive] + +# - name: Assert DHCP snooping comprehensive update succeeded +# assert: +# that: +# - result_dhcp_update_comprehensive.failed == false +# - result_dhcp_update_comprehensive.changed == true +# - "'Successfully' in result_dhcp_update_comprehensive.response" +# fail_msg: "DHCP snooping comprehensive update should have succeeded" +# success_msg: "DHCP snooping comprehensive update succeeded" +# tags: [positive, dhcp_snooping, update, comprehensive] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 15 +# tags: [positive, dhcp_snooping, update] + +# # ############################################# +# # DHCP Snooping Reset Tests +# # ############################################# + +# - name: Test DHCP snooping reset - empty configuration (deleted state resets to defaults) +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: deleted +# config: +# - "{{ dhcp_snooping_vars_map.test_dhcp_snooping_reset_empty }}" +# register: result_dhcp_reset_empty +# tags: [positive, dhcp_snooping, reset, deleted] + +# - name: Assert DHCP snooping empty reset succeeded +# assert: +# that: +# - result_dhcp_reset_empty.failed == false +# - 
result_dhcp_reset_empty.changed == true +# - "'Successfully' in result_dhcp_reset_empty.response" +# fail_msg: "DHCP snooping empty reset should have succeeded" +# success_msg: "DHCP snooping empty reset succeeded" +# tags: [positive, dhcp_snooping, reset, deleted] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 15 +# tags: [positive, dhcp_snooping, reset] + +# - name: Test DHCP snooping reset - with existing parameters (deleted state resets to defaults) +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: deleted +# config: +# - "{{ dhcp_snooping_vars_map.test_dhcp_snooping_reset_with_params }}" +# register: result_dhcp_reset_with_params +# tags: [positive, dhcp_snooping, reset, deleted] + +# - name: Assert DHCP snooping reset with params succeeded +# assert: +# that: +# - result_dhcp_reset_with_params.failed == false +# - result_dhcp_reset_with_params.changed == true +# - "'Successfully' in result_dhcp_reset_with_params.response" +# fail_msg: "DHCP snooping reset with params should have succeeded" +# success_msg: "DHCP snooping reset with params succeeded" +# tags: [positive, dhcp_snooping, reset, deleted] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 15 +# tags: [positive, dhcp_snooping, reset] + +# # ############################################# +# # Special Configuration Tests +# # ############################################# + +# - name: Test DHCP snooping with URL set to empty string should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ dhcp_snooping_vars_map.test_dhcp_snooping_special_url_empty_string }}" +# register: result_dhcp_snooping_special_url_empty_string +# tags: [positive, dhcp_snooping, special, database_only] +# ignore_errors: true + +# - name: Assert DHCP snooping empty database URL correctly failed +# assert: +# that: +# - result_dhcp_snooping_special_url_empty_string.failed == true +# - result_dhcp_snooping_special_url_empty_string.changed == false +# - "'ASCII' in result_dhcp_snooping_special_url_empty_string.response" +# fail_msg: "DHCP snooping configuration with an empty database URL should have failed" +# success_msg: "DHCP snooping configuration with an empty database URL correctly failed" +# tags: [positive, dhcp_snooping, special, database_only] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, dhcp_snooping, special] + +# - name: Test DHCP snooping with VLANs empty list (check for API bug) +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ dhcp_snooping_vars_map.test_dhcp_snooping_special_vlan_empty_list }}" +# register: result_dhcp_special_vlan_only +# tags: [positive, dhcp_snooping, special, vlan_only] +# ignore_errors: true + +# - name: Assert DHCP snooping empty VLAN list correctly failed +# assert: +# that: +# - result_dhcp_special_vlan_only.failed == true +# - result_dhcp_special_vlan_only.changed == false +# - "'ASCII' in result_dhcp_special_vlan_only.response" +# fail_msg: "DHCP snooping configuration with an empty VLAN list should have failed" +# success_msg: "DHCP snooping configuration with an empty VLAN list correctly failed" +# tags: [positive, dhcp_snooping, special, vlan_only] + +# # ############################################# +# # Cleanup Operations +# # ############################################# + +# - name: Cleanup - Reset DHCP snooping to default configuration +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: 
*dnac_login +# state: deleted +# config: +# - "{{ dhcp_snooping_vars_map.test_dhcp_snooping_reset_empty }}" +# register: result_dhcp_cleanup +# tags: [positive, dhcp_snooping, cleanup] + +# - name: Assert DHCP snooping cleanup succeeded +# assert: +# that: +# - result_dhcp_cleanup.failed == false +# - result_dhcp_cleanup.changed == true +# - "'Successfully' in result_dhcp_cleanup.response" +# fail_msg: "DHCP snooping cleanup should have succeeded" +# success_msg: "DHCP snooping cleanup succeeded" +# tags: [positive, dhcp_snooping, cleanup] diff --git a/tests/integration/ccc_wired_campus_automation_management/tests/test_wca_feature_igmp_snooping.yml b/tests/integration/ccc_wired_campus_automation_management/tests/test_wca_feature_igmp_snooping.yml new file mode 100644 index 0000000000..f596cc027e --- /dev/null +++ b/tests/integration/ccc_wired_campus_automation_management/tests/test_wca_feature_igmp_snooping.yml @@ -0,0 +1,954 @@ +# --- +# # =================================================================================================== +# # IGMP SNOOPING FEATURE TESTS SUMMARY +# # =================================================================================================== +# # +# # This test suite validates IGMP Snooping configuration for Wired Campus Automation. +# # +# # TEST CATEGORIES: +# # +# # 1. NEGATIVE VALIDATION TESTS (Lines 25-820) +# # a) IGMP Global Configuration Tests +# # - Global enabled validation (string/integer instead of boolean) +# # - Global querier enabled validation (string/integer instead of boolean) +# # - Global querier address validation (integer instead of string) +# # - Global querier version validation (invalid choice, integer instead of string) +# # - Global query interval validation (string/boolean instead of integer, range validation) +# # +# # b) IGMP VLAN Configuration Tests +# # - VLAN ID validation (string instead of integer, negative/zero/max values) +# # - Missing required VLAN ID validation +# # - VLAN enabled validation (string/integer instead of boolean) +# # - VLAN querier enabled validation (string/integer instead of boolean) +# # - VLAN querier address validation (integer instead of string) +# # - VLAN querier version validation (invalid choice) +# # - VLAN query interval validation (string instead of integer, range validation) +# # - VLAN mrouter port list validation (string instead of list, integer in list) +# # +# # c) Data Type and Structure Tests +# # - IGMP configuration validation (string/list instead of dictionary) +# # - IGMP VLANs validation (string/dictionary instead of list) +# # - IGMP VLAN item validation (string instead of dictionary) +# # +# # d) Duplicate and Conflict Tests +# # - Duplicate VLAN IDs validation +# # +# # 2. 
POSITIVE VALIDATION TESTS (Lines 820-1200) +# # a) IGMP Creation Tests (Merged State) +# # - Global parameters only configuration +# # - VLANs only configuration +# # - Single VLAN configuration +# # - Global and VLANs combined configuration +# # - Minimal global parameters configuration +# # - Minimal VLAN parameters configuration +# # - Multiple VLANs with different settings +# # +# # b) IGMP Update Tests (Merged State) +# # - Global parameters only updates +# # - Adding new VLANs +# # - Modifying existing VLANs +# # - Global and VLANs combined updates +# # - Global enablement state changes +# # - Single VLAN updates +# # +# # c) Boundary Value Tests +# # - Minimum query interval values (global and VLAN) +# # - Maximum query interval values (global and VLAN) +# # - Minimum VLAN ID (1) +# # - Maximum VLAN ID (4094) +# # +# # VALIDATION RANGES: +# # - IGMP Global Enabled: boolean (true/false) +# # - IGMP Global Querier Enabled: boolean (true/false) +# # - IGMP Global Querier Address: string (valid IP address format) +# # - IGMP Global Querier Version: string ("VERSION_1", "VERSION_2", "VERSION_3") +# # - IGMP Global Query Interval: integer (1-18000 seconds) +# # - IGMP VLAN ID: integer (1-4094) +# # - IGMP VLAN Enabled: boolean (true/false) +# # - IGMP VLAN Querier Enabled: boolean (true/false) +# # - IGMP VLAN Querier Address: string (valid IP address format) +# # - IGMP VLAN Querier Version: string ("VERSION_1", "VERSION_2", "VERSION_3") +# # - IGMP VLAN Query Interval: integer (1-18000 seconds) +# # - IGMP VLAN Mrouter Port List: list of strings (interface names) +# # +# # EXPECTED BEHAVIORS: +# # - Negative tests should fail with appropriate error messages +# # - Positive tests should succeed with changed=true and response defined +# # - Global and VLAN configurations can be applied independently +# # - Multiple VLANs can be configured with different settings +# # - VLAN ID is required when configuring VLAN-specific settings +# # - Duplicate VLAN IDs within same configuration should be rejected +# # - All boolean fields reject string/integer values +# # - All integer fields reject string/boolean values +# # - Range validation enforced for numeric fields +# # - Structure validation enforced for complex data types +# # +# # =================================================================================================== + +# - debug: msg="Starting IGMP Snooping feature tests for Wired Campus Automation" +# - debug: msg="Role Path {{ role_path }}" + +# - block: +# - name: Load IGMP Snooping test variables +# include_vars: +# file: "{{ role_path }}/vars/vars_wca_test_feature_igmp_snooping.yml" +# name: igmp_vars_map +# vars: +# dnac_login: &dnac_login +# dnac_host: "{{ dnac_host }}" +# dnac_username: "{{ dnac_username }}" +# dnac_password: "{{ dnac_password }}" +# dnac_verify: "{{ dnac_verify }}" +# dnac_port: "{{ dnac_port }}" +# dnac_version: "{{ dnac_version }}" +# dnac_debug: "{{ dnac_debug }}" +# dnac_log: true +# dnac_log_level: DEBUG +# config_verify: true +# dnac_api_task_timeout: "{{ dnac_api_task_timeout }}" +# dnac_log_append: false + +# ################################################################################################################### +# # Negative Validation Tests for IGMP Snooping + +# ############################################# +# # IGMP Global Configuration Tests # +# ############################################# + +# - name: Test IGMP global enabled validation - string value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: 
*dnac_login +# state: merged +# config: +# - "{{ igmp_vars_map.test_igmp_global_enabled_string }}" +# register: result_igmp_global_enabled_string +# ignore_errors: yes +# tags: [negative, igmp, global, enabled] + +# - name: Assert IGMP global enabled string validation failed +# assert: +# that: +# - result_igmp_global_enabled_string.failed == true +# - "'must be of type boolean' in result_igmp_global_enabled_string.msg" +# fail_msg: "IGMP global enabled string validation should have failed" +# success_msg: "IGMP global enabled string validation correctly failed" +# tags: [negative, igmp, global, enabled] + +# - name: Test IGMP global enabled validation - integer value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ igmp_vars_map.test_igmp_global_enabled_integer }}" +# register: result_igmp_global_enabled_integer +# ignore_errors: yes +# tags: [negative, igmp, global, enabled] + +# - name: Assert IGMP global enabled integer validation failed +# assert: +# that: +# - result_igmp_global_enabled_integer.failed == true +# - "'must be of type boolean' in result_igmp_global_enabled_integer.msg" +# fail_msg: "IGMP global enabled integer validation should have failed" +# success_msg: "IGMP global enabled integer validation correctly failed" +# tags: [negative, igmp, global, enabled] + +# - name: Test IGMP global querier enabled validation - string value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ igmp_vars_map.test_igmp_global_querier_enabled_string }}" +# register: result_igmp_global_querier_enabled_string +# ignore_errors: yes +# tags: [negative, igmp, global, querier] + +# - name: Assert IGMP global querier enabled string validation failed +# assert: +# that: +# - result_igmp_global_querier_enabled_string.failed == true +# - "'must be of type boolean' in result_igmp_global_querier_enabled_string.msg" +# fail_msg: "IGMP global querier enabled string validation should have failed" +# success_msg: "IGMP global querier enabled string validation correctly failed" +# tags: [negative, igmp, global, querier] + +# - name: Test IGMP global querier enabled validation - integer value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ igmp_vars_map.test_igmp_global_querier_enabled_integer }}" +# register: result_igmp_global_querier_enabled_integer +# ignore_errors: yes +# tags: [negative, igmp, global, querier] + +# - name: Assert IGMP global querier enabled integer validation failed +# assert: +# that: +# - result_igmp_global_querier_enabled_integer.failed == true +# - "'must be of type boolean' in result_igmp_global_querier_enabled_integer.msg" +# fail_msg: "IGMP global querier enabled integer validation should have failed" +# success_msg: "IGMP global querier enabled integer validation correctly failed" +# tags: [negative, igmp, global, querier] + +# - name: Test IGMP global querier address validation - integer value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ igmp_vars_map.test_igmp_global_querier_address_integer }}" +# register: result_igmp_global_querier_address_integer +# ignore_errors: yes +# tags: [negative, igmp, global, querier_address] + +# - name: Assert IGMP global querier address integer validation failed +# assert: +# that: +# - result_igmp_global_querier_address_integer.failed == true +# 
- "'must be of type string' in result_igmp_global_querier_address_integer.msg" +# fail_msg: "IGMP global querier address integer validation should have failed" +# success_msg: "IGMP global querier address integer validation correctly failed" +# tags: [negative, igmp, global, querier_address] + +# - name: Test IGMP global querier version validation - invalid choice should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ igmp_vars_map.test_igmp_global_querier_version_invalid }}" +# register: result_igmp_global_querier_version_invalid +# ignore_errors: yes +# tags: [negative, igmp, global, querier_version] + +# - name: Assert IGMP global querier version invalid validation failed +# assert: +# that: +# - result_igmp_global_querier_version_invalid.failed == true +# - "'not a valid choice' in result_igmp_global_querier_version_invalid.msg or 'VERSION_4' in result_igmp_global_querier_version_invalid.msg" +# fail_msg: "IGMP global querier version invalid validation should have failed" +# success_msg: "IGMP global querier version invalid validation correctly failed" +# tags: [negative, igmp, global, querier_version] + +# - name: Test IGMP global querier version validation - integer value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ igmp_vars_map.test_igmp_global_querier_version_integer }}" +# register: result_igmp_global_querier_version_integer +# ignore_errors: yes +# tags: [negative, igmp, global, querier_version] + +# - name: Assert IGMP global querier version integer validation failed +# assert: +# that: +# - result_igmp_global_querier_version_integer.failed == true +# - "'must be of type string' in result_igmp_global_querier_version_integer.msg" +# fail_msg: "IGMP global querier version integer validation should have failed" +# success_msg: "IGMP global querier version integer validation correctly failed" +# tags: [negative, igmp, global, querier_version] + +# - name: Test IGMP global query interval validation - string value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ igmp_vars_map.test_igmp_global_query_interval_string }}" +# register: result_igmp_global_query_interval_string +# ignore_errors: yes +# tags: [negative, igmp, global, query_interval] + +# - name: Assert IGMP global query interval string validation failed +# assert: +# that: +# - result_igmp_global_query_interval_string.failed == true +# - "'must be of type integer' in result_igmp_global_query_interval_string.msg" +# fail_msg: "IGMP global query interval string validation should have failed" +# success_msg: "IGMP global query interval string validation correctly failed" +# tags: [negative, igmp, global, query_interval] + +# - name: Test IGMP global query interval validation - boolean value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ igmp_vars_map.test_igmp_global_query_interval_boolean }}" +# register: result_igmp_global_query_interval_boolean +# ignore_errors: yes +# tags: [negative, igmp, global, query_interval] + +# - name: Assert IGMP global query interval boolean validation failed +# assert: +# that: +# - result_igmp_global_query_interval_boolean.failed == true +# - "'must be of type integer' in result_igmp_global_query_interval_boolean.msg" +# fail_msg: "IGMP global query interval boolean validation should have failed" +# 
success_msg: "IGMP global query interval boolean validation correctly failed" +# tags: [negative, igmp, global, query_interval] + +# - name: Test IGMP global query interval validation - below minimum should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ igmp_vars_map.test_igmp_global_query_interval_min }}" +# register: result_igmp_global_query_interval_min +# ignore_errors: yes +# tags: [negative, igmp, global, query_interval, range] + +# - name: Assert IGMP global query interval minimum validation failed +# assert: +# that: +# - result_igmp_global_query_interval_min.failed == true +# - "'must be within the range' in result_igmp_global_query_interval_min.msg or 'query_interval' in result_igmp_global_query_interval_min.msg" +# fail_msg: "IGMP global query interval minimum validation should have failed" +# success_msg: "IGMP global query interval minimum validation correctly failed" +# tags: [negative, igmp, global, query_interval, range] + +# - name: Test IGMP global query interval validation - exceeds maximum should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ igmp_vars_map.test_igmp_global_query_interval_max }}" +# register: result_igmp_global_query_interval_max +# ignore_errors: yes +# tags: [negative, igmp, global, query_interval, range] + +# - name: Assert IGMP global query interval maximum validation failed +# assert: +# that: +# - result_igmp_global_query_interval_max.failed == true +# - "'must be within the range' in result_igmp_global_query_interval_max.msg or 'query_interval' in result_igmp_global_query_interval_max.msg" +# fail_msg: "IGMP global query interval maximum validation should have failed" +# success_msg: "IGMP global query interval maximum validation correctly failed" +# tags: [negative, igmp, global, query_interval, range] + +# ############################################# +# # IGMP VLAN Configuration Tests # +# ############################################# + +# - name: Test IGMP VLAN ID validation - string value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ igmp_vars_map.test_igmp_vlan_id_string }}" +# register: result_igmp_vlan_id_string +# ignore_errors: yes +# tags: [negative, igmp, vlan, id] + +# - name: Assert IGMP VLAN ID string validation failed +# assert: +# that: +# - result_igmp_vlan_id_string.failed == true +# - "'must be of type integer' in result_igmp_vlan_id_string.msg" +# fail_msg: "IGMP VLAN ID string validation should have failed" +# success_msg: "IGMP VLAN ID string validation correctly failed" +# tags: [negative, igmp, vlan, id] + +# - name: Test IGMP VLAN ID validation - negative value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ igmp_vars_map.test_igmp_vlan_id_negative }}" +# register: result_igmp_vlan_id_negative +# ignore_errors: yes +# tags: [negative, igmp, vlan, id, range] + +# - name: Assert IGMP VLAN ID negative validation failed +# assert: +# that: +# - result_igmp_vlan_id_negative.failed == true +# - "'must be within the range' in result_igmp_vlan_id_negative.msg or 'vlan_id' in result_igmp_vlan_id_negative.msg" +# fail_msg: "IGMP VLAN ID negative validation should have failed" +# success_msg: "IGMP VLAN ID negative validation correctly failed" +# tags: [negative, igmp, vlan, id, range] + +# - name: Test IGMP VLAN ID validation - exceeds maximum 
should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ igmp_vars_map.test_igmp_vlan_id_max }}" +# register: result_igmp_vlan_id_max +# ignore_errors: yes +# tags: [negative, igmp, vlan, id, range] + +# - name: Assert IGMP VLAN ID maximum validation failed +# assert: +# that: +# - result_igmp_vlan_id_max.failed == true +# - "'must be within the range' in result_igmp_vlan_id_max.msg or 'vlan_id' in result_igmp_vlan_id_max.msg" +# fail_msg: "IGMP VLAN ID maximum validation should have failed" +# success_msg: "IGMP VLAN ID maximum validation correctly failed" +# tags: [negative, igmp, vlan, id, range] + +# - name: Test IGMP VLAN ID validation - zero value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ igmp_vars_map.test_igmp_vlan_id_zero }}" +# register: result_igmp_vlan_id_zero +# ignore_errors: yes +# tags: [negative, igmp, vlan, id, range] + +# - name: Assert IGMP VLAN ID zero validation failed +# assert: +# that: +# - result_igmp_vlan_id_zero.failed == true +# - "'must be within the range' in result_igmp_vlan_id_zero.msg or 'vlan_id' in result_igmp_vlan_id_zero.msg" +# fail_msg: "IGMP VLAN ID zero validation should have failed" +# success_msg: "IGMP VLAN ID zero validation correctly failed" +# tags: [negative, igmp, vlan, id, range] + +# - name: Test missing required IGMP VLAN ID +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ igmp_vars_map.test_igmp_vlan_id_missing }}" +# register: result_igmp_vlan_id_missing +# ignore_errors: yes +# tags: [negative, igmp, vlan, id, required] + +# - name: Assert missing IGMP VLAN ID validation failed +# assert: +# that: +# - result_igmp_vlan_id_missing.failed == true +# - "'igmp_snooping_vlan_id' in result_igmp_vlan_id_missing.msg or 'required' in result_igmp_vlan_id_missing.msg" +# fail_msg: "Missing IGMP VLAN ID validation should have failed" +# success_msg: "Missing IGMP VLAN ID validation correctly failed" +# tags: [negative, igmp, vlan, id, required] + +# - name: Test IGMP VLAN enabled validation - string value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ igmp_vars_map.test_igmp_vlan_enabled_string }}" +# register: result_igmp_vlan_enabled_string +# ignore_errors: yes +# tags: [negative, igmp, vlan, enabled] + +# - name: Assert IGMP VLAN enabled string validation failed +# assert: +# that: +# - result_igmp_vlan_enabled_string.failed == true +# - "'must be of type boolean' in result_igmp_vlan_enabled_string.msg" +# fail_msg: "IGMP VLAN enabled string validation should have failed" +# success_msg: "IGMP VLAN enabled string validation correctly failed" +# tags: [negative, igmp, vlan, enabled] + +# - name: Test IGMP VLAN enabled validation - integer value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ igmp_vars_map.test_igmp_vlan_enabled_integer }}" +# register: result_igmp_vlan_enabled_integer +# ignore_errors: yes +# tags: [negative, igmp, vlan, enabled] + +# - name: Assert IGMP VLAN enabled integer validation failed +# assert: +# that: +# - result_igmp_vlan_enabled_integer.failed == true +# - "'must be of type boolean' in result_igmp_vlan_enabled_integer.msg" +# fail_msg: "IGMP VLAN enabled integer validation should have failed" +# success_msg: "IGMP VLAN enabled integer validation correctly 
failed" +# tags: [negative, igmp, vlan, enabled] + +# - name: Test IGMP VLAN querier enabled validation - string value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ igmp_vars_map.test_igmp_vlan_querier_enabled_string }}" +# register: result_igmp_vlan_querier_enabled_string +# ignore_errors: yes +# tags: [negative, igmp, vlan, querier] + +# - name: Assert IGMP VLAN querier enabled string validation failed +# assert: +# that: +# - result_igmp_vlan_querier_enabled_string.failed == true +# - "'must be of type boolean' in result_igmp_vlan_querier_enabled_string.msg" +# fail_msg: "IGMP VLAN querier enabled string validation should have failed" +# success_msg: "IGMP VLAN querier enabled string validation correctly failed" +# tags: [negative, igmp, vlan, querier] + +# - name: Test IGMP VLAN querier enabled validation - integer value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ igmp_vars_map.test_igmp_vlan_querier_enabled_integer }}" +# register: result_igmp_vlan_querier_enabled_integer +# ignore_errors: yes +# tags: [negative, igmp, vlan, querier] + +# - name: Assert IGMP VLAN querier enabled integer validation failed +# assert: +# that: +# - result_igmp_vlan_querier_enabled_integer.failed == true +# - "'must be of type boolean' in result_igmp_vlan_querier_enabled_integer.msg" +# fail_msg: "IGMP VLAN querier enabled integer validation should have failed" +# success_msg: "IGMP VLAN querier enabled integer validation correctly failed" +# tags: [negative, igmp, vlan, querier] + +# - name: Test IGMP VLAN querier address validation - integer value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ igmp_vars_map.test_igmp_vlan_querier_address_integer }}" +# register: result_igmp_vlan_querier_address_integer +# ignore_errors: yes +# tags: [negative, igmp, vlan, querier_address] + +# - name: Assert IGMP VLAN querier address integer validation failed +# assert: +# that: +# - result_igmp_vlan_querier_address_integer.failed == true +# - "'must be of type string' in result_igmp_vlan_querier_address_integer.msg" +# fail_msg: "IGMP VLAN querier address integer validation should have failed" +# success_msg: "IGMP VLAN querier address integer validation correctly failed" +# tags: [negative, igmp, vlan, querier_address] + +# - name: Test IGMP VLAN querier version validation - invalid choice should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ igmp_vars_map.test_igmp_vlan_querier_version_invalid }}" +# register: result_igmp_vlan_querier_version_invalid +# ignore_errors: yes +# tags: [negative, igmp, vlan, querier_version] + +# - name: Assert IGMP VLAN querier version invalid validation failed +# assert: +# that: +# - result_igmp_vlan_querier_version_invalid.failed == true +# - "'not a valid choice' in result_igmp_vlan_querier_version_invalid.msg or 'VERSION_4' in result_igmp_vlan_querier_version_invalid.msg" +# fail_msg: "IGMP VLAN querier version invalid validation should have failed" +# success_msg: "IGMP VLAN querier version invalid validation correctly failed" +# tags: [negative, igmp, vlan, querier_version] + +# - name: Test IGMP VLAN query interval validation - string value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ 
igmp_vars_map.test_igmp_vlan_query_interval_string }}" +# register: result_igmp_vlan_query_interval_string +# ignore_errors: yes +# tags: [negative, igmp, vlan, query_interval] + +# - name: Assert IGMP VLAN query interval string validation failed +# assert: +# that: +# - result_igmp_vlan_query_interval_string.failed == true +# - "'must be of type integer' in result_igmp_vlan_query_interval_string.msg" +# fail_msg: "IGMP VLAN query interval string validation should have failed" +# success_msg: "IGMP VLAN query interval string validation correctly failed" +# tags: [negative, igmp, vlan, query_interval] + +# - name: Test IGMP VLAN query interval validation - below minimum should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ igmp_vars_map.test_igmp_vlan_query_interval_min }}" +# register: result_igmp_vlan_query_interval_min +# ignore_errors: yes +# tags: [negative, igmp, vlan, query_interval, range] + +# - name: Assert IGMP VLAN query interval minimum validation failed +# assert: +# that: +# - result_igmp_vlan_query_interval_min.failed == true +# - "'must be within the range' in result_igmp_vlan_query_interval_min.msg or 'query_interval' in result_igmp_vlan_query_interval_min.msg" +# fail_msg: "IGMP VLAN query interval minimum validation should have failed" +# success_msg: "IGMP VLAN query interval minimum validation correctly failed" +# tags: [negative, igmp, vlan, query_interval, range] + +# - name: Test IGMP VLAN query interval validation - exceeds maximum should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ igmp_vars_map.test_igmp_vlan_query_interval_max }}" +# register: result_igmp_vlan_query_interval_max +# ignore_errors: yes +# tags: [negative, igmp, vlan, query_interval, range] + +# - name: Assert IGMP VLAN query interval maximum validation failed +# assert: +# that: +# - result_igmp_vlan_query_interval_max.failed == true +# - "'must be within the range' in result_igmp_vlan_query_interval_max.msg or 'query_interval' in result_igmp_vlan_query_interval_max.msg" +# fail_msg: "IGMP VLAN query interval maximum validation should have failed" +# success_msg: "IGMP VLAN query interval maximum validation correctly failed" +# tags: [negative, igmp, vlan, query_interval, range] + +# - name: Test IGMP VLAN mrouter port list validation - string instead of list should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ igmp_vars_map.test_igmp_vlan_mrouter_port_list_string }}" +# register: result_igmp_vlan_mrouter_port_list_string +# ignore_errors: yes +# tags: [negative, igmp, vlan, mrouter_port_list] + +# - name: Assert IGMP VLAN mrouter port list string validation failed +# assert: +# that: +# - result_igmp_vlan_mrouter_port_list_string.failed == true +# - "'must be of type list' in result_igmp_vlan_mrouter_port_list_string.msg" +# fail_msg: "IGMP VLAN mrouter port list string validation should have failed" +# success_msg: "IGMP VLAN mrouter port list string validation correctly failed" +# tags: [negative, igmp, vlan, mrouter_port_list] + +# ############################################# +# # Data Type and Structure Tests # +# ############################################# + +# - name: Test IGMP configuration validation - string instead of dictionary should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ 
igmp_vars_map.test_igmp_invalid_dict_type }}" +# register: result_igmp_invalid_dict_type +# ignore_errors: yes +# tags: [negative, igmp, structure, dict] + +# - name: Assert IGMP configuration dictionary type validation failed +# assert: +# that: +# - result_igmp_invalid_dict_type.failed == true +# - "'must be of type dict' in result_igmp_invalid_dict_type.msg or 'igmp_snooping' in result_igmp_invalid_dict_type.msg" +# fail_msg: "IGMP configuration dictionary type validation should have failed" +# success_msg: "IGMP configuration dictionary type validation correctly failed" +# tags: [negative, igmp, structure, dict] + +# - name: Test IGMP configuration validation - list instead of dictionary should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ igmp_vars_map.test_igmp_invalid_list_type }}" +# register: result_igmp_invalid_list_type +# ignore_errors: yes +# tags: [negative, igmp, structure, list] + +# - name: Assert IGMP configuration list type validation failed +# assert: +# that: +# - result_igmp_invalid_list_type.failed == true +# - "'must be of type dict' in result_igmp_invalid_list_type.msg or 'igmp_snooping' in result_igmp_invalid_list_type.msg" +# fail_msg: "IGMP configuration list type validation should have failed" +# success_msg: "IGMP configuration list type validation correctly failed" +# tags: [negative, igmp, structure, list] + +# - name: Test IGMP VLANs validation - string instead of list should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ igmp_vars_map.test_igmp_vlans_invalid_string }}" +# register: result_igmp_vlans_invalid_string +# ignore_errors: yes +# tags: [negative, igmp, vlan, structure, string] + +# - name: Assert IGMP VLANs string type validation failed +# assert: +# that: +# - result_igmp_vlans_invalid_string.failed == true +# - "'be a list of dictionaries' in result_igmp_vlans_invalid_string.msg" +# fail_msg: "IGMP VLANs string type validation should have failed" +# success_msg: "IGMP VLANs string type validation correctly failed" +# tags: [negative, igmp, vlan, structure, string] + +# - name: Test IGMP VLANs validation - dictionary instead of list should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ igmp_vars_map.test_igmp_vlans_invalid_dict }}" +# register: result_igmp_vlans_invalid_dict +# ignore_errors: yes +# tags: [negative, igmp, vlan, structure, dict] + +# - name: Assert IGMP VLANs dictionary type validation failed +# assert: +# that: +# - result_igmp_vlans_invalid_dict.failed == true +# - "'be a list of dictionaries' in result_igmp_vlans_invalid_dict.msg or 'igmp_snooping_vlans' in result_igmp_vlans_invalid_dict.msg" +# fail_msg: "IGMP VLANs dictionary type validation should have failed" +# success_msg: "IGMP VLANs dictionary type validation correctly failed" +# tags: [negative, igmp, vlan, structure, dict] + +# - name: Test IGMP VLAN item validation - string instead of dictionary should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ igmp_vars_map.test_igmp_vlan_invalid_dict_type }}" +# register: result_igmp_vlan_invalid_dict_type +# ignore_errors: yes +# tags: [negative, igmp, vlan, structure, item] + +# - name: Assert IGMP VLAN item dictionary type validation failed +# assert: +# that: +# - result_igmp_vlan_invalid_dict_type.failed == true +# - "'be a list of dictionaries' 
in result_igmp_vlan_invalid_dict_type.msg or 'igmp_snooping_vlans' in result_igmp_vlan_invalid_dict_type.msg" +# fail_msg: "IGMP VLAN item dictionary type validation should have failed" +# success_msg: "IGMP VLAN item dictionary type validation correctly failed" +# tags: [negative, igmp, vlan, structure, item] + +# ################################################################################################################### +# # Positive Test Cases for IGMP Snooping + +# ############################################# +# # POSITIVE TEST CASES - CREATE # +# ############################################# + +# - name: Create IGMP snooping configuration with global parameters only +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ igmp_vars_map.test_igmp_create_global_only }}" +# register: result_igmp_create_global_only +# tags: [positive, igmp, create, global] + +# - name: Assert IGMP global only creation succeeded +# assert: +# that: +# - result_igmp_create_global_only.changed == true +# - result_igmp_create_global_only.response is defined +# fail_msg: "IGMP global only creation should have succeeded" +# success_msg: "IGMP global only creation succeeded" +# tags: [positive, igmp, create, global] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, igmp, create] + +# - name: Create IGMP snooping configuration with VLANs only +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ igmp_vars_map.test_igmp_create_vlans_only }}" +# register: result_igmp_create_vlans_only +# tags: [positive, igmp, create, vlans] + +# - name: Assert IGMP VLANs only creation succeeded +# assert: +# that: +# - result_igmp_create_vlans_only.changed == true +# - result_igmp_create_vlans_only.response is defined +# fail_msg: "IGMP VLANs only creation should have succeeded" +# success_msg: "IGMP VLANs only creation succeeded" +# tags: [positive, igmp, create, vlans] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, igmp, create] + +# - name: Create IGMP snooping configuration with single VLAN +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ igmp_vars_map.test_igmp_create_single_vlan }}" +# register: result_igmp_create_single_vlan +# tags: [positive, igmp, create, single_vlan] + +# - name: Assert IGMP single VLAN creation succeeded +# assert: +# that: +# - result_igmp_create_single_vlan.changed == true +# - result_igmp_create_single_vlan.response is defined +# fail_msg: "IGMP single VLAN creation should have succeeded" +# success_msg: "IGMP single VLAN creation succeeded" +# tags: [positive, igmp, create, single_vlan] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, igmp, create] + +# - name: Create IGMP snooping configuration with global and VLANs +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ igmp_vars_map.test_igmp_create_global_and_vlans }}" +# register: result_igmp_create_global_and_vlans +# tags: [positive, igmp, create, combined] + +# - name: Assert IGMP global and VLANs creation succeeded +# assert: +# that: +# - result_igmp_create_global_and_vlans.changed == true +# - result_igmp_create_global_and_vlans.response is defined +# fail_msg: "IGMP global and VLANs creation should have succeeded" +# success_msg: "IGMP global and VLANs 
creation succeeded" +# tags: [positive, igmp, create, combined] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, igmp, create] + +# - name: Create IGMP snooping configuration with minimal VLAN parameters +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ igmp_vars_map.test_igmp_create_minimal_vlan }}" +# register: result_igmp_create_minimal_vlan +# tags: [positive, igmp, create, minimal, vlan] + +# - name: Assert IGMP minimal VLAN creation succeeded +# assert: +# that: +# - result_igmp_create_minimal_vlan.changed == true +# - result_igmp_create_minimal_vlan.response is defined +# fail_msg: "IGMP minimal VLAN creation should have succeeded" +# success_msg: "IGMP minimal VLAN creation succeeded" +# tags: [positive, igmp, create, minimal, vlan] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, igmp, create] + +# - name: Create IGMP snooping configuration with multiple VLANs - different settings +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ igmp_vars_map.test_igmp_create_multiple_vlans_different_settings }}" +# register: result_igmp_create_multiple_vlans_different_settings +# tags: [positive, igmp, create, multiple, different] + +# - name: Assert IGMP multiple VLANs with different settings creation succeeded +# assert: +# that: +# - result_igmp_create_multiple_vlans_different_settings.changed == true +# - result_igmp_create_multiple_vlans_different_settings.response is defined +# fail_msg: "IGMP multiple VLANs with different settings creation should have succeeded" +# success_msg: "IGMP multiple VLANs with different settings creation succeeded" +# tags: [positive, igmp, create, multiple, different] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, igmp, create] + +# ############################################# +# # POSITIVE TEST CASES - UPDATE # +# ############################################# + +# - name: Update IGMP snooping global parameters only +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ igmp_vars_map.test_igmp_update_global_only }}" +# register: result_igmp_update_global_only +# tags: [positive, igmp, update, global] + +# - name: Assert IGMP global only update succeeded +# assert: +# that: +# - result_igmp_update_global_only.changed == true +# - result_igmp_update_global_only.response is defined +# fail_msg: "IGMP global only update should have succeeded" +# success_msg: "IGMP global only update succeeded" +# tags: [positive, igmp, update, global] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, igmp, update] + +# - name: Update IGMP snooping by modifying existing VLANs +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ igmp_vars_map.test_igmp_update_modify_vlans }}" +# register: result_igmp_update_modify_vlans +# tags: [positive, igmp, update, modify_vlans] + +# - name: Assert IGMP modify VLANs update succeeded +# assert: +# that: +# - result_igmp_update_modify_vlans.changed == true +# - result_igmp_update_modify_vlans.response is defined +# fail_msg: "IGMP modify VLANs update should have succeeded" +# success_msg: "IGMP modify VLANs update succeeded" +# tags: [positive, igmp, update, modify_vlans] + +# - name: Pause to allow configuration to 
settle +# pause: +# seconds: 10 +# tags: [positive, igmp, update] + +# - name: Update IGMP snooping global and VLANs together +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ igmp_vars_map.test_igmp_update_global_and_vlans }}" +# register: result_igmp_update_global_and_vlans +# tags: [positive, igmp, update, combined] + +# - name: Assert IGMP global and VLANs update succeeded +# assert: +# that: +# - result_igmp_update_global_and_vlans.changed == true +# - result_igmp_update_global_and_vlans.response is defined +# fail_msg: "IGMP global and VLANs update should have succeeded" +# success_msg: "IGMP global and VLANs update succeeded" +# tags: [positive, igmp, update, combined] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, igmp, update] + +# - name: Update IGMP snooping single VLAN +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ igmp_vars_map.test_igmp_update_single_vlan }}" +# register: result_igmp_update_single_vlan +# tags: [positive, igmp, update, single_vlan] + +# - name: Assert IGMP single VLAN update succeeded +# assert: +# that: +# - result_igmp_update_single_vlan.changed == true +# - result_igmp_update_single_vlan.response is defined +# fail_msg: "IGMP single VLAN update should have succeeded" +# success_msg: "IGMP single VLAN update succeeded" +# tags: [positive, igmp, update, single_vlan] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, igmp, update] diff --git a/tests/integration/ccc_wired_campus_automation_management/tests/test_wca_feature_lldp.yml b/tests/integration/ccc_wired_campus_automation_management/tests/test_wca_feature_lldp.yml new file mode 100644 index 0000000000..a49b5c0fa1 --- /dev/null +++ b/tests/integration/ccc_wired_campus_automation_management/tests/test_wca_feature_lldp.yml @@ -0,0 +1,954 @@ +# --- +# # =================================================================================================== +# # LLDP FEATURE TESTS SUMMARY +# # =================================================================================================== +# # +# # This test suite validates LLDP configuration for Wired Campus Automation. +# # +# # TEST CATEGORIES: +# # +# # 1. NEGATIVE VALIDATION TESTS (Lines 75-450) +# # - LLDP Admin Status validation (string/integer instead of boolean) +# # - LLDP Hold Time validation (below min, above max, string, float) +# # - LLDP Timer validation (below min, above max, negative, string, float) +# # - LLDP Reinitialization Delay validation (below min, above max, negative, string, float) +# # - Data type and structure validation (invalid dict/list types) +# # +# # 2. 
POSITIVE VALIDATION TESTS (Lines 450-950) +# # a) LLDP Creation Tests +# # - Admin status only configuration +# # - All parameters configuration +# # - Custom timers configuration +# # - Disabled LLDP configuration +# # - Reinitialization delay only configuration +# # +# # b) LLDP Update Tests (Merged State) +# # - Enable/disable LLDP updates +# # - Timer modifications +# # - Reinitialization delay modifications +# # - All parameters updates +# # +# # c) LLDP Deletion Tests (Deleted State) +# # - Empty configuration deletion/reset +# # - Configuration with parameters deletion +# # +# # d) Boundary Value Tests +# # - Minimum valid values (hold_time: 0, timer: 5, reinit_delay: 2) +# # - Maximum valid values (hold_time: 65535, timer: 65534, reinit_delay: 10) +# # - Recommended values testing +# # - Zero hold time (no aging) configuration +# # +# # e) Special Configuration Tests +# # - Partial configuration testing +# # - Default recommended settings +# # +# # f) Advanced Test Scenarios +# # - Idempotency tests (same config applied twice) +# # - Cleanup operations +# # +# # VALIDATION RANGES: +# # - LLDP Admin Status: boolean (true/false) +# # - LLDP Hold Time: integer (0-65535 seconds, 0 = no aging) +# # - LLDP Timer: integer (5-65534 seconds) +# # - LLDP Reinitialization Delay: integer (2-10 seconds) +# # +# # EXPECTED BEHAVIORS: +# # - Negative tests should fail with appropriate error messages +# # - Positive tests should succeed with 'Successfully' in response +# # - Idempotency: First run changes=true, second run changes=false +# # - All tests include proper pause times for configuration settlement +# # - Zero hold time disables aging of LLDP neighbor information +# # +# # =================================================================================================== + +# - debug: msg="Starting LLDP feature tests for Wired Campus Automation" +# - debug: msg="Role Path {{ role_path }}" + +# - block: +# - name: Load LLDP test variables +# include_vars: +# file: "{{ role_path }}/vars/vars_wca_test_feature_lldp.yml" +# name: lldp_vars_map +# vars: +# dnac_login: &dnac_login +# dnac_host: "{{ dnac_host }}" +# dnac_username: "{{ dnac_username }}" +# dnac_password: "{{ dnac_password }}" +# dnac_verify: "{{ dnac_verify }}" +# dnac_port: "{{ dnac_port }}" +# dnac_version: "{{ dnac_version }}" +# dnac_debug: "{{ dnac_debug }}" +# dnac_log: true +# dnac_log_level: "DEBUG" +# config_verify: true +# dnac_api_task_timeout: "{{ dnac_api_task_timeout }}" +# dnac_log_append: false + +# # ============================================================================= +# # Negative Validation Tests for LLDP +# # ============================================================================= + +# # ############################################# +# # LLDP Admin Status Tests +# # ############################################# + +# - name: Test LLDP admin status validation - string value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ lldp_vars_map.test_lldp_admin_status_string }}" +# register: result_lldp_admin_status_string +# ignore_errors: true +# tags: [negative, lldp, admin_status] + +# - name: Assert LLDP admin status string validation failed +# assert: +# that: +# - result_lldp_admin_status_string.failed == true +# - "'must be of type boolean' in result_lldp_admin_status_string.msg" +# fail_msg: "LLDP admin status string validation should have failed" +# success_msg: "LLDP admin status string validation correctly failed" +# tags: 
[negative, lldp, admin_status] + +# - name: Test LLDP admin status validation - integer value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ lldp_vars_map.test_lldp_admin_status_integer }}" +# register: result_lldp_admin_status_integer +# ignore_errors: true +# tags: [negative, lldp, admin_status] + +# - name: Assert LLDP admin status integer validation failed +# assert: +# that: +# - result_lldp_admin_status_integer.failed == true +# - "'must be of type boolean' in result_lldp_admin_status_integer.msg" +# fail_msg: "LLDP admin status integer validation should have failed" +# success_msg: "LLDP admin status integer validation correctly failed" +# tags: [negative, lldp, admin_status] + +# # ############################################# +# # LLDP Hold Time Tests +# # ############################################# + +# - name: Test LLDP hold time validation - below minimum should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ lldp_vars_map.test_lldp_hold_time_below_min }}" +# register: result_lldp_hold_time_below_min +# ignore_errors: true +# tags: [negative, lldp, hold_time] + +# - name: Assert LLDP hold time below minimum validation failed +# assert: +# that: +# - result_lldp_hold_time_below_min.failed == true +# - "'must be within the range' in result_lldp_hold_time_below_min.msg" +# fail_msg: "LLDP hold time below minimum validation should have failed" +# success_msg: "LLDP hold time below minimum validation correctly failed" +# tags: [negative, lldp, hold_time] + +# - name: Test LLDP hold time validation - above maximum should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ lldp_vars_map.test_lldp_hold_time_above_max }}" +# register: result_lldp_hold_time_above_max +# ignore_errors: true +# tags: [negative, lldp, hold_time] + +# - name: Assert LLDP hold time above maximum validation failed +# assert: +# that: +# - result_lldp_hold_time_above_max.failed == true +# - "'must be within the range' in result_lldp_hold_time_above_max.msg" +# fail_msg: "LLDP hold time above maximum validation should have failed" +# success_msg: "LLDP hold time above maximum validation correctly failed" +# tags: [negative, lldp, hold_time] + +# - name: Test LLDP hold time validation - string value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ lldp_vars_map.test_lldp_hold_time_string }}" +# register: result_lldp_hold_time_string +# ignore_errors: true +# tags: [negative, lldp, hold_time] + +# - name: Assert LLDP hold time string validation failed +# assert: +# that: +# - result_lldp_hold_time_string.failed == true +# - "'must be of type integer' in result_lldp_hold_time_string.msg" +# fail_msg: "LLDP hold time string validation should have failed" +# success_msg: "LLDP hold time string validation correctly failed" +# tags: [negative, lldp, hold_time] + +# - name: Test LLDP hold time validation - float value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ lldp_vars_map.test_lldp_hold_time_float }}" +# register: result_lldp_hold_time_float +# ignore_errors: true +# tags: [negative, lldp, hold_time] + +# - name: Assert LLDP hold time float validation failed +# assert: +# that: +# - result_lldp_hold_time_float.failed == true +# - "'must be of type integer' 
in result_lldp_hold_time_float.msg" +# fail_msg: "LLDP hold time float validation should have failed" +# success_msg: "LLDP hold time float validation correctly failed" +# tags: [negative, lldp, hold_time] + +# # ############################################# +# # LLDP Timer Tests +# # ############################################# + +# - name: Test LLDP timer validation - below minimum should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ lldp_vars_map.test_lldp_timer_below_min }}" +# register: result_lldp_timer_below_min +# ignore_errors: true +# tags: [negative, lldp, timer] + +# - name: Assert LLDP timer below minimum validation failed +# assert: +# that: +# - result_lldp_timer_below_min.failed == true +# - "'must be within the range' in result_lldp_timer_below_min.msg" +# fail_msg: "LLDP timer below minimum validation should have failed" +# success_msg: "LLDP timer below minimum validation correctly failed" +# tags: [negative, lldp, timer] + +# - name: Test LLDP timer validation - above maximum should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ lldp_vars_map.test_lldp_timer_above_max }}" +# register: result_lldp_timer_above_max +# ignore_errors: true +# tags: [negative, lldp, timer] + +# - name: Assert LLDP timer above maximum validation failed +# assert: +# that: +# - result_lldp_timer_above_max.failed == true +# - "'must be within the range' in result_lldp_timer_above_max.msg" +# fail_msg: "LLDP timer above maximum validation should have failed" +# success_msg: "LLDP timer above maximum validation correctly failed" +# tags: [negative, lldp, timer] + +# - name: Test LLDP timer validation - string value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ lldp_vars_map.test_lldp_timer_string }}" +# register: result_lldp_timer_string +# ignore_errors: true +# tags: [negative, lldp, timer] + +# - name: Assert LLDP timer string validation failed +# assert: +# that: +# - result_lldp_timer_string.failed == true +# - "'must be of type integer' in result_lldp_timer_string.msg" +# fail_msg: "LLDP timer string validation should have failed" +# success_msg: "LLDP timer string validation correctly failed" +# tags: [negative, lldp, timer] + +# - name: Test LLDP timer validation - float value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ lldp_vars_map.test_lldp_timer_float }}" +# register: result_lldp_timer_float +# ignore_errors: true +# tags: [negative, lldp, timer] + +# - name: Assert LLDP timer float validation failed +# assert: +# that: +# - result_lldp_timer_float.failed == true +# - "'must be of type integer' in result_lldp_timer_float.msg" +# fail_msg: "LLDP timer float validation should have failed" +# success_msg: "LLDP timer float validation correctly failed" +# tags: [negative, lldp, timer] + +# - name: Test LLDP timer validation - negative value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ lldp_vars_map.test_lldp_timer_negative }}" +# register: result_lldp_timer_negative +# ignore_errors: true +# tags: [negative, lldp, timer] + +# - name: Assert LLDP timer negative validation failed +# assert: +# that: +# - result_lldp_timer_negative.failed == true +# - "'must be within the range' in result_lldp_timer_negative.msg" +# 
fail_msg: "LLDP timer negative validation should have failed" +# success_msg: "LLDP timer negative validation correctly failed" +# tags: [negative, lldp, timer] + +# # ############################################# +# # LLDP Reinitialization Delay Tests +# # ############################################# + +# - name: Test LLDP reinitialization delay validation - below minimum should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ lldp_vars_map.test_lldp_reinit_delay_below_min }}" +# register: result_lldp_reinit_delay_below_min +# ignore_errors: true +# tags: [negative, lldp, reinit_delay] + +# - name: Assert LLDP reinitialization delay below minimum validation failed +# assert: +# that: +# - result_lldp_reinit_delay_below_min.failed == true +# - "'must be within the range' in result_lldp_reinit_delay_below_min.msg" +# fail_msg: "LLDP reinitialization delay below minimum validation should have failed" +# success_msg: "LLDP reinitialization delay below minimum validation correctly failed" +# tags: [negative, lldp, reinit_delay] + +# - name: Test LLDP reinitialization delay validation - above maximum should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ lldp_vars_map.test_lldp_reinit_delay_above_max }}" +# register: result_lldp_reinit_delay_above_max +# ignore_errors: true +# tags: [negative, lldp, reinit_delay] + +# - name: Assert LLDP reinitialization delay above maximum validation failed +# assert: +# that: +# - result_lldp_reinit_delay_above_max.failed == true +# - "'must be within the range' in result_lldp_reinit_delay_above_max.msg" +# fail_msg: "LLDP reinitialization delay above maximum validation should have failed" +# success_msg: "LLDP reinitialization delay above maximum validation correctly failed" +# tags: [negative, lldp, reinit_delay] + +# - name: Test LLDP reinitialization delay validation - string value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ lldp_vars_map.test_lldp_reinit_delay_string }}" +# register: result_lldp_reinit_delay_string +# ignore_errors: true +# tags: [negative, lldp, reinit_delay] + +# - name: Assert LLDP reinitialization delay string validation failed +# assert: +# that: +# - result_lldp_reinit_delay_string.failed == true +# - "'must be of type integer' in result_lldp_reinit_delay_string.msg" +# fail_msg: "LLDP reinitialization delay string validation should have failed" +# success_msg: "LLDP reinitialization delay string validation correctly failed" +# tags: [negative, lldp, reinit_delay] + +# - name: Test LLDP reinitialization delay validation - float value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ lldp_vars_map.test_lldp_reinit_delay_float }}" +# register: result_lldp_reinit_delay_float +# ignore_errors: true +# tags: [negative, lldp, reinit_delay] + +# - name: Assert LLDP reinitialization delay float validation failed +# assert: +# that: +# - result_lldp_reinit_delay_float.failed == true +# - "'must be of type integer' in result_lldp_reinit_delay_float.msg" +# fail_msg: "LLDP reinitialization delay float validation should have failed" +# success_msg: "LLDP reinitialization delay float validation correctly failed" +# tags: [negative, lldp, reinit_delay] + +# - name: Test LLDP reinitialization delay validation - negative value should fail +# 
cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ lldp_vars_map.test_lldp_reinit_delay_negative }}" +# register: result_lldp_reinit_delay_negative +# ignore_errors: true +# tags: [negative, lldp, reinit_delay] + +# - name: Assert LLDP reinitialization delay negative validation failed +# assert: +# that: +# - result_lldp_reinit_delay_negative.failed == true +# - "'must be within the range' in result_lldp_reinit_delay_negative.msg" +# fail_msg: "LLDP reinitialization delay negative validation should have failed" +# success_msg: "LLDP reinitialization delay negative validation correctly failed" +# tags: [negative, lldp, reinit_delay] + +# # ############################################# +# # Data Type and Structure Tests +# # ############################################# + +# - name: Test LLDP structure validation - string instead of dict should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ lldp_vars_map.test_lldp_invalid_dict_type }}" +# register: result_lldp_invalid_dict_type +# ignore_errors: true +# tags: [negative, lldp, structure] + +# - name: Assert LLDP invalid dict type validation failed +# assert: +# that: +# - result_lldp_invalid_dict_type.failed == true +# - "'must be of type dictionary' in result_lldp_invalid_dict_type.msg" +# fail_msg: "LLDP invalid dict type validation should have failed" +# success_msg: "LLDP invalid dict type validation correctly failed" +# tags: [negative, lldp, structure] + +# - name: Test LLDP structure validation - list instead of dict should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ lldp_vars_map.test_lldp_invalid_list_type }}" +# register: result_lldp_invalid_list_type +# ignore_errors: true +# tags: [negative, lldp, structure] + +# - name: Assert LLDP invalid list type validation failed +# assert: +# that: +# - result_lldp_invalid_list_type.failed == true +# - "'must be of type dictionary' in result_lldp_invalid_list_type.msg" +# fail_msg: "LLDP invalid list type validation should have failed" +# success_msg: "LLDP invalid list type validation correctly failed" +# tags: [negative, lldp, structure] + +# - name: Display negative test summary +# debug: +# msg: "LLDP negative validation tests completed successfully" +# tags: [negative, lldp] + +# # ============================================================================= +# # POSITIVE TEST CASES - LLDP CREATION (MERGED STATE) +# # ============================================================================= + +# # ############################################# +# # Single LLDP Parameter Tests +# # ############################################# + +# - name: Test LLDP configuration with admin status only +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ lldp_vars_map.test_lldp_create_admin_status_only }}" +# register: result_lldp_create_admin_status_only +# tags: [positive, lldp, create, admin_status] + +# - name: Assert LLDP admin status only configuration succeeded +# assert: +# that: +# - result_lldp_create_admin_status_only.failed == false +# - result_lldp_create_admin_status_only.changed == true +# - "'Successfully' in result_lldp_create_admin_status_only.response" +# fail_msg: "LLDP admin status only configuration should have succeeded" +# success_msg: "LLDP admin status only configuration succeeded" +# tags: [positive, lldp, create, 
admin_status] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, lldp, create] + +# - name: Test LLDP configuration with all parameters +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ lldp_vars_map.test_lldp_create_all_params }}" +# register: result_lldp_create_all_params +# tags: [positive, lldp, create, all_params] + +# - name: Assert LLDP all parameters configuration succeeded +# assert: +# that: +# - result_lldp_create_all_params.failed == false +# - result_lldp_create_all_params.changed == true +# - "'Successfully' in result_lldp_create_all_params.response" +# fail_msg: "LLDP all parameters configuration should have succeeded" +# success_msg: "LLDP all parameters configuration succeeded" +# tags: [positive, lldp, create, all_params] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, lldp, create] + +# - name: Test LLDP configuration with custom timers +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ lldp_vars_map.test_lldp_create_custom_timers }}" +# register: result_lldp_create_custom_timers +# tags: [positive, lldp, create, timers] + +# - name: Assert LLDP custom timers configuration succeeded +# assert: +# that: +# - result_lldp_create_custom_timers.failed == false +# - result_lldp_create_custom_timers.changed == true +# - "'Successfully' in result_lldp_create_custom_timers.response" +# fail_msg: "LLDP custom timers configuration should have succeeded" +# success_msg: "LLDP custom timers configuration succeeded" +# tags: [positive, lldp, create, timers] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, lldp, create] + +# - name: Test LLDP disabled configuration +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ lldp_vars_map.test_lldp_create_disabled }}" +# register: result_lldp_create_disabled +# tags: [positive, lldp, create, disabled] + +# - name: Assert LLDP disabled configuration succeeded +# assert: +# that: +# - result_lldp_create_disabled.failed == false +# - result_lldp_create_disabled.changed == true +# - "'Successfully' in result_lldp_create_disabled.response" +# fail_msg: "LLDP disabled configuration should have succeeded" +# success_msg: "LLDP disabled configuration succeeded" +# tags: [positive, lldp, create, disabled] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, lldp, create] + +# - name: Test LLDP with only reinitialization delay +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ lldp_vars_map.test_lldp_create_reinit_delay_only }}" +# register: result_lldp_create_reinit_delay_only +# tags: [positive, lldp, create, reinit_delay] + +# - name: Assert LLDP reinitialization delay only configuration succeeded +# assert: +# that: +# - result_lldp_create_reinit_delay_only.failed == false +# - result_lldp_create_reinit_delay_only.changed == true +# - "'Successfully' in result_lldp_create_reinit_delay_only.response" +# fail_msg: "LLDP reinitialization delay only configuration should have succeeded" +# success_msg: "LLDP reinitialization delay only configuration succeeded" +# tags: [positive, lldp, create, reinit_delay] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, lldp, create] + +# # 
============================================================================= +# # POSITIVE TEST CASES - LLDP UPDATE (MERGED STATE) +# # ============================================================================= + +# # ############################################# +# # Single LLDP Update Tests +# # ############################################# + +# - name: Test LLDP update - enable LLDP +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ lldp_vars_map.test_lldp_update_enable }}" +# register: result_lldp_update_enable +# tags: [positive, lldp, update, enable] + +# - name: Assert LLDP enable update succeeded +# assert: +# that: +# - result_lldp_update_enable.failed == false +# - result_lldp_update_enable.changed == true +# - "'Successfully' in result_lldp_update_enable.response" +# fail_msg: "LLDP enable update should have succeeded" +# success_msg: "LLDP enable update succeeded" +# tags: [positive, lldp, update, enable] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, lldp, update] + +# - name: Test LLDP update - disable LLDP +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ lldp_vars_map.test_lldp_update_disable }}" +# register: result_lldp_update_disable +# tags: [positive, lldp, update, disable] + +# - name: Assert LLDP disable update succeeded +# assert: +# that: +# - result_lldp_update_disable.failed == false +# - result_lldp_update_disable.changed == true +# - "'Successfully' in result_lldp_update_disable.response" +# fail_msg: "LLDP disable update should have succeeded" +# success_msg: "LLDP disable update succeeded" +# tags: [positive, lldp, update, disable] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, lldp, update] + +# - name: Test LLDP update - modify timers +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ lldp_vars_map.test_lldp_update_timers }}" +# register: result_lldp_update_timers +# tags: [positive, lldp, update, timers] + +# - name: Assert LLDP timers update succeeded +# assert: +# that: +# - result_lldp_update_timers.failed == false +# - result_lldp_update_timers.changed == true +# - "'Successfully' in result_lldp_update_timers.response" +# fail_msg: "LLDP timers update should have succeeded" +# success_msg: "LLDP timers update succeeded" +# tags: [positive, lldp, update, timers] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, lldp, update] + +# - name: Test LLDP update - modify reinitialization delay +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ lldp_vars_map.test_lldp_update_reinit_delay }}" +# register: result_lldp_update_reinit_delay +# tags: [positive, lldp, update, reinit_delay] + +# - name: Assert LLDP reinitialization delay update succeeded +# assert: +# that: +# - result_lldp_update_reinit_delay.failed == false +# - result_lldp_update_reinit_delay.changed == true +# - "'Successfully' in result_lldp_update_reinit_delay.response" +# fail_msg: "LLDP reinitialization delay update should have succeeded" +# success_msg: "LLDP reinitialization delay update succeeded" +# tags: [positive, lldp, update, reinit_delay] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, lldp, update] + +# - name: Test LLDP update - all parameters +# 
cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ lldp_vars_map.test_lldp_update_all_params }}" +# register: result_lldp_update_all_params +# tags: [positive, lldp, update, all_params] + +# - name: Assert LLDP all parameters update succeeded +# assert: +# that: +# - result_lldp_update_all_params.failed == false +# - result_lldp_update_all_params.changed == true +# - "'Successfully' in result_lldp_update_all_params.response" +# fail_msg: "LLDP all parameters update should have succeeded" +# success_msg: "LLDP all parameters update succeeded" +# tags: [positive, lldp, update, all_params] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, lldp, update] +# # ============================================================================= +# # POSITIVE TEST CASES - LLDP DELETION (DELETED STATE) +# # ============================================================================= + +# # ############################################# +# # LLDP Deletion Tests +# # ############################################# + +# - name: Test LLDP deletion/reset - empty configuration +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: deleted +# config: +# - "{{ lldp_vars_map.test_lldp_delete_empty }}" +# register: result_lldp_delete_empty +# tags: [positive, lldp, delete, empty] + +# - name: Assert LLDP empty deletion succeeded +# assert: +# that: +# - result_lldp_delete_empty.failed == false +# - result_lldp_delete_empty.changed == true +# - "'Successfully' in result_lldp_delete_empty.response" +# fail_msg: "LLDP empty deletion should have succeeded" +# success_msg: "LLDP empty deletion succeeded" +# tags: [positive, lldp, delete, empty] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, lldp, delete] + +# - name: Test LLDP update - all parameters (setup for deletion test) +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ lldp_vars_map.test_lldp_update_all_params }}" +# register: result_lldp_update_all_params_setup +# tags: [positive, lldp, delete] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, lldp, delete] + +# - name: Test LLDP deletion/reset - with existing parameters +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: deleted +# config: +# - "{{ lldp_vars_map.test_lldp_delete_with_params }}" +# register: result_lldp_delete_with_params +# tags: [positive, lldp, delete, params] + +# - name: Assert LLDP deletion with params succeeded +# assert: +# that: +# - result_lldp_delete_with_params.failed == false +# - result_lldp_delete_with_params.changed == true +# - "'Successfully' in result_lldp_delete_with_params.response" +# fail_msg: "LLDP deletion with params should have succeeded" +# success_msg: "LLDP deletion with params succeeded" +# tags: [positive, lldp, delete, params] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, lldp, delete] + +# # ============================================================================= +# # BOUNDARY VALUE TESTS +# # ============================================================================= + +# # ############################################# +# # Boundary Value Tests +# # ############################################# + +# - name: Test LLDP with minimum valid values +# 
cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ lldp_vars_map.test_lldp_boundary_min_values }}" +# register: result_lldp_boundary_min_values +# tags: [positive, lldp, boundary, min] + +# - name: Assert LLDP minimum values configuration succeeded +# assert: +# that: +# - result_lldp_boundary_min_values.failed == false +# - result_lldp_boundary_min_values.changed == true +# - "'Successfully' in result_lldp_boundary_min_values.response" +# fail_msg: "LLDP minimum values configuration should have succeeded" +# success_msg: "LLDP minimum values configuration succeeded" +# tags: [positive, lldp, boundary, min] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, lldp, boundary] + +# - name: Test LLDP with maximum valid values +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ lldp_vars_map.test_lldp_boundary_max_values }}" +# register: result_lldp_boundary_max_values +# tags: [positive, lldp, boundary, max] + +# - name: Assert LLDP maximum values configuration succeeded +# assert: +# that: +# - result_lldp_boundary_max_values.failed == false +# - result_lldp_boundary_max_values.changed == true +# - "'Successfully' in result_lldp_boundary_max_values.response" +# fail_msg: "LLDP maximum values configuration should have succeeded" +# success_msg: "LLDP maximum values configuration succeeded" +# tags: [positive, lldp, boundary, max] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, lldp, boundary] + +# - name: Test LLDP with recommended values +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ lldp_vars_map.test_lldp_recommended_values }}" +# register: result_lldp_recommended_values +# tags: [positive, lldp, boundary, recommended] + +# - name: Assert LLDP recommended values configuration succeeded +# assert: +# that: +# - result_lldp_recommended_values.failed == false +# - result_lldp_recommended_values.changed == true +# - "'Successfully' in result_lldp_recommended_values.response" +# fail_msg: "LLDP recommended values configuration should have succeeded" +# success_msg: "LLDP recommended values configuration succeeded" +# tags: [positive, lldp, boundary, recommended] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, lldp, boundary] + +# - name: Test LLDP with zero hold time (no aging) +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ lldp_vars_map.test_lldp_zero_hold_time }}" +# register: result_lldp_zero_hold_time +# tags: [positive, lldp, boundary, zero_hold] + +# - name: Assert LLDP zero hold time configuration succeeded +# assert: +# that: +# - result_lldp_zero_hold_time.failed == false +# - result_lldp_zero_hold_time.changed == true +# - "'Successfully' in result_lldp_zero_hold_time.response" +# fail_msg: "LLDP zero hold time configuration should have succeeded" +# success_msg: "LLDP zero hold time configuration succeeded" +# tags: [positive, lldp, boundary, zero_hold] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, lldp, boundary] + +# # ============================================================================= +# # ADVANCED POSITIVE TEST SCENARIOS +# # ============================================================================= + +# # 
############################################# +# # Idempotency Tests +# # ############################################# + +# - name: Test LLDP Idempotency - Configure same LLDP settings twice +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ lldp_vars_map.test_lldp_create_all_params }}" +# register: result_lldp_idempotency_first +# tags: [positive, lldp, idempotency] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 40 +# tags: [positive, lldp, idempotency] + +# - name: Test LLDP Idempotency - Configure same LLDP settings again (should be idempotent) +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ lldp_vars_map.test_lldp_create_all_params }}" +# register: result_lldp_idempotency_second +# tags: [positive, lldp, idempotency] + +# - name: Assert LLDP Idempotency - Second configuration should not change +# assert: +# that: +# - result_lldp_idempotency_second.failed == false +# - result_lldp_idempotency_second.changed == false +# fail_msg: "LLDP idempotency test failed" +# success_msg: "LLDP idempotency test succeeded" +# tags: [positive, lldp, idempotency] + +# # ############################################# +# # Cleanup Test Configurations +# # ############################################# + +# - name: Cleanup - Reset LLDP to default configuration +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: deleted +# config: +# - ip_address: "204.1.2.3" +# device_collection_status_check: false +# layer2_configuration: +# lldp: {} +# register: result_lldp_cleanup +# ignore_errors: true +# tags: [cleanup, lldp] + +# always: +# - name: Display positive test summary +# debug: +# msg: "LLDP positive validation tests completed successfully" +# tags: [positive, lldp] + +# - name: Display final cleanup status +# debug: +# var: result_lldp_cleanup +# when: result_lldp_cleanup is defined +# tags: [cleanup, lldp] diff --git a/tests/integration/ccc_wired_campus_automation_management/tests/test_wca_feature_logical_ports.yml b/tests/integration/ccc_wired_campus_automation_management/tests/test_wca_feature_logical_ports.yml new file mode 100644 index 0000000000..8827ad6e5d --- /dev/null +++ b/tests/integration/ccc_wired_campus_automation_management/tests/test_wca_feature_logical_ports.yml @@ -0,0 +1,1378 @@ +# --- +# # =================================================================================================== +# # LOGICAL PORTS FEATURE TESTS SUMMARY +# # =================================================================================================== +# # +# # This test suite validates Logical Ports configuration for Wired Campus Automation. +# # +# # TEST CATEGORIES: +# # +# # 1. 
NEGATIVE VALIDATION TESTS (Lines 25-1200) +# # a) Logical Ports Global Configuration Tests +# # - Logical ports configuration validation (string/list instead of dictionary) +# # - Port channel auto validation (string/integer instead of boolean) +# # - Port channel LACP system priority validation (string/boolean instead of integer, range validation) +# # - Port channel load balancing method validation (invalid choice, integer instead of string) +# # - Port channels validation (string/dictionary instead of list, conflict with auto enabled) +# # +# # b) Port Channel Configuration Tests +# # - Port channel item validation (string instead of dictionary) +# # - Port channel protocol validation (missing required, invalid choice, integer instead of string) +# # - Port channel name validation (missing required, integer instead of string, length validation) +# # - Port channel min links validation (string instead of integer, range validation) +# # - Port channel members validation (missing required, string/dictionary instead of list, empty list) +# # +# # c) Port Channel Member Configuration Tests +# # - Port channel member item validation (string instead of dictionary) +# # - Port channel interface name validation (missing required, integer instead of string, empty string) +# # - Port channel mode validation (invalid choices for different protocols, integer instead of string) +# # - Port channel port priority validation (string instead of integer, range validation for LACP/PAGP) +# # - Port channel rate validation (string/invalid choice, protocol specific validation) +# # - Port channel learn method validation (invalid choice, protocol specific validation) +# # +# # d) Duplicate and Conflict Tests +# # - Duplicate port channel names validation +# # - Duplicate member interfaces validation (across channels and within same channel) +# # +# # 2. 
POSITIVE VALIDATION TESTS (Lines 1200-2000) +# # a) Logical Ports Creation Tests (Merged State) +# # - Global parameters only configuration (auto enabled/disabled) +# # - Single port channel configurations (LACP, PAGP, static - minimal and comprehensive) +# # - Multiple port channel configurations (different protocols, same protocols) +# # - All load balancing methods testing +# # - Boundary value testing (minimum and maximum values) +# # - All mode testing (LACP, PAGP modes) +# # - Large scale testing +# # +# # b) Logical Ports Update Tests (Merged State) +# # - Global parameters only updates +# # - Modifying existing port channel configurations +# # - Adding new port channels +# # - Modifying member configurations +# # - Changing protocol types +# # - Adding members to existing port channels +# # - Comprehensive updates +# # - Enabling auto mode +# # - Individual parameter updates +# # +# # VALIDATION RANGES: +# # - Port Channel Auto: boolean (true/false) +# # - Port Channel LACP System Priority: integer (0-65535) +# # - Port Channel Load Balancing Method: string (valid choices) +# # - Port Channel Protocol: string ("LACP", "PAGP", "NONE") +# # - Port Channel Name: string (13-15 characters) +# # - Port Channel Min Links: integer (1-8) +# # - Port Channel Interface Name: string (non-empty) +# # - Port Channel Mode: string (protocol specific choices) +# # - Port Channel Port Priority: integer (0-65535 for LACP, 0-255 for PAGP) +# # - Port Channel Rate: integer (1, 30 - LACP only) +# # - Port Channel Learn Method: string ("AGGREGATION_PORT", "PHYSICAL_PORT" - PAGP only) +# # +# # EXPECTED BEHAVIORS: +# # - Negative tests should fail with appropriate error messages +# # - Positive tests should succeed with changed=true and response defined +# # - Global and port channel configurations can be applied independently +# # - Multiple port channels can be configured with different settings +# # - Protocol-specific validations enforced (LACP, PAGP, NONE modes) +# # - Duplicate names and interfaces should be rejected +# # - All boolean fields reject string/integer values +# # - All integer fields reject string/boolean values +# # - Range validation enforced for numeric fields +# # - Structure validation enforced for complex data types +# # +# # =================================================================================================== + +# - debug: msg="Starting Logical Ports feature tests for Wired Campus Automation" +# - debug: msg="Role Path {{ role_path }}" + +# - block: +# - name: Load Logical Ports test variables +# include_vars: +# file: "{{ role_path }}/vars/vars_wca_test_feature_logical_ports.yml" +# name: logical_ports_vars_map +# vars: +# dnac_login: &dnac_login +# dnac_host: "{{ dnac_host }}" +# dnac_username: "{{ dnac_username }}" +# dnac_password: "{{ dnac_password }}" +# dnac_verify: "{{ dnac_verify }}" +# dnac_port: "{{ dnac_port }}" +# dnac_version: "{{ dnac_version }}" +# dnac_debug: "{{ dnac_debug }}" +# dnac_log: true +# dnac_log_level: DEBUG +# config_verify: true +# dnac_api_task_timeout: "{{ dnac_api_task_timeout }}" +# dnac_log_append: false + +# ################################################################################################################### +# # Negative Validation Tests for Logical Ports + +# ############################################# +# # Logical Ports Global Configuration Tests # +# ############################################# + +# - name: Test logical ports configuration validation - string instead of dictionary should fail +# 
cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ logical_ports_vars_map.test_logical_ports_invalid_dict_type }}" +# register: result_logical_ports_invalid_dict_type +# ignore_errors: yes +# tags: [negative, logical_ports, structure, dict] + +# - name: Assert logical ports configuration dictionary type validation failed +# assert: +# that: +# - result_logical_ports_invalid_dict_type.failed == true +# - "'must be of type dict' in result_logical_ports_invalid_dict_type.msg or 'logical_ports' in result_logical_ports_invalid_dict_type.msg" +# fail_msg: "Logical ports configuration dictionary type validation should have failed" +# success_msg: "Logical ports configuration dictionary type validation correctly failed" +# tags: [negative, logical_ports, structure, dict] + +# - name: Test logical ports configuration validation - list instead of dictionary should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ logical_ports_vars_map.test_logical_ports_invalid_list_type }}" +# register: result_logical_ports_invalid_list_type +# ignore_errors: yes +# tags: [negative, logical_ports, structure, list] + +# - name: Assert logical ports configuration list type validation failed +# assert: +# that: +# - result_logical_ports_invalid_list_type.failed == true +# - "'must be of type dict' in result_logical_ports_invalid_list_type.msg or 'logical_ports' in result_logical_ports_invalid_list_type.msg" +# fail_msg: "Logical ports configuration list type validation should have failed" +# success_msg: "Logical ports configuration list type validation correctly failed" +# tags: [negative, logical_ports, structure, list] + +# - name: Test port channel auto validation - integer value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ logical_ports_vars_map.test_port_channel_auto_integer }}" +# register: result_port_channel_auto_integer +# ignore_errors: yes +# tags: [negative, logical_ports, auto, integer] + +# - name: Assert port channel auto integer validation failed +# assert: +# that: +# - result_port_channel_auto_integer.failed == true +# - "'must be of type boolean' in result_port_channel_auto_integer.msg" +# fail_msg: "Port channel auto integer validation should have failed" +# success_msg: "Port channel auto integer validation correctly failed" +# tags: [negative, logical_ports, auto, integer] + +# - name: Test port channel LACP system priority validation - string value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ logical_ports_vars_map.test_port_channel_lacp_system_priority_string }}" +# register: result_port_channel_lacp_system_priority_string +# ignore_errors: yes +# tags: [negative, logical_ports, lacp_priority, string] + +# - name: Assert port channel LACP system priority string validation failed +# assert: +# that: +# - result_port_channel_lacp_system_priority_string.failed == true +# - "'must be of type integer' in result_port_channel_lacp_system_priority_string.msg" +# fail_msg: "Port channel LACP system priority string validation should have failed" +# success_msg: "Port channel LACP system priority string validation correctly failed" +# tags: [negative, logical_ports, lacp_priority, string] + +# - name: Test port channel LACP system priority validation - boolean value should fail +# 
cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ logical_ports_vars_map.test_port_channel_lacp_system_priority_boolean }}" +# register: result_port_channel_lacp_system_priority_boolean +# ignore_errors: yes +# tags: [negative, logical_ports, lacp_priority, boolean] + +# - name: Assert port channel LACP system priority boolean validation failed +# assert: +# that: +# - result_port_channel_lacp_system_priority_boolean.failed == true +# - "'must be of type integer' in result_port_channel_lacp_system_priority_boolean.msg" +# fail_msg: "Port channel LACP system priority boolean validation should have failed" +# success_msg: "Port channel LACP system priority boolean validation correctly failed" +# tags: [negative, logical_ports, lacp_priority, boolean] + +# - name: Test port channel LACP system priority validation - negative value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ logical_ports_vars_map.test_port_channel_lacp_system_priority_negative }}" +# register: result_port_channel_lacp_system_priority_negative +# ignore_errors: yes +# tags: [negative, logical_ports, lacp_priority, range] + +# - name: Assert port channel LACP system priority negative validation failed +# assert: +# that: +# - result_port_channel_lacp_system_priority_negative.failed == true +# - "'must be within the range' in result_port_channel_lacp_system_priority_negative.msg or 'priority' in result_port_channel_lacp_system_priority_negative.msg" +# fail_msg: "Port channel LACP system priority negative validation should have failed" +# success_msg: "Port channel LACP system priority negative validation correctly failed" +# tags: [negative, logical_ports, lacp_priority, range] + +# - name: Test port channel LACP system priority validation - exceeds maximum should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ logical_ports_vars_map.test_port_channel_lacp_system_priority_max }}" +# register: result_port_channel_lacp_system_priority_max +# ignore_errors: yes +# tags: [negative, logical_ports, lacp_priority, range] + +# - name: Assert port channel LACP system priority maximum validation failed +# assert: +# that: +# - result_port_channel_lacp_system_priority_max.failed == true +# - "'must be within the range' in result_port_channel_lacp_system_priority_max.msg or 'priority' in result_port_channel_lacp_system_priority_max.msg" +# fail_msg: "Port channel LACP system priority maximum validation should have failed" +# success_msg: "Port channel LACP system priority maximum validation correctly failed" +# tags: [negative, logical_ports, lacp_priority, range] + +# - name: Test port channel load balancing method validation - invalid choice should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ logical_ports_vars_map.test_port_channel_load_balancing_method_invalid }}" +# register: result_port_channel_load_balancing_method_invalid +# ignore_errors: yes +# tags: [negative, logical_ports, load_balancing, choice] + +# - name: Assert port channel load balancing method invalid validation failed +# assert: +# that: +# - result_port_channel_load_balancing_method_invalid.failed == true +# - "'not a valid choice' in result_port_channel_load_balancing_method_invalid.msg or 'INVALID_METHOD' in result_port_channel_load_balancing_method_invalid.msg" +# fail_msg: "Port channel 
load balancing method invalid validation should have failed" +# success_msg: "Port channel load balancing method invalid validation correctly failed" +# tags: [negative, logical_ports, load_balancing, choice] + +# - name: Test port channel load balancing method validation - integer value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ logical_ports_vars_map.test_port_channel_load_balancing_method_integer }}" +# register: result_port_channel_load_balancing_method_integer +# ignore_errors: yes +# tags: [negative, logical_ports, load_balancing, integer] + +# - name: Assert port channel load balancing method integer validation failed +# assert: +# that: +# - result_port_channel_load_balancing_method_integer.failed == true +# - "'must be of type string' in result_port_channel_load_balancing_method_integer.msg" +# fail_msg: "Port channel load balancing method integer validation should have failed" +# success_msg: "Port channel load balancing method integer validation correctly failed" +# tags: [negative, logical_ports, load_balancing, integer] + +# - name: Test port channels validation - string instead of list should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ logical_ports_vars_map.test_port_channels_invalid_list_type }}" +# register: result_port_channels_invalid_list_type +# ignore_errors: yes +# tags: [negative, logical_ports, port_channels, list] + +# - name: Assert port channels list type validation failed +# assert: +# that: +# - result_port_channels_invalid_list_type.failed == true +# - "'must be a list of dictionaries' in result_port_channels_invalid_list_type.msg or 'port_channels' in result_port_channels_invalid_list_type.msg" +# fail_msg: "Port channels list type validation should have failed" +# success_msg: "Port channels list type validation correctly failed" +# tags: [negative, logical_ports, port_channels, list] + +# - name: Test port channels validation - dictionary instead of list should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ logical_ports_vars_map.test_port_channels_invalid_dict_type }}" +# register: result_port_channels_invalid_dict_type +# ignore_errors: yes +# tags: [negative, logical_ports, port_channels, dict] + +# - name: Assert port channels dictionary type validation failed +# assert: +# that: +# - result_port_channels_invalid_dict_type.failed == true +# - "'must be a list of dictionaries' in result_port_channels_invalid_dict_type.msg or 'port_channels' in result_port_channels_invalid_dict_type.msg" +# fail_msg: "Port channels dictionary type validation should have failed" +# success_msg: "Port channels dictionary type validation correctly failed" +# tags: [negative, logical_ports, port_channels, dict] + +# ############################################# +# # Port Channel Configuration Tests # +# ############################################# + +# - name: Test port channel item validation - string instead of dictionary should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ logical_ports_vars_map.test_port_channel_item_invalid_dict_type }}" +# register: result_port_channel_item_invalid_dict_type +# ignore_errors: yes +# tags: [negative, logical_ports, port_channel_item, dict] + +# - name: Assert port channel item dictionary type validation failed +# assert: +# that: +# - 
result_port_channel_item_invalid_dict_type.failed == true +# - "'must be a dictionary' in result_port_channel_item_invalid_dict_type.msg or 'port_channels' in result_port_channel_item_invalid_dict_type.msg" +# fail_msg: "Port channel item dictionary type validation should have failed" +# success_msg: "Port channel item dictionary type validation correctly failed" +# tags: [negative, logical_ports, port_channel_item, dict] + +# - name: Test missing required port channel protocol +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ logical_ports_vars_map.test_port_channel_protocol_missing }}" +# register: result_port_channel_protocol_missing +# ignore_errors: yes +# tags: [negative, logical_ports, port_channel_protocol, required] + +# - name: Assert missing port channel protocol validation failed +# assert: +# that: +# - result_port_channel_protocol_missing.failed == true +# - "'port_channel_protocol' in result_port_channel_protocol_missing.msg or 'required' in result_port_channel_protocol_missing.msg" +# fail_msg: "Missing port channel protocol validation should have failed" +# success_msg: "Missing port channel protocol validation correctly failed" +# tags: [negative, logical_ports, port_channel_protocol, required] + +# - name: Test port channel protocol validation - invalid choice should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ logical_ports_vars_map.test_port_channel_protocol_invalid }}" +# register: result_port_channel_protocol_invalid +# ignore_errors: yes +# tags: [negative, logical_ports, port_channel_protocol, choice] + +# - name: Assert port channel protocol invalid validation failed +# assert: +# that: +# - result_port_channel_protocol_invalid.failed == true +# - "'not a valid choice' in result_port_channel_protocol_invalid.msg or 'INVALID_PROTOCOL' in result_port_channel_protocol_invalid.msg" +# fail_msg: "Port channel protocol invalid validation should have failed" +# success_msg: "Port channel protocol invalid validation correctly failed" +# tags: [negative, logical_ports, port_channel_protocol, choice] + +# - name: Test port channel protocol validation - integer value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ logical_ports_vars_map.test_port_channel_protocol_integer }}" +# register: result_port_channel_protocol_integer +# ignore_errors: yes +# tags: [negative, logical_ports, port_channel_protocol, integer] + +# - name: Assert port channel protocol integer validation failed +# assert: +# that: +# - result_port_channel_protocol_integer.failed == true +# - "'must be of type string' in result_port_channel_protocol_integer.msg" +# fail_msg: "Port channel protocol integer validation should have failed" +# success_msg: "Port channel protocol integer validation correctly failed" +# tags: [negative, logical_ports, port_channel_protocol, integer] + +# - name: Test missing required port channel name +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ logical_ports_vars_map.test_port_channel_name_missing }}" +# register: result_port_channel_name_missing +# ignore_errors: yes +# tags: [negative, logical_ports, port_channel_name, required] + +# - name: Assert missing port channel name validation failed +# assert: +# that: +# - result_port_channel_name_missing.failed == true +# - "'port_channel_name' in 
result_port_channel_name_missing.msg or 'required' in result_port_channel_name_missing.msg" +# fail_msg: "Missing port channel name validation should have failed" +# success_msg: "Missing port channel name validation correctly failed" +# tags: [negative, logical_ports, port_channel_name, required] + +# - name: Test port channel name validation - integer value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ logical_ports_vars_map.test_port_channel_name_integer }}" +# register: result_port_channel_name_integer +# ignore_errors: yes +# tags: [negative, logical_ports, port_channel_name, integer] + +# - name: Assert port channel name integer validation failed +# assert: +# that: +# - result_port_channel_name_integer.failed == true +# - "'must be of type string' in result_port_channel_name_integer.msg" +# fail_msg: "Port channel name integer validation should have failed" +# success_msg: "Port channel name integer validation correctly failed" +# tags: [negative, logical_ports, port_channel_name, integer] + +# - name: Test port channel name validation - too short should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ logical_ports_vars_map.test_port_channel_name_too_short }}" +# register: result_port_channel_name_too_short +# ignore_errors: yes +# tags: [negative, logical_ports, port_channel_name, length] + +# - name: Assert port channel name too short validation failed +# assert: +# that: +# - result_port_channel_name_too_short.failed == true +# - "'length' in result_port_channel_name_too_short.msg or 'characters' in result_port_channel_name_too_short.msg" +# fail_msg: "Port channel name too short validation should have failed" +# success_msg: "Port channel name too short validation correctly failed" +# tags: [negative, logical_ports, port_channel_name, length] + +# - name: Test port channel name validation - too long should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ logical_ports_vars_map.test_port_channel_name_too_long }}" +# register: result_port_channel_name_too_long +# ignore_errors: yes +# tags: [negative, logical_ports, port_channel_name, length] + +# - name: Assert port channel name too long validation failed +# assert: +# that: +# - result_port_channel_name_too_long.failed == true +# - "'length' in result_port_channel_name_too_long.msg or 'characters' in result_port_channel_name_too_long.msg" +# fail_msg: "Port channel name too long validation should have failed" +# success_msg: "Port channel name too long validation correctly failed" +# tags: [negative, logical_ports, port_channel_name, length] + +# - name: Test port channel name validation - empty string should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ logical_ports_vars_map.test_port_channel_name_empty }}" +# register: result_port_channel_name_empty +# ignore_errors: yes +# tags: [negative, logical_ports, port_channel_name, empty] + +# - name: Assert port channel name empty validation failed +# assert: +# that: +# - result_port_channel_name_empty.failed == true +# - "'empty' in result_port_channel_name_empty.msg or 'port_channel_name' in result_port_channel_name_empty.msg" +# fail_msg: "Port channel name empty validation should have failed" +# success_msg: "Port channel name empty validation correctly failed" +# tags: [negative, logical_ports, 
port_channel_name, empty] + +# - name: Test port channel min links validation - string value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ logical_ports_vars_map.test_port_channel_min_links_string }}" +# register: result_port_channel_min_links_string +# ignore_errors: yes +# tags: [negative, logical_ports, port_channel_min_links, string] + +# - name: Assert port channel min links string validation failed +# assert: +# that: +# - result_port_channel_min_links_string.failed == true +# - "'must be of type integer' in result_port_channel_min_links_string.msg" +# fail_msg: "Port channel min links string validation should have failed" +# success_msg: "Port channel min links string validation correctly failed" +# tags: [negative, logical_ports, port_channel_min_links, string] + +# - name: Test port channel min links validation - negative value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ logical_ports_vars_map.test_port_channel_min_links_negative }}" +# register: result_port_channel_min_links_negative +# ignore_errors: yes +# tags: [negative, logical_ports, port_channel_min_links, range] + +# - name: Assert port channel min links negative validation failed +# assert: +# that: +# - result_port_channel_min_links_negative.failed == true +# - "'must be within the range' in result_port_channel_min_links_negative.msg or 'min_links' in result_port_channel_min_links_negative.msg" +# fail_msg: "Port channel min links negative validation should have failed" +# success_msg: "Port channel min links negative validation correctly failed" +# tags: [negative, logical_ports, port_channel_min_links, range] + +# - name: Test port channel min links validation - zero value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ logical_ports_vars_map.test_port_channel_min_links_zero }}" +# register: result_port_channel_min_links_zero +# ignore_errors: yes +# tags: [negative, logical_ports, port_channel_min_links, range] + +# - name: Assert port channel min links zero validation failed +# assert: +# that: +# - result_port_channel_min_links_zero.failed == true +# - "'must be within the range' in result_port_channel_min_links_zero.msg or 'min_links' in result_port_channel_min_links_zero.msg" +# fail_msg: "Port channel min links zero validation should have failed" +# success_msg: "Port channel min links zero validation correctly failed" +# tags: [negative, logical_ports, port_channel_min_links, range] + +# - name: Test port channel min links validation - exceeds maximum should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ logical_ports_vars_map.test_port_channel_min_links_max }}" +# register: result_port_channel_min_links_max +# ignore_errors: yes +# tags: [negative, logical_ports, port_channel_min_links, range] + +# - name: Assert port channel min links maximum validation failed +# assert: +# that: +# - result_port_channel_min_links_max.failed == true +# - "'must be within the range' in result_port_channel_min_links_max.msg or 'min_links' in result_port_channel_min_links_max.msg" +# fail_msg: "Port channel min links maximum validation should have failed" +# success_msg: "Port channel min links maximum validation correctly failed" +# tags: [negative, logical_ports, port_channel_min_links, range] + +# - name: Test missing required 
port channel members +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ logical_ports_vars_map.test_port_channel_members_missing }}" +# register: result_port_channel_members_missing +# ignore_errors: yes +# tags: [negative, logical_ports, port_channel_members, required] + +# - name: Assert missing port channel members validation failed +# assert: +# that: +# - result_port_channel_members_missing.failed == true +# - "'port_channel_members' in result_port_channel_members_missing.msg or 'required' in result_port_channel_members_missing.msg" +# fail_msg: "Missing port channel members validation should have failed" +# success_msg: "Missing port channel members validation correctly failed" +# tags: [negative, logical_ports, port_channel_members, required] + +# - name: Test port channel members validation - string instead of list should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ logical_ports_vars_map.test_port_channel_members_invalid_list_type }}" +# register: result_port_channel_members_invalid_list_type +# ignore_errors: yes +# tags: [negative, logical_ports, port_channel_members, list] + +# - name: Assert port channel members list type validation failed +# assert: +# that: +# - result_port_channel_members_invalid_list_type.failed == true +# - "'must be of type list' in result_port_channel_members_invalid_list_type.msg" +# fail_msg: "Port channel members list type validation should have failed" +# success_msg: "Port channel members list type validation correctly failed" +# tags: [negative, logical_ports, port_channel_members, list] + +# - name: Test port channel members validation - dictionary instead of list should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ logical_ports_vars_map.test_port_channel_members_invalid_dict_type }}" +# register: result_port_channel_members_invalid_dict_type +# ignore_errors: yes +# tags: [negative, logical_ports, port_channel_members, dict] + +# - name: Assert port channel members dictionary type validation failed +# assert: +# that: +# - result_port_channel_members_invalid_dict_type.failed == true +# - "'must be of type list' in result_port_channel_members_invalid_dict_type.msg" +# fail_msg: "Port channel members dictionary type validation should have failed" +# success_msg: "Port channel members dictionary type validation correctly failed" +# tags: [negative, logical_ports, port_channel_members, dict] + +# - name: Test port channel members validation - empty list should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ logical_ports_vars_map.test_port_channel_members_empty_list }}" +# register: result_port_channel_members_empty_list +# ignore_errors: yes +# tags: [negative, logical_ports, port_channel_members, empty] + +# - name: Assert port channel members empty list validation failed +# assert: +# that: +# - result_port_channel_members_empty_list.failed == true +# - "'memberPorts: may not be null' in result_port_channel_members_empty_list.msg or 'port_channel_members' in result_port_channel_members_empty_list.msg" +# fail_msg: "Port channel members empty list validation should have failed" +# success_msg: "Port channel members empty list validation correctly failed" +# tags: [negative, logical_ports, port_channel_members, empty] + +# ############################################# +# # Port 
Channel Member Configuration Tests # +# ############################################# + +# - name: Test port channel member item validation - string instead of dictionary should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ logical_ports_vars_map.test_port_channel_member_invalid_dict_type }}" +# register: result_port_channel_member_invalid_dict_type +# ignore_errors: yes +# tags: [negative, logical_ports, port_channel_member, dict] + +# - name: Assert port channel member dictionary type validation failed +# assert: +# that: +# - result_port_channel_member_invalid_dict_type.failed == true +# - "'must be a dictionary' in result_port_channel_member_invalid_dict_type.msg or 'port_channel_members' in result_port_channel_member_invalid_dict_type.msg" +# fail_msg: "Port channel member dictionary type validation should have failed" +# success_msg: "Port channel member dictionary type validation correctly failed" +# tags: [negative, logical_ports, port_channel_member, dict] + +# - name: Test missing required port channel interface name +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ logical_ports_vars_map.test_port_channel_interface_name_missing }}" +# register: result_port_channel_interface_name_missing +# ignore_errors: yes +# tags: [negative, logical_ports, port_channel_interface_name, required] + +# - name: Assert missing port channel interface name validation failed +# assert: +# that: +# - result_port_channel_interface_name_missing.failed == true +# - "'port_channel_interface_name' in result_port_channel_interface_name_missing.msg or 'required' in result_port_channel_interface_name_missing.msg" +# fail_msg: "Missing port channel interface name validation should have failed" +# success_msg: "Missing port channel interface name validation correctly failed" +# tags: [negative, logical_ports, port_channel_interface_name, required] + +# - name: Test port channel interface name validation - integer value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ logical_ports_vars_map.test_port_channel_interface_name_integer }}" +# register: result_port_channel_interface_name_integer +# ignore_errors: yes +# tags: [negative, logical_ports, port_channel_interface_name, integer] + +# - name: Assert port channel interface name integer validation failed +# assert: +# that: +# - result_port_channel_interface_name_integer.failed == true +# - "'must be of type string' in result_port_channel_interface_name_integer.msg" +# fail_msg: "Port channel interface name integer validation should have failed" +# success_msg: "Port channel interface name integer validation correctly failed" +# tags: [negative, logical_ports, port_channel_interface_name, integer] + +# - name: Test port channel mode validation - invalid choice for LACP should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ logical_ports_vars_map.test_port_channel_mode_invalid_lacp }}" +# register: result_port_channel_mode_invalid_lacp +# ignore_errors: yes +# tags: [negative, logical_ports, port_channel_mode, lacp] + +# - name: Assert port channel mode invalid LACP validation failed +# assert: +# that: +# - result_port_channel_mode_invalid_lacp.failed == true +# - "'not valid for LACP' in result_port_channel_mode_invalid_lacp.msg or 'DESIRABLE' in result_port_channel_mode_invalid_lacp.msg" 
+# fail_msg: "Port channel mode invalid LACP validation should have failed" +# success_msg: "Port channel mode invalid LACP validation correctly failed" +# tags: [negative, logical_ports, port_channel_mode, lacp] + +# - name: Test port channel mode validation - invalid choice for PAGP should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ logical_ports_vars_map.test_port_channel_mode_invalid_pagp }}" +# register: result_port_channel_mode_invalid_pagp +# ignore_errors: yes +# tags: [negative, logical_ports, port_channel_mode, pagp] + +# - name: Assert port channel mode invalid PAGP validation failed +# assert: +# that: +# - result_port_channel_mode_invalid_pagp.failed == true +# - "'not valid for PAGP' in result_port_channel_mode_invalid_pagp.msg or 'ACTIVE' in result_port_channel_mode_invalid_pagp.msg" +# fail_msg: "Port channel mode invalid PAGP validation should have failed" +# success_msg: "Port channel mode invalid PAGP validation correctly failed" +# tags: [negative, logical_ports, port_channel_mode, pagp] + +# - name: Test port channel mode validation - invalid choice for NONE should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ logical_ports_vars_map.test_port_channel_mode_invalid_none }}" +# register: result_port_channel_mode_invalid_none +# ignore_errors: yes +# tags: [negative, logical_ports, port_channel_mode, none] + +# - name: Assert port channel mode invalid NONE validation failed +# assert: +# that: +# - result_port_channel_mode_invalid_none.failed == true +# - "'not valid for NONE' in result_port_channel_mode_invalid_none.msg or 'ACTIVE' in result_port_channel_mode_invalid_none.msg" +# fail_msg: "Port channel mode invalid NONE validation should have failed" +# success_msg: "Port channel mode invalid NONE validation correctly failed" +# tags: [negative, logical_ports, port_channel_mode, none] + +# - name: Test port channel mode validation - integer value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ logical_ports_vars_map.test_port_channel_mode_integer }}" +# register: result_port_channel_mode_integer +# ignore_errors: yes +# tags: [negative, logical_ports, port_channel_mode, integer] + +# - name: Assert port channel mode integer validation failed +# assert: +# that: +# - result_port_channel_mode_integer.failed == true +# - "'must be of type string' in result_port_channel_mode_integer.msg" +# fail_msg: "Port channel mode integer validation should have failed" +# success_msg: "Port channel mode integer validation correctly failed" +# tags: [negative, logical_ports, port_channel_mode, integer] + +# - name: Test port channel port priority validation - string value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ logical_ports_vars_map.test_port_channel_port_priority_string }}" +# register: result_port_channel_port_priority_string +# ignore_errors: yes +# tags: [negative, logical_ports, port_channel_port_priority, string] + +# - name: Assert port channel port priority string validation failed +# assert: +# that: +# - result_port_channel_port_priority_string.failed == true +# - "'must be of type integer' in result_port_channel_port_priority_string.msg" +# fail_msg: "Port channel port priority string validation should have failed" +# success_msg: "Port channel port priority string validation 
correctly failed" +# tags: [negative, logical_ports, port_channel_port_priority, string] + +# - name: Test port channel port priority validation - negative value for LACP should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ logical_ports_vars_map.test_port_channel_port_priority_negative_lacp }}" +# register: result_port_channel_port_priority_negative_lacp +# ignore_errors: yes +# tags: [negative, logical_ports, port_channel_port_priority, lacp, range] + +# - name: Assert port channel port priority negative LACP validation failed +# assert: +# that: +# - result_port_channel_port_priority_negative_lacp.failed == true +# - "'must be within the range' in result_port_channel_port_priority_negative_lacp.msg or 'priority' in result_port_channel_port_priority_negative_lacp.msg" +# fail_msg: "Port channel port priority negative LACP validation should have failed" +# success_msg: "Port channel port priority negative LACP validation correctly failed" +# tags: [negative, logical_ports, port_channel_port_priority, lacp, range] + +# - name: Test port channel port priority validation - exceeds maximum for LACP should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ logical_ports_vars_map.test_port_channel_port_priority_max_lacp }}" +# register: result_port_channel_port_priority_max_lacp +# ignore_errors: yes +# tags: [negative, logical_ports, port_channel_port_priority, lacp, range] + +# - name: Assert port channel port priority maximum LACP validation failed +# assert: +# that: +# - result_port_channel_port_priority_max_lacp.failed == true +# - "'must be within the range' in result_port_channel_port_priority_max_lacp.msg or 'priority' in result_port_channel_port_priority_max_lacp.msg" +# fail_msg: "Port channel port priority maximum LACP validation should have failed" +# success_msg: "Port channel port priority maximum LACP validation correctly failed" +# tags: [negative, logical_ports, port_channel_port_priority, lacp, range] + +# - name: Test port channel port priority validation - exceeds maximum for PAGP should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ logical_ports_vars_map.test_port_channel_port_priority_max_pagp }}" +# register: result_port_channel_port_priority_max_pagp +# ignore_errors: yes +# tags: [negative, logical_ports, port_channel_port_priority, pagp, range] + +# - name: Assert port channel port priority maximum PAGP validation failed +# assert: +# that: +# - result_port_channel_port_priority_max_pagp.failed == true +# - "'must be within the range' in result_port_channel_port_priority_max_pagp.msg or 'priority' in result_port_channel_port_priority_max_pagp.msg" +# fail_msg: "Port channel port priority maximum PAGP validation should have failed" +# success_msg: "Port channel port priority maximum PAGP validation correctly failed" +# tags: [negative, logical_ports, port_channel_port_priority, pagp, range] + +# - name: Test port channel rate validation - string value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ logical_ports_vars_map.test_port_channel_rate_string }}" +# register: result_port_channel_rate_string +# ignore_errors: yes +# tags: [negative, logical_ports, port_channel_rate, string] + +# - name: Assert port channel rate string validation failed +# assert: +# that: +# - 
result_port_channel_rate_string.failed == true +# - "'must be an integer (1 or 30)' in result_port_channel_rate_string.msg" +# fail_msg: "Port channel rate string validation should have failed" +# success_msg: "Port channel rate string validation correctly failed" +# tags: [negative, logical_ports, port_channel_rate, string] + +# - name: Test port channel rate validation - invalid choice should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ logical_ports_vars_map.test_port_channel_rate_invalid }}" +# register: result_port_channel_rate_invalid +# ignore_errors: yes +# tags: [negative, logical_ports, port_channel_rate, choice] + +# - name: Assert port channel rate invalid validation failed +# assert: +# that: +# - result_port_channel_rate_invalid.failed == true +# - "'not a valid choice' in result_port_channel_rate_invalid.msg or 'rate' in result_port_channel_rate_invalid.msg" +# fail_msg: "Port channel rate invalid validation should have failed" +# success_msg: "Port channel rate invalid validation correctly failed" +# tags: [negative, logical_ports, port_channel_rate, choice] + +# - name: Test port channel learn method validation - invalid choice should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ logical_ports_vars_map.test_port_channel_learn_method_invalid }}" +# register: result_port_channel_learn_method_invalid +# ignore_errors: yes +# tags: [negative, logical_ports, port_channel_learn_method, choice] + +# - name: Assert port channel learn method invalid validation failed +# assert: +# that: +# - result_port_channel_learn_method_invalid.failed == true +# - "'not a valid choice' in result_port_channel_learn_method_invalid.msg or 'INVALID_METHOD' in result_port_channel_learn_method_invalid.msg" +# fail_msg: "Port channel learn method invalid validation should have failed" +# success_msg: "Port channel learn method invalid validation correctly failed" +# tags: [negative, logical_ports, port_channel_learn_method, choice] + +# - name: Test port channel learn method validation - integer value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ logical_ports_vars_map.test_port_channel_learn_method_integer }}" +# register: result_port_channel_learn_method_integer +# ignore_errors: yes +# tags: [negative, logical_ports, port_channel_learn_method, integer] + +# - name: Assert port channel learn method integer validation failed +# assert: +# that: +# - result_port_channel_learn_method_integer.failed == true +# - "'must be of type string' in result_port_channel_learn_method_integer.msg" +# fail_msg: "Port channel learn method integer validation should have failed" +# success_msg: "Port channel learn method integer validation correctly failed" +# tags: [negative, logical_ports, port_channel_learn_method, integer] + +# - name: Test port channel learn method on non-PAGP protocol should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ logical_ports_vars_map.test_port_channel_learn_method_non_pagp }}" +# register: result_port_channel_learn_method_non_pagp +# ignore_errors: yes +# tags: [negative, logical_ports, port_channel_learn_method, protocol] + +# - name: Assert port channel learn method non-PAGP validation failed +# assert: +# that: +# - result_port_channel_learn_method_non_pagp.failed == true +# - "'only applicable for 
PAGP' in result_port_channel_learn_method_non_pagp.msg or 'learn_method' in result_port_channel_learn_method_non_pagp.msg" +# fail_msg: "Port channel learn method non-PAGP validation should have failed" +# success_msg: "Port channel learn method non-PAGP validation correctly failed" +# tags: [negative, logical_ports, port_channel_learn_method, protocol] + + +# # ################################################################################################################## +# # Positive Test Cases for Logical Ports + +# ############################################# +# # POSITIVE TEST CASES - CREATE # +# ############################################# + +# - name: Create logical ports configuration with global parameters only - auto enabled +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ logical_ports_vars_map.test_logical_ports_create_global_auto_enabled }}" +# register: result_logical_ports_create_global_auto_enabled +# tags: [positive, logical_ports, create, global, auto] + +# - name: Assert logical ports global auto enabled creation succeeded +# assert: +# that: +# - result_logical_ports_create_global_auto_enabled.changed == true +# - result_logical_ports_create_global_auto_enabled.response is defined +# fail_msg: "Logical ports global auto enabled creation should have succeeded" +# success_msg: "Logical ports global auto enabled creation succeeded" +# tags: [positive, logical_ports, create, global, auto] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, logical_ports, create] + +# - name: Create logical ports configuration with global parameters only - auto disabled +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ logical_ports_vars_map.test_logical_ports_create_global_auto_disabled }}" +# register: result_logical_ports_create_global_auto_disabled +# tags: [positive, logical_ports, create, global, manual] + +# - name: Assert logical ports global auto disabled creation succeeded +# assert: +# that: +# - result_logical_ports_create_global_auto_disabled.changed == true +# - result_logical_ports_create_global_auto_disabled.response is defined +# fail_msg: "Logical ports global auto disabled creation should have succeeded" +# success_msg: "Logical ports global auto disabled creation succeeded" +# tags: [positive, logical_ports, create, global, manual] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, logical_ports, create] + +# - name: Create logical ports configuration with single LACP port channel - minimal +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ logical_ports_vars_map.test_logical_ports_create_single_lacp_minimal }}" +# register: result_logical_ports_create_single_lacp_minimal +# tags: [positive, logical_ports, create, lacp, minimal] + +# - name: Assert logical ports single LACP minimal creation succeeded +# assert: +# that: +# - result_logical_ports_create_single_lacp_minimal.changed == true +# - result_logical_ports_create_single_lacp_minimal.response is defined +# fail_msg: "Logical ports single LACP minimal creation should have succeeded" +# success_msg: "Logical ports single LACP minimal creation succeeded" +# tags: [positive, logical_ports, create, lacp, minimal] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, logical_ports, create] + +# - name: 
Create logical ports configuration with single LACP port channel - comprehensive +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ logical_ports_vars_map.test_logical_ports_create_single_lacp_comprehensive }}" +# register: result_logical_ports_create_single_lacp_comprehensive +# tags: [positive, logical_ports, create, lacp, comprehensive] + +# - name: Assert logical ports single LACP comprehensive creation succeeded +# assert: +# that: +# - result_logical_ports_create_single_lacp_comprehensive.changed == true +# - result_logical_ports_create_single_lacp_comprehensive.response is defined +# fail_msg: "Logical ports single LACP comprehensive creation should have succeeded" +# success_msg: "Logical ports single LACP comprehensive creation succeeded" +# tags: [positive, logical_ports, create, lacp, comprehensive] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, logical_ports, create] + +# - name: Create logical ports configuration with single PAGP port channel - minimal +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ logical_ports_vars_map.test_logical_ports_create_single_pagp_minimal }}" +# register: result_logical_ports_create_single_pagp_minimal +# tags: [positive, logical_ports, create, pagp, minimal] + +# - name: Assert logical ports single PAGP minimal creation succeeded +# assert: +# that: +# - result_logical_ports_create_single_pagp_minimal.changed == true +# - result_logical_ports_create_single_pagp_minimal.response is defined +# fail_msg: "Logical ports single PAGP minimal creation should have succeeded" +# success_msg: "Logical ports single PAGP minimal creation succeeded" +# tags: [positive, logical_ports, create, pagp, minimal] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, logical_ports, create] + +# - name: Create logical ports configuration with single PAGP port channel - comprehensive +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ logical_ports_vars_map.test_logical_ports_create_single_pagp_comprehensive }}" +# register: result_logical_ports_create_single_pagp_comprehensive +# tags: [positive, logical_ports, create, pagp, comprehensive] + +# - name: Assert logical ports single PAGP comprehensive creation succeeded +# assert: +# that: +# - result_logical_ports_create_single_pagp_comprehensive.changed == true +# - result_logical_ports_create_single_pagp_comprehensive.response is defined +# fail_msg: "Logical ports single PAGP comprehensive creation should have succeeded" +# success_msg: "Logical ports single PAGP comprehensive creation succeeded" +# tags: [positive, logical_ports, create, pagp, comprehensive] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, logical_ports, create] + +# - name: Create logical ports configuration with single static port channel - minimal +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ logical_ports_vars_map.test_logical_ports_create_single_static_minimal }}" +# register: result_logical_ports_create_single_static_minimal +# tags: [positive, logical_ports, create, static, minimal] + +# - name: Assert logical ports single static minimal creation succeeded +# assert: +# that: +# - result_logical_ports_create_single_static_minimal.changed == true +# - 
result_logical_ports_create_single_static_minimal.response is defined +# fail_msg: "Logical ports single static minimal creation should have succeeded" +# success_msg: "Logical ports single static minimal creation succeeded" +# tags: [positive, logical_ports, create, static, minimal] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, logical_ports, create] + +# - name: Create logical ports configuration with single static port channel - comprehensive +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ logical_ports_vars_map.test_logical_ports_create_single_static_comprehensive }}" +# register: result_logical_ports_create_single_static_comprehensive +# tags: [positive, logical_ports, create, static, comprehensive] + +# - name: Assert logical ports single static comprehensive creation succeeded +# assert: +# that: +# - result_logical_ports_create_single_static_comprehensive.changed == true +# - result_logical_ports_create_single_static_comprehensive.response is defined +# fail_msg: "Logical ports single static comprehensive creation should have succeeded" +# success_msg: "Logical ports single static comprehensive creation succeeded" +# tags: [positive, logical_ports, create, static, comprehensive] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, logical_ports, create] + +# - name: Create logical ports configuration with multiple port channels - different protocols +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ logical_ports_vars_map.test_logical_ports_create_multiple_different_protocols }}" +# register: result_logical_ports_create_multiple_different_protocols +# tags: [positive, logical_ports, create, multiple, protocols] + +# - name: Assert logical ports multiple different protocols creation succeeded +# assert: +# that: +# - result_logical_ports_create_multiple_different_protocols.changed == true +# - result_logical_ports_create_multiple_different_protocols.response is defined +# fail_msg: "Logical ports multiple different protocols creation should have succeeded" +# success_msg: "Logical ports multiple different protocols creation succeeded" +# tags: [positive, logical_ports, create, multiple, protocols] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, logical_ports, create] + +# - name: Create logical ports configuration with multiple LACP port channels +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ logical_ports_vars_map.test_logical_ports_create_multiple_lacp }}" +# register: result_logical_ports_create_multiple_lacp +# tags: [positive, logical_ports, create, multiple, lacp] + +# - name: Assert logical ports multiple LACP creation succeeded +# assert: +# that: +# - result_logical_ports_create_multiple_lacp.changed == true +# - result_logical_ports_create_multiple_lacp.response is defined +# fail_msg: "Logical ports multiple LACP creation should have succeeded" +# success_msg: "Logical ports multiple LACP creation succeeded" +# tags: [positive, logical_ports, create, multiple, lacp] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, logical_ports, create] + +# - name: Create logical ports configuration with all load balancing methods +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# 
state: merged +# config: +# - "{{ logical_ports_vars_map.test_logical_ports_create_all_load_balancing_methods }}" +# register: result_logical_ports_create_all_load_balancing_methods +# tags: [positive, logical_ports, create, load_balancing] + +# - name: Assert logical ports all load balancing methods creation succeeded +# assert: +# that: +# - result_logical_ports_create_all_load_balancing_methods.changed == true +# - result_logical_ports_create_all_load_balancing_methods.response is defined +# fail_msg: "Logical ports all load balancing methods creation should have succeeded" +# success_msg: "Logical ports all load balancing methods creation succeeded" +# tags: [positive, logical_ports, create, load_balancing] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, logical_ports, create] + +# - name: Create logical ports configuration with boundary values - minimum +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ logical_ports_vars_map.test_logical_ports_create_boundary_minimum_values }}" +# register: result_logical_ports_create_boundary_minimum_values +# tags: [positive, logical_ports, create, boundary, minimum] + +# - name: Assert logical ports boundary minimum values creation succeeded +# assert: +# that: +# - result_logical_ports_create_boundary_minimum_values.changed == true +# - result_logical_ports_create_boundary_minimum_values.response is defined +# fail_msg: "Logical ports boundary minimum values creation should have succeeded" +# success_msg: "Logical ports boundary minimum values creation succeeded" +# tags: [positive, logical_ports, create, boundary, minimum] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, logical_ports, create] + +# - name: Create logical ports configuration with boundary values - maximum +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ logical_ports_vars_map.test_logical_ports_create_boundary_maximum_values }}" +# register: result_logical_ports_create_boundary_maximum_values +# tags: [positive, logical_ports, create, boundary, maximum] + +# - name: Assert logical ports boundary maximum values creation succeeded +# assert: +# that: +# - result_logical_ports_create_boundary_maximum_values.changed == true +# - result_logical_ports_create_boundary_maximum_values.response is defined +# fail_msg: "Logical ports boundary maximum values creation should have succeeded" +# success_msg: "Logical ports boundary maximum values creation succeeded" +# tags: [positive, logical_ports, create, boundary, maximum] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, logical_ports, create] + +# - name: Create logical ports configuration with PAGP boundary values +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ logical_ports_vars_map.test_logical_ports_create_pagp_boundary_values }}" +# register: result_logical_ports_create_pagp_boundary_values +# tags: [positive, logical_ports, create, pagp, boundary] + +# - name: Assert logical ports PAGP boundary values creation succeeded +# assert: +# that: +# - result_logical_ports_create_pagp_boundary_values.changed == true +# - result_logical_ports_create_pagp_boundary_values.response is defined +# fail_msg: "Logical ports PAGP boundary values creation should have succeeded" +# success_msg: "Logical ports PAGP boundary 
values creation succeeded" +# tags: [positive, logical_ports, create, pagp, boundary] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, logical_ports, create] + +# - name: Create logical ports configuration with all LACP modes +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ logical_ports_vars_map.test_logical_ports_create_all_lacp_modes }}" +# register: result_logical_ports_create_all_lacp_modes +# tags: [positive, logical_ports, create, lacp, modes] + +# - name: Assert logical ports all LACP modes creation succeeded +# assert: +# that: +# - result_logical_ports_create_all_lacp_modes.changed == true +# - result_logical_ports_create_all_lacp_modes.response is defined +# fail_msg: "Logical ports all LACP modes creation should have succeeded" +# success_msg: "Logical ports all LACP modes creation succeeded" +# tags: [positive, logical_ports, create, lacp, modes] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, logical_ports, update] + +# ############################################# +# # POSITIVE TEST CASES - UPDATE # +# ############################################# + +# - name: Update logical ports configuration - global parameters only +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ logical_ports_vars_map.test_logical_ports_update_global_only }}" +# register: result_logical_ports_update_global_only +# tags: [positive, logical_ports, update, global] + +# - name: Assert logical ports global only update succeeded +# assert: +# that: +# - result_logical_ports_update_global_only.changed == true +# - result_logical_ports_update_global_only.response is defined +# fail_msg: "Logical ports global only update should have succeeded" +# success_msg: "Logical ports global only update succeeded" +# tags: [positive, logical_ports, update, global] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, logical_ports, update] + +# - name: Update logical ports configuration - change LACP system priority only +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ logical_ports_vars_map.test_logical_ports_update_lacp_priority_only }}" +# register: result_logical_ports_update_lacp_priority_only +# tags: [positive, logical_ports, update, lacp_priority_only] + +# - name: Assert logical ports LACP priority only update succeeded +# assert: +# that: +# - result_logical_ports_update_lacp_priority_only.changed == true +# - result_logical_ports_update_lacp_priority_only.response is defined +# fail_msg: "Logical ports LACP priority only update should have succeeded" +# success_msg: "Logical ports LACP priority only update succeeded" +# tags: [positive, logical_ports, update, lacp_priority_only] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, logical_ports, update] + +# - name: Update logical ports configuration - modify multiple port channels simultaneously +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ logical_ports_vars_map.test_logical_ports_update_multiple_port_channels }}" +# register: result_logical_ports_update_multiple_port_channels +# tags: [positive, logical_ports, update, multiple_channels] + +# - name: Assert logical ports multiple port channels update succeeded +# assert: +# that: 
+# - result_logical_ports_update_multiple_port_channels.changed == true +# - result_logical_ports_update_multiple_port_channels.response is defined +# fail_msg: "Logical ports multiple port channels update should have succeeded" +# success_msg: "Logical ports multiple port channels update succeeded" +# tags: [positive, logical_ports, update, multiple_channels] diff --git a/tests/integration/ccc_wired_campus_automation_management/tests/test_wca_feature_mld_snooping.yml b/tests/integration/ccc_wired_campus_automation_management/tests/test_wca_feature_mld_snooping.yml new file mode 100644 index 0000000000..671193f75f --- /dev/null +++ b/tests/integration/ccc_wired_campus_automation_management/tests/test_wca_feature_mld_snooping.yml @@ -0,0 +1,1042 @@ +# --- +# # =================================================================================================== +# # MLD SNOOPING FEATURE TESTS SUMMARY +# # =================================================================================================== +# # +# # This test suite validates MLD Snooping configuration for Wired Campus Automation. +# # +# # TEST CATEGORIES: +# # +# # 1. NEGATIVE VALIDATION TESTS (Lines 25-820) +# # a) MLD Global Configuration Tests +# # - Global enabled validation (string/integer instead of boolean) +# # - Global querier enabled validation (string/integer instead of boolean) +# # - Global querier address validation (IPv4 instead of IPv6, integer instead of string) +# # - Global querier version validation (invalid choice, integer instead of string) +# # - Global listener validation (string/integer instead of boolean) +# # - Global query interval validation (string/boolean instead of integer, range validation) +# # +# # b) MLD VLAN Configuration Tests +# # - VLAN ID validation (string instead of integer, negative/zero/max values) +# # - Missing required VLAN ID validation +# # - VLAN enabled validation (string/integer instead of boolean) +# # - VLAN immediate leave validation (string/integer instead of boolean) +# # - VLAN querier enabled validation (string/integer instead of boolean) +# # - VLAN querier address validation (IPv4 instead of IPv6, integer instead of string) +# # - VLAN querier version validation (invalid choice, integer instead of string) +# # - VLAN query interval validation (string instead of integer, range validation) +# # - VLAN mrouter port list validation (string instead of list, integer in list) +# # +# # c) Data Type and Structure Tests +# # - MLD configuration validation (string/list instead of dictionary) +# # - MLD VLANs validation (string/dictionary instead of list) +# # - MLD VLAN item validation (string instead of dictionary) +# # +# # 2. 
POSITIVE VALIDATION TESTS (Lines 820-1200) +# # a) MLD Creation Tests (Merged State) +# # - Global parameters only configuration +# # - VLANs only configuration +# # - Single VLAN configuration +# # - Global and VLANs combined configuration +# # - Minimal VLAN parameters configuration +# # - Multiple VLANs with different settings +# # +# # b) MLD Update Tests (Merged State) +# # - Global parameters only updates +# # - Modifying existing VLANs +# # - Global and VLANs combined updates +# # - Global enablement state changes +# # - Single VLAN updates +# # +# # VALIDATION RANGES: +# # - MLD Global Enabled: boolean (true/false) +# # - MLD Global Querier Enabled: boolean (true/false) +# # - MLD Global Querier Address: string (valid IPv6 address format) +# # - MLD Global Querier Version: string ("VERSION_1", "VERSION_2") +# # - MLD Global Listener: boolean (true/false) +# # - MLD Global Query Interval: integer (1-18000 seconds) +# # - MLD VLAN ID: integer (1-4094) +# # - MLD VLAN Enabled: boolean (true/false) +# # - MLD VLAN Immediate Leave: boolean (true/false) +# # - MLD VLAN Querier Enabled: boolean (true/false) +# # - MLD VLAN Querier Address: string (valid IPv6 address format) +# # - MLD VLAN Querier Version: string ("VERSION_1", "VERSION_2") +# # - MLD VLAN Query Interval: integer (1-18000 seconds) +# # - MLD VLAN Mrouter Port List: list of strings (interface names) +# # +# # EXPECTED BEHAVIORS: +# # - Negative tests should fail with appropriate error messages +# # - Positive tests should succeed with changed=true and response defined +# # - Global and VLAN configurations can be applied independently +# # - Multiple VLANs can be configured with different settings +# # - VLAN ID is required when configuring VLAN-specific settings +# # - All boolean fields reject string/integer values +# # - All integer fields reject string/boolean values +# # - Range validation enforced for numeric fields +# # - Structure validation enforced for complex data types +# # - IPv6 address validation enforced for querier addresses +# # +# # =================================================================================================== + +# - debug: msg="Starting MLD Snooping feature tests for Wired Campus Automation" +# - debug: msg="Role Path {{ role_path }}" + +# - block: +# - name: Load MLD Snooping test variables +# include_vars: +# file: "{{ role_path }}/vars/vars_wca_test_feature_mld_snooping.yml" +# name: mld_vars_map +# vars: +# dnac_login: &dnac_login +# dnac_host: "{{ dnac_host }}" +# dnac_username: "{{ dnac_username }}" +# dnac_password: "{{ dnac_password }}" +# dnac_verify: "{{ dnac_verify }}" +# dnac_port: "{{ dnac_port }}" +# dnac_version: "{{ dnac_version }}" +# dnac_debug: "{{ dnac_debug }}" +# dnac_log: true +# dnac_log_level: DEBUG +# config_verify: true +# dnac_api_task_timeout: "{{ dnac_api_task_timeout }}" +# dnac_log_append: false + +# ################################################################################################################### +# # Negative Validation Tests for MLD Snooping + +# ############################################# +# # MLD Global Configuration Tests # +# ############################################# + +# - name: Test MLD global enabled validation - string value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ mld_vars_map.test_mld_global_enabled_string }}" +# register: result_mld_global_enabled_string +# ignore_errors: yes +# tags: [negative, mld, global, enabled] + +# - name: 
Assert MLD global enabled string validation failed +# assert: +# that: +# - result_mld_global_enabled_string.failed == true +# - "'must be of type boolean' in result_mld_global_enabled_string.msg" +# fail_msg: "MLD global enabled string validation should have failed" +# success_msg: "MLD global enabled string validation correctly failed" +# tags: [negative, mld, global, enabled] + +# - name: Test MLD global enabled validation - integer value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ mld_vars_map.test_mld_global_enabled_integer }}" +# register: result_mld_global_enabled_integer +# ignore_errors: yes +# tags: [negative, mld, global, enabled] + +# - name: Assert MLD global enabled integer validation failed +# assert: +# that: +# - result_mld_global_enabled_integer.failed == true +# - "'must be of type boolean' in result_mld_global_enabled_integer.msg" +# fail_msg: "MLD global enabled integer validation should have failed" +# success_msg: "MLD global enabled integer validation correctly failed" +# tags: [negative, mld, global, enabled] + +# - name: Test MLD global querier enabled validation - string value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ mld_vars_map.test_mld_global_querier_enabled_string }}" +# register: result_mld_global_querier_enabled_string +# ignore_errors: yes +# tags: [negative, mld, global, querier] + +# - name: Assert MLD global querier enabled string validation failed +# assert: +# that: +# - result_mld_global_querier_enabled_string.failed == true +# - "'must be of type boolean' in result_mld_global_querier_enabled_string.msg" +# fail_msg: "MLD global querier enabled string validation should have failed" +# success_msg: "MLD global querier enabled string validation correctly failed" +# tags: [negative, mld, global, querier] + +# - name: Test MLD global querier enabled validation - integer value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ mld_vars_map.test_mld_global_querier_enabled_integer }}" +# register: result_mld_global_querier_enabled_integer +# ignore_errors: yes +# tags: [negative, mld, global, querier] + +# - name: Assert MLD global querier enabled integer validation failed +# assert: +# that: +# - result_mld_global_querier_enabled_integer.failed == true +# - "'must be of type boolean' in result_mld_global_querier_enabled_integer.msg" +# fail_msg: "MLD global querier enabled integer validation should have failed" +# success_msg: "MLD global querier enabled integer validation correctly failed" +# tags: [negative, mld, global, querier] + +# - name: Test MLD global querier address validation - IPv4 instead of IPv6 should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ mld_vars_map.test_mld_global_querier_address_invalid }}" +# register: result_mld_global_querier_address_invalid +# ignore_errors: yes +# tags: [negative, mld, global, querier_address] + +# - name: Assert MLD global querier address IPv4 validation failed +# assert: +# that: +# - result_mld_global_querier_address_invalid.failed == true +# - "'IPv6' in result_mld_global_querier_address_invalid.msg or 'invalid format' in result_mld_global_querier_address_invalid.msg" +# fail_msg: "MLD global querier address IPv4 validation should have failed" +# success_msg: "MLD global querier address IPv4 validation 
correctly failed" +# tags: [negative, mld, global, querier_address] + +# - name: Test MLD global querier version validation - invalid choice should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ mld_vars_map.test_mld_global_querier_version_invalid }}" +# register: result_mld_global_querier_version_invalid +# ignore_errors: yes +# tags: [negative, mld, global, querier_version] + +# - name: Assert MLD global querier version invalid validation failed +# assert: +# that: +# - result_mld_global_querier_version_invalid.failed == true +# - "'not a valid choice' in result_mld_global_querier_version_invalid.msg or 'VERSION_3' in result_mld_global_querier_version_invalid.msg" +# fail_msg: "MLD global querier version invalid validation should have failed" +# success_msg: "MLD global querier version invalid validation correctly failed" +# tags: [negative, mld, global, querier_version] + +# - name: Test MLD global querier version validation - integer value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ mld_vars_map.test_mld_global_querier_version_integer }}" +# register: result_mld_global_querier_version_integer +# ignore_errors: yes +# tags: [negative, mld, global, querier_version] + +# - name: Assert MLD global querier version integer validation failed +# assert: +# that: +# - result_mld_global_querier_version_integer.failed == true +# - "'must be of type string' in result_mld_global_querier_version_integer.msg" +# fail_msg: "MLD global querier version integer validation should have failed" +# success_msg: "MLD global querier version integer validation correctly failed" +# tags: [negative, mld, global, querier_version] + +# - name: Test MLD global listener validation - string value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ mld_vars_map.test_mld_global_listener_string }}" +# register: result_mld_global_listener_string +# ignore_errors: yes +# tags: [negative, mld, global, listener] + +# - name: Assert MLD global listener string validation failed +# assert: +# that: +# - result_mld_global_listener_string.failed == true +# - "'must be of type boolean' in result_mld_global_listener_string.msg" +# fail_msg: "MLD global listener string validation should have failed" +# success_msg: "MLD global listener string validation correctly failed" +# tags: [negative, mld, global, listener] + +# - name: Test MLD global listener validation - integer value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ mld_vars_map.test_mld_global_listener_integer }}" +# register: result_mld_global_listener_integer +# ignore_errors: yes +# tags: [negative, mld, global, listener] + +# - name: Assert MLD global listener integer validation failed +# assert: +# that: +# - result_mld_global_listener_integer.failed == true +# - "'must be of type boolean' in result_mld_global_listener_integer.msg" +# fail_msg: "MLD global listener integer validation should have failed" +# success_msg: "MLD global listener integer validation correctly failed" +# tags: [negative, mld, global, listener] + +# - name: Test MLD global query interval validation - string value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ mld_vars_map.test_mld_global_query_interval_string }}" +# register: 
result_mld_global_query_interval_string +# ignore_errors: yes +# tags: [negative, mld, global, query_interval] + +# - name: Assert MLD global query interval string validation failed +# assert: +# that: +# - result_mld_global_query_interval_string.failed == true +# - "'must be of type integer' in result_mld_global_query_interval_string.msg" +# fail_msg: "MLD global query interval string validation should have failed" +# success_msg: "MLD global query interval string validation correctly failed" +# tags: [negative, mld, global, query_interval] + +# - name: Test MLD global query interval validation - boolean value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ mld_vars_map.test_mld_global_query_interval_boolean }}" +# register: result_mld_global_query_interval_boolean +# ignore_errors: yes +# tags: [negative, mld, global, query_interval] + +# - name: Assert MLD global query interval boolean validation failed +# assert: +# that: +# - result_mld_global_query_interval_boolean.failed == true +# - "'must be of type integer' in result_mld_global_query_interval_boolean.msg" +# fail_msg: "MLD global query interval boolean validation should have failed" +# success_msg: "MLD global query interval boolean validation correctly failed" +# tags: [negative, mld, global, query_interval] + +# - name: Test MLD global query interval validation - below minimum should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ mld_vars_map.test_mld_global_query_interval_min }}" +# register: result_mld_global_query_interval_min +# ignore_errors: yes +# tags: [negative, mld, global, query_interval, range] + +# - name: Assert MLD global query interval minimum validation failed +# assert: +# that: +# - result_mld_global_query_interval_min.failed == true +# - "'must be within the range' in result_mld_global_query_interval_min.msg or 'query_interval' in result_mld_global_query_interval_min.msg" +# fail_msg: "MLD global query interval minimum validation should have failed" +# success_msg: "MLD global query interval minimum validation correctly failed" +# tags: [negative, mld, global, query_interval, range] + +# - name: Test MLD global query interval validation - exceeds maximum should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ mld_vars_map.test_mld_global_query_interval_max }}" +# register: result_mld_global_query_interval_max +# ignore_errors: yes +# tags: [negative, mld, global, query_interval, range] + +# - name: Assert MLD global query interval maximum validation failed +# assert: +# that: +# - result_mld_global_query_interval_max.failed == true +# - "'must be within the range' in result_mld_global_query_interval_max.msg or 'query_interval' in result_mld_global_query_interval_max.msg" +# fail_msg: "MLD global query interval maximum validation should have failed" +# success_msg: "MLD global query interval maximum validation correctly failed" +# tags: [negative, mld, global, query_interval, range] + +# ############################################# +# # MLD VLAN Configuration Tests # +# ############################################# + +# - name: Test MLD VLAN ID validation - string value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ mld_vars_map.test_mld_vlan_id_string }}" +# register: result_mld_vlan_id_string +# ignore_errors: yes +# 
tags: [negative, mld, vlan, id] + +# - name: Assert MLD VLAN ID string validation failed +# assert: +# that: +# - result_mld_vlan_id_string.failed == true +# - "'must be of type integer' in result_mld_vlan_id_string.msg" +# fail_msg: "MLD VLAN ID string validation should have failed" +# success_msg: "MLD VLAN ID string validation correctly failed" +# tags: [negative, mld, vlan, id] + +# - name: Test MLD VLAN ID validation - negative value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ mld_vars_map.test_mld_vlan_id_negative }}" +# register: result_mld_vlan_id_negative +# ignore_errors: yes +# tags: [negative, mld, vlan, id, range] + +# - name: Assert MLD VLAN ID negative validation failed +# assert: +# that: +# - result_mld_vlan_id_negative.failed == true +# - "'must be within the range' in result_mld_vlan_id_negative.msg or 'vlan_id' in result_mld_vlan_id_negative.msg" +# fail_msg: "MLD VLAN ID negative validation should have failed" +# success_msg: "MLD VLAN ID negative validation correctly failed" +# tags: [negative, mld, vlan, id, range] + +# - name: Test MLD VLAN ID validation - exceeds maximum should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ mld_vars_map.test_mld_vlan_id_max }}" +# register: result_mld_vlan_id_max +# ignore_errors: yes +# tags: [negative, mld, vlan, id, range] + +# - name: Assert MLD VLAN ID maximum validation failed +# assert: +# that: +# - result_mld_vlan_id_max.failed == true +# - "'must be within the range' in result_mld_vlan_id_max.msg or 'vlan_id' in result_mld_vlan_id_max.msg" +# fail_msg: "MLD VLAN ID maximum validation should have failed" +# success_msg: "MLD VLAN ID maximum validation correctly failed" +# tags: [negative, mld, vlan, id, range] + +# - name: Test MLD VLAN ID validation - zero value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ mld_vars_map.test_mld_vlan_id_zero }}" +# register: result_mld_vlan_id_zero +# ignore_errors: yes +# tags: [negative, mld, vlan, id, range] + +# - name: Assert MLD VLAN ID zero validation failed +# assert: +# that: +# - result_mld_vlan_id_zero.failed == true +# - "'must be within the range' in result_mld_vlan_id_zero.msg or 'vlan_id' in result_mld_vlan_id_zero.msg" +# fail_msg: "MLD VLAN ID zero validation should have failed" +# success_msg: "MLD VLAN ID zero validation correctly failed" +# tags: [negative, mld, vlan, id, range] + +# - name: Test missing required MLD VLAN ID +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ mld_vars_map.test_mld_vlan_id_missing }}" +# register: result_mld_vlan_id_missing +# ignore_errors: yes +# tags: [negative, mld, vlan, id, required] + +# - name: Assert missing MLD VLAN ID validation failed +# assert: +# that: +# - result_mld_vlan_id_missing.failed == true +# - "'mld_snooping_vlan_id' in result_mld_vlan_id_missing.msg or 'required' in result_mld_vlan_id_missing.msg" +# fail_msg: "Missing MLD VLAN ID validation should have failed" +# success_msg: "Missing MLD VLAN ID validation correctly failed" +# tags: [negative, mld, vlan, id, required] + +# - name: Test MLD VLAN enabled validation - string value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ mld_vars_map.test_mld_vlan_enabled_string }}" +# register: 
result_mld_vlan_enabled_string +# ignore_errors: yes +# tags: [negative, mld, vlan, enabled] + +# - name: Assert MLD VLAN enabled string validation failed +# assert: +# that: +# - result_mld_vlan_enabled_string.failed == true +# - "'must be of type boolean' in result_mld_vlan_enabled_string.msg" +# fail_msg: "MLD VLAN enabled string validation should have failed" +# success_msg: "MLD VLAN enabled string validation correctly failed" +# tags: [negative, mld, vlan, enabled] + +# - name: Test MLD VLAN enabled validation - integer value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ mld_vars_map.test_mld_vlan_enabled_integer }}" +# register: result_mld_vlan_enabled_integer +# ignore_errors: yes +# tags: [negative, mld, vlan, enabled] + +# - name: Assert MLD VLAN enabled integer validation failed +# assert: +# that: +# - result_mld_vlan_enabled_integer.failed == true +# - "'must be of type boolean' in result_mld_vlan_enabled_integer.msg" +# fail_msg: "MLD VLAN enabled integer validation should have failed" +# success_msg: "MLD VLAN enabled integer validation correctly failed" +# tags: [negative, mld, vlan, enabled] + +# - name: Test MLD VLAN immediate leave validation - string value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ mld_vars_map.test_mld_vlan_immediate_leave_string }}" +# register: result_mld_vlan_immediate_leave_string +# ignore_errors: yes +# tags: [negative, mld, vlan, immediate_leave] + +# - name: Assert MLD VLAN immediate leave string validation failed +# assert: +# that: +# - result_mld_vlan_immediate_leave_string.failed == true +# - "'must be of type boolean' in result_mld_vlan_immediate_leave_string.msg" +# fail_msg: "MLD VLAN immediate leave string validation should have failed" +# success_msg: "MLD VLAN immediate leave string validation correctly failed" +# tags: [negative, mld, vlan, immediate_leave] + +# - name: Test MLD VLAN immediate leave validation - integer value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ mld_vars_map.test_mld_vlan_immediate_leave_integer }}" +# register: result_mld_vlan_immediate_leave_integer +# ignore_errors: yes +# tags: [negative, mld, vlan, immediate_leave] + +# - name: Assert MLD VLAN immediate leave integer validation failed +# assert: +# that: +# - result_mld_vlan_immediate_leave_integer.failed == true +# - "'must be of type boolean' in result_mld_vlan_immediate_leave_integer.msg" +# fail_msg: "MLD VLAN immediate leave integer validation should have failed" +# success_msg: "MLD VLAN immediate leave integer validation correctly failed" +# tags: [negative, mld, vlan, immediate_leave] + +# - name: Test MLD VLAN querier enabled validation - string value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ mld_vars_map.test_mld_vlan_querier_enabled_string }}" +# register: result_mld_vlan_querier_enabled_string +# ignore_errors: yes +# tags: [negative, mld, vlan, querier] + +# - name: Assert MLD VLAN querier enabled string validation failed +# assert: +# that: +# - result_mld_vlan_querier_enabled_string.failed == true +# - "'must be of type boolean' in result_mld_vlan_querier_enabled_string.msg" +# fail_msg: "MLD VLAN querier enabled string validation should have failed" +# success_msg: "MLD VLAN querier enabled string validation correctly 
failed" +# tags: [negative, mld, vlan, querier] + +# - name: Test MLD VLAN querier enabled validation - integer value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ mld_vars_map.test_mld_vlan_querier_enabled_integer }}" +# register: result_mld_vlan_querier_enabled_integer +# ignore_errors: yes +# tags: [negative, mld, vlan, querier] + +# - name: Assert MLD VLAN querier enabled integer validation failed +# assert: +# that: +# - result_mld_vlan_querier_enabled_integer.failed == true +# - "'must be of type boolean' in result_mld_vlan_querier_enabled_integer.msg" +# fail_msg: "MLD VLAN querier enabled integer validation should have failed" +# success_msg: "MLD VLAN querier enabled integer validation correctly failed" +# tags: [negative, mld, vlan, querier] + +# - name: Test MLD VLAN querier address validation - IPv4 instead of IPv6 should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ mld_vars_map.test_mld_vlan_querier_address_ipv4 }}" +# register: result_mld_vlan_querier_address_ipv4 +# ignore_errors: yes +# tags: [negative, mld, vlan, querier_address] + +# - name: Assert MLD VLAN querier address IPv4 validation failed +# assert: +# that: +# - result_mld_vlan_querier_address_ipv4.failed == true +# - "'IPv6' in result_mld_vlan_querier_address_ipv4.msg or 'invalid format' in result_mld_vlan_querier_address_ipv4.msg" +# fail_msg: "MLD VLAN querier address IPv4 validation should have failed" +# success_msg: "MLD VLAN querier address IPv4 validation correctly failed" +# tags: [negative, mld, vlan, querier_address] + +# - name: Test MLD VLAN querier version validation - invalid choice should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ mld_vars_map.test_mld_vlan_querier_version_invalid }}" +# register: result_mld_vlan_querier_version_invalid +# ignore_errors: yes +# tags: [negative, mld, vlan, querier_version] + +# - name: Assert MLD VLAN querier version invalid validation failed +# assert: +# that: +# - result_mld_vlan_querier_version_invalid.failed == true +# - "'not a valid choice' in result_mld_vlan_querier_version_invalid.msg or 'VERSION_3' in result_mld_vlan_querier_version_invalid.msg" +# fail_msg: "MLD VLAN querier version invalid validation should have failed" +# success_msg: "MLD VLAN querier version invalid validation correctly failed" +# tags: [negative, mld, vlan, querier_version] + +# - name: Test MLD VLAN querier version validation - integer value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ mld_vars_map.test_mld_vlan_querier_version_integer }}" +# register: result_mld_vlan_querier_version_integer +# ignore_errors: yes +# tags: [negative, mld, vlan, querier_version] + +# - name: Assert MLD VLAN querier version integer validation failed +# assert: +# that: +# - result_mld_vlan_querier_version_integer.failed == true +# - "'must be of type string' in result_mld_vlan_querier_version_integer.msg" +# fail_msg: "MLD VLAN querier version integer validation should have failed" +# success_msg: "MLD VLAN querier version integer validation correctly failed" +# tags: [negative, mld, vlan, querier_version] + +# - name: Test MLD VLAN query interval validation - string value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ 
mld_vars_map.test_mld_vlan_query_interval_string }}" +# register: result_mld_vlan_query_interval_string +# ignore_errors: yes +# tags: [negative, mld, vlan, query_interval] + +# - name: Assert MLD VLAN query interval string validation failed +# assert: +# that: +# - result_mld_vlan_query_interval_string.failed == true +# - "'must be of type integer' in result_mld_vlan_query_interval_string.msg" +# fail_msg: "MLD VLAN query interval string validation should have failed" +# success_msg: "MLD VLAN query interval string validation correctly failed" +# tags: [negative, mld, vlan, query_interval] + +# - name: Test MLD VLAN query interval validation - below minimum should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ mld_vars_map.test_mld_vlan_query_interval_min }}" +# register: result_mld_vlan_query_interval_min +# ignore_errors: yes +# tags: [negative, mld, vlan, query_interval, range] + +# - name: Assert MLD VLAN query interval minimum validation failed +# assert: +# that: +# - result_mld_vlan_query_interval_min.failed == true +# - "'must be within the range' in result_mld_vlan_query_interval_min.msg or 'query_interval' in result_mld_vlan_query_interval_min.msg" +# fail_msg: "MLD VLAN query interval minimum validation should have failed" +# success_msg: "MLD VLAN query interval minimum validation correctly failed" +# tags: [negative, mld, vlan, query_interval, range] + +# - name: Test MLD VLAN query interval validation - exceeds maximum should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ mld_vars_map.test_mld_vlan_query_interval_max }}" +# register: result_mld_vlan_query_interval_max +# ignore_errors: yes +# tags: [negative, mld, vlan, query_interval, range] + +# - name: Assert MLD VLAN query interval maximum validation failed +# assert: +# that: +# - result_mld_vlan_query_interval_max.failed == true +# - "'must be within the range' in result_mld_vlan_query_interval_max.msg or 'query_interval' in result_mld_vlan_query_interval_max.msg" +# fail_msg: "MLD VLAN query interval maximum validation should have failed" +# success_msg: "MLD VLAN query interval maximum validation correctly failed" +# tags: [negative, mld, vlan, query_interval, range] + +# - name: Test MLD VLAN mrouter port list validation - string instead of list should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ mld_vars_map.test_mld_vlan_mrouter_port_list_string }}" +# register: result_mld_vlan_mrouter_port_list_string +# ignore_errors: yes +# tags: [negative, mld, vlan, mrouter_port_list] + +# - name: Assert MLD VLAN mrouter port list string validation failed +# assert: +# that: +# - result_mld_vlan_mrouter_port_list_string.failed == true +# - "'must be of type list' in result_mld_vlan_mrouter_port_list_string.msg" +# fail_msg: "MLD VLAN mrouter port list string validation should have failed" +# success_msg: "MLD VLAN mrouter port list string validation correctly failed" +# tags: [negative, mld, vlan, mrouter_port_list] + +# ############################################# +# # Data Type and Structure Tests # +# ############################################# + +# - name: Test MLD configuration validation - string instead of dictionary should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ mld_vars_map.test_mld_invalid_dict_type }}" +# register: 
result_mld_invalid_dict_type +# ignore_errors: yes +# tags: [negative, mld, structure, dict] + +# - name: Assert MLD configuration dictionary type validation failed +# assert: +# that: +# - result_mld_invalid_dict_type.failed == true +# - "'must be of type dict' in result_mld_invalid_dict_type.msg or 'mld_snooping' in result_mld_invalid_dict_type.msg" +# fail_msg: "MLD configuration dictionary type validation should have failed" +# success_msg: "MLD configuration dictionary type validation correctly failed" +# tags: [negative, mld, structure, dict] + +# - name: Test MLD configuration validation - list instead of dictionary should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ mld_vars_map.test_mld_invalid_list_type }}" +# register: result_mld_invalid_list_type +# ignore_errors: yes +# tags: [negative, mld, structure, list] + +# - name: Assert MLD configuration list type validation failed +# assert: +# that: +# - result_mld_invalid_list_type.failed == true +# - "'must be of type dict' in result_mld_invalid_list_type.msg or 'mld_snooping' in result_mld_invalid_list_type.msg" +# fail_msg: "MLD configuration list type validation should have failed" +# success_msg: "MLD configuration list type validation correctly failed" +# tags: [negative, mld, structure, list] + +# - name: Test MLD VLANs validation - string instead of list should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ mld_vars_map.test_mld_vlans_invalid_string }}" +# register: result_mld_vlans_invalid_string +# ignore_errors: yes +# tags: [negative, mld, vlan, structure, string] + +# - name: Assert MLD VLANs string type validation failed +# assert: +# that: +# - result_mld_vlans_invalid_string.failed == true +# - "'be a list of dictionaries' in result_mld_vlans_invalid_string.msg" +# fail_msg: "MLD VLANs string type validation should have failed" +# success_msg: "MLD VLANs string type validation correctly failed" +# tags: [negative, mld, vlan, structure, string] + +# - name: Test MLD VLANs validation - dictionary instead of list should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ mld_vars_map.test_mld_vlans_invalid_dict }}" +# register: result_mld_vlans_invalid_dict +# ignore_errors: yes +# tags: [negative, mld, vlan, structure, dict] + +# - name: Assert MLD VLANs dictionary type validation failed +# assert: +# that: +# - result_mld_vlans_invalid_dict.failed == true +# - "'be a list of dictionaries' in result_mld_vlans_invalid_dict.msg or 'mld_snooping_vlans' in result_mld_vlans_invalid_dict.msg" +# fail_msg: "MLD VLANs dictionary type validation should have failed" +# success_msg: "MLD VLANs dictionary type validation correctly failed" +# tags: [negative, mld, vlan, structure, dict] + +# - name: Test MLD VLAN item validation - string instead of dictionary should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ mld_vars_map.test_mld_vlan_invalid_dict_type }}" +# register: result_mld_vlan_invalid_dict_type +# ignore_errors: yes +# tags: [negative, mld, vlan, structure, item] + +# - name: Assert MLD VLAN item dictionary type validation failed +# assert: +# that: +# - result_mld_vlan_invalid_dict_type.failed == true +# - "'must be a dictionary' in result_mld_vlan_invalid_dict_type.msg or 'mld_snooping_vlans' in result_mld_vlan_invalid_dict_type.msg" +# fail_msg: 
"MLD VLAN item dictionary type validation should have failed" +# success_msg: "MLD VLAN item dictionary type validation correctly failed" +# tags: [negative, mld, vlan, structure, item] + +# ################################################################################################################### +# # Positive Test Cases for MLD Snooping + +# ############################################# +# # POSITIVE TEST CASES - CREATE # +# ############################################# + +# - name: Create MLD snooping configuration with global parameters only +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ mld_vars_map.test_mld_create_global_only }}" +# register: result_mld_create_global_only +# tags: [positive, mld, create, global] + +# - name: Assert MLD global only creation succeeded +# assert: +# that: +# - result_mld_create_global_only.changed == true +# - result_mld_create_global_only.response is defined +# fail_msg: "MLD global only creation should have succeeded" +# success_msg: "MLD global only creation succeeded" +# tags: [positive, mld, create, global] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, mld, create] + +# - name: Create MLD snooping configuration with VLANs only +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ mld_vars_map.test_mld_create_vlans_only }}" +# register: result_mld_create_vlans_only +# tags: [positive, mld, create, vlans] + +# - name: Assert MLD VLANs only creation succeeded +# assert: +# that: +# - result_mld_create_vlans_only.changed == true +# - result_mld_create_vlans_only.response is defined +# fail_msg: "MLD VLANs only creation should have succeeded" +# success_msg: "MLD VLANs only creation succeeded" +# tags: [positive, mld, create, vlans] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, mld, create] + +# - name: Create MLD snooping configuration with single VLAN +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ mld_vars_map.test_mld_create_single_vlan }}" +# register: result_mld_create_single_vlan +# tags: [positive, mld, create, single_vlan] + +# - name: Assert MLD single VLAN creation succeeded +# assert: +# that: +# - result_mld_create_single_vlan.changed == true +# - result_mld_create_single_vlan.response is defined +# fail_msg: "MLD single VLAN creation should have succeeded" +# success_msg: "MLD single VLAN creation succeeded" +# tags: [positive, mld, create, single_vlan] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, mld, create] + +# - name: Create MLD snooping configuration with global and VLANs +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ mld_vars_map.test_mld_create_global_and_vlans }}" +# register: result_mld_create_global_and_vlans +# tags: [positive, mld, create, combined] + +# - name: Assert MLD global and VLANs creation succeeded +# assert: +# that: +# - result_mld_create_global_and_vlans.changed == true +# - result_mld_create_global_and_vlans.response is defined +# fail_msg: "MLD global and VLANs creation should have succeeded" +# success_msg: "MLD global and VLANs creation succeeded" +# tags: [positive, mld, create, combined] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, mld, create] + +# - 
name: Create MLD snooping configuration with minimal VLAN parameters +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ mld_vars_map.test_mld_create_minimal_vlan }}" +# register: result_mld_create_minimal_vlan +# tags: [positive, mld, create, minimal, vlan] + +# - name: Assert MLD minimal VLAN creation succeeded +# assert: +# that: +# - result_mld_create_minimal_vlan.changed == true +# - result_mld_create_minimal_vlan.response is defined +# fail_msg: "MLD minimal VLAN creation should have succeeded" +# success_msg: "MLD minimal VLAN creation succeeded" +# tags: [positive, mld, create, minimal, vlan] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, mld, create] + +# - name: Create MLD snooping configuration with multiple VLANs - different settings +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ mld_vars_map.test_mld_create_multiple_vlans_different_settings }}" +# register: result_mld_create_multiple_vlans_different_settings +# tags: [positive, mld, create, multiple, different] + +# - name: Assert MLD multiple VLANs with different settings creation succeeded +# assert: +# that: +# - result_mld_create_multiple_vlans_different_settings.changed == true +# - result_mld_create_multiple_vlans_different_settings.response is defined +# fail_msg: "MLD multiple VLANs with different settings creation should have succeeded" +# success_msg: "MLD multiple VLANs with different settings creation succeeded" +# tags: [positive, mld, create, multiple, different] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, mld, create] + +# ############################################# +# # POSITIVE TEST CASES - UPDATE # +# ############################################# + +# - name: Update MLD snooping global parameters only +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ mld_vars_map.test_mld_update_global_only }}" +# register: result_mld_update_global_only +# tags: [positive, mld, update, global] + +# - name: Assert MLD global only update succeeded +# assert: +# that: +# - result_mld_update_global_only.changed == true +# - result_mld_update_global_only.response is defined +# fail_msg: "MLD global only update should have succeeded" +# success_msg: "MLD global only update succeeded" +# tags: [positive, mld, update, global] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, mld, update] + +# - name: Update MLD snooping by modifying existing VLANs +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ mld_vars_map.test_mld_update_modify_vlans }}" +# register: result_mld_update_modify_vlans +# tags: [positive, mld, update, modify_vlans] + +# - name: Assert MLD modify VLANs update succeeded +# assert: +# that: +# - result_mld_update_modify_vlans.changed == true +# - result_mld_update_modify_vlans.response is defined +# fail_msg: "MLD modify VLANs update should have succeeded" +# success_msg: "MLD modify VLANs update succeeded" +# tags: [positive, mld, update, modify_vlans] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, mld, update] + +# - name: Update MLD snooping global and VLANs together +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - 
"{{ mld_vars_map.test_mld_update_global_and_vlans }}" +# register: result_mld_update_global_and_vlans +# tags: [positive, mld, update, combined] + +# - name: Assert MLD global and VLANs update succeeded +# assert: +# that: +# - result_mld_update_global_and_vlans.changed == true +# - result_mld_update_global_and_vlans.response is defined +# fail_msg: "MLD global and VLANs update should have succeeded" +# success_msg: "MLD global and VLANs update succeeded" +# tags: [positive, mld, update, combined] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, mld, update] + +# - name: Update MLD snooping single VLAN +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ mld_vars_map.test_mld_update_single_vlan }}" +# register: result_mld_update_single_vlan +# tags: [positive, mld, update, single_vlan] + +# - name: Assert MLD single VLAN update succeeded +# assert: +# that: +# - result_mld_update_single_vlan.changed == true +# - result_mld_update_single_vlan.response is defined +# fail_msg: "MLD single VLAN update should have succeeded" +# success_msg: "MLD single VLAN update succeeded" +# tags: [positive, mld, update, single_vlan] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, mld, update] diff --git a/tests/integration/ccc_wired_campus_automation_management/tests/test_wca_feature_port_configurations.yml b/tests/integration/ccc_wired_campus_automation_management/tests/test_wca_feature_port_configurations.yml new file mode 100644 index 0000000000..5851bfcefd --- /dev/null +++ b/tests/integration/ccc_wired_campus_automation_management/tests/test_wca_feature_port_configurations.yml @@ -0,0 +1,2904 @@ +# --- +# - debug: msg="Starting Port Configuration feature tests for Wired Campus Automation" +# - debug: msg="Role Path {{ role_path }}" + +# - block: +# - name: Load Port Configuration test variables +# include_vars: +# file: "{{ role_path }}/vars/vars_wca_test_feature_port_configuration.yml" +# name: port_config_vars_map +# vars: +# dnac_login: &dnac_login +# dnac_host: "{{ dnac_host }}" +# dnac_username: "{{ dnac_username }}" +# dnac_password: "{{ dnac_password }}" +# dnac_verify: "{{ dnac_verify }}" +# dnac_port: "{{ dnac_port }}" +# dnac_version: "{{ dnac_version }}" +# dnac_debug: "{{ dnac_debug }}" +# dnac_log: true +# dnac_log_level: DEBUG +# config_verify: true +# dnac_api_task_timeout: "{{ dnac_api_task_timeout }}" +# dnac_log_append: false + +# ############################################# +# # Port Configuration Test Summary # +# ############################################# + +# - name: Display Port Configuration test cases summary +# debug: +# msg: | +# Port Configuration Feature Test Cases Summary: + +# NEGATIVE TEST CASES (Input Validation): +# 1. Port Configuration Structure Tests - Invalid data types, missing required fields +# 2. Switchport Interface Config Tests - Invalid modes, VLANs, descriptions +# 3. VLAN Trunking Interface Config Tests - Invalid DTP settings, protection modes +# 4. 802.1X Interface Config Tests - Invalid authentication orders, methods +# 5. MAB Interface Config Tests - Invalid enablement values +# 6. STP Interface Config Tests - Invalid costs, priorities, modes +# 7. DHCP Snooping Interface Config Tests - Invalid rates, trust settings +# 8. CDP/LLDP Interface Config Tests - Invalid admin status, modes +# 9. VTP Interface Config Tests - Invalid admin status + +# POSITIVE TEST CASES (Functional Validation): +# 1. 
CREATE Operations - Minimal to comprehensive port configurations +# 2. UPDATE Operations - Modify existing port configurations +# 3. DELETE Operations - Remove port configurations +# 4. BOUNDARY Value Tests - Min/max values for all parameters +# 5. SPECIAL Configuration Tests - All modes, features, edge cases +# 6. PERFORMANCE Tests - Large scale port configurations +# 7. REGRESSION Tests - Previously problematic scenarios +# 8. COMPATIBILITY Tests - Mixed interface types and configurations +# 9. EDGE CASE Tests - Special characters, unicode, interface variations +# 10. CLEANUP Operations - Reset ports to default configuration + +# Total Test Coverage: +# - Port Configuration Structure (list/dict validation) +# - Switchport Interface Configuration (modes, VLANs, admin status) +# - VLAN Trunking Configuration (DTP, protection, pruning) +# - 802.1X Authentication Configuration (order, methods) +# - MAB Authentication Configuration (enablement) +# - STP Interface Configuration (portfast, BPDU, cost, priority) +# - DHCP Snooping Configuration (rate, trust) +# - CDP Interface Configuration (admin status, logging) +# - LLDP Interface Configuration (transmit/receive modes) +# - VTP Interface Configuration (admin status) + +# # ################################################################################################################### +# # # NEGATIVE TEST CASES - PORT CONFIGURATION STRUCTURE VALIDATION +# # ################################################################################################################### + +# # - name: "NEGATIVE TEST - Port Configuration Structure Tests" +# # block: +# # - name: "TEST 1: Invalid port configuration - string instead of list" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_port_config_invalid_list_type }}" +# # register: result_invalid_list_type +# # ignore_errors: true +# # tags: [negative, structure, port_config_invalid_list_type] + +# # - name: Assert port configuration invalid list type fails +# # assert: +# # that: +# # - result_invalid_list_type.failed == true +# # - "'Port configurations must be a list' in result_invalid_list_type.msg" +# # fail_msg: "Test should fail when port_configuration is not a list" +# # success_msg: "Port configuration invalid list type correctly failed" +# # tags: [negative, structure, port_config_invalid_list_type] + +# # - name: "TEST 2: Invalid port configuration - dictionary instead of list" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_port_config_invalid_dict_type }}" +# # register: result_invalid_dict_type +# # ignore_errors: true +# # tags: [negative, structure, port_config_invalid_dict_type] + +# # - name: Assert port configuration invalid dict type fails +# # assert: +# # that: +# # - result_invalid_dict_type.failed == true +# # - "'Port configurations must be a list' in result_invalid_dict_type.msg or 'must be of type list' in result_invalid_dict_type.msg" +# # fail_msg: "Test should fail when port_configuration is a dictionary" +# # success_msg: "Port configuration invalid dict type correctly failed" +# # tags: [negative, structure, port_config_invalid_dict_type] + +# # - name: "TEST 3: Invalid port configuration item - string instead of dictionary" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ 
port_config_vars_map.test_port_config_item_invalid_dict_type }}" +# # register: result_item_invalid_dict +# # ignore_errors: true +# # tags: [negative, structure, port_config_item_invalid_dict] + +# # - name: Assert port configuration item invalid dict fails +# # assert: +# # that: +# # - result_item_invalid_dict.failed == true +# # - "'must be a dictionary' in result_item_invalid_dict.msg" +# # fail_msg: "Test should fail when port_configuration item is not a dictionary" +# # success_msg: "Port configuration item invalid dict correctly failed" +# # tags: [negative, structure, port_config_item_invalid_dict] + +# # - name: "TEST 4: Missing interface_name - required field" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_port_config_missing_interface_name }}" +# # register: result_missing_interface_name +# # ignore_errors: true +# # tags: [negative, structure, missing_interface_name] + +# # - name: Assert missing interface_name fails +# # assert: +# # that: +# # - result_missing_interface_name.failed == true +# # - "'Each port configuration must have an' in result_missing_interface_name.msg" +# # fail_msg: "Test should fail when interface_name is missing" +# # success_msg: "Missing interface_name correctly failed" +# # tags: [negative, structure, missing_interface_name] + +# # - name: "TEST 5: Invalid interface_name - integer instead of string" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_port_config_interface_name_integer }}" +# # register: result_interface_name_integer +# # ignore_errors: true +# # tags: [negative, structure, interface_name_integer] + +# # - name: Assert interface_name integer type fails +# # assert: +# # that: +# # - result_interface_name_integer.failed == true +# # - "'interface_name must be a string' in result_interface_name_integer.msg or 'must be of type str' in result_interface_name_integer.msg" +# # fail_msg: "Test should fail when interface_name is an integer" +# # success_msg: "Interface_name integer type correctly failed" +# # tags: [negative, structure, interface_name_integer] + +# # - name: "TEST 6: Invalid interface_name - empty string" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_port_config_interface_name_empty }}" +# # register: result_interface_name_empty +# # ignore_errors: true +# # tags: [negative, structure, interface_name_empty] + +# # - name: Assert interface_name empty string fails +# # assert: +# # that: +# # - result_interface_name_empty.failed == true +# # - "'interface_name cannot be empty' in result_interface_name_empty.msg or 'empty' in result_interface_name_empty.msg" +# # fail_msg: "Test should fail when interface_name is empty" +# # success_msg: "Interface_name empty string correctly failed" +# # tags: [negative, structure, interface_name_empty] + +# # ################################################################################################################### +# # # NEGATIVE TEST CASES - SWITCHPORT INTERFACE CONFIG VALIDATION +# # ################################################################################################################### + +# # - name: "NEGATIVE TEST - Switchport Interface Config Tests" +# # block: +# # - name: "TEST 7: Invalid switchport_interface_config - string instead of dict" +# # 
cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_switchport_config_invalid_dict_type }}" +# # register: result_switchport_invalid_dict +# # ignore_errors: true +# # tags: [negative, switchport, config_invalid_dict] + +# # - name: Assert switchport_interface_config invalid dict fails +# # assert: +# # that: +# # - result_switchport_invalid_dict.failed == true +# # - "'switchport_interface_config must be a dictionary' in result_switchport_invalid_dict.msg or 'must be of type dict' in result_switchport_invalid_dict.msg" +# # fail_msg: "Test should fail when switchport_interface_config is not a dictionary" +# # success_msg: "Switchport_interface_config invalid dict correctly failed" +# # tags: [negative, switchport, config_invalid_dict] + +# # - name: "TEST 8: Invalid switchport_description - integer instead of string" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_switchport_description_integer }}" +# # register: result_description_integer +# # ignore_errors: true +# # tags: [negative, switchport, description_integer] + +# # - name: Assert switchport_description integer fails +# # assert: +# # that: +# # - result_description_integer.failed == true +# # - "'switchport_description must be a string' in result_description_integer.msg or 'must be of type str' in result_description_integer.msg" +# # fail_msg: "Test should fail when switchport_description is an integer" +# # success_msg: "Switchport_description integer correctly failed" +# # tags: [negative, switchport, description_integer] + +# # - name: "TEST 9: Invalid switchport_description - exceeds maximum length" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_switchport_description_max_length }}" +# # register: result_description_max_length +# # ignore_errors: true +# # tags: [negative, switchport, description_max_length] + +# # - name: Assert switchport_description max length fails +# # assert: +# # that: +# # - result_description_max_length.failed == true +# # - "'exceeds maximum length of 230' in result_description_max_length.msg" +# # fail_msg: "Test should fail when switchport_description exceeds maximum length" +# # success_msg: "Switchport_description max length correctly failed" +# # tags: [negative, switchport, description_max_length] + +# # - name: "TEST 10: Invalid switchport_mode - invalid choice" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_switchport_mode_invalid }}" +# # register: result_mode_invalid +# # ignore_errors: true +# # tags: [negative, switchport, mode_invalid] + +# # - name: Assert switchport_mode invalid choice fails +# # assert: +# # that: +# # - result_mode_invalid.failed == true +# # - "'must be one of' in result_mode_invalid.msg" +# # fail_msg: "Test should fail when switchport_mode has invalid choice" +# # success_msg: "Switchport_mode invalid choice correctly failed" +# # tags: [negative, switchport, mode_invalid] + +# # - name: "TEST 11: Invalid switchport_mode - integer instead of string" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_switchport_mode_integer }}" +# # register: result_mode_integer +# # ignore_errors: 
true +# # tags: [negative, switchport, mode_integer] + +# # - name: Assert switchport_mode integer fails +# # assert: +# # that: +# # - result_mode_integer.failed == true +# # - "'must be of type string' in result_mode_integer.msg" +# # fail_msg: "Test should fail when switchport_mode is an integer" +# # success_msg: "Switchport_mode integer correctly failed" +# # tags: [negative, switchport, mode_integer] + +# # - name: "TEST 12: Invalid access_vlan - string instead of integer" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_access_vlan_string }}" +# # register: result_access_vlan_string +# # ignore_errors: true +# # tags: [negative, switchport, access_vlan_string] + +# # - name: Assert access_vlan string type fails +# # assert: +# # that: +# # - result_access_vlan_string.failed == true +# # - "'must be of type integer' in result_access_vlan_string.msg" +# # fail_msg: "Test should fail when access_vlan is a string" +# # success_msg: "Access_vlan string type correctly failed" +# # tags: [negative, switchport, access_vlan_string] + +# # - name: "TEST 13: Invalid access_vlan - negative value" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_access_vlan_negative }}" +# # register: result_access_vlan_negative +# # ignore_errors: true +# # tags: [negative, switchport, access_vlan_negative] + +# # - name: Assert access_vlan negative fails +# # assert: +# # that: +# # - result_access_vlan_negative.failed == true +# # - "'must be within the range (1, 4094)' in result_access_vlan_negative.msg or 'must be greater than' in result_access_vlan_negative.msg" +# # fail_msg: "Test should fail when access_vlan is negative" +# # success_msg: "Access_vlan negative correctly failed" +# # tags: [negative, switchport, access_vlan_negative] + +# # - name: "TEST 14: Invalid access_vlan - exceeds maximum value" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_access_vlan_max }}" +# # register: result_access_vlan_max +# # ignore_errors: true +# # tags: [negative, switchport, access_vlan_max] + +# # - name: Assert access_vlan maximum value fails +# # assert: +# # that: +# # - result_access_vlan_max.failed == true +# # - "'must be within the range (1, 4094)' in result_access_vlan_max.msg or 'greater than' in result_access_vlan_max.msg" +# # fail_msg: "Test should fail when access_vlan exceeds maximum value" +# # success_msg: "Access_vlan maximum value correctly failed" +# # tags: [negative, switchport, access_vlan_max] + +# # - name: "TEST 15: Invalid access_vlan - zero value" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_access_vlan_zero }}" +# # register: result_access_vlan_zero +# # ignore_errors: true +# # tags: [negative, switchport, access_vlan_zero] + +# # - name: Assert access_vlan zero value fails +# # assert: +# # that: +# # - result_access_vlan_zero.failed == true +# # - "'must be within the range (1, 4094)' in result_access_vlan_zero.msg or 'must be greater than 0' in result_access_vlan_zero.msg" +# # fail_msg: "Test should fail when access_vlan is zero" +# # success_msg: "Access_vlan zero value correctly failed" +# # tags: [negative, switchport, access_vlan_zero] + +# # - name: "TEST 16: Invalid admin_status - 
string instead of boolean" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_admin_status_string }}" +# # register: result_admin_status_string +# # ignore_errors: true +# # tags: [negative, switchport, admin_status_string] + +# # - name: Assert admin_status string type fails +# # assert: +# # that: +# # - result_admin_status_string.failed == true +# # - "'must be of type bool' in result_admin_status_string.msg" +# # fail_msg: "Test should fail when admin_status is a string" +# # success_msg: "Admin_status string type correctly failed" +# # tags: [negative, switchport, admin_status_string] + +# # - name: "TEST 17: Invalid admin_status - integer instead of boolean" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_admin_status_integer }}" +# # register: result_admin_status_integer +# # ignore_errors: true +# # tags: [negative, switchport, admin_status_integer] + +# # - name: Assert admin_status integer type fails +# # assert: +# # that: +# # - result_admin_status_integer.failed == true +# # - "'must be of type bool' in result_admin_status_integer.msg" +# # fail_msg: "Test should fail when admin_status is an integer" +# # success_msg: "Admin_status integer type correctly failed" +# # tags: [negative, switchport, admin_status_integer] + +# # - name: "TEST 17: Invalid voice_vlan - string instead of integer" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_voice_vlan_string }}" +# # register: result_voice_vlan_string +# # ignore_errors: true +# # tags: [negative, switchport, voice_vlan_string] + +# # - name: Assert voice_vlan string type fails +# # assert: +# # that: +# # - result_voice_vlan_string.failed == true +# # - "'must be of type int' in result_voice_vlan_string.msg" +# # fail_msg: "Test should fail when voice_vlan is a string" +# # success_msg: "Voice_vlan string type correctly failed" +# # tags: [negative, switchport, voice_vlan_string] + +# # - name: "TEST 18: Invalid voice_vlan - negative value" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_voice_vlan_negative }}" +# # register: result_voice_vlan_negative +# # ignore_errors: true +# # tags: [negative, switchport, voice_vlan_negative] + +# # - name: Assert voice_vlan negative fails +# # assert: +# # that: +# # - result_voice_vlan_negative.failed == true +# # - "'must be within the range (1, 4094)' in result_voice_vlan_negative.msg" +# # fail_msg: "Test should fail when voice_vlan is negative" +# # success_msg: "Voice_vlan negative correctly failed" +# # tags: [negative, switchport, voice_vlan_negative] + +# # - name: "TEST 19: Invalid voice_vlan - exceeds maximum value" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_voice_vlan_max }}" +# # register: result_voice_vlan_max +# # ignore_errors: true +# # tags: [negative, switchport, voice_vlan_max] + +# # - name: Assert voice_vlan maximum value fails +# # assert: +# # that: +# # - result_voice_vlan_max.failed == true +# # - "'must be within the range (1, 4094)' in result_voice_vlan_max.msg or 'greater than' in result_voice_vlan_max.msg" +# # fail_msg: "Test should fail when voice_vlan exceeds maximum 
value" +# # success_msg: "Voice_vlan maximum value correctly failed" +# # tags: [negative, switchport, voice_vlan_max] + +# # - name: "TEST 20: Invalid voice_vlan - zero value" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_voice_vlan_zero }}" +# # register: result_voice_vlan_zero +# # ignore_errors: true +# # tags: [negative, switchport, voice_vlan_zero] + +# # - name: Assert voice_vlan zero value fails +# # assert: +# # that: +# # - result_voice_vlan_zero.failed == true +# # - "'must be within the range (1, 4094)' in result_voice_vlan_zero.msg or 'must be greater than 0' in result_voice_vlan_zero.msg" +# # fail_msg: "Test should fail when voice_vlan is zero" +# # success_msg: "Voice_vlan zero value correctly failed" +# # tags: [negative, switchport, voice_vlan_zero] + +# # - name: "TEST 21: Invalid allowed_vlans - string instead of list" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_allowed_vlans_string }}" +# # register: result_allowed_vlans_string +# # ignore_errors: true +# # tags: [negative, switchport, allowed_vlans_string] + +# # - name: Assert allowed_vlans string type fails +# # assert: +# # that: +# # - result_allowed_vlans_string.failed == true +# # - "'must be a list' in result_allowed_vlans_string.msg or 'must be of type list' in result_allowed_vlans_string.msg" +# # fail_msg: "Test should fail when allowed_vlans is a string" +# # success_msg: "Allowed_vlans string type correctly failed" +# # tags: [negative, switchport, allowed_vlans_string] + +# # - name: "TEST 22: Invalid allowed_vlans - non-integer elements" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_allowed_vlans_non_integer }}" +# # register: result_allowed_vlans_non_integer +# # ignore_errors: true +# # tags: [negative, switchport, allowed_vlans_non_integer] + +# # - name: Assert allowed_vlans non-integer elements fails +# # assert: +# # that: +# # - result_allowed_vlans_non_integer.failed == true +# # - "'must be an integer between 1 and 4094' in result_allowed_vlans_non_integer.msg or 'must be of type int' in result_allowed_vlans_non_integer.msg" +# # fail_msg: "Test should fail when allowed_vlans contains non-integer elements" +# # success_msg: "Allowed_vlans non-integer elements correctly failed" +# # tags: [negative, switchport, allowed_vlans_non_integer] + +# # - name: "TEST 23: Invalid allowed_vlans - out of range values" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_allowed_vlans_out_of_range }}" +# # register: result_allowed_vlans_out_of_range +# # ignore_errors: true +# # tags: [negative, switchport, allowed_vlans_out_of_range] + +# # - name: Assert allowed_vlans out of range fails +# # assert: +# # that: +# # - result_allowed_vlans_out_of_range.failed == true +# # - "'must be an integer between 1 and 4094' in result_allowed_vlans_out_of_range.msg or 'out of range' in result_allowed_vlans_out_of_range.msg" +# # fail_msg: "Test should fail when allowed_vlans contains out of range values" +# # success_msg: "Allowed_vlans out of range values correctly failed" +# # tags: [negative, switchport, allowed_vlans_out_of_range] + +# # - name: "TEST 24: Invalid native_vlan_id - string instead of integer" +# # 
cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_native_vlan_id_string }}" +# # register: result_native_vlan_id_string +# # ignore_errors: true +# # tags: [negative, switchport, native_vlan_id_string] + +# # - name: Assert native_vlan_id string type fails +# # assert: +# # that: +# # - result_native_vlan_id_string.failed == true +# # - "'must be an integer' in result_native_vlan_id_string.msg or 'must be of type int' in result_native_vlan_id_string.msg" +# # fail_msg: "Test should fail when native_vlan_id is a string" +# # success_msg: "Native_vlan_id string type correctly failed" +# # tags: [negative, switchport, native_vlan_id_string] + +# # - name: "TEST 25: Invalid native_vlan_id - negative value" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_native_vlan_id_negative }}" +# # register: result_native_vlan_id_negative +# # ignore_errors: true +# # tags: [negative, switchport, native_vlan_id_negative] + +# # - name: Assert native_vlan_id negative fails +# # assert: +# # that: +# # - result_native_vlan_id_negative.failed == true +# # - "'must be within the range (1, 4094)' in result_native_vlan_id_negative.msg or 'must be greater than' in result_native_vlan_id_negative.msg" +# # fail_msg: "Test should fail when native_vlan_id is negative" +# # success_msg: "Native_vlan_id negative correctly failed" +# # tags: [negative, switchport, native_vlan_id_negative] + +# # - name: "TEST 26: Invalid native_vlan_id - exceeds maximum value" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_native_vlan_id_max }}" +# # register: result_native_vlan_id_max +# # ignore_errors: true +# # tags: [negative, switchport, native_vlan_id_max] + +# # - name: Assert native_vlan_id maximum value fails +# # assert: +# # that: +# # - result_native_vlan_id_max.failed == true +# # - "'must be within the range (1, 4094)' in result_native_vlan_id_max.msg or 'greater than maximum' in result_native_vlan_id_max.msg" +# # fail_msg: "Test should fail when native_vlan_id exceeds maximum value" +# # success_msg: "Native_vlan_id maximum value correctly failed" +# # tags: [negative, switchport, native_vlan_id_max] + +# # - name: "TEST 27: Invalid native_vlan_id - zero value" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_native_vlan_id_zero }}" +# # register: result_native_vlan_id_zero +# # ignore_errors: true +# # tags: [negative, switchport, native_vlan_id_zero] + +# # - name: Assert native_vlan_id zero value fails +# # assert: +# # that: +# # - result_native_vlan_id_zero.failed == true +# # - "'must be within the range (1, 4094)' in result_native_vlan_id_zero.msg or 'must be greater than 0' in result_native_vlan_id_zero.msg" +# # fail_msg: "Test should fail when native_vlan_id is zero" +# # success_msg: "Native_vlan_id zero value correctly failed" +# # tags: [negative, switchport, native_vlan_id_zero] + +# # ################################################################################################################### +# # # NEGATIVE TEST CASES - VLAN TRUNKING INTERFACE CONFIG VALIDATION +# # ################################################################################################################### + +# # - name: "NEGATIVE TEST - 
VLAN Trunking Interface Config Tests" +# # block: +# # - name: "TEST 28: Invalid vlan_trunking_interface_config - string instead of dict" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_vlan_trunking_config_invalid_dict_type }}" +# # register: result_vlan_trunking_invalid_dict +# # ignore_errors: true +# # tags: [negative, vlan_trunking, config_invalid_dict] + +# # - name: Assert vlan_trunking_interface_config invalid dict fails +# # assert: +# # that: +# # - result_vlan_trunking_invalid_dict.failed == true +# # - "'vlan_trunking_interface_config must be a dictionary' in result_vlan_trunking_invalid_dict.msg or 'must be of type dict' in result_vlan_trunking_invalid_dict.msg" +# # fail_msg: "Test should fail when vlan_trunking_interface_config is not a dictionary" +# # success_msg: "Vlan_trunking_interface_config invalid dict correctly failed" +# # tags: [negative, vlan_trunking, config_invalid_dict] + +# # - name: "TEST 29: Invalid dtp_negotiation - str instead of boolean" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_dtp_negotiation_invalid }}" +# # register: result_dtp_negotiation_invalid +# # ignore_errors: true +# # tags: [negative, vlan_trunking, dtp_negotiation_invalid] + +# # - name: Assert enable_dtp_negotiation invalid choice fails +# # assert: +# # that: +# # - result_dtp_negotiation_invalid.failed == true +# # - "'must be of type boolean' in result_dtp_negotiation_invalid.msg" +# # fail_msg: "Test should fail when dtp_negotiation has invalid choice" +# # success_msg: "Dtp_negotiation invalid choice correctly failed" +# # tags: [negative, vlan_trunking, dtp_negotiation_invalid] + +# # - name: "TEST 30: Invalid dtp_negotiation - integer instead of bool" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_dtp_negotiation_integer }}" +# # register: result_dtp_negotiation_integer +# # ignore_errors: true +# # tags: [negative, vlan_trunking, dtp_negotiation_integer] + +# # - name: Assert enable_dtp_negotiation integer type fails +# # assert: +# # that: +# # - result_dtp_negotiation_integer.failed == true +# # - "'must be of type boolean' in result_dtp_negotiation_integer.msg" +# # fail_msg: "Test should fail when dtp_negotiation is an integer" +# # success_msg: "Dtp_negotiation integer type correctly failed" +# # tags: [negative, vlan_trunking, dtp_negotiation_integer] + +# # - name: "TEST 31: Invalid protected - string instead of boolean" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_protected_string }}" +# # register: result_protected_string +# # ignore_errors: true +# # tags: [negative, vlan_trunking, protected_string] + +# # - name: Assert protected string type fails +# # assert: +# # that: +# # - result_protected_string.failed == true +# # - "'protected must be a boolean' in result_protected_string.msg or 'must be of type bool' in result_protected_string.msg" +# # fail_msg: "Test should fail when protected is a string" +# # success_msg: "Protected string type correctly failed" +# # tags: [negative, vlan_trunking, protected_string] + +# # - name: "TEST 32: Invalid protected - integer instead of boolean" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # 
state: merged +# # config: +# # - "{{ port_config_vars_map.test_protected_integer }}" +# # register: result_protected_integer +# # ignore_errors: true +# # tags: [negative, vlan_trunking, protected_integer] + +# # - name: Assert protected integer type fails +# # assert: +# # that: +# # - result_protected_integer.failed == true +# # - "'protected must be a boolean' in result_protected_integer.msg or 'must be of type bool' in result_protected_integer.msg" +# # fail_msg: "Test should fail when protected is an integer" +# # success_msg: "Protected integer type correctly failed" +# # tags: [negative, vlan_trunking, protected_integer] + +# # - name: "TEST 33: Invalid pruning_vlan_ids - string instead of list" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_pruning_vlan_ids_string }}" +# # register: result_pruning_vlan_ids_string +# # ignore_errors: true +# # tags: [negative, vlan_trunking, pruning_vlan_ids_string] + +# # - name: Assert pruning_vlan_ids string type fails +# # assert: +# # that: +# # - result_pruning_vlan_ids_string.failed == true +# # - "'pruning_vlan_ids must be a list' in result_pruning_vlan_ids_string.msg or 'must be of type list' in result_pruning_vlan_ids_string.msg" +# # fail_msg: "Test should fail when pruning_vlan_ids is a string" +# # success_msg: "Pruning_vlan_ids string type correctly failed" +# # tags: [negative, vlan_trunking, pruning_vlan_ids_string] + +# # - name: "TEST 35: Invalid pruning_vlan_ids - out of range values" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_pruning_vlan_ids_out_of_range }}" +# # register: result_pruning_vlan_ids_out_of_range +# # ignore_errors: true +# # tags: [negative, vlan_trunking, pruning_vlan_ids_out_of_range] + +# # - name: Assert pruning_vlan_ids out of range fails +# # assert: +# # that: +# # - result_pruning_vlan_ids_out_of_range.failed == true +# # - "'must be within range (1, 4094)' in result_pruning_vlan_ids_out_of_range.msg or 'out of range' in result_pruning_vlan_ids_out_of_range.msg" +# # fail_msg: "Test should fail when pruning_vlan_ids contains out of range values" +# # success_msg: "Pruning_vlan_ids out of range values correctly failed" +# # tags: [negative, vlan_trunking, pruning_vlan_ids_out_of_range] + +# # ################################################################################################################### +# # # NEGATIVE TEST CASES - 802.1X INTERFACE CONFIG VALIDATION +# # ################################################################################################################### + +# # - name: "NEGATIVE TEST - 802.1X Interface Config Tests" +# # block: +# # - name: "TEST 22: Invalid authentication_order - string instead of list" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_authentication_order_string }}" +# # register: result_auth_order_string +# # ignore_errors: true +# # tags: [negative, dot1x, auth_order_string] + +# # - name: Assert authentication_order string type fails +# # assert: +# # that: +# # - result_auth_order_string.failed == true +# # - "'must be a list' in result_auth_order_string.msg or 'must be of type list' in result_auth_order_string.msg" +# # fail_msg: "Test should fail when authentication_order is a string" +# # success_msg: "Authentication_order string type correctly 
failed" +# # tags: [negative, dot1x, auth_order_string] + +# # - name: "TEST 23: Invalid authentication_order - exceeds maximum items" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_authentication_order_max_items }}" +# # register: result_auth_order_max +# # ignore_errors: true +# # tags: [negative, dot1x, auth_order_max] + +# # - name: Assert authentication_order max items fails +# # assert: +# # that: +# # - result_auth_order_max.failed == true +# # - "'exceeds maximum' in result_auth_order_max.msg" +# # fail_msg: "Test should fail when authentication_order exceeds maximum items" +# # success_msg: "Authentication_order max items correctly failed" +# # tags: [negative, dot1x, auth_order_max] + +# # - name: "TEST 24: Invalid dot1x_interface_config - string instead of dict" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_dot1x_config_invalid_dict_type }}" +# # register: result_dot1x_invalid_dict +# # ignore_errors: true +# # tags: [negative, dot1x, config_invalid_dict] + +# # - name: Assert dot1x_interface_config invalid dict type fails +# # assert: +# # that: +# # - result_dot1x_invalid_dict.failed == true +# # - "'must be of type dictionary' in result_dot1x_invalid_dict.msg" +# # fail_msg: "Test should fail when dot1x_interface_config is a string" +# # success_msg: "Dot1x_interface_config invalid dict type correctly failed" +# # tags: [negative, dot1x, config_invalid_dict] + +# # - name: "TEST 25: Invalid authentication_order - invalid choice in list" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_authentication_order_invalid_choice }}" +# # register: result_auth_order_invalid_choice +# # ignore_errors: true +# # tags: [negative, dot1x, auth_order_invalid_choice] + +# # - name: Assert authentication_order invalid choice fails +# # assert: +# # that: +# # - result_auth_order_invalid_choice.failed == true +# # - "'must be one of' in result_auth_order_invalid_choice.msg" +# # fail_msg: "Test should fail when authentication_order contains invalid choice" +# # success_msg: "Authentication_order invalid choice correctly failed" +# # tags: [negative, dot1x, auth_order_invalid_choice] + +# # - name: "TEST 26: Invalid authentication_order - non-string elements" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_authentication_order_non_string }}" +# # register: result_auth_order_non_string +# # ignore_errors: true +# # tags: [negative, dot1x, auth_order_non_string] + +# # - name: Assert authentication_order non-string elements fails +# # assert: +# # that: +# # - result_auth_order_non_string.failed == true +# # - "'must be one of' in result_auth_order_non_string.msg" +# # fail_msg: "Test should fail when authentication_order contains non-string elements" +# # success_msg: "Authentication_order non-string elements correctly failed" +# # tags: [negative, dot1x, auth_order_non_string] + +# # - name: "TEST 27: Invalid authentication mode choice" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_invalid_authentication_mode }}" +# # register: result_invalid_auth_mode +# # ignore_errors: true +# # tags: [negative, 
dot1x, invalid_auth_mode] + +# # - name: Assert invalid authentication mode fails +# # assert: +# # that: +# # - result_invalid_auth_mode.failed == true +# # - "'must be one of' in result_invalid_auth_mode.msg" +# # fail_msg: "Test should fail when authentication_mode has invalid choice" +# # success_msg: "Invalid authentication mode correctly failed" +# # tags: [negative, dot1x, invalid_auth_mode] + +# # - name: "TEST 28: Authentication mode as non-string" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_authentication_mode_non_string }}" +# # register: result_auth_mode_non_string +# # ignore_errors: true +# # tags: [negative, dot1x, auth_mode_non_string] + +# # - name: Assert authentication mode non-string fails +# # assert: +# # that: +# # - result_auth_mode_non_string.failed == true +# # - "'must be of type str' in result_auth_mode_non_string.msg" +# # fail_msg: "Test should fail when authentication_mode is not a string" +# # success_msg: "Authentication mode non-string correctly failed" +# # tags: [negative, dot1x, auth_mode_non_string] + +# # - name: "TEST 29: Invalid PAE type choice" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_invalid_pae_type }}" +# # register: result_invalid_pae_type +# # ignore_errors: true +# # tags: [negative, dot1x, invalid_pae_type] + +# # - name: Assert invalid PAE type fails +# # assert: +# # that: +# # - result_invalid_pae_type.failed == true +# # - "'must be one of' in result_invalid_pae_type.msg" +# # fail_msg: "Test should fail when PAE type has invalid choice" +# # success_msg: "Invalid PAE type correctly failed" +# # tags: [negative, dot1x, invalid_pae_type] + +# # - name: "TEST 30: Invalid control direction choice" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_invalid_control_direction }}" +# # register: result_invalid_control_direction +# # ignore_errors: true +# # tags: [negative, dot1x, invalid_control_direction] + +# # - name: Assert invalid control direction fails +# # assert: +# # that: +# # - result_invalid_control_direction.failed == true +# # - "'must be one of' in result_invalid_control_direction.msg" +# # fail_msg: "Test should fail when control direction has invalid choice" +# # success_msg: "Invalid control direction correctly failed" +# # tags: [negative, dot1x, invalid_control_direction] + +# # - name: "TEST 31: Invalid host mode choice" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_invalid_host_mode }}" +# # register: result_invalid_host_mode +# # ignore_errors: true +# # tags: [negative, dot1x, invalid_host_mode] + +# # - name: Assert invalid host mode fails +# # assert: +# # that: +# # - result_invalid_host_mode.failed == true +# # - "'must be one of' in result_invalid_host_mode.msg" +# # fail_msg: "Test should fail when host mode has invalid choice" +# # success_msg: "Invalid host mode correctly failed" +# # tags: [negative, dot1x, invalid_host_mode] + +# # - name: "TEST 32: Invalid port control choice" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_invalid_port_control }}" +# # register: result_invalid_port_control +# # 
ignore_errors: true +# # tags: [negative, dot1x, invalid_port_control] + +# # - name: Assert invalid port control fails +# # assert: +# # that: +# # - result_invalid_port_control.failed == true +# # - "'must be one of' in result_invalid_port_control.msg" +# # fail_msg: "Test should fail when port control has invalid choice" +# # success_msg: "Invalid port control correctly failed" +# # tags: [negative, dot1x, invalid_port_control] + +# # - name: "TEST 33: Invalid inactivity timer - below range" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_inactivity_timer_below_range }}" +# # register: result_inactivity_timer_below +# # ignore_errors: true +# # tags: [negative, dot1x, inactivity_timer_below] + +# # - name: Assert inactivity timer below range fails +# # assert: +# # that: +# # - result_inactivity_timer_below.failed == true +# # - "'must be within the range (0, 65535)' in result_inactivity_timer_below.msg" +# # fail_msg: "Test should fail when inactivity timer is below range" +# # success_msg: "Inactivity timer below range correctly failed" +# # tags: [negative, dot1x, inactivity_timer_below] + +# # - name: "TEST 34: Invalid inactivity timer - above range" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_inactivity_timer_above_range }}" +# # register: result_inactivity_timer_above +# # ignore_errors: true +# # tags: [negative, dot1x, inactivity_timer_above] + +# # - name: Assert inactivity timer above range fails +# # assert: +# # that: +# # - result_inactivity_timer_above.failed == true +# # - "'must be within the range' in result_inactivity_timer_above.msg" +# # fail_msg: "Test should fail when inactivity timer is above range" +# # success_msg: "Inactivity timer above range correctly failed" +# # tags: [negative, dot1x, inactivity_timer_above] + +# # - name: "TEST 35: Invalid max reauth requests - below range" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_max_reauth_below_range }}" +# # register: result_max_reauth_below +# # ignore_errors: true +# # tags: [negative, dot1x, max_reauth_below] + +# # - name: Assert max reauth requests below range fails +# # assert: +# # that: +# # - result_max_reauth_below.failed == true +# # - "'must be within the range' in result_max_reauth_below.msg" +# # fail_msg: "Test should fail when max reauth requests is below range" +# # success_msg: "Max reauth requests below range correctly failed" +# # tags: [negative, dot1x, max_reauth_below] + +# # - name: "TEST 36: Invalid reauth timer - below range" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_reauth_timer_below_range }}" +# # register: result_reauth_timer_below +# # ignore_errors: true +# # tags: [negative, dot1x, reauth_timer_below] + +# # - name: Assert reauth timer below range fails +# # assert: +# # that: +# # - result_reauth_timer_below.failed == true +# # - "'must be within the range' in result_reauth_timer_below.msg" +# # fail_msg: "Test should fail when reauth timer is below range" +# # success_msg: "Reauth timer below range correctly failed" +# # tags: [negative, dot1x, reauth_timer_below] + +# # - name: "TEST 37: Invalid tx period - below range" +# # 
cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_tx_period_below_range }}" +# # register: result_tx_period_below +# # ignore_errors: true +# # tags: [negative, dot1x, tx_period_below] + +# # - name: Assert tx period below range fails +# # assert: +# # that: +# # - result_tx_period_below.failed == true +# # - "'must be within the range' in result_tx_period_below.msg" +# # fail_msg: "Test should fail when tx period is below range" +# # success_msg: "Tx period below range correctly failed" +# # tags: [negative, dot1x, tx_period_below] + +# # - name: "TEST 38: Boolean parameter as string" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_boolean_as_string }}" +# # register: result_boolean_as_string +# # ignore_errors: true +# # tags: [negative, dot1x, boolean_as_string] + +# # - name: Assert boolean parameter as string fails +# # assert: +# # that: +# # - result_boolean_as_string.failed == true +# # - "'must be of type bool' in result_boolean_as_string.msg" +# # fail_msg: "Test should fail when boolean parameter is a string" +# # success_msg: "Boolean parameter as string correctly failed" +# # tags: [negative, dot1x, boolean_as_string] + +# # - name: "TEST 39: Boolean parameter as integer" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_boolean_as_integer }}" +# # register: result_boolean_as_integer +# # ignore_errors: true +# # tags: [negative, dot1x, boolean_as_integer] + +# # - name: Assert boolean parameter as integer fails +# # assert: +# # that: +# # - result_boolean_as_integer.failed == true +# # - "'must be of type bool' in result_boolean_as_integer.msg" +# # fail_msg: "Test should fail when boolean parameter is an integer" +# # success_msg: "Boolean parameter as integer correctly failed" +# # tags: [negative, dot1x, boolean_as_integer] + +# # - name: "TEST 40: Invalid priority - exceeds max items" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_priority_max_items }}" +# # register: result_priority_max_items +# # ignore_errors: true +# # tags: [negative, dot1x, priority_max_items] + +# # - name: Assert priority max items fails +# # assert: +# # that: +# # - result_priority_max_items.failed == true +# # - "'must be one of' in result_priority_max_items.msg" +# # fail_msg: "Test should fail when priority exceeds maximum items" +# # success_msg: "Priority max items correctly failed" +# # tags: [negative, dot1x, priority_max_items] + +# # - name: "TEST 41: Invalid priority - invalid choice" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_priority_invalid_choice }}" +# # register: result_priority_invalid_choice +# # ignore_errors: true +# # tags: [negative, dot1x, priority_invalid_choice] + +# # - name: Assert priority invalid choice fails +# # assert: +# # that: +# # - result_priority_invalid_choice.failed == true +# # - "'must be one of' in result_priority_invalid_choice.msg" +# # fail_msg: "Test should fail when priority contains invalid choice" +# # success_msg: "Priority invalid choice correctly failed" +# # tags: [negative, dot1x, priority_invalid_choice] + +# # 
################################################################################################################### +# # # NEGATIVE TEST CASES - MAB INTERFACE CONFIG VALIDATION +# # ################################################################################################################### + +# # - name: "NEGATIVE TEST - MAB Interface Config Tests" +# # block: +# # - name: "TEST 42: Invalid mab_interface_config - string instead of dict" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_mab_config_invalid_dict_type }}" +# # register: result_mab_invalid_dict +# # ignore_errors: true +# # tags: [negative, mab, config_invalid_dict] + +# # - name: Assert mab_interface_config invalid dict fails +# # assert: +# # that: +# # - result_mab_invalid_dict.failed == true +# # - "'must be a dictionary' in result_mab_invalid_dict.msg or 'must be of type dict' in result_mab_invalid_dict.msg" +# # fail_msg: "Test should fail when mab_interface_config is not a dictionary" +# # success_msg: "Mab_interface_config invalid dict correctly failed" +# # tags: [negative, mab, config_invalid_dict] + +# # - name: "TEST 43: Invalid enable_mab - string instead of boolean" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_enable_mab_string }}" +# # register: result_enable_mab_string +# # ignore_errors: true +# # tags: [negative, mab, enable_mab_string] + +# # - name: Assert enable_mab string type fails +# # assert: +# # that: +# # - result_enable_mab_string.failed == true +# # - "'must be a boolean' in result_enable_mab_string.msg or 'must be of type bool' in result_enable_mab_string.msg" +# # fail_msg: "Test should fail when enable_mab is a string" +# # success_msg: "Enable_mab string type correctly failed" +# # tags: [negative, mab, enable_mab_string] + +# # - name: "TEST 44: Invalid enable_mab - integer instead of boolean" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_enable_mab_integer }}" +# # register: result_enable_mab_integer +# # ignore_errors: true +# # tags: [negative, mab, enable_mab_integer] + +# # - name: Assert enable_mab integer type fails +# # assert: +# # that: +# # - result_enable_mab_integer.failed == true +# # - "'must be a boolean' in result_enable_mab_integer.msg or 'must be of type bool' in result_enable_mab_integer.msg" +# # fail_msg: "Test should fail when enable_mab is an integer" +# # success_msg: "Enable_mab integer type correctly failed" +# # tags: [negative, mab, enable_mab_integer] + +# # ################################################################################################################### +# # # NEGATIVE TEST CASES - STP INTERFACE CONFIG VALIDATION +# # ################################################################################################################### + +# # - name: "NEGATIVE TEST - STP Interface Config Tests" +# # block: +# # - name: "TEST 45: Invalid stp_interface_config - string instead of dict" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_stp_interface_config_invalid_dict_type }}" +# # register: result_stp_config_invalid_dict +# # ignore_errors: true +# # tags: [negative, stp, config_invalid_dict] + +# # - name: Assert stp_interface_config invalid dict 
fails +# # assert: +# # that: +# # - result_stp_config_invalid_dict.failed == true +# # - "'stp_interface_config must be a dictionary' in result_stp_config_invalid_dict.msg or 'must be of type dict' in result_stp_config_invalid_dict.msg" +# # fail_msg: "Test should fail when stp_interface_config is not a dictionary" +# # success_msg: "Stp_interface_config invalid dict correctly failed" +# # tags: [negative, stp, config_invalid_dict] + +# # - name: "TEST 46: Invalid stp_interface_portfast_mode - invalid choice" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_stp_interface_portfast_mode_invalid }}" +# # register: result_stp_portfast_mode_invalid +# # ignore_errors: true +# # tags: [negative, stp, portfast_mode_invalid] + +# # - name: Assert stp_interface_portfast_mode invalid choice fails +# # assert: +# # that: +# # - result_stp_portfast_mode_invalid.failed == true +# # - "'must be one of' in result_stp_portfast_mode_invalid.msg" +# # fail_msg: "Test should fail when stp_interface_portfast_mode has invalid choice" +# # success_msg: "Stp_interface_portfast_mode invalid choice correctly failed" +# # tags: [negative, stp, portfast_mode_invalid] + +# # - name: "TEST 47: Invalid stp_interface_portfast_mode - integer instead of string" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_stp_interface_portfast_mode_integer }}" +# # register: result_stp_portfast_mode_integer +# # ignore_errors: true +# # tags: [negative, stp, portfast_mode_integer] + +# # - name: Assert stp_interface_portfast_mode integer type fails +# # assert: +# # that: +# # - result_stp_portfast_mode_integer.failed == true +# # - "'must be of type str' in result_stp_portfast_mode_integer.msg" +# # fail_msg: "Test should fail when stp_interface_portfast_mode is an integer" +# # success_msg: "Stp_interface_portfast_mode integer type correctly failed" +# # tags: [negative, stp, portfast_mode_integer] + +# # - name: "TEST 48: Invalid stp_interface_bpdu_filter - string instead of boolean" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_stp_interface_bpdu_filter_string }}" +# # register: result_stp_bpdu_filter_string +# # ignore_errors: true +# # tags: [negative, stp, bpdu_filter_string] + +# # - name: Assert stp_interface_bpdu_filter string type fails +# # assert: +# # that: +# # - result_stp_bpdu_filter_string.failed == true +# # - "'must be of type bool' in result_stp_bpdu_filter_string.msg" +# # fail_msg: "Test should fail when stp_interface_bpdu_filter is a string" +# # success_msg: "Stp_interface_bpdu_filter string type correctly failed" +# # tags: [negative, stp, bpdu_filter_string] + +# # - name: "TEST 49: Invalid stp_interface_bpdu_guard - string instead of boolean" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_stp_interface_bpdu_guard_string }}" +# # register: result_stp_bpdu_guard_string +# # ignore_errors: true +# # tags: [negative, stp, bpdu_guard_string] + +# # - name: Assert stp_interface_bpdu_guard string type fails +# # assert: +# # that: +# # - result_stp_bpdu_guard_string.failed == true +# # - "'must be of type bool' in result_stp_bpdu_guard_string.msg" +# # fail_msg: "Test should fail when stp_interface_bpdu_guard is a 
string" +# # success_msg: "Stp_interface_bpdu_guard string type correctly failed" +# # tags: [negative, stp, bpdu_guard_string] + +# # - name: "TEST 50: Invalid stp_interface_cost - string instead of integer" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_stp_interface_cost_string }}" +# # register: result_stp_cost_string +# # ignore_errors: true +# # tags: [negative, stp, cost_string] + +# # - name: Assert stp_interface_cost string type fails +# # assert: +# # that: +# # - result_stp_cost_string.failed == true +# # - "'stp_interface_cost must be an integer' in result_stp_cost_string.msg or 'must be of type int' in result_stp_cost_string.msg" +# # fail_msg: "Test should fail when stp_interface_cost is a string" +# # success_msg: "Stp_interface_cost string type correctly failed" +# # tags: [negative, stp, cost_string] + +# # - name: "TEST 51: Invalid stp_interface_cost - negative value" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_stp_interface_cost_negative }}" +# # register: result_stp_cost_negative +# # ignore_errors: true +# # tags: [negative, stp, cost_negative] + +# # - name: Assert stp_interface_cost negative fails +# # assert: +# # that: +# # - result_stp_cost_negative.failed == true +# # - "'must be within the range' in result_stp_cost_negative.msg or 'must be greater than 0' in result_stp_cost_negative.msg" +# # fail_msg: "Test should fail when stp_interface_cost is negative" +# # success_msg: "Stp_interface_cost negative correctly failed" +# # tags: [negative, stp, cost_negative] + +# # - name: "TEST 52: Invalid stp_interface_cost - exceeds maximum value" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_stp_interface_cost_max }}" +# # register: result_stp_cost_max +# # ignore_errors: true +# # tags: [negative, stp, cost_max] + +# # - name: Assert stp_interface_cost maximum fails +# # assert: +# # that: +# # - result_stp_cost_max.failed == true +# # - "'must be within the range' in result_stp_cost_max.msg or 'greater than maximum' in result_stp_cost_max.msg" +# # fail_msg: "Test should fail when stp_interface_cost exceeds maximum" +# # success_msg: "Stp_interface_cost maximum correctly failed" +# # tags: [negative, stp, cost_max] + +# # - name: "TEST 53: Invalid stp_interface_guard - invalid choice" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_stp_interface_guard_invalid }}" +# # register: result_stp_guard_invalid +# # ignore_errors: true +# # tags: [negative, stp, guard_invalid] + +# # - name: Assert stp_interface_guard invalid choice fails +# # assert: +# # that: +# # - result_stp_guard_invalid.failed == true +# # - "'must be one of' in result_stp_guard_invalid.msg" +# # fail_msg: "Test should fail when stp_interface_guard has invalid choice" +# # success_msg: "Stp_interface_guard invalid choice correctly failed" +# # tags: [negative, stp, guard_invalid] + +# # - name: "TEST 54: Invalid stp_interface_priority - string instead of integer" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_stp_interface_priority_string }}" +# # register: result_stp_priority_string +# # ignore_errors: 
true +# # tags: [negative, stp, priority_string] + +# # - name: Assert stp_interface_priority string type fails +# # assert: +# # that: +# # - result_stp_priority_string.failed == true +# # - "'must be of type int' in result_stp_priority_string.msg" +# # fail_msg: "Test should fail when stp_interface_priority is a string" +# # success_msg: "Stp_interface_priority string type correctly failed" +# # tags: [negative, stp, priority_string] + +# # - name: "TEST 55: Invalid stp_interface_priority - negative value" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_stp_interface_priority_negative }}" +# # register: result_stp_priority_negative +# # ignore_errors: true +# # tags: [negative, stp, priority_negative] + +# # - name: Assert stp_interface_priority negative fails +# # assert: +# # that: +# # - result_stp_priority_negative.failed == true +# # - "'must be within the range' in result_stp_priority_negative.msg or 'must be greater than' in result_stp_priority_negative.msg" +# # fail_msg: "Test should fail when stp_interface_priority is negative" +# # success_msg: "Stp_interface_priority negative correctly failed" +# # tags: [negative, stp, priority_negative] + +# # - name: "TEST 56: Invalid stp_interface_priority - exceeds maximum value" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_stp_interface_priority_max }}" +# # register: result_stp_priority_max +# # ignore_errors: true +# # tags: [negative, stp, priority_max] + +# # - name: Assert stp_interface_priority maximum fails +# # assert: +# # that: +# # - result_stp_priority_max.failed == true +# # - "'must be within the range' in result_stp_priority_max.msg or 'must be within the range (0, 240)' in result_stp_priority_max.msg" +# # fail_msg: "Test should fail when stp_interface_priority exceeds maximum" +# # success_msg: "Stp_interface_priority maximum correctly failed" +# # tags: [negative, stp, priority_max] + +# # - name: "TEST 57: Invalid stp_interface_priority - not multiple of 16" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_stp_interface_priority_not_multiple }}" +# # register: result_stp_priority_multiple +# # ignore_errors: true +# # tags: [negative, stp, priority_multiple] + +# # - name: Assert stp_interface_priority multiple of 16 fails +# # assert: +# # that: +# # - result_stp_priority_multiple.failed == true +# # - "'multiple of 16' in result_stp_priority_multiple.msg or 'must be divisible by 16' in result_stp_priority_multiple.msg" +# # fail_msg: "Test should fail when stp_interface_priority is not multiple of 16" +# # success_msg: "Stp_interface_priority multiple of 16 correctly failed" +# # tags: [negative, stp, priority_multiple] + +# # - name: "TEST 58: Invalid stp_interface_per_vlan_cost - string instead of dict" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_stp_interface_per_vlan_cost_invalid_dict }}" +# # register: result_stp_per_vlan_cost_invalid_dict +# # ignore_errors: true +# # tags: [negative, stp, per_vlan_cost_invalid_dict] + +# # - name: Assert stp_interface_per_vlan_cost invalid dict fails +# # assert: +# # that: +# # - result_stp_per_vlan_cost_invalid_dict.failed == true +# # - "'must be a dictionary' in 
result_stp_per_vlan_cost_invalid_dict.msg or 'must be of type dict' in result_stp_per_vlan_cost_invalid_dict.msg" +# # fail_msg: "Test should fail when stp_interface_per_vlan_cost is not a dictionary" +# # success_msg: "Stp_interface_per_vlan_cost invalid dict correctly failed" +# # tags: [negative, stp, per_vlan_cost_invalid_dict] + +# # - name: "TEST 59: Invalid stp_interface_per_vlan_cost priority - string instead of integer" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_stp_interface_per_vlan_cost_priority_string }}" +# # register: result_stp_per_vlan_cost_priority_string +# # ignore_errors: true +# # tags: [negative, stp, per_vlan_cost_priority_string] + +# # - name: Assert stp_interface_per_vlan_cost priority string fails +# # assert: +# # that: +# # - result_stp_per_vlan_cost_priority_string.failed == true +# # - "'priority must be an integer' in result_stp_per_vlan_cost_priority_string.msg or 'must be of type int' in result_stp_per_vlan_cost_priority_string.msg" +# # fail_msg: "Test should fail when per_vlan_cost priority is a string" +# # success_msg: "Stp_interface_per_vlan_cost priority string correctly failed" +# # tags: [negative, stp, per_vlan_cost_priority_string] + +# # - name: "TEST 60: Invalid stp_interface_per_vlan_cost vlan_ids - string instead of list" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_stp_interface_per_vlan_cost_vlan_ids_string }}" +# # register: result_stp_per_vlan_cost_vlan_ids_string +# # ignore_errors: true +# # tags: [negative, stp, per_vlan_cost_vlan_ids_string] + +# # - name: Assert stp_interface_per_vlan_cost vlan_ids string fails +# # assert: +# # that: +# # - result_stp_per_vlan_cost_vlan_ids_string.failed == true +# # - "'must be a list' in result_stp_per_vlan_cost_vlan_ids_string.msg or 'must be of type list' in result_stp_per_vlan_cost_vlan_ids_string.msg" +# # fail_msg: "Test should fail when per_vlan_cost vlan_ids is a string" +# # success_msg: "Stp_interface_per_vlan_cost vlan_ids string correctly failed" +# # tags: [negative, stp, per_vlan_cost_vlan_ids_string] + +# # - name: "TEST 61: Invalid stp_interface_per_vlan_priority - string instead of dict" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_stp_interface_per_vlan_priority_invalid_dict }}" +# # register: result_stp_per_vlan_priority_invalid_dict +# # ignore_errors: true +# # tags: [negative, stp, per_vlan_priority_invalid_dict] + +# # - name: Assert stp_interface_per_vlan_priority invalid dict fails +# # assert: +# # that: +# # - result_stp_per_vlan_priority_invalid_dict.failed == true +# # - "'must be a dictionary' in result_stp_per_vlan_priority_invalid_dict.msg or 'must be of type dict' in result_stp_per_vlan_priority_invalid_dict.msg" +# # fail_msg: "Test should fail when stp_interface_per_vlan_priority is not a dictionary" +# # success_msg: "Stp_interface_per_vlan_priority invalid dict correctly failed" +# # tags: [negative, stp, per_vlan_priority_invalid_dict] + +# # # ################################################################################################################### +# # # # NEGATIVE TEST CASES - DHCP SNOOPING INTERFACE CONFIG VALIDATION +# # # ################################################################################################################### + +# 
# - name: "NEGATIVE TEST - DHCP Snooping Interface Config Tests" +# # block: +# # - name: "TEST 62: Invalid dhcp_snooping_interface_config - string instead of dict" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_dhcp_snooping_interface_config_invalid_dict_type }}" +# # register: result_dhcp_snooping_invalid_dict +# # ignore_errors: true +# # tags: [negative, dhcp_snooping, config_invalid_dict] + +# # - name: Assert dhcp_snooping_interface_config invalid dict fails +# # assert: +# # that: +# # - result_dhcp_snooping_invalid_dict.failed == true +# # - "' must be a dictionary' in result_dhcp_snooping_invalid_dict.msg or 'must be of type dict' in result_dhcp_snooping_invalid_dict.msg" +# # fail_msg: "Test should fail when dhcp_snooping_interface_config is not a dictionary" +# # success_msg: "Dhcp_snooping_interface_config invalid dict correctly failed" +# # tags: [negative, dhcp_snooping, config_invalid_dict] + +# # - name: "TEST 63: Invalid dhcp_snooping_interface_rate - string instead of integer" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_dhcp_snooping_interface_rate_string }}" +# # register: result_dhcp_rate_string +# # ignore_errors: true +# # tags: [negative, dhcp_snooping, rate_string] + +# # - name: Assert dhcp_snooping_interface_rate string type fails +# # assert: +# # that: +# # - result_dhcp_rate_string.failed == true +# # - "' must be an integer' in result_dhcp_rate_string.msg or 'must be of type int' in result_dhcp_rate_string.msg" +# # fail_msg: "Test should fail when dhcp_snooping_interface_rate is a string" +# # success_msg: "Dhcp_snooping_interface_rate string type correctly failed" +# # tags: [negative, dhcp_snooping, rate_string] + +# # - name: "TEST 64: Invalid dhcp_snooping_interface_rate - negative value" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_dhcp_snooping_interface_rate_negative }}" +# # register: result_dhcp_rate_negative +# # ignore_errors: true +# # tags: [negative, dhcp_snooping, rate_negative] + +# # - name: Assert dhcp_snooping_interface_rate negative fails +# # assert: +# # that: +# # - result_dhcp_rate_negative.failed == true +# # - "'must be within the range (1, 2048)' in result_dhcp_rate_negative.msg or 'must be greater than 0' in result_dhcp_rate_negative.msg" +# # fail_msg: "Test should fail when dhcp_snooping_interface_rate is negative" +# # success_msg: "Dhcp_snooping_interface_rate negative correctly failed" +# # tags: [negative, dhcp_snooping, rate_negative] + +# # - name: "TEST 65: Invalid dhcp_snooping_interface_rate - exceeds maximum value" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_dhcp_snooping_interface_rate_max }}" +# # register: result_dhcp_rate_max +# # ignore_errors: true +# # tags: [negative, dhcp_snooping, rate_max] + +# # - name: Assert dhcp_snooping_interface_rate max value fails +# # assert: +# # that: +# # - result_dhcp_rate_max.failed == true +# # - "'must be within the range (1, 2048)' in result_dhcp_rate_max.msg or 'exceeds maximum' in result_dhcp_rate_max.msg" +# # fail_msg: "Test should fail when dhcp_snooping_interface_rate exceeds maximum" +# # success_msg: "Dhcp_snooping_interface_rate max value correctly failed" +# # 
tags: [negative, dhcp_snooping, rate_max] + +# # - name: "TEST 66: Invalid dhcp_snooping_interface_trust - string instead of boolean" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_dhcp_snooping_interface_trust_string }}" +# # register: result_dhcp_trust_string +# # ignore_errors: true +# # tags: [negative, dhcp_snooping, trust_string] + +# # - name: Assert dhcp_snooping_interface_trust string type fails +# # assert: +# # that: +# # - result_dhcp_trust_string.failed == true +# # - "' must be a boolean' in result_dhcp_trust_string.msg or 'must be of type bool' in result_dhcp_trust_string.msg" +# # fail_msg: "Test should fail when dhcp_snooping_interface_trust is a string" +# # success_msg: "Dhcp_snooping_interface_trust string type correctly failed" +# # tags: [negative, dhcp_snooping, trust_string] + +# # # ################################################################################################################### +# # # # NEGATIVE TEST CASES - CDP INTERFACE CONFIG VALIDATION +# # # ################################################################################################################### + +# # - name: "NEGATIVE TEST - CDP Interface Config Tests" +# # block: +# # - name: "TEST 67: Invalid cdp_interface_config - string instead of dict" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_cdp_interface_config_invalid_dict_type }}" +# # register: result_cdp_invalid_dict +# # ignore_errors: true +# # tags: [negative, cdp, config_invalid_dict] + +# # - name: Assert cdp_interface_config invalid dict fails +# # assert: +# # that: +# # - result_cdp_invalid_dict.failed == true +# # - "' must be a dictionary' in result_cdp_invalid_dict.msg or 'must be of type dict' in result_cdp_invalid_dict.msg" +# # fail_msg: "Test should fail when cdp_interface_config is not a dictionary" +# # success_msg: "Cdp_interface_config invalid dict correctly failed" +# # tags: [negative, cdp, config_invalid_dict] + +# # - name: "TEST 68: Invalid cdp_interface_admin_status - string instead of boolean" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_cdp_interface_admin_status_string }}" +# # register: result_cdp_admin_status_string +# # ignore_errors: true +# # tags: [negative, cdp, admin_status_string] + +# # - name: Assert cdp_interface_admin_status string type fails +# # assert: +# # that: +# # - result_cdp_admin_status_string.failed == true +# # - "' must be a boolean' in result_cdp_admin_status_string.msg or 'must be of type bool' in result_cdp_admin_status_string.msg" +# # fail_msg: "Test should fail when cdp_interface_admin_status is a string" +# # success_msg: "Cdp_interface_admin_status string type correctly failed" +# # tags: [negative, cdp, admin_status_string] + +# # - name: "TEST 69: Invalid cdp_interface_log_duplex_mismatch - string instead of boolean" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_cdp_interface_log_duplex_mismatch_string }}" +# # register: result_cdp_log_duplex_string +# # ignore_errors: true +# # tags: [negative, cdp, log_duplex_string] + +# # - name: Assert cdp_interface_log_duplex_mismatch string type fails +# # assert: +# # that: +# # - result_cdp_log_duplex_string.failed == true +# 
# - "' must be a boolean' in result_cdp_log_duplex_string.msg or 'must be of type bool' in result_cdp_log_duplex_string.msg" +# # fail_msg: "Test should fail when cdp_interface_log_duplex_mismatch is a string" +# # success_msg: "Cdp_interface_log_duplex_mismatch string type correctly failed" +# # tags: [negative, cdp, log_duplex_string] + +# # # ################################################################################################################### +# # # # NEGATIVE TEST CASES - LLDP INTERFACE CONFIG VALIDATION +# # # ################################################################################################################### + +# # - name: "NEGATIVE TEST - LLDP Interface Config Tests" +# # block: +# # - name: "TEST 70: Invalid lldp_interface_config - string instead of dict" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_lldp_interface_config_invalid_dict_type }}" +# # register: result_lldp_invalid_dict +# # ignore_errors: true +# # tags: [negative, lldp, config_invalid_dict] + +# # - name: Assert lldp_interface_config invalid dict fails +# # assert: +# # that: +# # - result_lldp_invalid_dict.failed == true +# # - "' must be a dictionary' in result_lldp_invalid_dict.msg or 'must be of type dict' in result_lldp_invalid_dict.msg" +# # fail_msg: "Test should fail when lldp_interface_config is not a dictionary" +# # success_msg: "Lldp_interface_config invalid dict correctly failed" +# # tags: [negative, lldp, config_invalid_dict] + +# # - name: "TEST 71: Invalid lldp_interface_receive_transmit - invalid choice" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_lldp_interface_receive_transmit_invalid }}" +# # register: result_lldp_receive_transmit_invalid +# # ignore_errors: true +# # tags: [negative, lldp, receive_transmit_invalid] + +# # - name: Assert lldp_interface_receive_transmit invalid choice fails +# # assert: +# # that: +# # - result_lldp_receive_transmit_invalid.failed == true +# # - "'must be one of' in result_lldp_receive_transmit_invalid.msg" +# # fail_msg: "Test should fail when lldp_interface_receive_transmit has invalid choice" +# # success_msg: "Lldp_interface_receive_transmit invalid choice correctly failed" +# # tags: [negative, lldp, receive_transmit_invalid] + +# # - name: "TEST 72: Invalid lldp_interface_receive_transmit - integer instead of string" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_lldp_interface_receive_transmit_integer }}" +# # register: result_lldp_receive_transmit_integer +# # ignore_errors: true +# # tags: [negative, lldp, receive_transmit_integer] + +# # - name: Assert lldp_interface_receive_transmit integer type fails +# # assert: +# # that: +# # - result_lldp_receive_transmit_integer.failed == true +# # - "' must be a string' in result_lldp_receive_transmit_integer.msg or 'must be of type str' in result_lldp_receive_transmit_integer.msg" +# # fail_msg: "Test should fail when lldp_interface_receive_transmit is an integer" +# # success_msg: "Lldp_interface_receive_transmit integer type correctly failed" +# # tags: [negative, lldp, receive_transmit_integer] + +# # # ################################################################################################################### +# # # # NEGATIVE TEST CASES - VTP INTERFACE CONFIG VALIDATION 
+# # # ################################################################################################################### + +# # - name: "NEGATIVE TEST - VTP Interface Config Tests" +# # block: +# # - name: "TEST 73: Invalid vtp_interface_config - string instead of dict" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_vtp_interface_config_invalid_dict_type }}" +# # register: result_vtp_invalid_dict +# # ignore_errors: true +# # tags: [negative, vtp, config_invalid_dict] + +# # - name: Assert vtp_interface_config invalid dict fails +# # assert: +# # that: +# # - result_vtp_invalid_dict.failed == true +# # - "'vtp_interface_config must be a dictionary' in result_vtp_invalid_dict.msg or 'must be of type dict' in result_vtp_invalid_dict.msg" +# # fail_msg: "Test should fail when vtp_interface_config is not a dictionary" +# # success_msg: "Vtp_interface_config invalid dict correctly failed" +# # tags: [negative, vtp, config_invalid_dict] + +# # - name: "TEST 74: Invalid vtp_interface_admin_status - string instead of boolean" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_vtp_interface_admin_status_string }}" +# # register: result_vtp_admin_status_string +# # ignore_errors: true +# # tags: [negative, vtp, admin_status_string] + +# # - name: Assert vtp_interface_admin_status string type fails +# # assert: +# # that: +# # - result_vtp_admin_status_string.failed == true +# # - "'vtp_interface_admin_status must be a boolean' in result_vtp_admin_status_string.msg or 'must be of type bool' in result_vtp_admin_status_string.msg" +# # fail_msg: "Test should fail when vtp_interface_admin_status is a string" +# # success_msg: "Vtp_interface_admin_status string type correctly failed" +# # tags: [negative, vtp, admin_status_string] + +# # ################################################################################################################### +# # # POSITIVE TEST CASES - CREATE OPERATIONS +# # ################################################################################################################### + +# # - name: "POSITIVE TEST - Port Configuration CREATE Operations" +# # block: +# # - name: "CREATE TEST 1: Minimal switchport configuration" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_port_config_create_minimal_switchport }}" +# # register: result_create_minimal +# # tags: [positive, create, minimal_switchport] + +# # - name: Assert minimal switchport creation success +# # assert: +# # that: +# # - result_create_minimal.failed == false +# # - result_create_minimal.changed == true +# # - "'Successfully' in result_create_minimal.msg" +# # fail_msg: "Minimal switchport configuration should succeed" +# # success_msg: "Minimal switchport configuration successfully created" +# # tags: [positive, create, minimal_switchport] + +# # - name: "Wait 60 seconds before next test to allow configuration to settle" +# # pause: +# # seconds: 60 +# # tags: [positive, create, pause] + +# # - name: "CREATE TEST 2: Comprehensive switchport access configuration" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_port_config_create_comprehensive_switchport_access }}" +# # register: result_create_access +# # tags: 
[positive, create, comprehensive_access] + +# # - name: Assert comprehensive access port creation success +# # assert: +# # that: +# # - result_create_access.failed == false +# # - result_create_access.changed == true +# # - "'Successfully' in result_create_access.msg or 'success' in result_create_access.msg" +# # fail_msg: "Comprehensive access port configuration should succeed" +# # success_msg: "Comprehensive access port configuration successfully created" +# # tags: [positive, create, comprehensive_access] + +# # - name: "CREATE TEST 3: Comprehensive switchport trunk configuration" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_port_config_create_comprehensive_switchport_trunk }}" +# # register: result_create_trunk +# # tags: [positive, create, comprehensive_trunk] + +# # - name: Assert comprehensive trunk port creation success +# # assert: +# # that: +# # - result_create_trunk.failed == false +# # - result_create_trunk.changed == true +# # - "'Successfully' in result_create_trunk.msg" +# # fail_msg: "Comprehensive trunk port configuration should succeed" +# # success_msg: "Comprehensive trunk port configuration successfully created" +# # tags: [positive, create, comprehensive_trunk] + +# # - name: "CREATE TEST 4: Switchport with VLAN trunking" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_port_config_create_switchport_with_trunking }}" +# # register: result_create_trunking +# # tags: [positive, create, with_trunking] + +# # - name: Assert switchport with trunking creation success +# # assert: +# # that: +# # - result_create_trunking.failed == false +# # - result_create_trunking.changed == true +# # - "'Successfully' in result_create_trunking.msg or 'success' in result_create_trunking.msg" +# # fail_msg: "Switchport with trunking configuration should succeed" +# # success_msg: "Switchport with trunking configuration successfully created" +# # tags: [positive, create, with_trunking] + +# # - name: "CREATE TEST 5: Access port with 802.1X authentication" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_port_config_create_access_with_dot1x }}" +# # register: result_create_dot1x +# # tags: [positive, create, access_dot1x] + +# # - name: Assert access port with 802.1X creation success +# # assert: +# # that: +# # - result_create_dot1x.failed == false +# # - result_create_dot1x.changed == true +# # - "'Successfully' in result_create_dot1x.msg or 'success' in result_create_dot1x.msg" +# # fail_msg: "Access port with 802.1X configuration should succeed" +# # success_msg: "Access port with 802.1X configuration successfully created" +# # tags: [positive, create, access_dot1x] + +# # - name: "CREATE TEST 6: Access port with MAB authentication" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_port_config_create_access_with_mab }}" +# # register: result_create_mab +# # tags: [positive, create, access_mab] + +# # - name: Assert access port with MAB creation success +# # assert: +# # that: +# # - result_create_mab.failed == false +# # - result_create_mab.changed == true +# # - "'Successfully' in result_create_mab.msg or 'success' in result_create_mab.msg" +# # fail_msg: "Access port with MAB configuration 
should succeed" +# # success_msg: "Access port with MAB configuration successfully created" +# # tags: [positive, create, access_mab] + +# # - name: "CREATE TEST 7: Access port with STP configuration" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_port_config_create_access_with_stp }}" +# # register: result_create_stp +# # tags: [positive, create, access_stp] + +# # - name: Assert access port with STP creation success +# # assert: +# # that: +# # - result_create_stp.failed == false +# # - result_create_stp.changed == true +# # - "'Successfully' in result_create_stp.msg or 'success' in result_create_stp.msg" +# # fail_msg: "Access port with STP configuration should succeed" +# # success_msg: "Access port with STP configuration successfully created" +# # tags: [positive, create, access_stp] + +# # - name: "CREATE TEST 8: Access port with DHCP snooping" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_port_config_create_access_with_dhcp_snooping }}" +# # register: result_create_dhcp_snooping +# # tags: [positive, create, access_dhcp_snooping] + +# # - name: Assert access port with DHCP snooping creation success +# # assert: +# # that: +# # - result_create_dhcp_snooping.failed == false +# # - result_create_dhcp_snooping.changed == true +# # - "'Successfully' in result_create_dhcp_snooping.msg or 'success' in result_create_dhcp_snooping.msg" +# # fail_msg: "Access port with DHCP snooping configuration should succeed" +# # success_msg: "Access port with DHCP snooping configuration successfully created" +# # tags: [positive, create, access_dhcp_snooping] + +# # - name: "CREATE TEST 9: Access port with CDP and LLDP" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_port_config_create_access_with_cdp_lldp }}" +# # register: result_create_cdp_lldp +# # tags: [positive, create, access_cdp_lldp] + +# # - name: Assert access port with CDP and LLDP creation success +# # assert: +# # that: +# # - result_create_cdp_lldp.failed == false +# # - result_create_cdp_lldp.changed == true +# # - "'Successfully' in result_create_cdp_lldp.msg or 'success' in result_create_cdp_lldp.msg" +# # fail_msg: "Access port with CDP and LLDP configuration should succeed" +# # success_msg: "Access port with CDP and LLDP configuration successfully created" +# # tags: [positive, create, access_cdp_lldp] + +# # - name: "CREATE TEST 10: Comprehensive all features single port" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_port_config_create_comprehensive_all_features }}" +# # register: result_create_all_features +# # tags: [positive, create, all_features] + +# # - name: Assert comprehensive all features creation success +# # assert: +# # that: +# # - result_create_all_features.failed == false +# # - result_create_all_features.changed == true +# # - "'Successfully' in result_create_all_features.msg or 'success' in result_create_all_features.msg" +# # fail_msg: "Comprehensive all features configuration should succeed" +# # success_msg: "Comprehensive all features configuration successfully created" +# # tags: [positive, create, all_features] + +# # - name: "CREATE TEST 11: Multiple ports different configurations" +# # 
cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_port_config_create_multiple_ports_different }}" +# # register: result_create_multiple_different +# # tags: [positive, create, multiple_different] + +# # - name: Assert multiple ports different configurations success +# # assert: +# # that: +# # - result_create_multiple_different.failed == false +# # - result_create_multiple_different.changed == true +# # - "'Successfully' in result_create_multiple_different.msg or 'success' in result_create_multiple_different.msg" +# # fail_msg: "Multiple ports with different configurations should succeed" +# # success_msg: "Multiple ports with different configurations successfully created" +# # tags: [positive, create, multiple_different] + +# # - name: "CREATE TEST 12: Multiple ports same configuration" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_port_config_create_multiple_ports_same }}" +# # register: result_create_multiple_same +# # tags: [positive, create, multiple_same] + +# # - name: Assert multiple ports same configurations success +# # assert: +# # that: +# # - result_create_multiple_same.failed == false +# # - result_create_multiple_same.changed == true +# # - "'Successfully' in result_create_multiple_same.msg or 'success' in result_create_multiple_same.msg" +# # fail_msg: "Multiple ports with same configurations should succeed" +# # success_msg: "Multiple ports with same configurations successfully created" +# # tags: [positive, create, multiple_same] + +# # - name: "CREATE TEST 13: Dynamic mode configurations" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_port_config_create_dynamic_modes }}" +# # register: result_create_dynamic_modes +# # tags: [positive, create, dynamic_modes] + +# # - name: Assert dynamic mode configurations success +# # assert: +# # that: +# # - result_create_dynamic_modes.failed == false +# # - result_create_dynamic_modes.changed == true +# # - "'Successfully' in result_create_dynamic_modes.msg or 'success' in result_create_dynamic_modes.msg" +# # fail_msg: "Dynamic mode configurations should succeed" +# # success_msg: "Dynamic mode configurations successfully created" +# # tags: [positive, create, dynamic_modes] + +# # ################################################################################################################### +# # # POSITIVE TEST CASES - UPDATE OPERATIONS +# # ################################################################################################################### + +# # - name: "POSITIVE TEST - Port Configuration UPDATE Operations" +# # block: +# # - name: "UPDATE TEST 1: Modify switchport description" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_port_config_update_description }}" +# # register: result_update_description +# # tags: [positive, update, description] + +# # - name: Assert description update success +# # assert: +# # that: +# # - result_update_description.failed == false +# # - result_update_description.changed == true +# # - "'Successfully' in result_update_description.msg or 'success' in result_update_description.msg" +# # fail_msg: "Description update should succeed" +# # success_msg: "Description update successfully completed" 
+# # tags: [positive, update, description] + +# # - name: "UPDATE TEST 2: Change access VLAN" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_port_config_update_access_vlan }}" +# # register: result_update_access_vlan +# # tags: [positive, update, access_vlan] + +# # - name: Assert access VLAN update success +# # assert: +# # that: +# # - result_update_access_vlan.failed == false +# # - result_update_access_vlan.changed == true +# # - "'Successfully' in result_update_access_vlan.msg or 'success' in result_update_access_vlan.msg" +# # fail_msg: "Access VLAN update should succeed" +# # success_msg: "Access VLAN update successfully completed" +# # tags: [positive, update, access_vlan] + +# # - name: "UPDATE TEST 3: Change switchport mode from access to trunk" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_port_config_update_mode_access_to_trunk }}" +# # register: result_update_mode_trunk +# # tags: [positive, update, mode_trunk] + +# # - name: Assert mode change to trunk success +# # assert: +# # that: +# # - result_update_mode_trunk.failed == false +# # - result_update_mode_trunk.changed == true +# # - "'Successfully' in result_update_mode_trunk.msg or 'success' in result_update_mode_trunk.msg" +# # fail_msg: "Mode change from access to trunk should succeed" +# # success_msg: "Mode change from access to trunk successfully completed" +# # tags: [positive, update, mode_trunk] + +# # - name: "UPDATE TEST 4: Change switchport mode from trunk to access" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_port_config_update_mode_trunk_to_access }}" +# # register: result_update_mode_access +# # tags: [positive, update, mode_access] + +# # - name: Assert mode change to access success +# # assert: +# # that: +# # - result_update_mode_access.failed == false +# # - result_update_mode_access.changed == true +# # - "'Successfully' in result_update_mode_access.msg or 'success' in result_update_mode_access.msg" +# # fail_msg: "Mode change from trunk to access should succeed" +# # success_msg: "Mode change from trunk to access successfully completed" +# # tags: [positive, update, mode_access] + +# # - name: "UPDATE TEST 5: Modify trunk allowed VLANs" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_port_config_update_trunk_allowed_vlans }}" +# # register: result_update_allowed_vlans +# # tags: [positive, update, allowed_vlans] + +# # - name: Assert trunk allowed VLANs update success +# # assert: +# # that: +# # - result_update_allowed_vlans.failed == false +# # - result_update_allowed_vlans.changed == true +# # - "'Successfully' in result_update_allowed_vlans.msg or 'success' in result_update_allowed_vlans.msg" +# # fail_msg: "Trunk allowed VLANs update should succeed" +# # success_msg: "Trunk allowed VLANs update successfully completed" +# # tags: [positive, update, allowed_vlans] + +# # - name: "UPDATE TEST 6: Add voice VLAN to access port" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_port_config_update_add_voice_vlan }}" +# # register: result_update_add_voice_vlan +# # tags: [positive, update, add_voice_vlan] + +# 
# - name: Assert add voice VLAN success +# # assert: +# # that: +# # - result_update_add_voice_vlan.failed == false +# # - result_update_add_voice_vlan.changed == true +# # - "'Successfully' in result_update_add_voice_vlan.msg or 'success' in result_update_add_voice_vlan.msg" +# # fail_msg: "Add voice VLAN should succeed" +# # success_msg: "Add voice VLAN successfully completed" +# # tags: [positive, update, add_voice_vlan] + +# # - name: "UPDATE TEST 7: Modify admin status" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_port_config_update_admin_status }}" +# # register: result_update_admin_status +# # tags: [positive, update, admin_status] + +# # - name: Assert admin status update success +# # assert: +# # that: +# # - result_update_admin_status.failed == false +# # - result_update_admin_status.changed == true +# # - "'Successfully' in result_update_admin_status.msg or 'success' in result_update_admin_status.msg" +# # fail_msg: "Admin status update should succeed" +# # success_msg: "Admin status update successfully completed" +# # tags: [positive, update, admin_status] + +# # - name: "UPDATE TEST 8: Add 802.1X to existing port" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_port_config_update_add_dot1x }}" +# # register: result_update_add_dot1x +# # tags: [positive, update, add_dot1x] + +# # - name: Assert add 802.1X success +# # assert: +# # that: +# # - result_update_add_dot1x.failed == false +# # - result_update_add_dot1x.changed == true +# # - "'Successfully' in result_update_add_dot1x.msg or 'success' in result_update_add_dot1x.msg" +# # fail_msg: "Add 802.1X should succeed" +# # success_msg: "Add 802.1X successfully completed" +# # tags: [positive, update, add_dot1x] + +# # - name: "UPDATE TEST 9: Modify STP settings" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_port_config_update_stp_settings }}" +# # register: result_update_stp_settings +# # tags: [positive, update, stp_settings] + +# # - name: Assert STP settings update success +# # assert: +# # that: +# # - result_update_stp_settings.failed == false +# # - result_update_stp_settings.changed == true +# # - "'Successfully' in result_update_stp_settings.msg or 'success' in result_update_stp_settings.msg" +# # fail_msg: "STP settings update should succeed" +# # success_msg: "STP settings update successfully completed" +# # tags: [positive, update, stp_settings] + +# # - name: "UPDATE TEST 10: Comprehensive update all features" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_port_config_update_comprehensive_all_features }}" +# # register: result_update_comprehensive +# # tags: [positive, update, comprehensive] + +# # - name: Assert comprehensive update success +# # assert: +# # that: +# # - result_update_comprehensive.failed == false +# # - result_update_comprehensive.changed == true +# # - "'Successfully' in result_update_comprehensive.msg or 'success' in result_update_comprehensive.msg" +# # fail_msg: "Comprehensive update of all features should succeed" +# # success_msg: "Comprehensive update of all features successfully completed" +# # tags: [positive, update, comprehensive] + +# # 
################################################################################################################### +# # # POSITIVE TEST CASES - BOUNDARY VALUE TESTING +# # ################################################################################################################### + +# # - name: "POSITIVE TEST - Boundary Value Testing" +# # block: +# # - name: "BOUNDARY TEST 1: Minimum VLAN values" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_port_config_boundary_min_vlans }}" +# # register: result_boundary_min_vlans +# # tags: [positive, boundary, min_vlans] + +# # - name: "Wait 10 seconds before next test to allow configuration to settle" +# # pause: +# # seconds: 10 +# # tags: [positive, create, pause] + +# # - name: Assert minimum VLAN values success +# # assert: +# # that: +# # - result_boundary_min_vlans.failed == false +# # - result_boundary_min_vlans.changed == true +# # - "'Successfully' in result_boundary_min_vlans.msg or 'success' in result_boundary_min_vlans.msg" +# # fail_msg: "Minimum VLAN values should succeed" +# # success_msg: "Minimum VLAN values successfully configured" +# # tags: [positive, boundary, min_vlans] + +# # - name: "BOUNDARY TEST 2: Maximum VLAN values" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_port_config_boundary_max_vlans }}" +# # register: result_boundary_max_vlans +# # tags: [positive, boundary, max_vlans] + +# # - name: Assert maximum VLAN values success +# # assert: +# # that: +# # - result_boundary_max_vlans.failed == false +# # - result_boundary_max_vlans.changed == true +# # - "'Successfully' in result_boundary_max_vlans.msg or 'success' in result_boundary_max_vlans.msg" +# # fail_msg: "Maximum VLAN values should succeed" +# # success_msg: "Maximum VLAN values successfully configured" +# # tags: [positive, boundary, max_vlans] + +# # - name: "Wait 10 seconds before next test to allow configuration to settle" +# # pause: +# # seconds: 10 +# # tags: [positive, create, pause] + +# # - name: "BOUNDARY TEST 3: Minimum STP cost" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_port_config_boundary_min_stp_cost }}" +# # register: result_boundary_min_stp_cost +# # tags: [positive, boundary, min_stp_cost] + +# # - name: Assert minimum STP cost success +# # assert: +# # that: +# # - result_boundary_min_stp_cost.failed == false +# # - result_boundary_min_stp_cost.changed == true +# # - "'Successfully' in result_boundary_min_stp_cost.msg or 'success' in result_boundary_min_stp_cost.msg" +# # fail_msg: "Minimum STP cost should succeed" +# # success_msg: "Minimum STP cost successfully configured" +# # tags: [positive, boundary, min_stp_cost] + +# # - name: "Wait 10 seconds before next test to allow configuration to settle" +# # pause: +# # seconds: 10 +# # tags: [positive, create, pause] + +# # - name: "BOUNDARY TEST 4: Maximum STP cost" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_port_config_boundary_max_stp_cost }}" +# # register: result_boundary_max_stp_cost +# # tags: [positive, boundary, max_stp_cost] + +# # - name: Assert maximum STP cost success +# # assert: +# # that: +# # - result_boundary_max_stp_cost.failed == false +# # - 
result_boundary_max_stp_cost.changed == true +# # - "'Successfully' in result_boundary_max_stp_cost.msg or 'success' in result_boundary_max_stp_cost.msg" +# # fail_msg: "Maximum STP cost should succeed" +# # success_msg: "Maximum STP cost successfully configured" +# # tags: [positive, boundary, max_stp_cost] + +# # - name: "Wait 10 seconds before next test to allow configuration to settle" +# # pause: +# # seconds: 10 +# # tags: [positive, create, pause] + +# # - name: "BOUNDARY TEST 5: Maximum description length" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_port_config_boundary_max_description_length }}" +# # register: result_boundary_max_description +# # tags: [positive, boundary, max_description] + +# # - name: Assert maximum description length success +# # assert: +# # that: +# # - result_boundary_max_description.failed == false +# # - result_boundary_max_description.changed == true +# # - "'Successfully' in result_boundary_max_description.msg or 'success' in result_boundary_max_description.msg" +# # fail_msg: "Maximum description length should succeed" +# # success_msg: "Maximum description length successfully configured" +# # tags: [positive, boundary, max_description] + +# # - name: "Wait 10 seconds before next test to allow configuration to settle" +# # pause: +# # seconds: 10 +# # tags: [positive, create, pause] + +# # - name: "BOUNDARY TEST 6: Maximum allowed VLANs list" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_port_config_boundary_max_allowed_vlans }}" +# # register: result_boundary_max_allowed_vlans +# # tags: [positive, boundary, max_allowed_vlans] + +# # - name: Assert maximum allowed VLANs success +# # assert: +# # that: +# # - result_boundary_max_allowed_vlans.failed == false +# # - result_boundary_max_allowed_vlans.changed == true +# # - "'Successfully' in result_boundary_max_allowed_vlans.msg or 'success' in result_boundary_max_allowed_vlans.msg" +# # fail_msg: "Maximum allowed VLANs should succeed" +# # success_msg: "Maximum allowed VLANs successfully configured" +# # tags: [positive, boundary, max_allowed_vlans] + +# # ################################################################################################################### +# # # POSITIVE TEST CASES - SPECIAL CONFIGURATIONS +# # ################################################################################################################### + +# # - name: "POSITIVE TEST - Special Configuration Testing" +# # block: +# # - name: "SPECIAL TEST 1: All switchport modes" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_port_config_special_all_switchport_modes }}" +# # register: result_special_switchport_modes +# # tags: [positive, special, switchport_modes] + +# # - name: Assert all switchport modes success +# # assert: +# # that: +# # - result_special_switchport_modes.failed == false +# # - result_special_switchport_modes.changed == true +# # - "'Successfully' in result_special_switchport_modes.msg or 'success' in result_special_switchport_modes.msg" +# # fail_msg: "All switchport modes should succeed" +# # success_msg: "All switchport modes successfully configured" +# # tags: [positive, special, switchport_modes] + +# # - name: "Wait 10 seconds before next test to allow configuration to settle" +# # pause: +# 
# seconds: 10 +# # tags: [positive, create, pause] + +# # - name: "SPECIAL TEST 3: All STP guard modes" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_port_config_special_all_stp_guard_modes }}" +# # register: result_special_stp_guard +# # tags: [positive, special, stp_guard] + +# # - name: Assert all STP guard modes success +# # assert: +# # that: +# # - result_special_stp_guard.failed == false +# # - result_special_stp_guard.changed == true +# # - "'Successfully' in result_special_stp_guard.msg or 'success' in result_special_stp_guard.msg" +# # fail_msg: "All STP guard modes should succeed" +# # success_msg: "All STP guard modes successfully configured" +# # tags: [positive, special, stp_guard] + +# # - name: "Wait 10 seconds before next test to allow configuration to settle" +# # pause: +# # seconds: 10 +# # tags: [positive, create, pause] + +# # - name: "SPECIAL TEST 4: All LLDP modes" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_port_config_special_all_lldp_modes }}" +# # register: result_special_lldp_modes +# # tags: [positive, special, lldp_modes] + +# # - name: Assert all LLDP modes success +# # assert: +# # that: +# # - result_special_lldp_modes.failed == false +# # - result_special_lldp_modes.changed == true +# # - "'Successfully' in result_special_lldp_modes.msg or 'success' in result_special_lldp_modes.msg" +# # fail_msg: "All LLDP modes should succeed" +# # success_msg: "All LLDP modes successfully configured" +# # tags: [positive, special, lldp_modes] + +# # - name: "Wait 10 seconds before next test to allow configuration to settle" +# # pause: +# # seconds: 10 +# # tags: [positive, create, pause] + +# # - name: "SPECIAL TEST 5: All boolean combinations" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_port_config_special_all_boolean_combinations }}" +# # register: result_special_boolean_combinations +# # tags: [positive, special, boolean_combinations] + +# # - name: Assert all boolean combinations success +# # assert: +# # that: +# # - result_special_boolean_combinations.failed == false +# # - result_special_boolean_combinations.changed == true +# # - "'Successfully' in result_special_boolean_combinations.msg or 'success' in result_special_boolean_combinations.msg" +# # fail_msg: "All boolean combinations should succeed" +# # success_msg: "All boolean combinations successfully configured" +# # tags: [positive, special, boolean_combinations] + +# # - name: "Wait 10 seconds before next test to allow configuration to settle" +# # pause: +# # seconds: 10 +# # tags: [positive, create, pause] + +# # - name: "SPECIAL TEST 6: Comprehensive large scale configuration" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_port_config_special_large_scale }}" +# # register: result_special_large_scale +# # tags: [positive, special, large_scale] + +# # - name: Assert comprehensive large scale success +# # assert: +# # that: +# # - result_special_large_scale.failed == false +# # - result_special_large_scale.changed == true +# # - "'Successfully' in result_special_large_scale.msg or 'success' in result_special_large_scale.msg" +# # fail_msg: "Comprehensive large scale should succeed" +# # success_msg: 
"Comprehensive large scale successfully configured" +# # tags: [positive, special, large_scale] + + +# # ################################################################################################################### +# # # DELETE/CLEANUP TEST CASES +# # ################################################################################################################### + +# # - name: "DELETE TEST - Port Configuration Cleanup" +# # block: +# # - name: "DELETE TEST 1: Remove specific features only" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_port_config_delete_specific_features }}" +# # register: result_delete_specific_features +# # tags: [positive, delete, specific_features] + +# # - name: Assert delete specific features success +# # assert: +# # that: +# # - result_delete_specific_features.failed == false +# # - result_delete_specific_features.changed == true +# # - "'Successfully' in result_delete_specific_features.msg or 'success' in result_delete_specific_features.msg" +# # fail_msg: "Delete specific features should succeed" +# # success_msg: "Delete specific features successfully completed" +# # tags: [positive, delete, specific_features] + +# # - name: "DELETE TEST 2: Reset to minimal configuration" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_port_config_delete_reset_to_minimal }}" +# # register: result_delete_reset_minimal +# # tags: [positive, delete, reset_minimal] + +# # - name: Assert reset to minimal success +# # assert: +# # that: +# # - result_delete_reset_minimal.failed == false +# # - result_delete_reset_minimal.changed == true +# # - "'Successfully' in result_delete_reset_minimal.msg or 'success' in result_delete_reset_minimal.msg" +# # fail_msg: "Reset to minimal configuration should succeed" +# # success_msg: "Reset to minimal configuration successfully completed" +# # tags: [positive, delete, reset_minimal] + +# # - name: "DELETE TEST 3: Remove all port configurations" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: deleted +# # config: +# # - "{{ port_config_vars_map.test_port_config_cleanup_all }}" +# # register: result_cleanup_all +# # tags: [positive, delete, cleanup_all] + +# # - name: Assert cleanup all configurations success +# # assert: +# # that: +# # - result_cleanup_all.failed == false +# # - result_cleanup_all.changed == true +# # - "'Successfully' in result_cleanup_all.msg or 'success' in result_cleanup_all.msg or 'deleted' in result_cleanup_all.msg" +# # fail_msg: "Cleanup all configurations should succeed" +# # success_msg: "Cleanup all configurations successfully completed" +# # tags: [positive, delete, cleanup_all] + +# # - name: "DELETE TEST 4: Reset to default state" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_port_config_cleanup_reset_defaults }}" +# # register: result_reset_defaults +# # tags: [positive, delete, reset_defaults] + +# # - name: Assert reset to defaults success +# # assert: +# # that: +# # - result_reset_defaults.failed == false +# # - result_reset_defaults.changed == true +# # - "'Successfully' in result_reset_defaults.msg or 'success' in result_reset_defaults.msg" +# # fail_msg: "Reset to defaults should succeed" +# # success_msg: "Reset to defaults successfully completed" +# # tags: 
[positive, delete, reset_defaults] + +# # ################################################################################################################### +# # # IDEMPOTENCY TEST CASES +# # ################################################################################################################### + +# # - name: "IDEMPOTENCY TEST - Verify No Changes on Repeat Operations" +# # block: +# # - name: "IDEMPOTENCY TEST 1: Repeat minimal switchport configuration" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_port_config_create_minimal_switchport }}" +# # register: result_idempotency_minimal +# # tags: [idempotency, minimal_repeat] + +# # - name: Assert idempotency minimal configuration +# # assert: +# # that: +# # - result_idempotency_minimal.failed == false +# # - result_idempotency_minimal.changed == false +# # fail_msg: "Idempotency test should not show changes when configuration is already applied" +# # success_msg: "Idempotency test passed - no changes detected for existing configuration" +# # tags: [idempotency, minimal_repeat] + +# # - name: "IDEMPOTENCY TEST 2: Repeat comprehensive configuration" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_port_config_create_comprehensive_all_features }}" +# # register: result_idempotency_comprehensive +# # tags: [idempotency, comprehensive_repeat] + +# # - name: Assert idempotency comprehensive configuration +# # assert: +# # that: +# # - result_idempotency_comprehensive.failed == false +# # - result_idempotency_comprehensive.changed == false +# # fail_msg: "Idempotency test should not show changes when comprehensive configuration is already applied" +# # success_msg: "Idempotency test passed - no changes detected for existing comprehensive configuration" +# # tags: [idempotency, comprehensive_repeat] + +# # - name: "IDEMPOTENCY TEST 3: Repeat multiple ports configuration" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ port_config_vars_map.test_port_config_create_multiple_ports_different }}" +# # register: result_idempotency_multiple +# # tags: [idempotency, multiple_repeat] + +# # - name: Assert idempotency multiple ports configuration +# # assert: +# # that: +# # - result_idempotency_multiple.failed == false +# # - result_idempotency_multiple.changed == false +# # fail_msg: "Idempotency test should not show changes when multiple ports configuration is already applied" +# # success_msg: "Idempotency test passed - no changes detected for existing multiple ports configuration" +# # tags: [idempotency, multiple_repeat]
+ +# # ################################################################################################################### +# # # DELETE TEST CASES - PORT CONFIGURATION FEATURE REMOVAL +# # ################################################################################################################### + +# - name: "DELETE TEST - Port Configuration Feature Removal" +# block: +# - name: "DELETE TEST 1: Remove single feature (STP only)" +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: deleted +# config: +# - "{{ port_config_vars_map.test_port_config_delete_single_feature_stp }}" +# register: result_delete_single_stp +# tags: [positive, delete, single_feature_stp] + +# - name: Assert delete single STP feature success +# assert: +# that: +# - result_delete_single_stp.failed == false +# - result_delete_single_stp.changed == true +# - "'Successfully' in result_delete_single_stp.msg or 'success' in result_delete_single_stp.msg" +# fail_msg: "Delete single STP feature should succeed" +# success_msg: "Delete single STP feature successfully completed" +# tags: [positive, delete, single_feature_stp] + +# # - name: "DELETE TEST 2: Remove single feature (802.1X only)" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: deleted +# # config: +# # - "{{ port_config_vars_map.test_port_config_delete_single_feature_dot1x }}" +# # register: result_delete_single_dot1x +# # tags: [positive, delete, single_feature_dot1x] + +# # - name: Assert delete single 802.1X feature success +# # assert: +# # that: +# # - result_delete_single_dot1x.failed == false +# # - result_delete_single_dot1x.changed == true +# # - "'Successfully' in result_delete_single_dot1x.msg or 'success' in result_delete_single_dot1x.msg" +# # fail_msg: "Delete single 802.1X feature should succeed" +# # success_msg: "Delete single 802.1X feature successfully completed" +# # tags: [positive, delete, single_feature_dot1x] + +# # - name: "DELETE TEST 3: Remove single feature (MAB only)" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: deleted +# # config: +# # - "{{ port_config_vars_map.test_port_config_delete_single_feature_mab }}" +# # register: result_delete_single_mab +# # tags: [positive, delete, single_feature_mab] + +# # - name: Assert delete single MAB feature success +# # assert: +# # that: +# # - result_delete_single_mab.failed == false +# # - result_delete_single_mab.changed == true +# # - "'Successfully' in result_delete_single_mab.msg or 'success' in result_delete_single_mab.msg" +# # fail_msg: "Delete single MAB feature should succeed" +# # success_msg: "Delete single MAB feature successfully completed" +# # tags: [positive, delete, single_feature_mab] + +# # - name: "DELETE TEST 4: Remove single feature (DHCP Snooping only)" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: deleted +# # config: +# # - "{{ port_config_vars_map.test_port_config_delete_single_feature_dhcp_snooping }}" +# # register: result_delete_single_dhcp_snooping +# # tags: [positive, delete, single_feature_dhcp_snooping] + +# # - name: Assert delete single DHCP Snooping feature success +# # assert: +# # that: +# # - result_delete_single_dhcp_snooping.failed == false +# # - result_delete_single_dhcp_snooping.changed == true +# # - "'Successfully' in result_delete_single_dhcp_snooping.msg or 'success' in result_delete_single_dhcp_snooping.msg" +# # fail_msg: "Delete single DHCP Snooping feature should succeed" 
+# # success_msg: "Delete single DHCP Snooping feature successfully completed" +# # tags: [positive, delete, single_feature_dhcp_snooping] + +# # - name: "DELETE TEST 5: Remove multiple specific features" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: deleted +# # config: +# # - "{{ port_config_vars_map.test_port_config_delete_multiple_features }}" +# # register: result_delete_multiple_features +# # tags: [positive, delete, multiple_features] + +# # - name: Assert delete multiple features success +# # assert: +# # that: +# # - result_delete_multiple_features.failed == false +# # - result_delete_multiple_features.changed == true +# # - "'Successfully' in result_delete_multiple_features.msg or 'success' in result_delete_multiple_features.msg" +# # fail_msg: "Delete multiple features should succeed" +# # success_msg: "Delete multiple features successfully completed" +# # tags: [positive, delete, multiple_features] + +# # - name: "DELETE TEST 6: Remove all features except switchport" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: deleted +# # config: +# # - "{{ port_config_vars_map.test_port_config_delete_all_features_except_switchport }}" +# # register: result_delete_all_except_switchport +# # tags: [positive, delete, all_except_switchport] + +# # - name: Assert delete all features except switchport success +# # assert: +# # that: +# # - result_delete_all_except_switchport.failed == false +# # - result_delete_all_except_switchport.changed == true +# # - "'Successfully' in result_delete_all_except_switchport.msg or 'success' in result_delete_all_except_switchport.msg" +# # fail_msg: "Delete all features except switchport should succeed" +# # success_msg: "Delete all features except switchport successfully completed" +# # tags: [positive, delete, all_except_switchport] + +# # - name: "DELETE TEST 7: Remove CDP and LLDP features" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: deleted +# # config: +# # - "{{ port_config_vars_map.test_port_config_delete_cdp_lldp_features }}" +# # register: result_delete_cdp_lldp +# # tags: [positive, delete, cdp_lldp_features] + +# # - name: Assert delete CDP and LLDP features success +# # assert: +# # that: +# # - result_delete_cdp_lldp.failed == false +# # - result_delete_cdp_lldp.changed == true +# # - "'Successfully' in result_delete_cdp_lldp.msg or 'success' in result_delete_cdp_lldp.msg" +# # fail_msg: "Delete CDP and LLDP features should succeed" +# # success_msg: "Delete CDP and LLDP features successfully completed" +# # tags: [positive, delete, cdp_lldp_features] + +# # - name: "DELETE TEST 8: Remove VLAN trunking feature" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: deleted +# # config: +# # - "{{ port_config_vars_map.test_port_config_delete_vlan_trunking_feature }}" +# # register: result_delete_vlan_trunking +# # tags: [positive, delete, vlan_trunking_feature] + +# # - name: Assert delete VLAN trunking feature success +# # assert: +# # that: +# # - result_delete_vlan_trunking.failed == false +# # - result_delete_vlan_trunking.changed == true +# # - "'Successfully' in result_delete_vlan_trunking.msg or 'success' in result_delete_vlan_trunking.msg" +# # fail_msg: "Delete VLAN trunking feature should succeed" +# # success_msg: "Delete VLAN trunking feature successfully completed" +# # tags: [positive, delete, vlan_trunking_feature] + +# # - name: "DELETE TEST 9: Remove VTP 
feature" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: deleted +# # config: +# # - "{{ port_config_vars_map.test_port_config_delete_vtp_feature }}" +# # register: result_delete_vtp_feature +# # tags: [positive, delete, vtp_feature] + +# # - name: Assert delete VTP feature success +# # assert: +# # that: +# # - result_delete_vtp_feature.failed == false +# # - result_delete_vtp_feature.changed == true +# # - "'Successfully' in result_delete_vtp_feature.msg or 'success' in result_delete_vtp_feature.msg" +# # fail_msg: "Delete VTP feature should succeed" +# # success_msg: "Delete VTP feature successfully completed" +# # tags: [positive, delete, vtp_feature] + +# # - name: "DELETE TEST 10: Remove features from multiple ports" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: deleted +# # config: +# # - "{{ port_config_vars_map.test_port_config_delete_features_multiple_ports }}" +# # register: result_delete_features_multiple_ports +# # tags: [positive, delete, features_multiple_ports] + +# # - name: Assert delete features from multiple ports success +# # assert: +# # that: +# # - result_delete_features_multiple_ports.failed == false +# # - result_delete_features_multiple_ports.changed == true +# # - "'Successfully' in result_delete_features_multiple_ports.msg or 'success' in result_delete_features_multiple_ports.msg" +# # fail_msg: "Delete features from multiple ports should succeed" +# # success_msg: "Delete features from multiple ports successfully completed" +# # tags: [positive, delete, features_multiple_ports] + +# # - name: "DELETE TEST 11: Remove complete interface configurations" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: deleted +# # config: +# # - "{{ port_config_vars_map.test_port_config_delete_complete_interfaces }}" +# # register: result_delete_complete_interfaces +# # tags: [positive, delete, complete_interfaces] + +# # - name: Assert delete complete interface configurations success +# # assert: +# # that: +# # - result_delete_complete_interfaces.failed == false +# # - result_delete_complete_interfaces.changed == true +# # - "'Successfully' in result_delete_complete_interfaces.msg or 'success' in result_delete_complete_interfaces.msg" +# # fail_msg: "Delete complete interface configurations should succeed" +# # success_msg: "Delete complete interface configurations successfully completed" +# # tags: [positive, delete, complete_interfaces] + +# # # ################################################################################################################### +# # # # CLEANUP TEST CASES - FINAL PORT CONFIGURATION CLEANUP +# # # ################################################################################################################### + +# # - name: "CLEANUP TEST - Final Port Configuration Cleanup" +# # block: +# # - name: "CLEANUP TEST 1: Final cleanup for all remaining test interfaces" +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: deleted +# # config: +# # - "{{ port_config_vars_map.test_port_config_final_cleanup_all_interfaces }}" +# # register: result_final_cleanup +# # tags: [cleanup, final_cleanup_all] + +# # - name: Assert final cleanup success +# # assert: +# # that: +# # - result_final_cleanup.failed == false +# # - result_final_cleanup.changed == true +# # - "'Successfully' in result_final_cleanup.msg or 'success' in result_final_cleanup.msg or 'deleted' in result_final_cleanup.msg" +# # 
fail_msg: "Final cleanup should succeed" +# # success_msg: "Final cleanup successfully completed" +# # tags: [cleanup, final_cleanup_all] diff --git a/tests/integration/ccc_wired_campus_automation_management/tests/test_wca_feature_stp.yml b/tests/integration/ccc_wired_campus_automation_management/tests/test_wca_feature_stp.yml new file mode 100644 index 0000000000..15f3b65dd2 --- /dev/null +++ b/tests/integration/ccc_wired_campus_automation_management/tests/test_wca_feature_stp.yml @@ -0,0 +1,1207 @@ +# --- +# # =================================================================================================== +# # STP (SPANNING TREE PROTOCOL) FEATURE TESTS SUMMARY +# # =================================================================================================== +# # +# # This test suite validates STP (Spanning Tree Protocol) configuration functionality for Wired Campus Automation. +# # +# # TEST CATEGORIES: +# # +# # 1. NEGATIVE VALIDATION TESTS (Lines 130-950) +# # a) STP Global Configuration Tests +# # - STP Mode validation (invalid string values, integer instead of string) +# # - STP Portfast Mode validation (invalid string values, integer instead of string) +# # - STP Boolean Parameters validation (string/integer instead of boolean) +# # - STP Transmit Hold Count validation (string/range validation) +# # - STP Uplinkfast Max Update Rate validation (string/range validation) +# # +# # b) STP Instance Configuration Tests +# # - Instance VLAN ID validation (string/negative/exceeds max/missing required) +# # - Instance Priority validation (string/not multiple of 4096/range) +# # - Instance Boolean Parameters validation (string/integer instead of boolean) +# # - Instance Timer validation (string/range validation) +# # +# # c) Data Type and Structure Tests +# # - Invalid configuration structure validation +# # - Invalid instances list structure validation +# # - Invalid instance item structure validation +# # +# # d) Duplicate and Conflict Tests +# # - Duplicate instance VLAN IDs validation +# # +# # 2. 
POSITIVE VALIDATION TESTS (Lines 950-1600) +# # a) STP Creation Tests (Merged State) +# # - Global parameters only configuration +# # - Instances only configuration +# # - Single instance configuration +# # - Global and instances combined configuration +# # - Minimal configurations +# # +# # b) STP Update Tests (Merged State) +# # - Global parameters only updates +# # - Add new instances updates +# # - Modify existing instances updates +# # - Combined updates +# # +# # c) STP Deletion Tests (Deleted State - Type 3) +# # - Empty configuration (resets global + deletes all instances) +# # - Global only (resets global + deletes instances) +# # - Instances only (deletes specified instances, preserves global) +# # - Global+instances (deletes instances only per Type 3 rule) +# # - Multiple instance deletion +# # +# # d) Idempotency Tests +# # - Same configuration applied twice (first=changed, second=no change) +# # +# # VALIDATION RANGES: +# # - STP Mode: string choices ["PVST", "RSTP", "MST"] +# # - STP Portfast Mode: string choices ["ENABLE", "DISABLE", "EDGE", "NETWORK", "TRUNK"] +# # - STP Boolean Parameters: boolean (true/false) +# # - STP Transmit Hold Count: integer (1-20) +# # - STP Uplinkfast Max Update Rate: integer (0-32000) +# # - Instance VLAN ID: integer (1-4094) +# # - Instance Priority: integer (0-61440, must be multiple of 4096) +# # - Instance Timers: max_age (6-40), hello_interval (1-10), forward_delay (4-30) +# # +# # STP FUNCTIONALITY: +# # - STP prevents loops in Layer 2 networks by blocking redundant paths +# # - Global STP configuration affects the entire spanning tree domain +# # - STP instances allow per-VLAN spanning tree configurations +# # - Different STP modes provide various features and compatibility +# # +# # TYPE 3 DELETE BEHAVIOR: +# # - Global only: resets global to defaults + deletes all instances +# # - Instances only: deletes specified instances, preserves global config +# # - Global+instances: deletes instances only (lowest level per Type 3 rule) +# # - Empty config: resets global to defaults + deletes all instances +# # +# # EXPECTED BEHAVIORS: +# # - Negative tests should fail with appropriate error messages +# # - Positive tests should succeed with 'Successfully' in response +# # - Idempotency: First run changes=true, second run changes=false +# # - All tests include proper pause times for configuration settlement +# # +# # =================================================================================================== + +# - debug: msg="Starting STP (Spanning Tree Protocol) feature tests for Wired Campus Automation" +# - debug: msg="Role Path {{ role_path }}" + +# - block: +# - name: Load STP test variables +# include_vars: +# file: "{{ role_path }}/vars/vars_wca_test_feature_stp.yml" +# name: stp_vars_map +# vars: +# dnac_login: &dnac_login +# dnac_host: "{{ dnac_host }}" +# dnac_username: "{{ dnac_username }}" +# dnac_password: "{{ dnac_password }}" +# dnac_verify: "{{ dnac_verify }}" +# dnac_port: "{{ dnac_port }}" +# dnac_version: "{{ dnac_version }}" +# dnac_debug: "{{ dnac_debug }}" +# dnac_log: true +# dnac_log_level: "DEBUG" +# config_verify: true +# dnac_api_task_timeout: "{{ dnac_api_task_timeout }}" +# dnac_log_append: false + +# # ============================================================================= +# # Negative Validation Tests for STP +# # ============================================================================= + +# # ############################################# +# # STP Global Mode Tests +# # 
############################################# + +# - name: Test STP mode validation - invalid string value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ stp_vars_map.test_stp_global_mode_invalid_string }}" +# register: result_stp_mode_invalid_string +# ignore_errors: true +# tags: [negative, stp, mode] + +# - name: Assert STP mode invalid string validation failed +# assert: +# that: +# - result_stp_mode_invalid_string.failed == true +# - "'must be one of' in result_stp_mode_invalid_string.msg or 'invalid' in result_stp_mode_invalid_string.msg" +# fail_msg: "STP mode invalid string validation should have failed" +# success_msg: "STP mode invalid string validation correctly failed" +# tags: [negative, stp, mode] + +# - name: Test STP mode validation - integer value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ stp_vars_map.test_stp_global_mode_invalid_integer }}" +# register: result_stp_mode_invalid_integer +# ignore_errors: true +# tags: [negative, stp, mode] + +# - name: Assert STP mode integer validation failed +# assert: +# that: +# - result_stp_mode_invalid_integer.failed == true +# - "'must be of type string' in result_stp_mode_invalid_integer.msg" +# fail_msg: "STP mode integer validation should have failed" +# success_msg: "STP mode integer validation correctly failed" +# tags: [negative, stp, mode] + +# # ############################################# +# # STP Portfast Mode Tests +# # ############################################# + +# - name: Test STP portfast mode validation - invalid string value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ stp_vars_map.test_stp_portfast_mode_invalid_string }}" +# register: result_stp_portfast_mode_invalid_string +# ignore_errors: true +# tags: [negative, stp, portfast_mode] + +# - name: Assert STP portfast mode invalid string validation failed +# assert: +# that: +# - result_stp_portfast_mode_invalid_string.failed == true +# - "'must be one of' in result_stp_portfast_mode_invalid_string.msg" +# fail_msg: "STP portfast mode invalid string validation should have failed" +# success_msg: "STP portfast mode invalid string validation correctly failed" +# tags: [negative, stp, portfast_mode] + +# - name: Test STP portfast mode validation - integer value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ stp_vars_map.test_stp_portfast_mode_invalid_integer }}" +# register: result_stp_portfast_mode_invalid_integer +# ignore_errors: true +# tags: [negative, stp, portfast_mode] + +# - name: Assert STP portfast mode integer validation failed +# assert: +# that: +# - result_stp_portfast_mode_invalid_integer.failed == true +# - "'must be of type string' in result_stp_portfast_mode_invalid_integer.msg" +# fail_msg: "STP portfast mode integer validation should have failed" +# success_msg: "STP portfast mode integer validation correctly failed" +# tags: [negative, stp, portfast_mode] + +# # ############################################# +# # STP Boolean Parameters Tests +# # ############################################# + +# - name: Test STP BPDU guard validation - string value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ stp_vars_map.test_stp_bpdu_guard_invalid_string }}" +# register: 
result_stp_bpdu_guard_invalid_string +# ignore_errors: true +# tags: [negative, stp, bpdu_guard] + +# - name: Assert STP BPDU guard string validation failed +# assert: +# that: +# - result_stp_bpdu_guard_invalid_string.failed == true +# - "'must be of type boolean' in result_stp_bpdu_guard_invalid_string.msg" +# fail_msg: "STP BPDU guard string validation should have failed" +# success_msg: "STP BPDU guard string validation correctly failed" +# tags: [negative, stp, bpdu_guard] + +# - name: Test STP BPDU guard validation - integer value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ stp_vars_map.test_stp_bpdu_guard_invalid_integer }}" +# register: result_stp_bpdu_guard_invalid_integer +# ignore_errors: true +# tags: [negative, stp, bpdu_guard] + +# - name: Assert STP BPDU guard integer validation failed +# assert: +# that: +# - result_stp_bpdu_guard_invalid_integer.failed == true +# - "'must be of type boolean' in result_stp_bpdu_guard_invalid_integer.msg" +# fail_msg: "STP BPDU guard integer validation should have failed" +# success_msg: "STP BPDU guard integer validation correctly failed" +# tags: [negative, stp, bpdu_guard] + +# - name: Test STP BPDU filter validation - string value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ stp_vars_map.test_stp_bpdu_filter_invalid_string }}" +# register: result_stp_bpdu_filter_invalid_string +# ignore_errors: true +# tags: [negative, stp, bpdu_filter] + +# - name: Assert STP BPDU filter string validation failed +# assert: +# that: +# - result_stp_bpdu_filter_invalid_string.failed == true +# - "'must be of type boolean' in result_stp_bpdu_filter_invalid_string.msg" +# fail_msg: "STP BPDU filter string validation should have failed" +# success_msg: "STP BPDU filter string validation correctly failed" +# tags: [negative, stp, bpdu_filter] + +# - name: Test STP BPDU filter validation - integer value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ stp_vars_map.test_stp_bpdu_filter_invalid_integer }}" +# register: result_stp_bpdu_filter_invalid_integer +# ignore_errors: true +# tags: [negative, stp, bpdu_filter] + +# - name: Assert STP BPDU filter integer validation failed +# assert: +# that: +# - result_stp_bpdu_filter_invalid_integer.failed == true +# - "'must be of type boolean' in result_stp_bpdu_filter_invalid_integer.msg" +# fail_msg: "STP BPDU filter integer validation should have failed" +# success_msg: "STP BPDU filter integer validation correctly failed" +# tags: [negative, stp, bpdu_filter] + +# - name: Test STP backbonefast validation - string value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ stp_vars_map.test_stp_backbonefast_invalid_string }}" +# register: result_stp_backbonefast_invalid_string +# ignore_errors: true +# tags: [negative, stp, backbonefast] + +# - name: Assert STP backbonefast string validation failed +# assert: +# that: +# - result_stp_backbonefast_invalid_string.failed == true +# - "'must be of type boolean' in result_stp_backbonefast_invalid_string.msg" +# fail_msg: "STP backbonefast string validation should have failed" +# success_msg: "STP backbonefast string validation correctly failed" +# tags: [negative, stp, backbonefast] + +# - name: Test STP backbonefast validation - integer value should fail +# 
cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ stp_vars_map.test_stp_backbonefast_invalid_integer }}" +# register: result_stp_backbonefast_invalid_integer +# ignore_errors: true +# tags: [negative, stp, backbonefast] + +# - name: Assert STP backbonefast integer validation failed +# assert: +# that: +# - result_stp_backbonefast_invalid_integer.failed == true +# - "'must be of type boolean' in result_stp_backbonefast_invalid_integer.msg" +# fail_msg: "STP backbonefast integer validation should have failed" +# success_msg: "STP backbonefast integer validation correctly failed" +# tags: [negative, stp, backbonefast] + +# # ############################################# +# # STP Integer Parameters Tests +# # ############################################# + +# - name: Test STP transmit hold count validation - string value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ stp_vars_map.test_stp_transmit_hold_count_invalid_string }}" +# register: result_stp_transmit_hold_count_invalid_string +# ignore_errors: true +# tags: [negative, stp, transmit_hold_count] + +# - name: Assert STP transmit hold count string validation failed +# assert: +# that: +# - result_stp_transmit_hold_count_invalid_string.failed == true +# - "'must be of type integer' in result_stp_transmit_hold_count_invalid_string.msg" +# fail_msg: "STP transmit hold count string validation should have failed" +# success_msg: "STP transmit hold count string validation correctly failed" +# tags: [negative, stp, transmit_hold_count] + +# - name: Test STP transmit hold count validation - below minimum should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ stp_vars_map.test_stp_transmit_hold_count_below_min }}" +# register: result_stp_transmit_hold_count_below_min +# ignore_errors: true +# tags: [negative, stp, transmit_hold_count] + +# - name: Assert STP transmit hold count below minimum validation failed +# assert: +# that: +# - result_stp_transmit_hold_count_below_min.failed == true +# - "'must be within the range' in result_stp_transmit_hold_count_below_min.msg" +# fail_msg: "STP transmit hold count below minimum validation should have failed" +# success_msg: "STP transmit hold count below minimum validation correctly failed" +# tags: [negative, stp, transmit_hold_count] + +# - name: Test STP transmit hold count validation - above maximum should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ stp_vars_map.test_stp_transmit_hold_count_above_max }}" +# register: result_stp_transmit_hold_count_above_max +# ignore_errors: true +# tags: [negative, stp, transmit_hold_count] + +# - name: Assert STP transmit hold count above maximum validation failed +# assert: +# that: +# - result_stp_transmit_hold_count_above_max.failed == true +# - "'must be within the range' in result_stp_transmit_hold_count_above_max.msg" +# fail_msg: "STP transmit hold count above maximum validation should have failed" +# success_msg: "STP transmit hold count above maximum validation correctly failed" +# tags: [negative, stp, transmit_hold_count] + +# - name: Test STP uplinkfast max update rate validation - string value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ stp_vars_map.test_stp_uplinkfast_max_update_rate_invalid_string 
}}" +# register: result_stp_uplinkfast_max_update_rate_invalid_string +# ignore_errors: true +# tags: [negative, stp, uplinkfast_max_update_rate] + +# - name: Assert STP uplinkfast max update rate string validation failed +# assert: +# that: +# - result_stp_uplinkfast_max_update_rate_invalid_string.failed == true +# - "'must be of type integer' in result_stp_uplinkfast_max_update_rate_invalid_string.msg" +# fail_msg: "STP uplinkfast max update rate string validation should have failed" +# success_msg: "STP uplinkfast max update rate string validation correctly failed" +# tags: [negative, stp, uplinkfast_max_update_rate] + +# - name: Test STP uplinkfast max update rate validation - below minimum should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ stp_vars_map.test_stp_uplinkfast_max_update_rate_below_min }}" +# register: result_stp_uplinkfast_max_update_rate_below_min +# ignore_errors: true +# tags: [negative, stp, uplinkfast_max_update_rate] + +# - name: Assert STP uplinkfast max update rate below minimum validation failed +# assert: +# that: +# - result_stp_uplinkfast_max_update_rate_below_min.failed == true +# - "'must be within the range' in result_stp_uplinkfast_max_update_rate_below_min.msg" +# fail_msg: "STP uplinkfast max update rate below minimum validation should have failed" +# success_msg: "STP uplinkfast max update rate below minimum validation correctly failed" +# tags: [negative, stp, uplinkfast_max_update_rate] + +# - name: Test STP uplinkfast max update rate validation - above maximum should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ stp_vars_map.test_stp_uplinkfast_max_update_rate_above_max }}" +# register: result_stp_uplinkfast_max_update_rate_above_max +# ignore_errors: true +# tags: [negative, stp, uplinkfast_max_update_rate] + +# - name: Assert STP uplinkfast max update rate above maximum validation failed +# assert: +# that: +# - result_stp_uplinkfast_max_update_rate_above_max.failed == true +# - "'must be within the range' in result_stp_uplinkfast_max_update_rate_above_max.msg" +# fail_msg: "STP uplinkfast max update rate above maximum validation should have failed" +# success_msg: "STP uplinkfast max update rate above maximum validation correctly failed" +# tags: [negative, stp, uplinkfast_max_update_rate] + +# # ############################################# +# # STP Instance Configuration Tests +# # ############################################# + +# - name: Test STP instance VLAN ID validation - string value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ stp_vars_map.test_stp_instance_vlan_id_invalid_string }}" +# register: result_stp_instance_vlan_id_invalid_string +# ignore_errors: true +# tags: [negative, stp, instance_vlan_id] + +# - name: Assert STP instance VLAN ID string validation failed +# assert: +# that: +# - result_stp_instance_vlan_id_invalid_string.failed == true +# - "'must be of type integer' in result_stp_instance_vlan_id_invalid_string.msg" +# fail_msg: "STP instance VLAN ID string validation should have failed" +# success_msg: "STP instance VLAN ID string validation correctly failed" +# tags: [negative, stp, instance_vlan_id] + +# - name: Test STP instance VLAN ID validation - below minimum should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ 
stp_vars_map.test_stp_instance_vlan_id_below_min }}" +# register: result_stp_instance_vlan_id_below_min +# ignore_errors: true +# tags: [negative, stp, instance_vlan_id] + +# - name: Assert STP instance VLAN ID below minimum validation failed +# assert: +# that: +# - result_stp_instance_vlan_id_below_min.failed == true +# - "'must be within the range' in result_stp_instance_vlan_id_below_min.msg" +# fail_msg: "STP instance VLAN ID below minimum validation should have failed" +# success_msg: "STP instance VLAN ID below minimum validation correctly failed" +# tags: [negative, stp, instance_vlan_id] + +# - name: Test STP instance VLAN ID validation - above maximum should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ stp_vars_map.test_stp_instance_vlan_id_above_max }}" +# register: result_stp_instance_vlan_id_above_max +# ignore_errors: true +# tags: [negative, stp, instance_vlan_id] + +# - name: Assert STP instance VLAN ID above maximum validation failed +# assert: +# that: +# - result_stp_instance_vlan_id_above_max.failed == true +# - "'must be within the range' in result_stp_instance_vlan_id_above_max.msg" +# fail_msg: "STP instance VLAN ID above maximum validation should have failed" +# success_msg: "STP instance VLAN ID above maximum validation correctly failed" +# tags: [negative, stp, instance_vlan_id] + +# - name: Test STP instance VLAN ID validation - missing required should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ stp_vars_map.test_stp_instance_vlan_id_missing }}" +# register: result_stp_instance_vlan_id_missing +# ignore_errors: true +# tags: [negative, stp, instance_vlan_id] + +# - name: Assert STP instance VLAN ID missing validation failed +# assert: +# that: +# - result_stp_instance_vlan_id_missing.failed == true +# - "'required' in result_stp_instance_vlan_id_missing.msg or 'stp_instance_vlan_id' in result_stp_instance_vlan_id_missing.msg" +# fail_msg: "STP instance VLAN ID missing validation should have failed" +# success_msg: "STP instance VLAN ID missing validation correctly failed" +# tags: [negative, stp, instance_vlan_id] + +# - name: Test STP instance priority validation - string value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ stp_vars_map.test_stp_instance_priority_invalid_string }}" +# register: result_stp_instance_priority_invalid_string +# ignore_errors: true +# tags: [negative, stp, instance_priority] + +# - name: Assert STP instance priority string validation failed +# assert: +# that: +# - result_stp_instance_priority_invalid_string.failed == true +# - "'must be of type integer' in result_stp_instance_priority_invalid_string.msg" +# fail_msg: "STP instance priority string validation should have failed" +# success_msg: "STP instance priority string validation correctly failed" +# tags: [negative, stp, instance_priority] + +# - name: Test STP instance priority validation - below minimum should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ stp_vars_map.test_stp_instance_priority_below_min }}" +# register: result_stp_instance_priority_below_min +# ignore_errors: true +# tags: [negative, stp, instance_priority] + +# - name: Assert STP instance priority below minimum validation failed +# assert: +# that: +# - result_stp_instance_priority_below_min.failed == true +# - "'must be 
within the range' in result_stp_instance_priority_below_min.msg" +# fail_msg: "STP instance priority below minimum validation should have failed" +# success_msg: "STP instance priority below minimum validation correctly failed" +# tags: [negative, stp, instance_priority] + +# - name: Test STP instance priority validation - above maximum should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ stp_vars_map.test_stp_instance_priority_above_max }}" +# register: result_stp_instance_priority_above_max +# ignore_errors: true +# tags: [negative, stp, instance_priority] + +# - name: Assert STP instance priority above maximum validation failed +# assert: +# that: +# - result_stp_instance_priority_above_max.failed == true +# - "'must be within the range' in result_stp_instance_priority_above_max.msg" +# fail_msg: "STP instance priority above maximum validation should have failed" +# success_msg: "STP instance priority above maximum validation correctly failed" +# tags: [negative, stp, instance_priority] + +# - name: Test STP instance priority validation - not multiple of 4096 should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ stp_vars_map.test_stp_instance_priority_not_multiple }}" +# register: result_stp_instance_priority_not_multiple +# ignore_errors: true +# tags: [negative, stp, instance_priority] + +# - name: Assert STP instance priority not multiple validation failed +# assert: +# that: +# - result_stp_instance_priority_not_multiple.failed == true +# - "'multiple of 4096' in result_stp_instance_priority_not_multiple.msg or 'priority' in result_stp_instance_priority_not_multiple.msg" +# fail_msg: "STP instance priority not multiple validation should have failed" +# success_msg: "STP instance priority not multiple validation correctly failed" +# tags: [negative, stp, instance_priority] + +# # ############################################# +# # Data Type and Structure Tests +# # ############################################# + +# - name: Test STP structure validation - string instead of dict should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ stp_vars_map.test_stp_invalid_dict_type }}" +# register: result_stp_invalid_dict_type +# ignore_errors: true +# tags: [negative, stp, structure] + +# - name: Assert STP invalid dict type validation failed +# assert: +# that: +# - result_stp_invalid_dict_type.failed == true +# - "'must be of type dictionary' in result_stp_invalid_dict_type.msg" +# fail_msg: "STP invalid dict type validation should have failed" +# success_msg: "STP invalid dict type validation correctly failed" +# tags: [negative, stp, structure] + +# - name: Test STP instances structure validation - string instead of list should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ stp_vars_map.test_stp_instances_invalid_string }}" +# register: result_stp_instances_invalid_string +# ignore_errors: true +# tags: [negative, stp, instances_structure] + +# - name: Assert STP instances invalid string validation failed +# assert: +# that: +# - result_stp_instances_invalid_string.failed == true +# - "'must be a list of dictionaries' in result_stp_instances_invalid_string.msg" +# fail_msg: "STP instances invalid string validation should have failed" +# success_msg: "STP instances invalid string validation correctly failed" +# tags: 
[negative, stp, instances_structure] + +# - name: Test STP instance item structure validation - string instead of dict should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ stp_vars_map.test_stp_instance_invalid_dict_type }}" +# register: result_stp_instance_invalid_dict_type +# ignore_errors: true +# tags: [negative, stp, instance_structure] + +# - name: Assert STP instance invalid dict type validation failed +# assert: +# that: +# - result_stp_instance_invalid_dict_type.failed == true +# - "'must be a dictionary' in result_stp_instance_invalid_dict_type.msg" +# fail_msg: "STP instance invalid dict type validation should have failed" +# success_msg: "STP instance invalid dict type validation correctly failed" +# tags: [negative, stp, instance_structure] + +# # ============================================================================= +# # POSITIVE TEST CASES - STP CREATION (MERGED STATE) +# # ============================================================================= + +# # ############################################# +# # STP Configuration Tests +# # ############################################# + +# - name: Test STP configuration with global parameters only +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ stp_vars_map.test_stp_create_global_only }}" +# register: result_stp_create_global_only +# tags: [positive, stp, create, global_only] + +# - name: Assert STP global only configuration succeeded +# assert: +# that: +# - result_stp_create_global_only.failed == false +# - result_stp_create_global_only.changed == true +# - "'Successfully' in result_stp_create_global_only.response" +# fail_msg: "STP global only configuration should have succeeded" +# success_msg: "STP global only configuration succeeded" +# tags: [positive, stp, create, global_only] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, stp, create] + +# - name: Test STP configuration with instances only +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ stp_vars_map.test_stp_create_instances_only }}" +# register: result_stp_create_instances_only +# tags: [positive, stp, create, instances_only] + +# - name: Assert STP instances only configuration succeeded +# assert: +# that: +# - result_stp_create_instances_only.failed == false +# - result_stp_create_instances_only.changed == true +# - "'Successfully' in result_stp_create_instances_only.response" +# fail_msg: "STP instances only configuration should have succeeded" +# success_msg: "STP instances only configuration succeeded" +# tags: [positive, stp, create, instances_only] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, stp, create] + +# - name: Test STP configuration with single instance +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ stp_vars_map.test_stp_create_single_instance }}" +# register: result_stp_create_single_instance +# tags: [positive, stp, create, single_instance] + +# - name: Assert STP single instance configuration succeeded +# assert: +# that: +# - result_stp_create_single_instance.failed == false +# - result_stp_create_single_instance.changed == true +# - "'Successfully' in result_stp_create_single_instance.response" +# fail_msg: "STP single instance configuration should have succeeded" +# success_msg: 
"STP single instance configuration succeeded" +# tags: [positive, stp, create, single_instance] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, stp, create] + +# - name: Test STP configuration with global and instances +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ stp_vars_map.test_stp_create_global_and_instances }}" +# register: result_stp_create_global_and_instances +# tags: [positive, stp, create, global_and_instances] + +# - name: Assert STP global and instances configuration succeeded +# assert: +# that: +# - result_stp_create_global_and_instances.failed == false +# - result_stp_create_global_and_instances.changed == true +# - "'Successfully' in result_stp_create_global_and_instances.response" +# fail_msg: "STP global and instances configuration should have succeeded" +# success_msg: "STP global and instances configuration succeeded" +# tags: [positive, stp, create, global_and_instances] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, stp, create] + +# - name: Test STP configuration with minimal global parameters +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ stp_vars_map.test_stp_create_minimal_global }}" +# register: result_stp_create_minimal_global +# tags: [positive, stp, create, minimal_global] + +# - name: Assert STP minimal global configuration succeeded +# assert: +# that: +# - result_stp_create_minimal_global.failed == false +# - result_stp_create_minimal_global.changed == true +# - "'Successfully' in result_stp_create_minimal_global.response" +# fail_msg: "STP minimal global configuration should have succeeded" +# success_msg: "STP minimal global configuration succeeded" +# tags: [positive, stp, create, minimal_global] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, stp, create] + +# - name: Test STP configuration with minimal instance parameters +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ stp_vars_map.test_stp_create_minimal_instance }}" +# register: result_stp_create_minimal_instance +# tags: [positive, stp, create, minimal_instance] + +# - name: Assert STP minimal instance configuration succeeded +# assert: +# that: +# - result_stp_create_minimal_instance.failed == false +# - result_stp_create_minimal_instance.changed == true +# - "'Successfully' in result_stp_create_minimal_instance.response" +# fail_msg: "STP minimal instance configuration should have succeeded" +# success_msg: "STP minimal instance configuration succeeded" +# tags: [positive, stp, create, minimal_instance] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, stp, create] + +# # ============================================================================= +# # POSITIVE TEST CASES - STP UPDATE (MERGED STATE) +# # ============================================================================= + +# # ############################################# +# # STP Update Tests +# # ############################################# + +# - name: Test STP update - global parameters only +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ stp_vars_map.test_stp_update_global_only }}" +# register: result_stp_update_global_only +# tags: [positive, stp, update, global_only] + +# - name: 
Assert STP global only update succeeded +# assert: +# that: +# - result_stp_update_global_only.failed == false +# - result_stp_update_global_only.changed == true +# - "'Successfully' in result_stp_update_global_only.response" +# fail_msg: "STP global only update should have succeeded" +# success_msg: "STP global only update succeeded" +# tags: [positive, stp, update, global_only] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, stp, update] + +# - name: Test STP update - add new instances +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ stp_vars_map.test_stp_update_add_instances }}" +# register: result_stp_update_add_instances +# tags: [positive, stp, update, add_instances] + +# - name: Assert STP add instances update succeeded +# assert: +# that: +# - result_stp_update_add_instances.failed == false +# - result_stp_update_add_instances.changed == true +# - "'Successfully' in result_stp_update_add_instances.response" +# fail_msg: "STP add instances update should have succeeded" +# success_msg: "STP add instances update succeeded" +# tags: [positive, stp, update, add_instances] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, stp, update] + +# - name: Test STP update - modify existing instances +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ stp_vars_map.test_stp_update_modify_instances }}" +# register: result_stp_update_modify_instances +# tags: [positive, stp, update, modify_instances] + +# - name: Assert STP modify instances update succeeded +# assert: +# that: +# - result_stp_update_modify_instances.failed == false +# - result_stp_update_modify_instances.changed == true +# - "'Successfully' in result_stp_update_modify_instances.response" +# fail_msg: "STP modify instances update should have succeeded" +# success_msg: "STP modify instances update succeeded" +# tags: [positive, stp, update, modify_instances] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, stp, update] + +# - name: Test STP update - global and instances together +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ stp_vars_map.test_stp_update_global_and_instances }}" +# register: result_stp_update_global_and_instances +# tags: [positive, stp, update, global_and_instances] + +# - name: Assert STP global and instances update succeeded +# assert: +# that: +# - result_stp_update_global_and_instances.failed == false +# - result_stp_update_global_and_instances.changed == true +# - "'Successfully' in result_stp_update_global_and_instances.response" +# fail_msg: "STP global and instances update should have succeeded" +# success_msg: "STP global and instances update succeeded" +# tags: [positive, stp, update, global_and_instances] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, stp, update] + +# - name: Test STP update - change STP mode +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ stp_vars_map.test_stp_update_change_mode }}" +# register: result_stp_update_change_mode +# tags: [positive, stp, update, change_mode] + +# - name: Assert STP mode change update succeeded +# assert: +# that: +# - result_stp_update_change_mode.failed == false +# - result_stp_update_change_mode.changed == true +# - 
"'Successfully' in result_stp_update_change_mode.response" +# fail_msg: "STP mode change update should have succeeded" +# success_msg: "STP mode change update succeeded" +# tags: [positive, stp, update, change_mode] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, stp, update] + +# - name: Test STP update - modify single instance +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ stp_vars_map.test_stp_update_single_instance }}" +# register: result_stp_update_single_instance +# tags: [positive, stp, update, single_instance] + +# - name: Assert STP single instance update succeeded +# assert: +# that: +# - result_stp_update_single_instance.failed == false +# - result_stp_update_single_instance.changed == true +# - "'Successfully' in result_stp_update_single_instance.response" +# fail_msg: "STP single instance update should have succeeded" +# success_msg: "STP single instance update succeeded" +# tags: [positive, stp, update, single_instance] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, stp, update] + +# # # ============================================================================= +# # # POSITIVE TEST CASES - STP DELETION (DELETED STATE - TYPE 3) +# # # ============================================================================= + +# # # ############################################# +# # # STP Deletion Tests (Type 3) +# # # ############################################# + +# # - name: Test STP delete - instances only (Type 3 - deletes specified instances, preserves global) +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: deleted +# # config: +# # - "{{ stp_vars_map.test_stp_delete_instances_only }}" +# # register: result_stp_delete_instances_only +# # tags: [positive, stp, delete, instances_only] + +# # - name: Assert STP instances only deletion succeeded +# # assert: +# # that: +# # - result_stp_delete_instances_only.failed == false +# # - result_stp_delete_instances_only.changed == true +# # - "'Successfully' in result_stp_delete_instances_only.response" +# # fail_msg: "STP instances only deletion should have succeeded" +# # success_msg: "STP instances only deletion succeeded" +# # tags: [positive, stp, delete, instances_only] + +# # - name: Pause to allow configuration to settle +# # pause: +# # seconds: 10 + +# # - name: Test STP delete - global and instances (Type 3 - deletes instances only, preserves global) +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: deleted +# # config: +# # - "{{ stp_vars_map.test_stp_delete_global_and_instances }}" +# # register: result_stp_delete_global_and_instances +# # tags: [positive, stp, delete, global_and_instances] + +# # - name: Assert STP global and instances deletion succeeded +# # assert: +# # that: +# # - result_stp_delete_global_and_instances.failed == false +# # - result_stp_delete_global_and_instances.changed == true +# # - "'Successfully' in result_stp_delete_global_and_instances.response" +# # fail_msg: "STP global and instances deletion should have succeeded" +# # success_msg: "STP global and instances deletion succeeded" +# # tags: [positive, stp, delete, global_and_instances] + +# # - name: Pause to allow configuration to settle +# # pause: +# # seconds: 10 +# # tags: [positive, stp, delete] + +# # - name: Test STP delete - global only (Type 3 - resets global + deletes instances) +# # 
cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: deleted +# # config: +# # - "{{ stp_vars_map.test_stp_delete_global_only }}" +# # register: result_stp_delete_global_only +# # tags: [positive, stp, delete, global_only] + +# # - name: Assert STP global only deletion succeeded +# # assert: +# # that: +# # - result_stp_delete_global_only.failed == false +# # - result_stp_delete_global_only.changed == true +# # - "'Successfully' in result_stp_delete_global_only.response" +# # fail_msg: "STP global only deletion should have succeeded" +# # success_msg: "STP global only deletion succeeded" +# # tags: [positive, stp, delete, global_only] + +# # - name: Pause to allow configuration to settle +# # pause: +# # seconds: 10 +# # tags: [positive, stp, delete] + +# # - name: Test STP delete - empty configuration (Type 3 - resets global + deletes all instances) +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: deleted +# # config: +# # - "{{ stp_vars_map.test_stp_delete_empty }}" +# # register: result_stp_delete_empty +# # tags: [positive, stp, delete, empty] + +# # - name: Assert STP empty deletion succeeded +# # assert: +# # that: +# # - result_stp_delete_empty.failed == false +# # - result_stp_delete_empty.changed == true +# # - "'Successfully' in result_stp_delete_empty.response" +# # fail_msg: "STP empty deletion should have succeeded" +# # success_msg: "STP empty deletion succeeded" +# # tags: [positive, stp, delete, empty] + +# # - name: Pause to allow configuration to settle +# # pause: +# # seconds: 10 +# # tags: [positive, stp, delete] + +# # # Setup configuration for deletion tests +# # - name: Setup STP configuration for deletion tests +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ stp_vars_map.test_stp_create_global_and_instances }}" +# # register: result_stp_setup_for_deletion +# # tags: [positive, stp, delete, setup] + +# # - name: Pause to allow configuration to settle +# # pause: +# # seconds: 15 +# # tags: [positive, stp, delete] + +# # # Setup configuration again for instances-only deletion test +# # - name: Setup STP configuration for instances deletion test +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ stp_vars_map.test_stp_create_global_and_instances }}" +# # register: result_stp_setup_for_instances_deletion +# # tags: [positive, stp, delete, setup] + +# # - name: Pause to allow configuration to settle +# # pause: +# # seconds: 15 +# # tags: [positive, stp, delete] + +# # tags: [positive, stp, delete] + +# # - name: Test STP delete - single instance +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: deleted +# # config: +# # - "{{ stp_vars_map.test_stp_delete_single_instance }}" +# # register: result_stp_delete_single_instance +# # tags: [positive, stp, delete, single_instance] + +# # - name: Assert STP single instance deletion succeeded +# # assert: +# # that: +# # - result_stp_delete_single_instance.failed == false +# # - result_stp_delete_single_instance.changed == true +# # - "'Successfully' in result_stp_delete_single_instance.response" +# # fail_msg: "STP single instance deletion should have succeeded" +# # success_msg: "STP single instance deletion succeeded" +# # tags: [positive, stp, delete, single_instance] + +# # - name: Pause to allow configuration to settle +# # pause: +# # seconds: 10 +# # tags: 
[positive, stp, delete] + +# # # Setup configuration again for global+instances deletion test +# # - name: Setup STP configuration for global+instances deletion test +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ stp_vars_map.test_stp_create_global_and_instances }}" +# # register: result_stp_setup_for_global_instances_deletion +# # tags: [positive, stp, delete, setup] + +# # - name: Pause to allow configuration to settle +# # pause: +# # seconds: 15 +# # tags: [positive, stp, delete] + +# # - name: Test STP delete - multiple specific instances +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: deleted +# # config: +# # - "{{ stp_vars_map.test_stp_delete_multiple_instances }}" +# # register: result_stp_delete_multiple_instances +# # tags: [positive, stp, delete, multiple_instances] + +# # - name: Assert STP multiple instances deletion succeeded +# # assert: +# # that: +# # - result_stp_delete_multiple_instances.failed == false +# # - result_stp_delete_multiple_instances.changed == true +# # - "'Successfully' in result_stp_delete_multiple_instances.response" +# # fail_msg: "STP multiple instances deletion should have succeeded" +# # success_msg: "STP multiple instances deletion succeeded" +# # tags: [positive, stp, delete, multiple_instances] + +# # - name: Pause to allow configuration to settle +# # pause: +# # seconds: 10 +# # tags: [positive, stp, delete] + +# # # ============================================================================= +# # # ADVANCED POSITIVE TEST SCENARIOS +# # # ============================================================================= + +# # # ############################################# +# # # Idempotency Tests +# # # ############################################# + +# # - name: Test STP Idempotency - Configure same STP settings twice +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ stp_vars_map.test_stp_create_global_and_instances }}" +# # register: result_stp_idempotency_first +# # tags: [positive, stp, idempotency] + +# # - name: Pause to allow configuration to settle +# # pause: +# # seconds: 40 +# # tags: [positive, stp, idempotency] + +# # - name: Test STP Idempotency - Configure same STP settings again (should be idempotent) +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: merged +# # config: +# # - "{{ stp_vars_map.test_stp_create_global_and_instances }}" +# # register: result_stp_idempotency_second +# # tags: [positive, stp, idempotency] + +# # - name: Assert STP Idempotency - Second configuration should not change +# # assert: +# # that: +# # - result_stp_idempotency_second.failed == false +# # - result_stp_idempotency_second.changed == false +# # fail_msg: "STP idempotency test failed" +# # success_msg: "STP idempotency test succeeded" +# # tags: [positive, stp, idempotency] + +# # # ############################################# +# # # Cleanup Test Configurations +# # # ############################################# + +# # - name: Cleanup - Reset STP to default configuration +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: deleted +# # config: +# # - ip_address: "204.1.2.3" +# # device_collection_status_check: false +# # layer2_configuration: +# # stp: {} +# # register: result_stp_cleanup +# # ignore_errors: true +# # tags: [cleanup, stp] diff --git 
a/tests/integration/ccc_wired_campus_automation_management/tests/test_wca_feature_vlans.yml b/tests/integration/ccc_wired_campus_automation_management/tests/test_wca_feature_vlans.yml new file mode 100644 index 0000000000..7d498fec25 --- /dev/null +++ b/tests/integration/ccc_wired_campus_automation_management/tests/test_wca_feature_vlans.yml @@ -0,0 +1,789 @@ +# --- +# # =================================================================================================== +# # VLAN FEATURE TESTS SUMMARY +# # =================================================================================================== +# # +# # This test suite validates VLAN configuration for Wired Campus Automation. +# # +# # TEST CATEGORIES: +# # +# # 1. NEGATIVE VALIDATION TESTS (Lines 30-230) +# # - VLAN ID validation (below min, above max, zero, negative, missing) +# # - Admin status validation (string/integer instead of boolean) +# # - Data type and structure validation (invalid dict/list types) +# # - Edge case and boundary tests +# # +# # 2. POSITIVE VALIDATION TESTS (Lines 230-750) +# # a) VLAN Creation Tests +# # - Single VLAN creation (minimal params, all params, admin disabled) +# # - Multiple VLANs creation with mixed configurations +# # - Boundary value tests (min/max VLAN IDs) +# # - Special characters and numeric names +# # +# # b) VLAN Update Tests (Merged State) +# # - Single VLAN updates (name only, both name and status) +# # - Multiple VLANs updates +# # - Enable/disable VLAN admin status +# # - Minimal parameter updates +# # +# # c) VLAN Deletion Tests (Deleted State) +# # - Single VLAN deletion +# # - Multiple VLANs deletion +# # - Selective deletion +# # - Boundary VLANs deletion +# # - Special characteristics deletion +# # +# # d) Advanced Test Scenarios +# # - Idempotency tests (same config applied twice) +# # - Cleanup operations +# # +# # =================================================================================================== + +# - debug: msg="Starting campus automation workflow management test" +# - debug: msg="Role Path {{ role_path }}" + +# - block: +# - name: Load vars and declare dnac vars +# include_vars: +# file: "{{ role_path }}/vars/vars_wca_test_feature_vlans.yml" +# name: vlan_vars_map +# vars: +# dnac_login: &dnac_login +# dnac_host: "{{ dnac_host }}" +# dnac_username: "{{ dnac_username }}" +# dnac_password: "{{ dnac_password }}" +# dnac_verify: "{{ dnac_verify }}" +# dnac_port: "{{ dnac_port }}" +# dnac_version: "{{ dnac_version }}" +# dnac_debug: "{{ dnac_debug }}" +# dnac_log: true +# dnac_log_level: DEBUG +# config_verify: true +# dnac_api_task_timeout: "{{ dnac_api_task_timeout }}" +# dnac_log_append: false + +# # ============================================================================= +# # Negative Validation Tests for VLANs +# # ============================================================================= + +# # ############################################# +# # VLAN ID Validation Tests +# # ############################################# + +# - name: Test VLAN - Invalid VLAN ID (below minimum) +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ vlan_vars_map.test_vlan_id_below_min }}" +# register: result_vlan_id_below_min +# ignore_errors: true + +# - name: Assert VLAN - Invalid VLAN ID (below minimum) +# assert: +# that: +# - result_vlan_id_below_min.failed == true +# - "'vlan_id' in result_vlan_id_below_min.msg" +# - "'range' in result_vlan_id_below_min.msg" +# - "'2' in 
result_vlan_id_below_min.msg" +# - "'4094' in result_vlan_id_below_min.msg" + +# - name: Test VLAN - Invalid VLAN ID (above maximum) +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ vlan_vars_map.test_vlan_id_above_max }}" +# register: result_vlan_id_above_max +# ignore_errors: true + +# - name: Assert VLAN - Invalid VLAN ID (above maximum) +# assert: +# that: +# - result_vlan_id_above_max.failed == true +# - "'vlan_id' in result_vlan_id_above_max.msg" +# - "'range' in result_vlan_id_above_max.msg" + +# - name: Test VLAN - Invalid VLAN ID (zero) +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ vlan_vars_map.test_vlan_id_zero }}" +# register: result_vlan_id_zero +# ignore_errors: true + +# - name: Assert VLAN - Invalid VLAN ID (zero) +# assert: +# that: +# - result_vlan_id_zero.failed == true +# - "'vlan_id' in result_vlan_id_zero.msg" +# - "'range' in result_vlan_id_zero.msg" + +# - name: Test VLAN - Invalid VLAN ID (negative) +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ vlan_vars_map.test_vlan_id_negative }}" +# register: result_vlan_id_negative +# ignore_errors: true + +# - name: Assert VLAN - Invalid VLAN ID (negative) +# assert: +# that: +# - result_vlan_id_negative.failed == true +# - "'vlan_id' in result_vlan_id_negative.msg" +# - "'range' in result_vlan_id_negative.msg" + +# - name: Test VLAN - Missing required VLAN ID +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ vlan_vars_map.test_vlan_id_missing }}" +# register: result_vlan_id_missing +# ignore_errors: true + +# - name: Assert VLAN - Missing required VLAN ID +# assert: +# that: +# - result_vlan_id_missing.failed == true +# - "'vlan_id' in result_vlan_id_missing.msg" +# - "'required' in result_vlan_id_missing.msg" + +# # ############################################# +# # VLAN Admin Status Tests +# # ############################################# + +# - name: Test VLAN - Invalid admin status (string instead of boolean) +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ vlan_vars_map.test_vlan_admin_status_string }}" +# register: result_vlan_admin_status_string +# ignore_errors: true + +# - name: Assert VLAN - Invalid admin status (string) +# assert: +# that: +# - result_vlan_admin_status_string.failed == true +# - "'vlan_admin_status' in result_vlan_admin_status_string.msg" + +# - name: Test VLAN - Invalid admin status (integer instead of boolean) +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ vlan_vars_map.test_vlan_admin_status_integer }}" +# register: result_vlan_admin_status_integer +# ignore_errors: true + +# - name: Assert VLAN - Invalid admin status (integer) +# assert: +# that: +# - result_vlan_admin_status_integer.failed == true +# - "'vlan_admin_status' in result_vlan_admin_status_integer.msg" + +# # ############################################# +# # Data Type and Structure Tests +# # ############################################# + +# - name: Test VLAN - Invalid data type (string instead of dict) +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ vlan_vars_map.test_vlan_invalid_dict_type }}" +# register: result_vlan_invalid_dict_type +# ignore_errors: true + +# - name: Assert VLAN - 
Invalid data type +# assert: +# that: +# - result_vlan_invalid_dict_type.failed == true +# - "'dictionary' in result_vlan_invalid_dict_type.msg" + +# - name: Test VLAN - Invalid VLAN list structure (not a list) +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ vlan_vars_map.test_vlan_not_list }}" +# register: result_vlan_not_list +# ignore_errors: true + +# - name: Assert VLAN - Invalid VLAN list structure +# assert: +# that: +# - result_vlan_not_list.failed == true +# - "'str' in result_vlan_not_list.msg" + +# # ############################################# +# # Edge Case and Boundary Tests +# # ############################################# + +# - name: Test VLAN - VLAN ID 1 (below supported minimum of 2) +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ vlan_vars_map.test_vlan_id_min_valid }}" +# register: result_vlan_id_min_valid +# ignore_errors: true + +# - name: Assert VLAN - VLAN ID 1 is rejected (should fail) +# assert: +# that: +# - result_vlan_id_min_valid.failed == true +# - result_vlan_id_min_valid.changed == false + +# - name: Test VLAN - Multiple VLANs with one invalid +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ vlan_vars_map.test_vlan_multiple_one_invalid }}" +# register: result_vlan_multiple_one_invalid +# ignore_errors: true + +# - name: Assert VLAN - Multiple VLANs with one invalid +# assert: +# that: +# - result_vlan_multiple_one_invalid.failed == true +# - "'vlan_id' in result_vlan_multiple_one_invalid.msg" + +# - name: Test VLAN Creation - Single VLAN admin disabled +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ vlan_vars_map.test_vlan_create_single_disabled_1 }}" +# register: result_vlan_create_single_disabled +# ignore_errors: true + +# - name: Assert VLAN Creation - Single VLAN disabled (expected to fail) +# assert: +# that: +# - result_vlan_create_single_disabled.failed == true +# - result_vlan_create_single_disabled.changed == false +# - "'Failed' in result_vlan_create_single_disabled.response" + +# # ============================================================================= +# # POSITIVE TEST CASES - VLAN CREATION (MERGED STATE) +# # ============================================================================= + +# # ############################################# +# # Single VLAN Creation Tests +# # ############################################# + +# - name: Test VLAN Creation - Single VLAN with minimal parameters +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ vlan_vars_map.test_vlan_create_single_minimal }}" +# register: result_vlan_create_single_minimal + +# - name: Assert VLAN Creation - Single VLAN minimal (should succeed) +# assert: +# that: +# - result_vlan_create_single_minimal.failed == false +# - result_vlan_create_single_minimal.changed == true +# - "'Successfully' in result_vlan_create_single_minimal.response" +# fail_msg: Single VLAN creation with minimal parameters failed +# success_msg: Single VLAN creation with minimal parameters succeeded + +# - name: Wait 10 seconds before next test to allow configuration to settle +# pause: +# seconds: 10 + +# - name: Test VLAN Creation - Single VLAN with all parameters +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ 
vlan_vars_map.test_vlan_create_single_all_params }}" +# register: result_vlan_create_single_all_params + +# - name: Assert VLAN Creation - Single VLAN all params (should succeed) +# assert: +# that: +# - result_vlan_create_single_all_params.failed == false +# - result_vlan_create_single_all_params.changed == true +# - "'Successfully' in result_vlan_create_single_all_params.response" +# fail_msg: Single VLAN creation with all parameters failed +# success_msg: Single VLAN creation with all parameters succeeded + +# - name: Wait 10 seconds before next test to allow configuration to settle +# pause: +# seconds: 10 + +# - name: Test VLAN Creation - Single VLAN admin disabled +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ vlan_vars_map.test_vlan_create_single_disabled_2 }}" +# register: result_vlan_create_single_disabled_2 + +# - name: Assert VLAN Creation - Single VLAN disabled (should succeed) +# assert: +# that: +# - result_vlan_create_single_disabled_2.failed == false +# - result_vlan_create_single_disabled_2.changed == true +# - "'Successfully' in result_vlan_create_single_disabled_2.response" +# fail_msg: Single VLAN creation with admin disabled failed +# success_msg: Single VLAN creation with admin disabled succeeded + +# - name: Wait 10 seconds before next test to allow configuration to settle +# pause: +# seconds: 10 + +# # ############################################# +# # Multiple VLANs Creation Tests +# # ############################################# + +# - name: Test VLAN Creation - Multiple VLANs mixed configurations +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ vlan_vars_map.test_vlan_create_multiple_mixed }}" +# register: result_vlan_create_multiple_mixed + +# - name: Assert VLAN Creation - Multiple VLANs mixed (should succeed) +# assert: +# that: +# - result_vlan_create_multiple_mixed.failed == false +# - result_vlan_create_multiple_mixed.changed == true +# - "'Successfully' in result_vlan_create_multiple_mixed.response" +# fail_msg: Multiple VLANs creation with mixed configurations failed +# success_msg: Multiple VLANs creation with mixed configurations succeeded + +# - name: Wait 10 seconds before next test to allow configuration to settle +# pause: +# seconds: 10 + +# # ############################################# +# # Boundary Value Tests +# # ############################################# + +# - name: Test VLAN Creation - Minimum boundary VLAN ID +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ vlan_vars_map.test_vlan_create_boundary_min }}" +# register: result_vlan_create_boundary_min + +# - name: Assert VLAN Creation - Minimum boundary (should succeed) +# assert: +# that: +# - result_vlan_create_boundary_min.failed == false +# - result_vlan_create_boundary_min.changed == true +# - "'Successfully' in result_vlan_create_boundary_min.response" +# fail_msg: Minimum boundary VLAN creation failed +# success_msg: Minimum boundary VLAN creation succeeded + +# - name: Wait 10 seconds before next test to allow configuration to settle +# pause: +# seconds: 10 + +# - name: Test VLAN Creation - Maximum boundary VLAN ID +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ vlan_vars_map.test_vlan_create_boundary_max }}" +# register: result_vlan_create_boundary_max + +# - name: Assert VLAN Creation - Maximum boundary (should succeed) +# 
assert: +# that: +# - result_vlan_create_boundary_max.failed == false +# - result_vlan_create_boundary_max.changed == true +# - "'Successfully' in result_vlan_create_boundary_max.response" +# fail_msg: Maximum boundary VLAN creation failed +# success_msg: Maximum boundary VLAN creation succeeded + +# - name: Wait 10 seconds before next test to allow configuration to settle +# pause: +# seconds: 10 + +# # ############################################# +# # Special Characters and Names +# # ############################################# + +# - name: Test VLAN Creation - Special characters in name +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ vlan_vars_map.test_vlan_create_special_chars }}" +# register: result_vlan_create_special_chars + +# - name: Assert VLAN Creation - Special characters (should succeed) +# assert: +# that: +# - result_vlan_create_special_chars.failed == false +# - result_vlan_create_special_chars.changed == true +# - "'Successfully' in result_vlan_create_special_chars.response" +# fail_msg: VLAN creation with special characters failed +# success_msg: VLAN creation with special characters succeeded + +# - name: Wait 10 seconds before next test to allow configuration to settle +# pause: +# seconds: 10 + +# - name: Test VLAN Creation - Numeric name +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ vlan_vars_map.test_vlan_create_numeric_name }}" +# register: result_vlan_create_numeric_name + +# - name: Assert VLAN Creation - Numeric name (should succeed) +# assert: +# that: +# - result_vlan_create_numeric_name.failed == false +# - result_vlan_create_numeric_name.changed == true +# - "'Successfully' in result_vlan_create_numeric_name.response" +# fail_msg: VLAN creation with numeric name failed +# success_msg: VLAN creation with numeric name succeeded + +# - name: Wait 10 seconds before next test to allow configuration to settle +# pause: +# seconds: 10 + +# # ============================================================================= +# # POSITIVE TEST CASES - VLAN UPDATE (MERGED STATE) +# # ============================================================================= + +# # ############################################# +# # Single VLAN Update Tests +# # ############################################# + +# - name: Test VLAN Update - Change name only +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ vlan_vars_map.test_vlan_update_name_only }}" +# register: result_vlan_update_name_only + +# - name: Assert VLAN Update - Name only (should succeed) +# assert: +# that: +# - result_vlan_update_name_only.failed == false +# - result_vlan_update_name_only.changed == true +# - "'Successfully' in result_vlan_update_name_only.response" +# fail_msg: VLAN name update failed +# success_msg: VLAN name update succeeded + +# - name: Wait 10 seconds before next test to allow configuration to settle +# pause: +# seconds: 10 + +# - name: Test VLAN Update - Change both name and status +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ vlan_vars_map.test_vlan_update_both_params }}" +# register: result_vlan_update_both_params + +# - name: Assert VLAN Update - Both parameters (should succeed) +# assert: +# that: +# - result_vlan_update_both_params.failed == false +# - result_vlan_update_both_params.changed == true +# - "'Successfully' in 
result_vlan_update_both_params.response" +# fail_msg: VLAN update of both parameters failed +# success_msg: VLAN update of both parameters succeeded + +# - name: Wait 10 seconds before next test to allow configuration to settle +# pause: +# seconds: 10 + +# # ############################################# +# # Multiple VLANs Update Tests +# # ############################################# + +# - name: Test VLAN Update - Multiple VLANs +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ vlan_vars_map.test_vlan_update_multiple }}" +# register: result_vlan_update_multiple + +# - name: Assert VLAN Update - Multiple VLANs (should succeed) +# assert: +# that: +# - result_vlan_update_multiple.failed == false +# - result_vlan_update_multiple.changed == true +# - "'Successfully' in result_vlan_update_multiple.response" +# fail_msg: Multiple VLANs update failed +# success_msg: Multiple VLANs update succeeded + +# - name: Wait 10 seconds before next test to allow configuration to settle +# pause: +# seconds: 10 + +# - name: Test VLAN Update - Enable disabled VLAN +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ vlan_vars_map.test_vlan_update_enable_disabled }}" +# register: result_vlan_update_enable_disabled + +# - name: Assert VLAN Update - Enable disabled (should succeed) +# assert: +# that: +# - result_vlan_update_enable_disabled.failed == false +# - result_vlan_update_enable_disabled.changed == true +# - "'Successfully' in result_vlan_update_enable_disabled.response" +# fail_msg: VLAN enable update failed +# success_msg: VLAN enable update succeeded + +# - name: Wait 10 seconds before next test to allow configuration to settle +# pause: +# seconds: 10 + +# - name: Test VLAN Update - Minimal parameters +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ vlan_vars_map.test_vlan_update_minimal }}" +# register: result_vlan_update_minimal + +# - name: Assert VLAN Update - Minimal parameters (should succeed) +# assert: +# that: +# - result_vlan_update_minimal.failed == false +# - result_vlan_update_minimal.changed == true +# - "'Successfully' in result_vlan_update_minimal.response" +# fail_msg: VLAN minimal update failed +# success_msg: VLAN minimal update succeeded + +# - name: Wait 10 seconds before next test to allow configuration to settle +# pause: +# seconds: 10 + +# # ============================================================================= +# # POSITIVE TEST CASES - VLAN DELETION (DELETED STATE) +# # ============================================================================= + +# # ############################################# +# # Single VLAN Deletion Tests +# # ############################################# + +# - name: Test VLAN Deletion - Single VLAN +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: deleted +# config: +# - "{{ vlan_vars_map.test_vlan_delete_single }}" +# register: result_vlan_delete_single + +# - name: Assert VLAN Deletion - Single VLAN (should succeed) +# assert: +# that: +# - result_vlan_delete_single.failed == false +# - result_vlan_delete_single.changed == true +# - "'Successfully' in result_vlan_delete_single.response" +# fail_msg: Single VLAN deletion failed +# success_msg: Single VLAN deletion succeeded + +# - name: Wait 10 seconds before next test to allow configuration to settle +# pause: +# seconds: 10 + +# # 
############################################# +# # Multiple VLANs Deletion Tests +# # ############################################# + +# - name: Test VLAN Deletion - Multiple VLANs +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: deleted +# config: +# - "{{ vlan_vars_map.test_vlan_delete_multiple }}" +# register: result_vlan_delete_multiple + +# - name: Assert VLAN Deletion - Multiple VLANs (should succeed) +# assert: +# that: +# - result_vlan_delete_multiple.failed == false +# - result_vlan_delete_multiple.changed == true +# - "'Successfully' in result_vlan_delete_multiple.response" +# fail_msg: Multiple VLANs deletion failed +# success_msg: Multiple VLANs deletion succeeded + +# - name: Wait 10 seconds before next test to allow configuration to settle +# pause: +# seconds: 10 + +# - name: Test VLAN Deletion - Selective deletion +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: deleted +# config: +# - "{{ vlan_vars_map.test_vlan_delete_selective }}" +# register: result_vlan_delete_selective + +# - name: Assert VLAN Deletion - Selective (should succeed) +# assert: +# that: +# - result_vlan_delete_selective.failed == false +# - result_vlan_delete_selective.changed == true +# - "'Successfully' in result_vlan_delete_selective.response" +# fail_msg: Selective VLAN deletion failed +# success_msg: Selective VLAN deletion succeeded + +# - name: Wait 10 seconds before next test to allow configuration to settle +# pause: +# seconds: 10 + +# # ############################################# +# # Boundary VLANs Deletion Tests +# # ############################################# + +# - name: Test VLAN Deletion - Boundary VLANs +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: deleted +# config: +# - "{{ vlan_vars_map.test_vlan_delete_boundary }}" +# register: result_vlan_delete_boundary + +# - name: Assert VLAN Deletion - Boundary VLANs (should succeed) +# assert: +# that: +# - result_vlan_delete_boundary.failed == false +# - result_vlan_delete_boundary.changed == true +# - "'Successfully' in result_vlan_delete_boundary.response" +# fail_msg: Boundary VLANs deletion failed +# success_msg: Boundary VLANs deletion succeeded + +# - name: Wait 10 seconds before next test to allow configuration to settle +# pause: +# seconds: 10 + +# # ############################################# +# # Special Characteristics Deletion +# # ############################################# + +# - name: Test VLAN Deletion - Special characteristics VLANs +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: deleted +# config: +# - "{{ vlan_vars_map.test_vlan_delete_special_chars }}" +# register: result_vlan_delete_special_chars + +# - name: Assert VLAN Deletion - Special characteristics (should succeed) +# assert: +# that: +# - result_vlan_delete_special_chars.failed == false +# - result_vlan_delete_special_chars.changed == true +# - "'Successfully' in result_vlan_delete_special_chars.response" +# fail_msg: Special characteristics VLANs deletion failed +# success_msg: Special characteristics VLANs deletion succeeded + +# - name: Wait 10 seconds before next test to allow configuration to settle +# pause: +# seconds: 10 + +# # ============================================================================= +# # ADVANCED POSITIVE TEST SCENARIOS +# # ============================================================================= + +# # ############################################# +# # Idempotency Tests +# # 
############################################# + +# - name: Test VLAN Idempotency - Create same VLAN twice +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - ip_address: 204.1.2.3 +# device_collection_status_check: false +# layer2_configuration: +# vlans: +# - vlan_id: 3001 +# vlan_name: Idempotency_Test_VLAN +# vlan_admin_status: true +# register: result_vlan_idempotency_first + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 40 + +# - name: Test VLAN Idempotency - Create same VLAN again +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - ip_address: 204.1.2.3 +# device_collection_status_check: false +# layer2_configuration: +# vlans: +# - vlan_id: 3001 +# vlan_name: Idempotency_Test_VLAN +# vlan_admin_status: true +# register: result_vlan_idempotency_second + +# - name: Assert VLAN Idempotency - Second creation should not change +# assert: +# that: +# - result_vlan_idempotency_first.failed == false +# - result_vlan_idempotency_second.failed == false +# - result_vlan_idempotency_first.changed == true +# # should be idempotent +# - result_vlan_idempotency_second.changed == false +# fail_msg: VLAN idempotency test failed +# success_msg: VLAN idempotency test succeeded + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 20 + +# # ############################################# +# # Cleanup Test VLANs +# # ############################################# + +# - name: Cleanup - Delete test VLANs created during positive tests +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: deleted +# config: +# - ip_address: 204.1.2.3 +# device_collection_status_check: false +# layer2_configuration: +# vlans: +# - vlan_id: 3001 # Idempotency test VLAN +# register: result_vlan_cleanup +# ignore_errors: true + +# always: +# - name: Display positive test summary +# debug: +# msg: VLAN positive validation tests completed successfully +# # ############################################# +# # Delete ALL VLANS post tests +# # ############################################# + +# # - name: Test VLAN Deletion - ALL +# # cisco.dnac.wired_campus_automation_workflow_manager: +# # <<: *dnac_login +# # state: deleted +# # config: +# # - "{{ vlan_vars_map.test_vlan_delete_all }}" +# # register: result_vlan_delete_all diff --git a/tests/integration/ccc_wired_campus_automation_management/tests/test_wca_feature_vtp.yml b/tests/integration/ccc_wired_campus_automation_management/tests/test_wca_feature_vtp.yml new file mode 100644 index 0000000000..b8f299f326 --- /dev/null +++ b/tests/integration/ccc_wired_campus_automation_management/tests/test_wca_feature_vtp.yml @@ -0,0 +1,1212 @@ +# --- +# # =================================================================================================== +# # VTP FEATURE TESTS SUMMARY +# # =================================================================================================== +# # +# # This test suite validates VTP (VLAN Trunking Protocol) configuration functionality for Wired Campus Automation. +# # +# # TEST CATEGORIES: +# # +# # 1. 
NEGATIVE VALIDATION TESTS (Lines 75-450) +# # - VTP Mode validation (invalid choices, integer/boolean instead of string) +# # - VTP Version validation (invalid choices, integer/boolean instead of string) +# # - VTP Domain Name validation (integer/boolean instead of string, exceeds maximum length) +# # - VTP Pruning validation (string/integer instead of boolean) +# # - VTP Configuration File Name validation (integer/boolean instead of string, exceeds maximum length) +# # - VTP Source Interface validation (integer/boolean instead of string) +# # - Data type and structure validation (invalid dict/list types) +# # +# # 2. POSITIVE VALIDATION TESTS (Lines 450-1100) +# # a) VTP Creation Tests +# # - Mode only configuration +# # - Version only configuration +# # - Pruning only configuration +# # - Domain name only configuration +# # - All parameters configuration +# # +# # b) VTP Update Tests (Merged State) +# # - Mode changes (SERVER, CLIENT, TRANSPARENT, OFF) +# # - Version changes (VERSION_1, VERSION_2, VERSION_3) +# # - Enable/disable pruning +# # - Configuration file name updates +# # - Source interface updates +# # +# # c) VTP Reset Tests (Deleted State) +# # - Empty configuration reset (resets to defaults) +# # - Configuration with parameters reset +# # +# # d) Boundary Value Tests +# # - Maximum length domain names +# # - Maximum length configuration file names +# # - Minimal configurations +# # +# # e) Special Configuration Tests +# # - Version-specific configurations (VERSION_1, VERSION_2 with pruning) +# # - Mode-specific settings (server, transparent mode comprehensive) +# # +# # f) Advanced Test Scenarios +# # - Idempotency tests (same config applied twice) +# # - Cleanup operations +# # +# # VALIDATION RANGES: +# # - VTP Mode: string choices ["SERVER", "CLIENT", "TRANSPARENT", "OFF"] +# # - VTP Version: string choices ["VERSION_1", "VERSION_2", "VERSION_3"] +# # - VTP Domain Name: string (maximum length validation) +# # - VTP Pruning: boolean (true/false) +# # - VTP Configuration File Name: string (maximum length validation) +# # - VTP Source Interface: string (interface name) +# # +# # EXPECTED BEHAVIORS: +# # - Negative tests should fail with appropriate error messages +# # - Positive tests should succeed with 'Successfully' in response +# # - Idempotency: First run changes=true, second run changes=false +# # - All tests include proper pause times for configuration settlement +# # - Deleted state resets VTP to default configuration +# # - VTP pruning is typically used with VERSION_2 and higher +# # +# # =================================================================================================== + +# - debug: msg="Starting VTP feature tests for Wired Campus Automation" +# - debug: msg="Role Path {{ role_path }}" + +# - block: +# - name: Load VTP test variables +# include_vars: +# file: "{{ role_path }}/vars/vars_wca_test_feature_vtp.yml" +# name: vtp_vars_map +# vars: +# dnac_login: &dnac_login +# dnac_host: "{{ dnac_host }}" +# dnac_username: "{{ dnac_username }}" +# dnac_password: "{{ dnac_password }}" +# dnac_verify: "{{ dnac_verify }}" +# dnac_port: "{{ dnac_port }}" +# dnac_version: "{{ dnac_version }}" +# dnac_debug: "{{ dnac_debug }}" +# dnac_log: true +# dnac_log_level: "DEBUG" +# config_verify: true +# dnac_api_task_timeout: "{{ dnac_api_task_timeout }}" +# dnac_log_append: false + +# # ############################################# +# # Cleanup Test Configurations +# # ############################################# + +# - name: Cleanup - Reset VTP to default 
configuration +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: deleted +# config: +# - ip_address: "204.1.2.3" +# device_collection_status_check: false +# layer2_configuration: +# vtp: {} +# register: result_vtp_cleanup +# ignore_errors: true +# tags: [cleanup, vtp] + +# # ============================================================================= +# # Negative Validation Tests for VTP +# # ============================================================================= + +# # ############################################# +# # VTP Mode Tests +# # ############################################# + +# - name: Test VTP mode validation - invalid choice should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ vtp_vars_map.test_vtp_mode_invalid_choice }}" +# register: result_vtp_mode_invalid_choice +# ignore_errors: true +# tags: [negative, vtp, mode] + +# - name: Assert VTP mode invalid choice validation failed +# assert: +# that: +# - result_vtp_mode_invalid_choice.failed == true +# - "'must be one of' in result_vtp_mode_invalid_choice.msg" +# fail_msg: "VTP mode invalid choice validation should have failed" +# success_msg: "VTP mode invalid choice validation correctly failed" +# tags: [negative, vtp, mode] + +# - name: Test VTP mode validation - integer value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ vtp_vars_map.test_vtp_mode_integer }}" +# register: result_vtp_mode_integer +# ignore_errors: true +# tags: [negative, vtp, mode] + +# - name: Assert VTP mode integer validation failed +# assert: +# that: +# - result_vtp_mode_integer.failed == true +# - "'must be of type string' in result_vtp_mode_integer.msg" +# fail_msg: "VTP mode integer validation should have failed" +# success_msg: "VTP mode integer validation correctly failed" +# tags: [negative, vtp, mode] + +# - name: Test VTP mode validation - boolean value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ vtp_vars_map.test_vtp_mode_boolean }}" +# register: result_vtp_mode_boolean +# ignore_errors: true +# tags: [negative, vtp, mode] + +# - name: Assert VTP mode boolean validation failed +# assert: +# that: +# - result_vtp_mode_boolean.failed == true +# - "'must be of type string' in result_vtp_mode_boolean.msg" +# fail_msg: "VTP mode boolean validation should have failed" +# success_msg: "VTP mode boolean validation correctly failed" +# tags: [negative, vtp, mode] + +# # ############################################# +# # VTP Version Tests +# # ############################################# + +# - name: Test VTP version validation - invalid choice should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ vtp_vars_map.test_vtp_version_invalid_choice }}" +# register: result_vtp_version_invalid_choice +# ignore_errors: true +# tags: [negative, vtp, version] + +# - name: Assert VTP version invalid choice validation failed +# assert: +# that: +# - result_vtp_version_invalid_choice.failed == true +# - "'must be one of' in result_vtp_version_invalid_choice.msg" +# fail_msg: "VTP version invalid choice validation should have failed" +# success_msg: "VTP version invalid choice validation correctly failed" +# tags: [negative, vtp, version] + +# - name: Test VTP version validation - integer value should fail +# 
cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ vtp_vars_map.test_vtp_version_integer }}" +# register: result_vtp_version_integer +# ignore_errors: true +# tags: [negative, vtp, version] + +# - name: Assert VTP version integer validation failed +# assert: +# that: +# - result_vtp_version_integer.failed == true +# - "'must be of type string' in result_vtp_version_integer.msg" +# fail_msg: "VTP version integer validation should have failed" +# success_msg: "VTP version integer validation correctly failed" +# tags: [negative, vtp, version] + +# - name: Test VTP version validation - boolean value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ vtp_vars_map.test_vtp_version_boolean }}" +# register: result_vtp_version_boolean +# ignore_errors: true +# tags: [negative, vtp, version] + +# - name: Assert VTP version boolean validation failed +# assert: +# that: +# - result_vtp_version_boolean.failed == true +# - "'must be of type string' in result_vtp_version_boolean.msg" +# fail_msg: "VTP version boolean validation should have failed" +# success_msg: "VTP version boolean validation correctly failed" +# tags: [negative, vtp, version] + +# # ############################################# +# # VTP Domain Name Tests +# # ############################################# + +# - name: Test VTP domain name validation - exceeds maximum length should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ vtp_vars_map.test_vtp_domain_name_max_length }}" +# register: result_vtp_domain_name_max_length +# ignore_errors: true +# tags: [negative, vtp, domain_name] + +# - name: Assert VTP domain name max length validation failed +# assert: +# that: +# - result_vtp_domain_name_max_length.failed == true +# - "'exceeds maximum length' in result_vtp_domain_name_max_length.msg" +# fail_msg: "VTP domain name max length validation should have failed" +# success_msg: "VTP domain name max length validation correctly failed" +# tags: [negative, vtp, domain_name] + +# - name: Test VTP domain name validation - integer value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ vtp_vars_map.test_vtp_domain_name_integer }}" +# register: result_vtp_domain_name_integer +# ignore_errors: true +# tags: [negative, vtp, domain_name] + +# - name: Assert VTP domain name integer validation failed +# assert: +# that: +# - result_vtp_domain_name_integer.failed == true +# - "'must be of type string' in result_vtp_domain_name_integer.msg" +# fail_msg: "VTP domain name integer validation should have failed" +# success_msg: "VTP domain name integer validation correctly failed" +# tags: [negative, vtp, domain_name] + +# - name: Test VTP domain name validation - boolean value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ vtp_vars_map.test_vtp_domain_name_boolean }}" +# register: result_vtp_domain_name_boolean +# ignore_errors: true +# tags: [negative, vtp, domain_name] + +# - name: Assert VTP domain name boolean validation failed +# assert: +# that: +# - result_vtp_domain_name_boolean.failed == true +# - "'must be of type string' in result_vtp_domain_name_boolean.msg" +# fail_msg: "VTP domain name boolean validation should have failed" +# success_msg: "VTP domain name boolean validation correctly failed" +# 
tags: [negative, vtp, domain_name] + +# # ############################################# +# # VTP Pruning Tests +# # ############################################# + +# - name: Test VTP pruning validation - string value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ vtp_vars_map.test_vtp_pruning_string }}" +# register: result_vtp_pruning_string +# ignore_errors: true +# tags: [negative, vtp, pruning] + +# - name: Assert VTP pruning string validation failed +# assert: +# that: +# - result_vtp_pruning_string.failed == true +# - "'must be of type boolean' in result_vtp_pruning_string.msg" +# fail_msg: "VTP pruning string validation should have failed" +# success_msg: "VTP pruning string validation correctly failed" +# tags: [negative, vtp, pruning] + +# - name: Test VTP pruning validation - integer value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ vtp_vars_map.test_vtp_pruning_integer }}" +# register: result_vtp_pruning_integer +# ignore_errors: true +# tags: [negative, vtp, pruning] + +# - name: Assert VTP pruning integer validation failed +# assert: +# that: +# - result_vtp_pruning_integer.failed == true +# - "'must be of type boolean' in result_vtp_pruning_integer.msg" +# fail_msg: "VTP pruning integer validation should have failed" +# success_msg: "VTP pruning integer validation correctly failed" +# tags: [negative, vtp, pruning] + +# # ############################################# +# # VTP Configuration File Name Tests +# # ############################################# + +# - name: Test VTP configuration file name validation - exceeds maximum length should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ vtp_vars_map.test_vtp_config_file_name_max_length }}" +# register: result_vtp_config_file_name_max_length +# ignore_errors: true +# tags: [negative, vtp, config_file_name] + +# - name: Assert VTP configuration file name max length validation failed +# assert: +# that: +# - result_vtp_config_file_name_max_length.failed == true +# - "'exceeds maximum length' in result_vtp_config_file_name_max_length.msg" +# fail_msg: "VTP configuration file name max length validation should have failed" +# success_msg: "VTP configuration file name max length validation correctly failed" +# tags: [negative, vtp, config_file_name] + +# - name: Test VTP configuration file name validation - integer value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ vtp_vars_map.test_vtp_config_file_name_integer }}" +# register: result_vtp_config_file_name_integer +# ignore_errors: true +# tags: [negative, vtp, config_file_name] + +# - name: Assert VTP configuration file name integer validation failed +# assert: +# that: +# - result_vtp_config_file_name_integer.failed == true +# - "'must be of type string' in result_vtp_config_file_name_integer.msg" +# fail_msg: "VTP configuration file name integer validation should have failed" +# success_msg: "VTP configuration file name integer validation correctly failed" +# tags: [negative, vtp, config_file_name] + +# - name: Test VTP configuration file name validation - boolean value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ vtp_vars_map.test_vtp_config_file_name_boolean }}" +# register: 
result_vtp_config_file_name_boolean +# ignore_errors: true +# tags: [negative, vtp, config_file_name] + +# - name: Assert VTP configuration file name boolean validation failed +# assert: +# that: +# - result_vtp_config_file_name_boolean.failed == true +# - "'must be of type string' in result_vtp_config_file_name_boolean.msg" +# fail_msg: "VTP configuration file name boolean validation should have failed" +# success_msg: "VTP configuration file name boolean validation correctly failed" +# tags: [negative, vtp, config_file_name] + +# # ############################################# +# # VTP Source Interface Tests +# # ############################################# + +# - name: Test VTP source interface validation - integer value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ vtp_vars_map.test_vtp_source_interface_integer }}" +# register: result_vtp_source_interface_integer +# ignore_errors: true +# tags: [negative, vtp, source_interface] + +# - name: Assert VTP source interface integer validation failed +# assert: +# that: +# - result_vtp_source_interface_integer.failed == true +# - "'must be of type string' in result_vtp_source_interface_integer.msg" +# fail_msg: "VTP source interface integer validation should have failed" +# success_msg: "VTP source interface integer validation correctly failed" +# tags: [negative, vtp, source_interface] + +# - name: Test VTP source interface validation - boolean value should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ vtp_vars_map.test_vtp_source_interface_boolean }}" +# register: result_vtp_source_interface_boolean +# ignore_errors: true +# tags: [negative, vtp, source_interface] + +# - name: Assert VTP source interface boolean validation failed +# assert: +# that: +# - result_vtp_source_interface_boolean.failed == true +# - "'must be of type string' in result_vtp_source_interface_boolean.msg" +# fail_msg: "VTP source interface boolean validation should have failed" +# success_msg: "VTP source interface boolean validation correctly failed" +# tags: [negative, vtp, source_interface] + +# # ############################################# +# # Data Type and Structure Tests +# # ############################################# + +# - name: Test VTP structure validation - string instead of dict should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ vtp_vars_map.test_vtp_invalid_dict_type }}" +# register: result_vtp_invalid_dict_type +# ignore_errors: true +# tags: [negative, vtp, structure] + +# - name: Assert VTP invalid dict type validation failed +# assert: +# that: +# - result_vtp_invalid_dict_type.failed == true +# - "'must be of type dictionary' in result_vtp_invalid_dict_type.msg" +# fail_msg: "VTP invalid dict type validation should have failed" +# success_msg: "VTP invalid dict type validation correctly failed" +# tags: [negative, vtp, structure] + +# - name: Test VTP structure validation - list instead of dict should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ vtp_vars_map.test_vtp_invalid_list_type }}" +# register: result_vtp_invalid_list_type +# ignore_errors: true +# tags: [negative, vtp, structure] + +# - name: Assert VTP invalid list type validation failed +# assert: +# that: +# - result_vtp_invalid_list_type.failed == true +# - "'must be of type dictionary' in 
result_vtp_invalid_list_type.msg" +# fail_msg: "VTP invalid list type validation should have failed" +# success_msg: "VTP invalid list type validation correctly failed" +# tags: [negative, vtp, structure] + +# - name: Display negative test summary +# debug: +# msg: "VTP negative validation tests completed successfully" +# tags: [negative, vtp] + +# # ============================================================================= +# # POSITIVE TEST CASES - VTP CREATION (MERGED STATE) +# # ============================================================================= + +# # ############################################# +# # Single VTP Parameter Tests +# ############################################# + +# - name: Test VTP configuration with mode only +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ vtp_vars_map.test_vtp_create_mode_only }}" +# register: result_vtp_create_mode_only +# tags: [positive, vtp, create, mode] + +# - name: Assert VTP mode only configuration succeeded +# assert: +# that: +# - result_vtp_create_mode_only.failed == false +# - result_vtp_create_mode_only.changed == true +# - "'Successfully' in result_vtp_create_mode_only.response" +# fail_msg: "VTP mode only configuration should have succeeded" +# success_msg: "VTP mode only configuration succeeded" +# tags: [positive, vtp, create, mode] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, vtp, create] + +# - name: Test VTP configuration with version only +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ vtp_vars_map.test_vtp_create_version_only }}" +# register: result_vtp_create_version_only +# tags: [positive, vtp, create, version] + +# - name: Assert VTP version only configuration succeeded +# assert: +# that: +# - result_vtp_create_version_only.failed == false +# - result_vtp_create_version_only.changed == true +# - "'Successfully' in result_vtp_create_version_only.response" +# fail_msg: "VTP version only configuration should have succeeded" +# success_msg: "VTP version only configuration succeeded" +# tags: [positive, vtp, create, version] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, vtp, create] + +# - name: Test VTP configuration with domain name only +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ vtp_vars_map.test_vtp_create_domain_name_only }}" +# register: result_vtp_create_domain_name_only +# tags: [positive, vtp, create, domain_name] + +# - name: Assert VTP domain name only configuration succeeded +# assert: +# that: +# - result_vtp_create_domain_name_only.failed == false +# - result_vtp_create_domain_name_only.changed == true +# - "'Successfully' in result_vtp_create_domain_name_only.response" +# fail_msg: "VTP domain name only configuration should have succeeded" +# success_msg: "VTP domain name only configuration succeeded" +# tags: [positive, vtp, create, domain_name] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, vtp, create] + +# - name: Test VTP configuration with all parameters +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ vtp_vars_map.test_vtp_create_all_params }}" +# register: result_vtp_create_all_params +# tags: [positive, vtp, create, all_params] + +# - name: Assert VTP all parameters configuration 
succeeded +# assert: +# that: +# - result_vtp_create_all_params.failed == false +# - result_vtp_create_all_params.changed == true +# - "'Successfully' in result_vtp_create_all_params.response" +# fail_msg: "VTP all parameters configuration should have succeeded" +# success_msg: "VTP all parameters configuration succeeded" +# tags: [positive, vtp, create, all_params] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, vtp, create] + +# # ============================================================================= +# # POSITIVE TEST CASES - VTP UPDATE (MERGED STATE) +# # ============================================================================= + +# # ############################################# +# # VTP Mode Update Tests +# # ############################################# + +# - name: Test VTP update - change mode to CLIENT +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ vtp_vars_map.test_vtp_update_mode_client }}" +# register: result_vtp_update_mode_client +# tags: [positive, vtp, update, mode] + +# - name: Assert VTP mode update to CLIENT succeeded +# assert: +# that: +# - result_vtp_update_mode_client.failed == false +# - result_vtp_update_mode_client.changed == true +# - "'Successfully' in result_vtp_update_mode_client.response" +# fail_msg: "VTP mode update to CLIENT should have succeeded" +# success_msg: "VTP mode update to CLIENT succeeded" +# tags: [positive, vtp, update, mode] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, vtp, update] + +# - name: Test VTP update - change mode to TRANSPARENT +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ vtp_vars_map.test_vtp_update_mode_transparent }}" +# register: result_vtp_update_mode_transparent +# tags: [positive, vtp, update, mode] + +# - name: Assert VTP mode update to TRANSPARENT succeeded +# assert: +# that: +# - result_vtp_update_mode_transparent.failed == false +# - result_vtp_update_mode_transparent.changed == true +# - "'Successfully' in result_vtp_update_mode_transparent.response" +# fail_msg: "VTP mode update to TRANSPARENT should have succeeded" +# success_msg: "VTP mode update to TRANSPARENT succeeded" +# tags: [positive, vtp, update, mode] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, vtp, update] + +# - name: Test VTP update - change mode to OFF +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ vtp_vars_map.test_vtp_update_mode_off }}" +# register: result_vtp_update_mode_off +# tags: [positive, vtp, update, mode] + +# - name: Assert VTP mode update to OFF succeeded +# assert: +# that: +# - result_vtp_update_mode_off.failed == false +# - result_vtp_update_mode_off.changed == true +# - "'Successfully' in result_vtp_update_mode_off.response" +# fail_msg: "VTP mode update to OFF should have succeeded" +# success_msg: "VTP mode update to OFF succeeded" +# tags: [positive, vtp, update, mode] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, vtp, update] + +# # ############################################# +# # VTP Version Update Tests +# # ############################################# + +# - name: Test VTP update - change version to VERSION_1 +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ 
vtp_vars_map.test_vtp_update_version_1 }}" +# register: result_vtp_update_version_1 +# tags: [positive, vtp, update, version] + +# - name: Assert VTP version update to VERSION_1 succeeded +# assert: +# that: +# - result_vtp_update_version_1.failed == false +# - result_vtp_update_version_1.changed == true +# - "'Successfully' in result_vtp_update_version_1.response" +# fail_msg: "VTP version update to VERSION_1 should have succeeded" +# success_msg: "VTP version update to VERSION_1 succeeded" +# tags: [positive, vtp, update, version] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, vtp, update] + +# - name: Test VTP update - change version to VERSION_2 +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ vtp_vars_map.test_vtp_update_version_2 }}" +# register: result_vtp_update_version_2 +# tags: [positive, vtp, update, version] + +# - name: Assert VTP version update to VERSION_2 succeeded +# assert: +# that: +# - result_vtp_update_version_2.failed == false +# - result_vtp_update_version_2.changed == true +# - "'Successfully' in result_vtp_update_version_2.response" +# fail_msg: "VTP version update to VERSION_2 should have succeeded" +# success_msg: "VTP version update to VERSION_2 succeeded" +# tags: [positive, vtp, update, version] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, vtp, update] + +# - name: Test VTP update - change version to VERSION_3 +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ vtp_vars_map.test_vtp_update_version_3 }}" +# register: result_vtp_update_version_3 +# tags: [positive, vtp, update, version] + +# - name: Assert VTP version update to VERSION_3 succeeded +# assert: +# that: +# - result_vtp_update_version_3.failed == false +# - result_vtp_update_version_3.changed == true +# - "'Successfully' in result_vtp_update_version_3.response" +# fail_msg: "VTP version update to VERSION_3 should have succeeded" +# success_msg: "VTP version update to VERSION_3 succeeded" +# tags: [positive, vtp, update, version] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, vtp, update] + +# # ############################################# +# # VTP Pruning Update Tests +# # ############################################# + +# - name: Test VTP update - enable pruning +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ vtp_vars_map.test_vtp_update_enable_pruning }}" +# register: result_vtp_update_enable_pruning +# tags: [positive, vtp, update, pruning] + +# - name: Assert VTP enable pruning succeeded +# assert: +# that: +# - result_vtp_update_enable_pruning.failed == false +# - result_vtp_update_enable_pruning.changed == true +# - "'Successfully' in result_vtp_update_enable_pruning.response" +# fail_msg: "VTP enable pruning should have succeeded" +# success_msg: "VTP enable pruning succeeded" +# tags: [positive, vtp, update, pruning] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, vtp, update] + +# - name: Test VTP update - disable pruning +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ vtp_vars_map.test_vtp_update_disable_pruning }}" +# register: result_vtp_update_disable_pruning +# tags: [positive, vtp, update, pruning] + +# - name: Assert VTP disable pruning 
succeeded +# assert: +# that: +# - result_vtp_update_disable_pruning.failed == false +# - result_vtp_update_disable_pruning.changed == true +# - "'Successfully' in result_vtp_update_disable_pruning.response" +# fail_msg: "VTP disable pruning should have succeeded" +# success_msg: "VTP disable pruning succeeded" +# tags: [positive, vtp, update, pruning] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, vtp, update] + +# # ############################################# +# # VTP File and Interface Update Tests +# # ############################################# + +# - name: Test VTP update - update configuration file name +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ vtp_vars_map.test_vtp_update_config_file_name }}" +# register: result_vtp_update_config_file_name +# tags: [positive, vtp, update, config_file_name] +# ignore_errors: true + +# - name: Assert VTP configuration file name update rejected (expected failure) +# assert: +# that: +# - result_vtp_update_config_file_name.failed == true +# - result_vtp_update_config_file_name.changed == false +# - "'Must match ASCII' in result_vtp_update_config_file_name.response" +# fail_msg: "VTP configuration file name update should have been rejected" +# success_msg: "VTP configuration file name update was rejected as expected" +# tags: [positive, vtp, update, config_file_name] + +# - name: Test VTP update - update source interface +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ vtp_vars_map.test_vtp_update_source_interface }}" +# register: result_vtp_update_source_interface +# tags: [positive, vtp, update, source_interface] +# ignore_errors: true + +# - name: Assert VTP source interface update rejected (expected failure) +# assert: +# that: +# - result_vtp_update_source_interface.failed == true +# - result_vtp_update_source_interface.changed == false +# - "'Payload format is invalid' in result_vtp_update_source_interface.response" +# fail_msg: "VTP source interface update should have been rejected" +# success_msg: "VTP source interface update was rejected as expected" +# tags: [positive, vtp, update, source_interface] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, vtp, update] + +# # ############################################# +# # Cleanup Test Configurations +# # ############################################# + +# # Clean up after setting an empty configuration, because the deployed configuration then returns empty. 
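Note: the vtp_vars_map entries referenced throughout this test file come from vars_wca_test_feature_vtp.yml, which is not included in this excerpt of the diff. As a rough, non-authoritative sketch only (the device fields and the nesting under layer2_configuration mirror the cleanup tasks in this file; the leaf key names under vtp are assumptions, not confirmed by this diff), an entry such as test_vtp_create_all_params might be shaped like this:

test_vtp_create_all_params:                     # assumed key name, as referenced by the tasks above
  ip_address: "204.1.2.3"                       # same test device used in the cleanup tasks
  device_collection_status_check: false
  layer2_configuration:
    vtp:
      mode: SERVER                              # choices per the summary: SERVER, CLIENT, TRANSPARENT, OFF
      version: VERSION_2                        # choices per the summary: VERSION_1, VERSION_2, VERSION_3
      domain_name: TEST_DOMAIN
      pruning: true
      configuration_file_name: vtp_config.dat   # leaf key names here are illustrative assumptions
      source_interface: Loopback0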
+# - name: Cleanup - Reset VTP to default configuration +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: deleted +# config: +# - ip_address: "204.1.2.3" +# device_collection_status_check: false +# layer2_configuration: +# vtp: {} +# register: result_vtp_cleanup +# ignore_errors: true +# tags: [cleanup, vtp] + +# # ############################################# +# # VTP Domain Name Modification Tests +# # ############################################# + +# - name: Test VTP domain name empty string update - should fail +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ vtp_vars_map.test_vtp_domain_name_empty_string }}" +# register: result_vtp_domain_name_empty_string +# ignore_errors: true +# tags: [negative, vtp, domain_name, empty_string] + +# - name: Assert VTP domain name empty string update failed +# assert: +# that: +# - result_vtp_domain_name_empty_string.failed == true +# - "'Must match ASCII characters' in result_vtp_domain_name_empty_string.msg" +# fail_msg: "VTP domain name empty string update should have failed" +# success_msg: "VTP domain name empty string update correctly failed" +# tags: [negative, vtp, domain_name, empty_string] + +# # ============================================================================= +# # POSITIVE TEST CASES - VTP RESET (DELETED STATE) +# # ============================================================================= + +# # ############################################# +# # VTP Reset Tests +# # ############################################# + +# - name: Test VTP reset - empty configuration (deleted state resets to defaults) +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: deleted +# config: +# - "{{ vtp_vars_map.test_vtp_reset_empty }}" +# register: result_vtp_reset_empty +# tags: [positive, vtp, reset, empty] + +# - name: Assert VTP empty reset succeeded +# assert: +# that: +# - result_vtp_reset_empty.failed == false +# - result_vtp_reset_empty.changed == true +# - "'Successfully' in result_vtp_reset_empty.response" +# fail_msg: "VTP empty reset should have succeeded" +# success_msg: "VTP empty reset succeeded" +# tags: [positive, vtp, reset, empty] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, vtp, reset] + +# - name: Test VTP update - all parameters (setup for reset test) +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ vtp_vars_map.test_vtp_create_all_params }}" +# register: result_vtp_update_all_params_setup +# tags: [positive, vtp, reset] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, vtp, reset] + +# - name: Test VTP reset - with existing parameters (deleted state resets to defaults) +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: deleted +# config: +# - "{{ vtp_vars_map.test_vtp_reset_with_params }}" +# register: result_vtp_reset_with_params +# tags: [positive, vtp, reset, params] + +# - name: Assert VTP reset with params succeeded +# assert: +# that: +# - result_vtp_reset_with_params.failed == false +# - result_vtp_reset_with_params.changed == true +# - "'Successfully' in result_vtp_reset_with_params.response" +# fail_msg: "VTP reset with params should have succeeded" +# success_msg: "VTP reset with params succeeded" +# tags: [positive, vtp, reset, params] + +# - name: Pause to allow configuration to settle +# 
pause: +# seconds: 10 +# tags: [positive, vtp, reset] + +# # ============================================================================= +# # BOUNDARY VALUE TESTS +# # ============================================================================= + +# # ############################################# +# # Boundary Value Tests +# # ############################################# + +# - name: Test VTP with maximum length domain name +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ vtp_vars_map.test_vtp_boundary_max_domain_name }}" +# register: result_vtp_boundary_max_domain_name +# tags: [positive, vtp, boundary, max_domain] + +# - name: Assert VTP maximum domain name configuration succeeded +# assert: +# that: +# - result_vtp_boundary_max_domain_name.failed == false +# - result_vtp_boundary_max_domain_name.changed == true +# - "'Successfully' in result_vtp_boundary_max_domain_name.response" +# fail_msg: "VTP maximum domain name configuration should have succeeded" +# success_msg: "VTP maximum domain name configuration succeeded" +# tags: [positive, vtp, boundary, max_domain] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, vtp, boundary] + +# - name: Test VTP with maximum length configuration file name +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ vtp_vars_map.test_vtp_boundary_max_config_file_name }}" +# register: result_vtp_boundary_max_config_file_name +# tags: [positive, vtp, boundary, max_config_file] + +# - name: Assert VTP maximum configuration file name succeeded +# assert: +# that: +# - result_vtp_boundary_max_config_file_name.failed == false +# - result_vtp_boundary_max_config_file_name.changed == true +# - "'Successfully' in result_vtp_boundary_max_config_file_name.response" +# fail_msg: "VTP maximum configuration file name should have succeeded" +# success_msg: "VTP maximum configuration file name succeeded" +# tags: [positive, vtp, boundary, max_config_file] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, vtp, boundary] + +# - name: Test VTP with minimal configuration +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ vtp_vars_map.test_vtp_boundary_minimal_config }}" +# register: result_vtp_boundary_minimal_config +# tags: [positive, vtp, boundary, minimal] + +# - name: Assert VTP minimal configuration succeeded +# assert: +# that: +# - result_vtp_boundary_minimal_config.failed == false +# - result_vtp_boundary_minimal_config.changed == true +# - "'Successfully' in result_vtp_boundary_minimal_config.response" +# fail_msg: "VTP minimal configuration should have succeeded" +# success_msg: "VTP minimal configuration succeeded" +# tags: [positive, vtp, boundary, minimal] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, vtp, boundary] + +# # ============================================================================= +# # SPECIAL CONFIGURATION TESTS +# # ============================================================================= + +# # ############################################# +# # Special Configuration Tests +# # ############################################# + +# - name: Test VTP with version-specific configuration (VERSION_1 with specific features) +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# 
config: +# - "{{ vtp_vars_map.test_vtp_version_1_specific }}" +# register: result_vtp_version_1_specific +# tags: [positive, vtp, special, version_specific] + +# - name: Assert VTP VERSION_1 specific configuration succeeded +# assert: +# that: +# - result_vtp_version_1_specific.failed == false +# - result_vtp_version_1_specific.changed == true +# - "'Successfully' in result_vtp_version_1_specific.response" +# fail_msg: "VTP VERSION_1 specific configuration should have succeeded" +# success_msg: "VTP VERSION_1 specific configuration succeeded" +# tags: [positive, vtp, special, version_specific] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, vtp, special] + +# - name: Test VTP with version-specific configuration (VERSION_2 with pruning) +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ vtp_vars_map.test_vtp_version_2_with_pruning }}" +# register: result_vtp_version_2_with_pruning +# tags: [positive, vtp, special, version_specific] + +# - name: Assert VTP VERSION_2 with pruning configuration succeeded +# assert: +# that: +# - result_vtp_version_2_with_pruning.failed == false +# - result_vtp_version_2_with_pruning.changed == true +# - "'Successfully' in result_vtp_version_2_with_pruning.response" +# fail_msg: "VTP VERSION_2 with pruning configuration should have succeeded" +# success_msg: "VTP VERSION_2 with pruning configuration succeeded" +# tags: [positive, vtp, special, version_specific] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, vtp, special] + +# - name: Test VTP with server mode comprehensive configuration +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ vtp_vars_map.test_vtp_server_mode_comprehensive }}" +# register: result_vtp_server_mode_comprehensive +# tags: [positive, vtp, special, server_mode] + +# - name: Assert VTP server mode comprehensive configuration succeeded +# assert: +# that: +# - result_vtp_server_mode_comprehensive.failed == false +# - result_vtp_server_mode_comprehensive.changed == true +# - "'Successfully' in result_vtp_server_mode_comprehensive.response" +# fail_msg: "VTP server mode comprehensive configuration should have succeeded" +# success_msg: "VTP server mode comprehensive configuration succeeded" +# tags: [positive, vtp, special, server_mode] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, vtp, special] + +# - name: Test VTP with transparent mode configuration +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ vtp_vars_map.test_vtp_transparent_mode_config }}" +# register: result_vtp_transparent_mode_config +# tags: [positive, vtp, special, transparent_mode] + +# - name: Assert VTP transparent mode configuration succeeded +# assert: +# that: +# - result_vtp_transparent_mode_config.failed == false +# - result_vtp_transparent_mode_config.changed == true +# - "'Successfully' in result_vtp_transparent_mode_config.response" +# fail_msg: "VTP transparent mode configuration should have succeeded" +# success_msg: "VTP transparent mode configuration succeeded" +# tags: [positive, vtp, special, transparent_mode] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 10 +# tags: [positive, vtp, special] + +# # ============================================================================= +# # ADVANCED POSITIVE TEST 
SCENARIOS +# # ============================================================================= + +# # ############################################# +# # Idempotency Tests +# # ############################################# + +# - name: Test VTP Idempotency - Configure same VTP settings twice +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ vtp_vars_map.test_vtp_create_all_params }}" +# register: result_vtp_idempotency_first +# tags: [positive, vtp, idempotency] + +# - name: Pause to allow configuration to settle +# pause: +# seconds: 40 +# tags: [positive, vtp, idempotency] + +# - name: Test VTP Idempotency - Configure same VTP settings again (should be idempotent) +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ vtp_vars_map.test_vtp_create_all_params }}" +# register: result_vtp_idempotency_second +# tags: [positive, vtp, idempotency] + +# - name: Assert VTP Idempotency - Second configuration should not change +# assert: +# that: +# - result_vtp_idempotency_second.failed == false +# - result_vtp_idempotency_second.changed == false +# fail_msg: "VTP idempotency test failed" +# success_msg: "VTP idempotency test succeeded" +# tags: [positive, vtp, idempotency] + +# # ############################################# +# # Cleanup Test Configurations +# # ############################################# + +# - name: Cleanup - Reset VTP to default configuration +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: deleted +# config: +# - ip_address: "204.1.2.3" +# device_collection_status_check: false +# layer2_configuration: +# vtp: {} +# register: result_vtp_cleanup +# ignore_errors: true +# tags: [cleanup, vtp] + +# # ############################################# +# # Final Reset to Transparent Mode +# # ############################################# + +# - name: Test VTP update - change mode to TRANSPARENT +# cisco.dnac.wired_campus_automation_workflow_manager: +# <<: *dnac_login +# state: merged +# config: +# - "{{ vtp_vars_map.test_vtp_update_mode_transparent }}" +# register: result_vtp_update_mode_transparent +# tags: [positive, vtp, update, mode] diff --git a/tests/integration/ccc_wired_campus_automation_management/tests/test_wired_campus_automation_workflow_management.yml b/tests/integration/ccc_wired_campus_automation_management/tests/test_wired_campus_automation_workflow_management.yml new file mode 100644 index 0000000000..27b8dce179 --- /dev/null +++ b/tests/integration/ccc_wired_campus_automation_management/tests/test_wired_campus_automation_workflow_management.yml @@ -0,0 +1,170 @@ +--- +# =================================================================================================== +# COMPREHENSIVE POSITIVE TESTS FOR ALL WIRED CAMPUS AUTOMATION FEATURES +# =================================================================================================== +# +# This test suite validates all supported features in comprehensive positive scenarios. +# +# TEST CATEGORIES: +# +# 1. COMPREHENSIVE MERGED STATE TESTS +# - All features combined in single configuration +# - Minimal essential features configuration +# - Medium complexity configuration +# - Boundary value testing (min/max values) +# - Special boolean feature testing +# +# 2. 
COMPREHENSIVE DELETED STATE TESTS +# - All supported features deletion/reset +# - Minimal essential features deletion +# - Medium complexity deletion +# - Complete cleanup operations +# +# FEATURES COVERED: +# - VLANs (create, update, delete) +# - CDP (create, update, reset) +# - LLDP (create, update, reset) +# - STP (create, update, reset/delete instances) +# - VTP (create, update, reset) +# - DHCP Snooping (create, update, reset) +# - IGMP Snooping (create, update, reset) +# - MLD Snooping (create, update, reset) +# - Authentication (create, update, reset) +# - Logical Ports (create, update) +# - Port Configuration (create, update) +# +# EXPECTED BEHAVIORS: +# - All positive tests should succeed with changed=true +# - Idempotency tests should show changed=false on second run +# - Delete operations should successfully reset/remove configurations +# - Complex configurations should be applied atomically +# +# =================================================================================================== + +- debug: msg="Starting Comprehensive Positive Tests for All Wired Campus Automation Features" +- debug: msg="Role Path {{ role_path }}" + +- block: + - name: Load comprehensive test variables + include_vars: + file: "{{ role_path }}/vars/vars_wired_campus_automation_workflow_management.yml" + name: comprehensive_vars_map + vars: + dnac_login: &dnac_login + dnac_host: "{{ dnac_host }}" + dnac_username: "{{ dnac_username }}" + dnac_password: "{{ dnac_password }}" + dnac_verify: "{{ dnac_verify }}" + dnac_port: "{{ dnac_port }}" + dnac_version: "{{ dnac_version }}" + dnac_debug: "{{ dnac_debug }}" + dnac_log: true + dnac_log_level: DEBUG + config_verify: true + dnac_api_task_timeout: "{{ dnac_api_task_timeout }}" + dnac_log_append: false + + # # ============================================================================= + # # INITIAL CLEANUP - Ensure clean starting state + # # ============================================================================= + + # - name: Initial Cleanup - Reset all features to default state + # cisco.dnac.wired_campus_automation_workflow_manager: + # <<: *dnac_login + # state: deleted + # config: + # - "{{ comprehensive_vars_map.test_complete_cleanup_all_features }}" + # register: result_initial_cleanup + # ignore_errors: true + # tags: [cleanup, initial] + + # - name: Display initial cleanup result + # debug: + # msg: | + # Initial cleanup completed: + # Failed: {{ result_initial_cleanup.failed | default(false) }} + # Changed: {{ result_initial_cleanup.changed | default(false) }} + # tags: [cleanup, initial] + + # - name: Wait 30 seconds after initial cleanup for system stabilization + # pause: + # seconds: 30 + # tags: [cleanup, initial] + + # ============================================================================= + # COMPREHENSIVE MERGED STATE TESTS + # ============================================================================= + + # - name: "COMPREHENSIVE TEST 1: All Features Combined Configuration" + # cisco.dnac.wired_campus_automation_workflow_manager: + # <<: *dnac_login + # state: merged + # config: + # - "{{ comprehensive_vars_map.test_comprehensive_merged_all_features }}" + # register: result_comprehensive_all_features + # tags: [positive, comprehensive, merged, all_features] + + # - name: Assert comprehensive all features configuration succeeded + # assert: + # that: + # - result_comprehensive_all_features.failed == false + # - result_comprehensive_all_features.changed == true + # - "'Successfully' in 
result_comprehensive_all_features.response" + # fail_msg: "Comprehensive all features configuration failed" + # success_msg: "Comprehensive all features configuration succeeded" + # tags: [positive, comprehensive, merged, all_features] + + # - name: Wait 20 seconds for comprehensive configuration to settle + # pause: + # seconds: 20 + # tags: [positive, comprehensive, merged, all_features] + + # # ============================================================================= + # # COMPREHENSIVE DELETED STATE TESTS + # # ============================================================================= + + # - name: "DELETION TEST 1: Comprehensive Features Deletion" + # cisco.dnac.wired_campus_automation_workflow_manager: + # <<: *dnac_login + # state: deleted + # config: + # - "{{ comprehensive_vars_map.test_comprehensive_deleted_all_features }}" + # register: result_comprehensive_deletion + # tags: [positive, comprehensive, deleted, all_features] + + # - name: Assert comprehensive features deletion succeeded + # assert: + # that: + # - result_comprehensive_deletion.failed == false + # - result_comprehensive_deletion.changed == true or result_comprehensive_deletion.changed == false + # - "'Successfully' in result_comprehensive_deletion.response or 'completed successfully' in result_comprehensive_deletion.response" + # fail_msg: "Comprehensive features deletion failed" + # success_msg: "Comprehensive features deletion succeeded" + # tags: [positive, comprehensive, deleted, all_features] + + # - name: Wait 15 seconds before minimal deletion test + # pause: + # seconds: 15 + # tags: [positive, comprehensive, deleted] + + # # ============================================================================= + # # FINAL CLEANUP + # # ============================================================================= + + # - name: "FINAL CLEANUP: Complete System Reset" + # cisco.dnac.wired_campus_automation_workflow_manager: + # <<: *dnac_login + # state: deleted + # config: + # - "{{ comprehensive_vars_map.test_complete_cleanup_all_features }}" + # register: result_final_cleanup + # tags: [positive, comprehensive, cleanup, final] + + # - name: Assert final cleanup succeeded + # assert: + # that: + # - result_final_cleanup.failed == false + # - result_final_cleanup.changed == true or result_final_cleanup.changed == false + # fail_msg: "Final cleanup failed" + # success_msg: "Final cleanup succeeded" + # tags: [positive, comprehensive, cleanup, final] diff --git a/tests/integration/ccc_wired_campus_automation_management/vars/vars_wca_test_feature_authentication.yml b/tests/integration/ccc_wired_campus_automation_management/vars/vars_wca_test_feature_authentication.yml new file mode 100644 index 0000000000..2d3e113320 --- /dev/null +++ b/tests/integration/ccc_wired_campus_automation_management/vars/vars_wca_test_feature_authentication.yml @@ -0,0 +1,224 @@ +--- +# Common device information +device_info: &device_info + ip_address: "204.1.2.3" + device_collection_status_check: false + +################################################################################################################### +# Negative Validation Tests for Authentication + +############################################# +# Authentication Enable Tests +############################################# + +# Test invalid authentication enable - string instead of boolean +test_authentication_enable_string: + <<: *device_info + layer2_configuration: + authentication: + enable_dot1x_authentication: "true" # Invalid: string instead of boolean + +# Test 
invalid authentication enable - integer instead of boolean +test_authentication_enable_integer: + <<: *device_info + layer2_configuration: + authentication: + enable_dot1x_authentication: 1 # Invalid: integer instead of boolean + +# Test invalid authentication enable - list instead of boolean +test_authentication_enable_list: + <<: *device_info + layer2_configuration: + authentication: + enable_dot1x_authentication: [true] # Invalid: list instead of boolean + +############################################# +# Authentication Mode Tests +############################################# + +# Test invalid authentication config mode - integer instead of string +test_authentication_config_mode_integer: + <<: *device_info + layer2_configuration: + authentication: + authentication_config_mode: 1 # Invalid: integer instead of string + +# Test invalid authentication config mode - boolean instead of string +test_authentication_config_mode_boolean: + <<: *device_info + layer2_configuration: + authentication: + authentication_config_mode: true # Invalid: boolean instead of string + +# Test invalid authentication config mode - list instead of string +test_authentication_config_mode_list: + <<: *device_info + layer2_configuration: + authentication: + authentication_config_mode: ["LEGACY"] # Invalid: list instead of string + +# Test invalid authentication config mode - invalid choice +test_authentication_config_mode_invalid_choice: + <<: *device_info + layer2_configuration: + authentication: + authentication_config_mode: "INVALID_MODE" # Invalid: not in choices + +# Test invalid authentication config mode - partial match +test_authentication_config_mode_partial: + <<: *device_info + layer2_configuration: + authentication: + authentication_config_mode: "LEG" # Invalid: partial string + +############################################# +# Data Type and Structure Tests +############################################# + +# Test invalid authentication configuration - string instead of dictionary +test_authentication_invalid_dict_type: + <<: *device_info + layer2_configuration: + authentication: "invalid_authentication_string" # Invalid: string instead of dictionary + +# Test invalid authentication configuration - list instead of dictionary +test_authentication_invalid_list_type: + <<: *device_info + layer2_configuration: + authentication: ["invalid", "authentication", "list"] # Invalid: list instead of dictionary + +# Test invalid authentication configuration - integer instead of dictionary +test_authentication_invalid_integer_type: + <<: *device_info + layer2_configuration: + authentication: 12345 # Invalid: integer instead of dictionary + +############################################# +# Mode Change Restriction Tests +############################################# + +# Test authentication mode change restriction - attempt to change from LEGACY to NEW_STYLE +test_authentication_mode_change_legacy_to_new: + <<: *device_info + layer2_configuration: + authentication: + authentication_config_mode: "NEW_STYLE" # This will be used after setting LEGACY first + +# Test authentication mode change restriction - attempt to change from NEW_STYLE to LEGACY +test_authentication_mode_change_new_to_legacy: + <<: *device_info + layer2_configuration: + authentication: + authentication_config_mode: "LEGACY" # This will be used after setting NEW_STYLE first + +################################################################################################################### +# Positive Validation Tests for Authentication + 
+############################################# +# POSITIVE TEST CASES - CREATE +############################################# + +# Test authentication configuration with enable only +test_authentication_create_enable_only: + <<: *device_info + layer2_configuration: + authentication: + enable_dot1x_authentication: true + +# Test authentication configuration with disable only +test_authentication_create_disable_only: + <<: *device_info + layer2_configuration: + authentication: + enable_dot1x_authentication: false + +# Test authentication configuration with NEW_STYLE mode only +test_authentication_create_new_style_mode_only: + <<: *device_info + layer2_configuration: + authentication: + authentication_config_mode: "NEW_STYLE" + +# Test authentication configuration with enable and LEGACY mode +test_authentication_create_enable_legacy: + <<: *device_info + layer2_configuration: + authentication: + enable_dot1x_authentication: true + authentication_config_mode: "LEGACY" + +# Test authentication configuration with enable and NEW_STYLE mode +test_authentication_create_enable_new_style: + <<: *device_info + layer2_configuration: + authentication: + enable_dot1x_authentication: true + authentication_config_mode: "NEW_STYLE" + +# Test authentication configuration with disable and LEGACY mode +test_authentication_create_disable_legacy: + <<: *device_info + layer2_configuration: + authentication: + enable_dot1x_authentication: false + authentication_config_mode: "LEGACY" + +# Test authentication configuration with disable and NEW_STYLE mode +test_authentication_create_disable_new_style: + <<: *device_info + layer2_configuration: + authentication: + enable_dot1x_authentication: false + authentication_config_mode: "NEW_STYLE" + +############################################# +# POSITIVE TEST CASES - UPDATE +############################################# + +# Test authentication update - enable authentication +test_authentication_update_enable: + <<: *device_info + layer2_configuration: + authentication: + enable_dot1x_authentication: true + +# Test authentication update - disable authentication +test_authentication_update_disable: + <<: *device_info + layer2_configuration: + authentication: + enable_dot1x_authentication: false + +# Test authentication update - enable authentication with existing mode +test_authentication_update_enable_with_mode: + <<: *device_info + layer2_configuration: + authentication: + enable_dot1x_authentication: true + authentication_config_mode: "LEGACY" + +# Test authentication update - disable authentication with existing mode +test_authentication_update_disable_with_mode: + <<: *device_info + layer2_configuration: + authentication: + enable_dot1x_authentication: false + authentication_config_mode: "LEGACY" + +############################################# +# POSITIVE TEST CASES - RESET +############################################# + +# Test authentication reset - empty configuration (deleted state resets to defaults) +test_authentication_reset_empty: + <<: *device_info + layer2_configuration: + authentication: {} + +# Test authentication reset - with existing parameters (deleted state resets to defaults) +test_authentication_reset_with_params: + <<: *device_info + layer2_configuration: + authentication: + enable_dot1x_authentication: true # Parameters provided but should reset to defaults in delete state + authentication_config_mode: "LEGACY" diff --git a/tests/integration/ccc_wired_campus_automation_management/vars/vars_wca_test_feature_cdp.yml 
b/tests/integration/ccc_wired_campus_automation_management/vars/vars_wca_test_feature_cdp.yml new file mode 100644 index 0000000000..72e7dcf436 --- /dev/null +++ b/tests/integration/ccc_wired_campus_automation_management/vars/vars_wca_test_feature_cdp.yml @@ -0,0 +1,316 @@ +--- +# Common device information +device_info: &device_info + ip_address: "204.1.2.3" + device_collection_status_check: false + +################################################################################################################### +# Negative Validation Tests for CDP + +############################################# +# CDP Admin Status Tests +############################################# + +# Test invalid admin status - string instead of boolean +test_cdp_admin_status_string: + <<: *device_info + layer2_configuration: + cdp: + cdp_admin_status: "true" # Invalid: string instead of boolean + +# Test invalid admin status - integer instead of boolean +test_cdp_admin_status_integer: + <<: *device_info + layer2_configuration: + cdp: + cdp_admin_status: 1 # Invalid: integer instead of boolean + +############################################# +# CDP Hold Time Tests +############################################# + +# Test CDP hold time below minimum (9) +test_cdp_hold_time_below_min: + <<: *device_info + layer2_configuration: + cdp: + cdp_hold_time: 9 # Invalid: below minimum of 10 + +# Test CDP hold time above maximum (256) +test_cdp_hold_time_above_max: + <<: *device_info + layer2_configuration: + cdp: + cdp_hold_time: 256 # Invalid: above maximum of 255 + +# Test CDP hold time as string +test_cdp_hold_time_string: + <<: *device_info + layer2_configuration: + cdp: + cdp_hold_time: "180" # Invalid: string instead of integer + +# Test CDP hold time as float +test_cdp_hold_time_float: + <<: *device_info + layer2_configuration: + cdp: + cdp_hold_time: 180.5 # Invalid: float instead of integer + +# Test CDP hold time negative +test_cdp_hold_time_negative: + <<: *device_info + layer2_configuration: + cdp: + cdp_hold_time: -10 # Invalid: negative value + +############################################# +# CDP Timer Tests +############################################# + +# Test CDP timer below minimum (4) +test_cdp_timer_below_min: + <<: *device_info + layer2_configuration: + cdp: + cdp_timer: 4 # Invalid: below minimum of 5 + +# Test CDP timer above maximum (255) +test_cdp_timer_above_max: + <<: *device_info + layer2_configuration: + cdp: + cdp_timer: 255 # Invalid: above maximum of 254 + +# Test CDP timer as string +test_cdp_timer_string: + <<: *device_info + layer2_configuration: + cdp: + cdp_timer: "60" # Invalid: string instead of integer + +# Test CDP timer as float +test_cdp_timer_float: + <<: *device_info + layer2_configuration: + cdp: + cdp_timer: 60.5 # Invalid: float instead of integer + +# Test CDP timer negative +test_cdp_timer_negative: + <<: *device_info + layer2_configuration: + cdp: + cdp_timer: -5 # Invalid: negative value + +############################################# +# CDP Advertise V2 Tests +############################################# + +# Test invalid advertise v2 - string instead of boolean +test_cdp_advertise_v2_string: + <<: *device_info + layer2_configuration: + cdp: + cdp_advertise_v2: "true" # Invalid: string instead of boolean + +# Test invalid advertise v2 - integer instead of boolean +test_cdp_advertise_v2_integer: + <<: *device_info + layer2_configuration: + cdp: + cdp_advertise_v2: 1 # Invalid: integer instead of boolean + +############################################# +# CDP Log Duplex 
Mismatch Tests +############################################# + +# Test invalid log duplex mismatch - string instead of boolean +test_cdp_log_duplex_mismatch_string: + <<: *device_info + layer2_configuration: + cdp: + cdp_log_duplex_mismatch: "false" # Invalid: string instead of boolean + +# Test invalid log duplex mismatch - integer instead of boolean +test_cdp_log_duplex_mismatch_integer: + <<: *device_info + layer2_configuration: + cdp: + cdp_log_duplex_mismatch: 0 # Invalid: integer instead of boolean + +############################################# +# Data Type and Structure Tests +############################################# + +# Test invalid CDP configuration - string instead of dictionary +test_cdp_invalid_dict_type: + <<: *device_info + layer2_configuration: + cdp: "invalid_cdp_string" # Invalid: string instead of dictionary + +# Test invalid CDP configuration - list instead of dictionary +test_cdp_invalid_list_type: + <<: *device_info + layer2_configuration: + cdp: ["invalid", "cdp", "list"] # Invalid: list instead of dictionary + +############################################# +# Edge Case Tests +############################################# + +# Test CDP hold time equal to timer (should be valid but not recommended) +test_cdp_hold_time_equal_timer: + <<: *device_info + layer2_configuration: + cdp: + cdp_hold_time: 60 + cdp_timer: 60 # Edge case: hold_time equals timer + +# Test CDP hold time less than timer (should be valid but not recommended) +test_cdp_hold_time_less_than_timer: + <<: *device_info + layer2_configuration: + cdp: + cdp_hold_time: 30 + cdp_timer: 60 # Edge case: hold_time less than timer + +################################################################################################################### +# Positive Validation Tests for CDP + +############################################# +# POSITIVE TEST CASES - CREATE +############################################# + +# Test CDP configuration with admin status only +test_cdp_create_admin_status_only: + <<: *device_info + layer2_configuration: + cdp: + cdp_admin_status: false + +# Test CDP configuration with all parameters +test_cdp_create_all_params: + <<: *device_info + layer2_configuration: + cdp: + cdp_admin_status: true + cdp_hold_time: 180 + cdp_timer: 60 + cdp_advertise_v2: true + cdp_log_duplex_mismatch: true + +# Test CDP configuration with custom timers +test_cdp_create_custom_timers: + <<: *device_info + layer2_configuration: + cdp: + cdp_hold_time: 120 + cdp_timer: 30 + +# Test CDP disabled configuration +test_cdp_create_disabled: + <<: *device_info + layer2_configuration: + cdp: + cdp_admin_status: false + cdp_advertise_v2: false + cdp_log_duplex_mismatch: false + +############################################# +# POSITIVE TEST CASES - UPDATE +############################################# + +# Test CDP update - enable CDP +test_cdp_update_enable: + <<: *device_info + layer2_configuration: + cdp: + cdp_admin_status: true + +# Test CDP update - modify timers +test_cdp_update_timers: + <<: *device_info + layer2_configuration: + cdp: + cdp_hold_time: 240 + cdp_timer: 90 + +# Test CDP update - disable features +test_cdp_update_disable_features: + <<: *device_info + layer2_configuration: + cdp: + cdp_advertise_v2: true + cdp_log_duplex_mismatch: true + +# Test CDP update - all parameters +test_cdp_update_all_params: + <<: *device_info + layer2_configuration: + cdp: + cdp_admin_status: false + cdp_hold_time: 150 + cdp_timer: 45 + cdp_advertise_v2: false + cdp_log_duplex_mismatch: false + 
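+# Usage sketch (illustrative only): these CDP test maps are expected to be loaded with include_vars
+# and passed to the workflow manager module, mirroring the commented VTP tests earlier in this change.
+# The vars-map name (cdp_vars_map) and task/register names below are assumptions, not part of this file:
+#
+#   - name: Apply CDP update - all parameters
+#     cisco.dnac.wired_campus_automation_workflow_manager:
+#       <<: *dnac_login
+#       state: merged
+#       config:
+#         - "{{ cdp_vars_map.test_cdp_update_all_params }}"
+#     register: result_cdp_update_all_params
+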
+############################################# +# POSITIVE TEST CASES - DELETE +############################################# + +# Test CDP deletion/reset - empty configuration +test_cdp_delete_empty: + <<: *device_info + layer2_configuration: + cdp: {} + +# Test CDP deletion/reset - with existing parameters (should reset to defaults) +test_cdp_delete_with_params: + <<: *device_info + layer2_configuration: + cdp: + cdp_admin_status: true # Parameters provided but should reset to defaults in delete state + cdp_hold_time: 180 + cdp_timer: 60 + +############################################# +# Boundary Value Tests +############################################# + +# Test CDP with minimum valid values +test_cdp_boundary_min_values: + <<: *device_info + layer2_configuration: + cdp: + cdp_hold_time: 10 # Minimum valid + cdp_timer: 5 # Minimum valid + +# Test CDP with maximum valid values +test_cdp_boundary_max_values: + <<: *device_info + layer2_configuration: + cdp: + cdp_hold_time: 255 # Maximum valid + cdp_timer: 254 # Maximum valid + +# Test CDP with recommended values (hold_time > timer) +test_cdp_recommended_values: + <<: *device_info + layer2_configuration: + cdp: + cdp_hold_time: 180 # 3x timer (recommended) + cdp_timer: 60 + +############################################# +# Special Configuration Tests +############################################# + +# Test CDP with null values (should be treated as not provided) +test_cdp_with_nulls: + <<: *device_info + layer2_configuration: + cdp: + cdp_admin_status: null + cdp_hold_time: 180 + cdp_timer: null diff --git a/tests/integration/ccc_wired_campus_automation_management/vars/vars_wca_test_feature_dhcp_snooping.yml b/tests/integration/ccc_wired_campus_automation_management/vars/vars_wca_test_feature_dhcp_snooping.yml new file mode 100644 index 0000000000..b582f7112c --- /dev/null +++ b/tests/integration/ccc_wired_campus_automation_management/vars/vars_wca_test_feature_dhcp_snooping.yml @@ -0,0 +1,414 @@ +--- +# Common device information +device_info: &device_info + ip_address: "204.1.2.3" + device_collection_status_check: false + +################################################################################################################### +# Negative Validation Tests for DHCP Snooping + +############################################# +# DHCP Admin Status Tests +############################################# + +# Test invalid DHCP admin status - string instead of boolean +test_dhcp_snooping_admin_status_string: + <<: *device_info + layer2_configuration: + dhcp_snooping: + dhcp_admin_status: "true" # Invalid: string instead of boolean + +# Test invalid DHCP admin status - integer instead of boolean +test_dhcp_snooping_admin_status_integer: + <<: *device_info + layer2_configuration: + dhcp_snooping: + dhcp_admin_status: 1 # Invalid: integer instead of boolean + +############################################# +# DHCP Snooping VLANs Tests +############################################# + +# Test invalid DHCP snooping VLANs - string instead of list +test_dhcp_snooping_vlans_string: + <<: *device_info + layer2_configuration: + dhcp_snooping: + dhcp_snooping_vlans: "100,200" # Invalid: string instead of list + +# Test invalid DHCP snooping VLANs - integer instead of list +test_dhcp_snooping_vlans_integer: + <<: *device_info + layer2_configuration: + dhcp_snooping: + dhcp_snooping_vlans: 100 # Invalid: integer instead of list + +# Test invalid DHCP snooping VLANs - boolean instead of list +test_dhcp_snooping_vlans_boolean: + <<: *device_info + 
layer2_configuration: + dhcp_snooping: + dhcp_snooping_vlans: true # Invalid: boolean instead of list + +# Test invalid DHCP snooping VLANs - VLAN ID out of range (too low) +test_dhcp_snooping_vlans_range_low: + <<: *device_info + layer2_configuration: + dhcp_snooping: + dhcp_snooping_vlans: [0, 100] # Invalid: VLAN ID 0 is out of range + +# Test invalid DHCP snooping VLANs - VLAN ID out of range (too high) +test_dhcp_snooping_vlans_range_high: + <<: *device_info + layer2_configuration: + dhcp_snooping: + dhcp_snooping_vlans: [100, 4095] # Invalid: VLAN ID 4095 is out of range + +# Test invalid DHCP snooping VLANs - string elements in list +test_dhcp_snooping_vlans_string_elements: + <<: *device_info + layer2_configuration: + dhcp_snooping: + dhcp_snooping_vlans: ["100", "200"] # Invalid: string elements instead of integers + +# Test DHCP snooping VLANs - empty list (potential API bug test) +test_dhcp_snooping_vlans_empty_list: + <<: *device_info + layer2_configuration: + dhcp_snooping: + dhcp_snooping_vlans: [] # Test: empty list may cause API issues + +############################################# +# DHCP Snooping Glean Tests +############################################# + +# Test invalid DHCP snooping glean - string instead of boolean +test_dhcp_snooping_glean_string: + <<: *device_info + layer2_configuration: + dhcp_snooping: + dhcp_snooping_glean: "false" # Invalid: string instead of boolean + +# Test invalid DHCP snooping glean - integer instead of boolean +test_dhcp_snooping_glean_integer: + <<: *device_info + layer2_configuration: + dhcp_snooping: + dhcp_snooping_glean: 0 # Invalid: integer instead of boolean + +############################################# +# DHCP Database Agent URL Tests +############################################# + +# Test invalid DHCP database agent URL - integer instead of string +test_dhcp_snooping_database_agent_url_integer: + <<: *device_info + layer2_configuration: + dhcp_snooping: + dhcp_snooping_database_agent_url: 12345 # Invalid: integer instead of string + +# Test invalid DHCP database agent URL - boolean instead of string +test_dhcp_snooping_database_agent_url_boolean: + <<: *device_info + layer2_configuration: + dhcp_snooping: + dhcp_snooping_database_agent_url: true # Invalid: boolean instead of string + +# Test invalid DHCP database agent URL - exceeds maximum length (228 characters) +test_dhcp_snooping_database_agent_url_max_length: + <<: *device_info + layer2_configuration: + dhcp_snooping: + dhcp_snooping_database_agent_url: "tftp://192.168.1.100/this_is_an_extremely_long_filename_that_exceeds_the_maximum_allowed_length_for_dhcp_snooping_database_agent_url_and_should_cause_validation_to_fail_because_it_is_way_too_long_for_the_url_field_to_handle_properly_and_efficiently_so_we_need_to_reject_it_completely.db" # Invalid: > 227 chars + +############################################# +# DHCP Database Timeout Tests +############################################# + +# Test invalid DHCP database timeout - string instead of integer +test_dhcp_snooping_database_timeout_string: + <<: *device_info + layer2_configuration: + dhcp_snooping: + dhcp_snooping_database_timeout: "300" # Invalid: string instead of integer + +# Test invalid DHCP database timeout - boolean instead of integer +test_dhcp_snooping_database_timeout_boolean: + <<: *device_info + layer2_configuration: + dhcp_snooping: + dhcp_snooping_database_timeout: false # Invalid: boolean instead of integer + +# Test invalid DHCP database timeout - negative value 
+test_dhcp_snooping_database_timeout_negative: + <<: *device_info + layer2_configuration: + dhcp_snooping: + dhcp_snooping_database_timeout: -1 # Invalid: negative value + +# Test invalid DHCP database timeout - exceeds maximum value +test_dhcp_snooping_database_timeout_max: + <<: *device_info + layer2_configuration: + dhcp_snooping: + dhcp_snooping_database_timeout: 86401 # Invalid: exceeds maximum of 86400 + +############################################# +# DHCP Database Write Delay Tests +############################################# + +# Test invalid DHCP database write delay - string instead of integer +test_dhcp_snooping_database_write_delay_string: + <<: *device_info + layer2_configuration: + dhcp_snooping: + dhcp_snooping_database_write_delay: "300" # Invalid: string instead of integer + +# Test invalid DHCP database write delay - boolean instead of integer +test_dhcp_snooping_database_write_delay_boolean: + <<: *device_info + layer2_configuration: + dhcp_snooping: + dhcp_snooping_database_write_delay: true # Invalid: boolean instead of integer + +# Test invalid DHCP database write delay - below minimum value +test_dhcp_snooping_database_write_delay_min: + <<: *device_info + layer2_configuration: + dhcp_snooping: + dhcp_snooping_database_write_delay: 14 # Invalid: below minimum of 15 + +# Test invalid DHCP database write delay - exceeds maximum value +test_dhcp_snooping_database_write_delay_max: + <<: *device_info + layer2_configuration: + dhcp_snooping: + dhcp_snooping_database_write_delay: 86401 # Invalid: exceeds maximum of 86400 + +############################################# +# DHCP Proxy Bridge VLANs Tests +############################################# + +# Test invalid DHCP proxy bridge VLANs - string instead of list +test_dhcp_snooping_proxy_bridge_vlans_string: + <<: *device_info + layer2_configuration: + dhcp_snooping: + dhcp_snooping_proxy_bridge_vlans: "100,200" # Invalid: string instead of list + +# Test invalid DHCP proxy bridge VLANs - integer instead of list +test_dhcp_snooping_proxy_bridge_vlans_integer: + <<: *device_info + layer2_configuration: + dhcp_snooping: + dhcp_snooping_proxy_bridge_vlans: 100 # Invalid: integer instead of list + +# Test invalid DHCP proxy bridge VLANs - VLAN ID out of range (too low) +test_dhcp_snooping_proxy_bridge_vlans_range_low: + <<: *device_info + layer2_configuration: + dhcp_snooping: + dhcp_snooping_proxy_bridge_vlans: [0, 100] # Invalid: VLAN ID 0 is out of range + +# Test invalid DHCP proxy bridge VLANs - VLAN ID out of range (too high) +test_dhcp_snooping_proxy_bridge_vlans_range_high: + <<: *device_info + layer2_configuration: + dhcp_snooping: + dhcp_snooping_proxy_bridge_vlans: [100, 4095] # Invalid: VLAN ID 4095 is out of range + +# Test DHCP proxy bridge VLANs - empty list (potential API bug test) +test_dhcp_snooping_proxy_bridge_vlans_empty_list: + <<: *device_info + layer2_configuration: + dhcp_snooping: + dhcp_snooping_proxy_bridge_vlans: [] # Test: empty list may cause API issues + +############################################# +# Data Type and Structure Tests +############################################# + +# Test invalid DHCP snooping configuration - string instead of dictionary +test_dhcp_snooping_invalid_dict_type: + <<: *device_info + layer2_configuration: + dhcp_snooping: "invalid_dhcp_snooping_string" # Invalid: string instead of dictionary + +# Test invalid DHCP snooping configuration - list instead of dictionary +test_dhcp_snooping_invalid_list_type: + <<: *device_info + layer2_configuration: + dhcp_snooping: 
["invalid", "dhcp_snooping", "list"] # Invalid: list instead of dictionary + +################################################################################################################### +# Positive Validation Tests for DHCP Snooping + +############################################# +# POSITIVE TEST CASES - CREATE +############################################# + +# Test DHCP snooping configuration with admin status only +test_dhcp_snooping_create_admin_status_only: + <<: *device_info + layer2_configuration: + dhcp_snooping: + dhcp_admin_status: true + dhcp_snooping_database_agent_url: "tftp://192.168.1.100/test_dhcp_bindings.db" + +# Test DHCP snooping configuration with VLANs only +test_dhcp_snooping_create_vlans_only: + <<: *device_info + layer2_configuration: + dhcp_snooping: + dhcp_snooping_vlans: [400, 401, 402, 98] + +# Test DHCP snooping configuration with glean only +test_dhcp_snooping_create_glean_only: + <<: *device_info + layer2_configuration: + dhcp_snooping: + dhcp_snooping_glean: true + +# Test DHCP snooping configuration with database agent URL only +test_dhcp_snooping_create_database_url_only: + <<: *device_info + layer2_configuration: + dhcp_snooping: + dhcp_snooping_database_agent_url: "tftp://192.168.1.100/dhcp_bindings.db" + +# Test DHCP snooping configuration with database timeout only +test_dhcp_snooping_create_database_timeout_only: + <<: *device_info + layer2_configuration: + dhcp_snooping: + dhcp_snooping_database_timeout: 600 + +# Test DHCP snooping configuration with database write delay only +test_dhcp_snooping_create_database_write_delay_only: + <<: *device_info + layer2_configuration: + dhcp_snooping: + dhcp_snooping_database_write_delay: 600 + +# Test DHCP snooping configuration with proxy bridge VLANs only +test_dhcp_snooping_create_proxy_bridge_vlans_only: + <<: *device_info + layer2_configuration: + dhcp_snooping: + dhcp_snooping_proxy_bridge_vlans: [98, 402] + +# Test DHCP snooping configuration with all parameters +test_dhcp_snooping_create_all_params: + <<: *device_info + layer2_configuration: + dhcp_snooping: + dhcp_admin_status: true + dhcp_snooping_vlans: [99, 98, 402, 401] + dhcp_snooping_glean: true + dhcp_snooping_database_agent_url: "tftp://192.168.1.100/comprehensive_dhcp_bindings.db" + dhcp_snooping_database_timeout: 1200 + dhcp_snooping_database_write_delay: 900 + dhcp_snooping_proxy_bridge_vlans: [402, 401] + +############################################# +# POSITIVE TEST CASES - UPDATE +############################################# + +# Test DHCP snooping update - disable admin status +test_dhcp_snooping_update_disable_admin: + <<: *device_info + layer2_configuration: + dhcp_snooping: + dhcp_admin_status: false + +# Test DHCP snooping update - update VLANs list +test_dhcp_snooping_update_vlans: + <<: *device_info + layer2_configuration: + dhcp_snooping: + dhcp_snooping_vlans: [] + +# Test DHCP snooping update - disable glean +test_dhcp_snooping_update_disable_glean: + <<: *device_info + layer2_configuration: + dhcp_snooping: + dhcp_snooping_glean: false + +# Test DHCP snooping update - update database agent URL +test_dhcp_snooping_update_database_url: + <<: *device_info + layer2_configuration: + dhcp_snooping: + dhcp_snooping_database_agent_url: "ftp://192.168.2.200/updated_dhcp_bindings.db" + +# Test DHCP snooping update - update database timeout +test_dhcp_snooping_update_database_timeout: + <<: *device_info + layer2_configuration: + dhcp_snooping: + dhcp_snooping_database_timeout: 1800 + +# Test DHCP snooping update - update 
database write delay +test_dhcp_snooping_update_database_write_delay: + <<: *device_info + layer2_configuration: + dhcp_snooping: + dhcp_snooping_database_write_delay: 1200 + +# Test DHCP snooping update - update proxy bridge VLANs +test_dhcp_snooping_update_proxy_bridge_vlans: + <<: *device_info + layer2_configuration: + dhcp_snooping: + dhcp_snooping_proxy_bridge_vlans: [401, 98] + +# Test DHCP snooping update - comprehensive update +test_dhcp_snooping_update_comprehensive: + <<: *device_info + layer2_configuration: + dhcp_snooping: + dhcp_admin_status: true + dhcp_snooping_vlans: [401, 402, 403] + dhcp_snooping_glean: false + dhcp_snooping_database_agent_url: "tftp://192.168.3.300/final_dhcp_bindings.db" + dhcp_snooping_database_timeout: 2400 + dhcp_snooping_database_write_delay: 1800 + dhcp_snooping_proxy_bridge_vlans: [402, 403] + +############################################# +# POSITIVE TEST CASES - RESET +############################################# + +# Test DHCP snooping reset - empty configuration (deleted state resets to defaults) +test_dhcp_snooping_reset_empty: + <<: *device_info + layer2_configuration: + dhcp_snooping: {} + +# Test DHCP snooping reset - with existing parameters (deleted state resets to defaults) +test_dhcp_snooping_reset_with_params: + <<: *device_info + layer2_configuration: + dhcp_snooping: + dhcp_admin_status: true # Parameters provided but should reset to defaults in delete state + dhcp_snooping_vlans: [401, 402, 403] + dhcp_snooping_glean: true + +############################################# +# Special Configuration Tests +############################################# + +# Test DHCP snooping with database agent URL empty string +test_dhcp_snooping_special_url_empty_string: + <<: *device_info + layer2_configuration: + dhcp_snooping: + dhcp_snooping_database_agent_url: "" + +# Test DHCP snooping with proxy bridge VLANs empty list +test_dhcp_snooping_special_vlan_empty_list: + <<: *device_info + layer2_configuration: + dhcp_snooping: + dhcp_snooping_proxy_bridge_vlans: [] diff --git a/tests/integration/ccc_wired_campus_automation_management/vars/vars_wca_test_feature_igmp_snooping.yml b/tests/integration/ccc_wired_campus_automation_management/vars/vars_wca_test_feature_igmp_snooping.yml new file mode 100644 index 0000000000..4d619aabb5 --- /dev/null +++ b/tests/integration/ccc_wired_campus_automation_management/vars/vars_wca_test_feature_igmp_snooping.yml @@ -0,0 +1,437 @@ +--- +# Common device information +device_info: &device_info + ip_address: "204.1.2.3" + device_collection_status_check: false + +################################################################################################################### +# Negative Validation Tests for IGMP Snooping + +############################################# +# IGMP Snooping Global Configuration Tests # +############################################# + +# Test invalid IGMP snooping enabled - string instead of boolean +test_igmp_global_enabled_string: + <<: *device_info + layer2_configuration: + igmp_snooping: + enable_igmp_snooping: "true" # Invalid: string instead of boolean + +# Test invalid IGMP snooping enabled - integer instead of boolean +test_igmp_global_enabled_integer: + <<: *device_info + layer2_configuration: + igmp_snooping: + enable_igmp_snooping: 1 # Invalid: integer instead of boolean + +# Test invalid IGMP querier enabled - string instead of boolean +test_igmp_global_querier_enabled_string: + <<: *device_info + layer2_configuration: + igmp_snooping: + igmp_snooping_querier: "false" # 
Invalid: string instead of boolean + +# Test invalid IGMP querier enabled - integer instead of boolean +test_igmp_global_querier_enabled_integer: + <<: *device_info + layer2_configuration: + igmp_snooping: + igmp_snooping_querier: 0 # Invalid: integer instead of boolean + +# Test invalid IGMP querier address - integer instead of string +test_igmp_global_querier_address_integer: + <<: *device_info + layer2_configuration: + igmp_snooping: + igmp_snooping_querier_address: 192168001001 # Invalid: integer instead of string + +# Test invalid IGMP querier version - invalid choice +test_igmp_global_querier_version_invalid: + <<: *device_info + layer2_configuration: + igmp_snooping: + igmp_snooping_querier_version: "VERSION_4" # Invalid: not in valid choices + +# Test invalid IGMP querier version - integer instead of string +test_igmp_global_querier_version_integer: + <<: *device_info + layer2_configuration: + igmp_snooping: + igmp_snooping_querier_version: 2 # Invalid: integer instead of string + +# Test invalid IGMP query interval - string instead of integer +test_igmp_global_query_interval_string: + <<: *device_info + layer2_configuration: + igmp_snooping: + igmp_snooping_querier_query_interval: "125" # Invalid: string instead of integer + +# Test invalid IGMP query interval - boolean instead of integer +test_igmp_global_query_interval_boolean: + <<: *device_info + layer2_configuration: + igmp_snooping: + igmp_snooping_querier_query_interval: true # Invalid: boolean instead of integer + +# Test invalid IGMP query interval - below minimum value +test_igmp_global_query_interval_min: + <<: *device_info + layer2_configuration: + igmp_snooping: + igmp_snooping_querier_query_interval: 0 # Invalid: below minimum of 1 + +# Test invalid IGMP query interval - exceeds maximum value +test_igmp_global_query_interval_max: + <<: *device_info + layer2_configuration: + igmp_snooping: + igmp_snooping_querier_query_interval: 18001 # Invalid: exceeds maximum of 18000 + +############################################# +# IGMP Snooping VLAN Configuration Tests # +############################################# + +# Test invalid IGMP VLAN ID - string instead of integer +test_igmp_vlan_id_string: + <<: *device_info + layer2_configuration: + igmp_snooping: + igmp_snooping_vlans: + - igmp_snooping_vlan_id: "98" # Invalid: string instead of integer + enable_igmp_snooping: true + +# Test invalid IGMP VLAN ID - negative value +test_igmp_vlan_id_negative: + <<: *device_info + layer2_configuration: + igmp_snooping: + igmp_snooping_vlans: + - igmp_snooping_vlan_id: -1 # Invalid: negative value + enable_igmp_snooping: true + +# Test invalid IGMP VLAN ID - exceeds maximum value +test_igmp_vlan_id_max: + <<: *device_info + layer2_configuration: + igmp_snooping: + igmp_snooping_vlans: + - igmp_snooping_vlan_id: 4095 # Invalid: exceeds maximum of 4094 + enable_igmp_snooping: true + +# Test invalid IGMP VLAN ID - zero value +test_igmp_vlan_id_zero: + <<: *device_info + layer2_configuration: + igmp_snooping: + igmp_snooping_vlans: + - igmp_snooping_vlan_id: 0 # Invalid: zero value + enable_igmp_snooping: true + +# Test missing required IGMP VLAN ID +test_igmp_vlan_id_missing: + <<: *device_info + layer2_configuration: + igmp_snooping: + igmp_snooping_vlans: + - # igmp_snooping_vlan_id is missing - should fail + enable_igmp_snooping: true + +# Test invalid IGMP VLAN enabled - string instead of boolean +test_igmp_vlan_enabled_string: + <<: *device_info + layer2_configuration: + igmp_snooping: + igmp_snooping_vlans: + - 
igmp_snooping_vlan_id: 98 + enable_igmp_snooping: "true" # Invalid: string instead of boolean + +# Test invalid IGMP VLAN enabled - integer instead of boolean +test_igmp_vlan_enabled_integer: + <<: *device_info + layer2_configuration: + igmp_snooping: + igmp_snooping_vlans: + - igmp_snooping_vlan_id: 99 + enable_igmp_snooping: 1 # Invalid: integer instead of boolean + +# Test invalid IGMP VLAN querier enabled - string instead of boolean +test_igmp_vlan_querier_enabled_string: + <<: *device_info + layer2_configuration: + igmp_snooping: + igmp_snooping_vlans: + - igmp_snooping_vlan_id: 202 + enable_igmp_snooping: true + igmp_snooping_querier: "false" # Invalid: string instead of boolean + +# Test invalid IGMP VLAN querier enabled - integer instead of boolean +test_igmp_vlan_querier_enabled_integer: + <<: *device_info + layer2_configuration: + igmp_snooping: + igmp_snooping_vlans: + - igmp_snooping_vlan_id: 400 + enable_igmp_snooping: true + igmp_snooping_querier: 0 # Invalid: integer instead of boolean + +# Test invalid IGMP VLAN querier address - integer instead of string +test_igmp_vlan_querier_address_integer: + <<: *device_info + layer2_configuration: + igmp_snooping: + igmp_snooping_vlans: + - igmp_snooping_vlan_id: 401 + enable_igmp_snooping: true + igmp_snooping_querier_address: 192168001001 # Invalid: integer instead of string + +# Test invalid IGMP VLAN querier version - invalid choice +test_igmp_vlan_querier_version_invalid: + <<: *device_info + layer2_configuration: + igmp_snooping: + igmp_snooping_vlans: + - igmp_snooping_vlan_id: 402 + enable_igmp_snooping: true + igmp_snooping_querier_version: "VERSION_4" # Invalid: not in valid choices + +# Test invalid IGMP VLAN query interval - string instead of integer +test_igmp_vlan_query_interval_string: + <<: *device_info + layer2_configuration: + igmp_snooping: + igmp_snooping_vlans: + - igmp_snooping_vlan_id: 403 + enable_igmp_snooping: true + igmp_snooping_querier_query_interval: "125" # Invalid: string instead of integer + +# Test invalid IGMP VLAN query interval - below minimum value +test_igmp_vlan_query_interval_min: + <<: *device_info + layer2_configuration: + igmp_snooping: + igmp_snooping_vlans: + - igmp_snooping_vlan_id: 98 + enable_igmp_snooping: true + igmp_snooping_querier_query_interval: 0 # Invalid: below minimum of 1 + +# Test invalid IGMP VLAN query interval - exceeds maximum value +test_igmp_vlan_query_interval_max: + <<: *device_info + layer2_configuration: + igmp_snooping: + igmp_snooping_vlans: + - igmp_snooping_vlan_id: 99 + enable_igmp_snooping: true + igmp_snooping_querier_query_interval: 18001 # Invalid: exceeds maximum of 18000 + +# Test invalid IGMP VLAN mrouter port list - string instead of list +test_igmp_vlan_mrouter_port_list_string: + <<: *device_info + layer2_configuration: + igmp_snooping: + igmp_snooping_vlans: + - igmp_snooping_vlan_id: 202 + enable_igmp_snooping: true + igmp_snooping_mrouter_port_list: "GigabitEthernet1/0/1" # Invalid: string instead of list + +############################################# +# Data Type and Structure Tests # +############################################# + +# Test invalid IGMP snooping configuration - string instead of dictionary +test_igmp_invalid_dict_type: + <<: *device_info + layer2_configuration: + igmp_snooping: "invalid_igmp_string" # Invalid: string instead of dictionary + +# Test invalid IGMP snooping configuration - list instead of dictionary +test_igmp_invalid_list_type: + <<: *device_info + layer2_configuration: + igmp_snooping: ["invalid", "igmp", 
"list"] # Invalid: list instead of dictionary + +# Test invalid IGMP VLANs - string instead of list +test_igmp_vlans_invalid_string: + <<: *device_info + layer2_configuration: + igmp_snooping: + igmp_snooping_vlans: "invalid_vlans_string" # Invalid: string instead of list + +# Test invalid IGMP VLANs - dictionary instead of list +test_igmp_vlans_invalid_dict: + <<: *device_info + layer2_configuration: + igmp_snooping: + igmp_snooping_vlans: { "invalid": "dict" } # Invalid: dictionary instead of list + +# Test invalid IGMP VLAN item - string instead of dictionary +test_igmp_vlan_invalid_dict_type: + <<: *device_info + layer2_configuration: + igmp_snooping: + igmp_snooping_vlans: + - "invalid_vlan_string" # Invalid: string instead of dictionary + +################################################################################################################### +# Positive Test Cases for IGMP Snooping + +############################################# +# POSITIVE TEST CASES - CREATE # +############################################# + +# Test IGMP snooping configuration with global parameters only +test_igmp_create_global_only: + <<: *device_info + layer2_configuration: + igmp_snooping: + enable_igmp_snooping: true + igmp_snooping_querier: false + igmp_snooping_querier_address: "192.168.1.1" + igmp_snooping_querier_version: "VERSION_2" + igmp_snooping_querier_query_interval: 125 + +# Test IGMP snooping configuration with VLANs only +test_igmp_create_vlans_only: + <<: *device_info + layer2_configuration: + igmp_snooping: + igmp_snooping_vlans: + - igmp_snooping_vlan_id: 98 + enable_igmp_snooping: true + igmp_snooping_querier: false + igmp_snooping_querier_address: "192.168.1.1" + igmp_snooping_querier_version: "VERSION_2" + igmp_snooping_querier_query_interval: 125 + igmp_snooping_mrouter_port_list: + ["GigabitEthernet1/0/1", "GigabitEthernet1/0/2"] + +# Test IGMP snooping configuration with single VLAN +test_igmp_create_single_vlan: + <<: *device_info + layer2_configuration: + igmp_snooping: + igmp_snooping_vlans: + - igmp_snooping_vlan_id: 99 + enable_igmp_snooping: true + +# Test IGMP snooping configuration with global and VLANs +test_igmp_create_global_and_vlans: + <<: *device_info + layer2_configuration: + igmp_snooping: + enable_igmp_snooping: true + igmp_snooping_querier: true + igmp_snooping_querier_address: "10.1.1.1" + igmp_snooping_querier_version: "VERSION_3" + igmp_snooping_querier_query_interval: 60 + igmp_snooping_vlans: + - igmp_snooping_vlan_id: 202 + enable_igmp_snooping: true + igmp_snooping_querier: false + igmp_snooping_querier_address: "192.168.1.1" + igmp_snooping_querier_version: "VERSION_2" + igmp_snooping_querier_query_interval: 125 + igmp_snooping_mrouter_port_list: ["GigabitEthernet1/0/1"] + - igmp_snooping_vlan_id: 400 + enable_igmp_snooping: true + igmp_snooping_querier: true + igmp_snooping_querier_address: "192.168.2.1" + igmp_snooping_querier_version: "VERSION_3" + igmp_snooping_querier_query_interval: 90 + igmp_snooping_mrouter_port_list: + ["GigabitEthernet1/0/3", "GigabitEthernet1/0/4"] + +# Test IGMP snooping configuration with minimal VLAN parameters +test_igmp_create_minimal_vlan: + <<: *device_info + layer2_configuration: + igmp_snooping: + igmp_snooping_vlans: + - igmp_snooping_vlan_id: 401 + enable_igmp_snooping: true + +# Test IGMP snooping configuration with multiple VLANs - different settings +test_igmp_create_multiple_vlans_different_settings: + <<: *device_info + layer2_configuration: + igmp_snooping: + igmp_snooping_vlans: + - igmp_snooping_vlan_id: 
401 + enable_igmp_snooping: true + igmp_snooping_querier: false + igmp_snooping_querier_query_interval: 60 + - igmp_snooping_vlan_id: 402 + enable_igmp_snooping: true + igmp_snooping_querier: true + igmp_snooping_querier_query_interval: 125 + - igmp_snooping_vlan_id: 403 + enable_igmp_snooping: false + igmp_snooping_querier: false + igmp_snooping_querier_query_interval: 250 + +############################################# +# POSITIVE TEST CASES - UPDATE # +############################################# + +# Test IGMP snooping update - global parameters only +test_igmp_update_global_only: + <<: *device_info + layer2_configuration: + igmp_snooping: + enable_igmp_snooping: false + igmp_snooping_querier: true + igmp_snooping_querier_address: "10.2.2.1" + igmp_snooping_querier_version: "VERSION_1" + igmp_snooping_querier_query_interval: 90 + +# Test IGMP snooping update - modify existing VLANs +test_igmp_update_modify_vlans: + <<: *device_info + layer2_configuration: + igmp_snooping: + igmp_snooping_vlans: + - igmp_snooping_vlan_id: 202 + enable_igmp_snooping: false # Changed from true + igmp_snooping_querier: true # Changed from false + igmp_snooping_querier_query_interval: 180 # Changed from 125 + - igmp_snooping_vlan_id: 400 + enable_igmp_snooping: true + igmp_snooping_querier: false # Changed from true + igmp_snooping_querier_query_interval: 240 # Changed from 90 + +# Test IGMP snooping update - global and VLANs together +test_igmp_update_global_and_vlans: + <<: *device_info + layer2_configuration: + igmp_snooping: + enable_igmp_snooping: true + igmp_snooping_querier: false # Updated global querier + igmp_snooping_querier_query_interval: 180 # Updated global query interval + igmp_snooping_vlans: + - igmp_snooping_vlan_id: 401 + enable_igmp_snooping: true + igmp_snooping_querier: false + igmp_snooping_querier_query_interval: 240 + - igmp_snooping_vlan_id: 402 # Updated VLAN + enable_igmp_snooping: true + igmp_snooping_querier: true + igmp_snooping_querier_query_interval: 150 + +# Test IGMP snooping update - change global enablement state +test_igmp_update_change_global_enablement: + <<: *device_info + layer2_configuration: + igmp_snooping: + enable_igmp_snooping: false # Changed from previous state + +# Test IGMP snooping update - modify single VLAN +test_igmp_update_single_vlan: + <<: *device_info + layer2_configuration: + igmp_snooping: + igmp_snooping_vlans: + - igmp_snooping_vlan_id: 403 + enable_igmp_snooping: true + igmp_snooping_querier: true + igmp_snooping_querier_query_interval: 300 diff --git a/tests/integration/ccc_wired_campus_automation_management/vars/vars_wca_test_feature_lldp.yml b/tests/integration/ccc_wired_campus_automation_management/vars/vars_wca_test_feature_lldp.yml new file mode 100644 index 0000000000..06dba6bd31 --- /dev/null +++ b/tests/integration/ccc_wired_campus_automation_management/vars/vars_wca_test_feature_lldp.yml @@ -0,0 +1,325 @@ +--- +# Common device information +device_info: &device_info + ip_address: "204.1.2.3" + device_collection_status_check: false + +################################################################################################################### +# Negative Validation Tests for LLDP + +############################################# +# LLDP Admin Status Tests +############################################# + +# Test invalid admin status - string instead of boolean +test_lldp_admin_status_string: + <<: *device_info + layer2_configuration: + lldp: + lldp_admin_status: "true" # Invalid: string instead of boolean + +# Test invalid admin 
status - integer instead of boolean +test_lldp_admin_status_integer: + <<: *device_info + layer2_configuration: + lldp: + lldp_admin_status: 1 # Invalid: integer instead of boolean + +############################################# +# LLDP Hold Time Tests +############################################# + +# Test LLDP hold time below minimum (-1) +test_lldp_hold_time_below_min: + <<: *device_info + layer2_configuration: + lldp: + lldp_hold_time: -1 # Invalid: below minimum of 0 + +# Test LLDP hold time above maximum (32768) +test_lldp_hold_time_above_max: + <<: *device_info + layer2_configuration: + lldp: + lldp_hold_time: 32768 # Invalid: above maximum of 32767 + +# Test LLDP hold time as string +test_lldp_hold_time_string: + <<: *device_info + layer2_configuration: + lldp: + lldp_hold_time: "120" # Invalid: string instead of integer + +# Test LLDP hold time as float +test_lldp_hold_time_float: + <<: *device_info + layer2_configuration: + lldp: + lldp_hold_time: 120.5 # Invalid: float instead of integer + +############################################# +# LLDP Timer Tests +############################################# + +# Test LLDP timer below minimum (4) +test_lldp_timer_below_min: + <<: *device_info + layer2_configuration: + lldp: + lldp_timer: 4 # Invalid: below minimum of 5 + +# Test LLDP timer above maximum (32768) +test_lldp_timer_above_max: + <<: *device_info + layer2_configuration: + lldp: + lldp_timer: 32768 # Invalid: above maximum of 32767 + +# Test LLDP timer as string +test_lldp_timer_string: + <<: *device_info + layer2_configuration: + lldp: + lldp_timer: "30" # Invalid: string instead of integer + +# Test LLDP timer as float +test_lldp_timer_float: + <<: *device_info + layer2_configuration: + lldp: + lldp_timer: 30.5 # Invalid: float instead of integer + +# Test LLDP timer negative +test_lldp_timer_negative: + <<: *device_info + layer2_configuration: + lldp: + lldp_timer: -5 # Invalid: negative value + +############################################# +# LLDP Reinitialization Delay Tests +############################################# + +# Test LLDP reinitialization delay below minimum (1) +test_lldp_reinit_delay_below_min: + <<: *device_info + layer2_configuration: + lldp: + lldp_reinitialization_delay: 1 # Invalid: below minimum of 2 + +# Test LLDP reinitialization delay above maximum (6) +test_lldp_reinit_delay_above_max: + <<: *device_info + layer2_configuration: + lldp: + lldp_reinitialization_delay: 6 # Invalid: above maximum of 5 + +# Test LLDP reinitialization delay as string +test_lldp_reinit_delay_string: + <<: *device_info + layer2_configuration: + lldp: + lldp_reinitialization_delay: "2" # Invalid: string instead of integer + +# Test LLDP reinitialization delay as float +test_lldp_reinit_delay_float: + <<: *device_info + layer2_configuration: + lldp: + lldp_reinitialization_delay: 2.5 # Invalid: float instead of integer + +# Test LLDP reinitialization delay negative +test_lldp_reinit_delay_negative: + <<: *device_info + layer2_configuration: + lldp: + lldp_reinitialization_delay: -2 # Invalid: negative value + +############################################# +# Data Type and Structure Tests +############################################# + +# Test invalid LLDP configuration - string instead of dictionary +test_lldp_invalid_dict_type: + <<: *device_info + layer2_configuration: + lldp: "invalid_lldp_string" # Invalid: string instead of dictionary + +# Test invalid LLDP configuration - list instead of dictionary +test_lldp_invalid_list_type: + <<: *device_info + 
layer2_configuration: + lldp: ["invalid", "lldp", "list"] # Invalid: list instead of dictionary + +# Test invalid LLDP configuration - null value +test_lldp_invalid_null_type: + <<: *device_info + layer2_configuration: + lldp: null # Invalid: null instead of dictionary + +# ############################################# +# # Edge Case Tests +# ############################################# + +# # Test LLDP hold time equal to timer (should be valid but not recommended) +# test_lldp_hold_time_equal_timer: +# <<: *device_info +# layer2_configuration: +# lldp: +# lldp_hold_time: 30 +# lldp_timer: 30 # Edge case: hold_time equals timer + +# # Test LLDP hold time less than timer (should be valid but not recommended) +# test_lldp_hold_time_less_than_timer: +# <<: *device_info +# layer2_configuration: +# lldp: +# lldp_hold_time: 15 +# lldp_timer: 30 # Edge case: hold_time less than timer + +################################################################################################################### +# Positive Validation Tests for LLDP + +############################################# +# POSITIVE TEST CASES - CREATE +############################################# + +# Test LLDP configuration with admin status only +test_lldp_create_admin_status_only: + <<: *device_info + layer2_configuration: + lldp: + lldp_admin_status: true + +# Test LLDP configuration with all parameters +test_lldp_create_all_params: + <<: *device_info + layer2_configuration: + lldp: + lldp_admin_status: true + lldp_hold_time: 220 + lldp_timer: 40 + lldp_reinitialization_delay: 3 + +# Test LLDP configuration with custom timers +test_lldp_create_custom_timers: + <<: *device_info + layer2_configuration: + lldp: + lldp_hold_time: 240 + lldp_timer: 60 + +# Test LLDP disabled configuration +test_lldp_create_disabled: + <<: *device_info + layer2_configuration: + lldp: + lldp_admin_status: false + +# Test LLDP with only reinitialization delay +test_lldp_create_reinit_delay_only: + <<: *device_info + layer2_configuration: + lldp: + lldp_reinitialization_delay: 4 + +############################################# +# POSITIVE TEST CASES - UPDATE +############################################# + +# Test LLDP update - enable LLDP +test_lldp_update_enable: + <<: *device_info + layer2_configuration: + lldp: + lldp_admin_status: true + +# Test LLDP update - disable LLDP +test_lldp_update_disable: + <<: *device_info + layer2_configuration: + lldp: + lldp_admin_status: false + +# Test LLDP update - modify timers +test_lldp_update_timers: + <<: *device_info + layer2_configuration: + lldp: + lldp_hold_time: 180 + lldp_timer: 45 + +# Test LLDP update - modify reinitialization delay +test_lldp_update_reinit_delay: + <<: *device_info + layer2_configuration: + lldp: + lldp_reinitialization_delay: 2 + +# Test LLDP update - all parameters +test_lldp_update_all_params: + <<: *device_info + layer2_configuration: + lldp: + lldp_admin_status: true + lldp_hold_time: 300 + lldp_timer: 90 + lldp_reinitialization_delay: 5 + +############################################# +# POSITIVE TEST CASES - DELETE +############################################# + +# Test LLDP deletion/reset - empty configuration +test_lldp_delete_empty: + <<: *device_info + layer2_configuration: + lldp: {} + +# Test LLDP deletion/reset - with existing parameters (should reset to defaults) +test_lldp_delete_with_params: + <<: *device_info + layer2_configuration: + lldp: + lldp_admin_status: true # Parameters provided but should reset to defaults in delete state + lldp_hold_time: 120 + 
lldp_timer: 30 + lldp_reinitialization_delay: 2 + +############################################# +# Boundary Value Tests +############################################# + +# Test LLDP with minimum valid values +test_lldp_boundary_min_values: + <<: *device_info + layer2_configuration: + lldp: + lldp_hold_time: 0 # Minimum valid (0 means no aging) + lldp_timer: 5 # Minimum valid + lldp_reinitialization_delay: 2 # Minimum valid + +# Test LLDP with maximum valid values +test_lldp_boundary_max_values: + <<: *device_info + layer2_configuration: + lldp: + lldp_hold_time: 32767 # Maximum valid + lldp_timer: 32767 # Maximum valid + lldp_reinitialization_delay: 5 # Maximum valid + +# Test LLDP with recommended values (hold_time > timer) +test_lldp_recommended_values: + <<: *device_info + layer2_configuration: + lldp: + lldp_hold_time: 120 # 4x timer (recommended) + lldp_timer: 30 + +# Test LLDP with zero hold time (no aging) +test_lldp_zero_hold_time: + <<: *device_info + layer2_configuration: + lldp: + lldp_hold_time: 0 # Special case: no aging + lldp_timer: 30 diff --git a/tests/integration/ccc_wired_campus_automation_management/vars/vars_wca_test_feature_logical_ports.yml b/tests/integration/ccc_wired_campus_automation_management/vars/vars_wca_test_feature_logical_ports.yml new file mode 100644 index 0000000000..4341009d4c --- /dev/null +++ b/tests/integration/ccc_wired_campus_automation_management/vars/vars_wca_test_feature_logical_ports.yml @@ -0,0 +1,723 @@ +--- +# Common device information +device_info: &device_info + ip_address: "204.1.2.3" + device_collection_status_check: false + +################################################################################################################### +# Negative Validation Tests for Logical Ports + +############################################# +# Logical Ports Global Configuration Tests # +############################################# + +# Test invalid logical port configuration - string instead of dictionary +test_logical_ports_invalid_dict_type: + <<: *device_info + layer2_configuration: + logical_ports: "invalid_logical_ports_string" # Invalid: string instead of dictionary + +# Test invalid logical port configuration - list instead of dictionary +test_logical_ports_invalid_list_type: + <<: *device_info + layer2_configuration: + logical_ports: ["invalid", "logical_ports", "list"] # Invalid: list instead of dictionary + +# Test invalid port_channel_auto - integer instead of boolean +test_port_channel_auto_integer: + <<: *device_info + layer2_configuration: + logical_ports: + port_channel_auto: 1 # Invalid: integer instead of boolean + +# Test invalid port_channel_auto - string instead of boolean +test_port_channel_auto_string: + <<: *device_info + layer2_configuration: + logical_ports: + port_channel_auto: "true" # Invalid: string instead of boolean + +# Test invalid port_channel_lacp_system_priority - string instead of integer +test_port_channel_lacp_system_priority_string: + <<: *device_info + layer2_configuration: + logical_ports: + port_channel_lacp_system_priority: "32768" # Invalid: string instead of integer + +# Test invalid port_channel_lacp_system_priority - boolean instead of integer +test_port_channel_lacp_system_priority_boolean: + <<: *device_info + layer2_configuration: + logical_ports: + port_channel_lacp_system_priority: true # Invalid: boolean instead of integer + +# Test invalid port_channel_lacp_system_priority - negative value +test_port_channel_lacp_system_priority_negative: + <<: *device_info + layer2_configuration: + 
logical_ports: + port_channel_lacp_system_priority: -1 # Invalid: negative value + +# Test invalid port_channel_lacp_system_priority - exceeds maximum value +test_port_channel_lacp_system_priority_max: + <<: *device_info + layer2_configuration: + logical_ports: + port_channel_lacp_system_priority: 65536 # Invalid: exceeds maximum of 65535 + +# Test invalid port_channel_load_balancing_method - invalid choice +test_port_channel_load_balancing_method_invalid: + <<: *device_info + layer2_configuration: + logical_ports: + port_channel_load_balancing_method: "INVALID_METHOD" # Invalid: not in valid choices + +# Test invalid port_channel_load_balancing_method - integer instead of string +test_port_channel_load_balancing_method_integer: + <<: *device_info + layer2_configuration: + logical_ports: + port_channel_load_balancing_method: 1 # Invalid: integer instead of string + +# Test invalid port_channels - string instead of list +test_port_channels_invalid_list_type: + <<: *device_info + layer2_configuration: + logical_ports: + port_channels: "invalid_string" # Invalid: string instead of list + +# Test invalid port_channels - dictionary instead of list +test_port_channels_invalid_dict_type: + <<: *device_info + layer2_configuration: + logical_ports: + port_channels: {"invalid": "dict"} # Invalid: dictionary instead of list + +############################################# +# Port Channel Configuration Tests # +############################################# + +# Test invalid port_channel item - string instead of dictionary +test_port_channel_item_invalid_dict_type: + <<: *device_info + layer2_configuration: + logical_ports: + port_channels: + - "invalid_port_channel_string" # Invalid: string instead of dictionary + +# Test missing required port_channel_protocol +test_port_channel_protocol_missing: + <<: *device_info + layer2_configuration: + logical_ports: + port_channels: + - # port_channel_protocol is missing - should fail + port_channel_name: "Port-channel1" + port_channel_members: + - port_channel_interface_name: "GigabitEthernet1/0/10" + +# Test invalid port_channel_protocol - invalid choice +test_port_channel_protocol_invalid: + <<: *device_info + layer2_configuration: + logical_ports: + port_channels: + - port_channel_protocol: "INVALID_PROTOCOL" # Invalid: only LACP, PAGP, NONE allowed + port_channel_name: "Port-channel1" + port_channel_members: + - port_channel_interface_name: "GigabitEthernet1/0/10" + +# Test invalid port_channel_protocol - integer instead of string +test_port_channel_protocol_integer: + <<: *device_info + layer2_configuration: + logical_ports: + port_channels: + - port_channel_protocol: 1 # Invalid: integer instead of string + port_channel_name: "Port-channel1" + port_channel_members: + - port_channel_interface_name: "GigabitEthernet1/0/10" + +# Test missing required port_channel_name +test_port_channel_name_missing: + <<: *device_info + layer2_configuration: + logical_ports: + port_channels: + - port_channel_protocol: "LACP" + # port_channel_name is missing - should fail + port_channel_members: + - port_channel_interface_name: "GigabitEthernet1/0/10" + +# Test invalid port_channel_name - integer instead of string +test_port_channel_name_integer: + <<: *device_info + layer2_configuration: + logical_ports: + port_channels: + - port_channel_protocol: "LACP" + port_channel_name: 123 # Invalid: integer instead of string + port_channel_members: + - port_channel_interface_name: "GigabitEthernet1/0/10" + +# Test invalid port_channel_name - too short 
+test_port_channel_name_too_short: + <<: *device_info + layer2_configuration: + logical_ports: + port_channels: + - port_channel_protocol: "LACP" + port_channel_name: "PC1" # Invalid: less than 13 characters + port_channel_members: + - port_channel_interface_name: "GigabitEthernet1/0/10" + +# Test invalid port_channel_name - too long +test_port_channel_name_too_long: + <<: *device_info + layer2_configuration: + logical_ports: + port_channels: + - port_channel_protocol: "LACP" + port_channel_name: "Port-channel123456789" # Invalid: more than 15 characters + port_channel_members: + - port_channel_interface_name: "GigabitEthernet1/0/10" + +# Test invalid port_channel_name - empty string +test_port_channel_name_empty: + <<: *device_info + layer2_configuration: + logical_ports: + port_channels: + - port_channel_protocol: "LACP" + port_channel_name: "" # Invalid: empty string + port_channel_members: + - port_channel_interface_name: "GigabitEthernet1/0/10" + +# Test invalid port_channel_min_links - string instead of integer +test_port_channel_min_links_string: + <<: *device_info + layer2_configuration: + logical_ports: + port_channels: + - port_channel_protocol: "LACP" + port_channel_name: "Port-channel1" + port_channel_min_links: "2" # Invalid: string instead of integer + port_channel_members: + - port_channel_interface_name: "GigabitEthernet1/0/10" + +# Test invalid port_channel_min_links - negative value +test_port_channel_min_links_negative: + <<: *device_info + layer2_configuration: + logical_ports: + port_channels: + - port_channel_protocol: "LACP" + port_channel_name: "Port-channel1" + port_channel_min_links: -1 # Invalid: negative value + port_channel_members: + - port_channel_interface_name: "GigabitEthernet1/0/10" + +# Test invalid port_channel_min_links - zero value +test_port_channel_min_links_zero: + <<: *device_info + layer2_configuration: + logical_ports: + port_channels: + - port_channel_protocol: "LACP" + port_channel_name: "Port-channel1" + port_channel_min_links: 0 # Invalid: zero value (must be 1-8) + port_channel_members: + - port_channel_interface_name: "GigabitEthernet1/0/10" + +# Test invalid port_channel_min_links - exceeds maximum value +test_port_channel_min_links_max: + <<: *device_info + layer2_configuration: + logical_ports: + port_channels: + - port_channel_protocol: "LACP" + port_channel_name: "Port-channel1" + port_channel_min_links: 9 # Invalid: exceeds maximum of 8 + port_channel_members: + - port_channel_interface_name: "GigabitEthernet1/0/10" + +# Test missing required port_channel_members +test_port_channel_members_missing: + <<: *device_info + layer2_configuration: + logical_ports: + port_channels: + - port_channel_protocol: "LACP" + port_channel_name: "Port-channel1" + # port_channel_members is missing - should fail + +# Test invalid port_channel_members - string instead of list +test_port_channel_members_invalid_list_type: + <<: *device_info + layer2_configuration: + logical_ports: + port_channels: + - port_channel_protocol: "LACP" + port_channel_name: "Port-channel1" + port_channel_members: "invalid_string" # Invalid: string instead of list + +# Test invalid port_channel_members - dictionary instead of list +test_port_channel_members_invalid_dict_type: + <<: *device_info + layer2_configuration: + logical_ports: + port_channels: + - port_channel_protocol: "LACP" + port_channel_name: "Port-channel1" + port_channel_members: {"invalid": "dict"} # Invalid: dictionary instead of list + +# Test invalid port_channel_members - empty list 
+test_port_channel_members_empty_list: + <<: *device_info + layer2_configuration: + logical_ports: + port_channels: + - port_channel_protocol: "LACP" + port_channel_name: "Port-channel1" + port_channel_members: [] # Invalid: empty list + +############################################# +# Port Channel Member Configuration Tests # +############################################# + +# Test invalid port_channel_member item - string instead of dictionary +test_port_channel_member_invalid_dict_type: + <<: *device_info + layer2_configuration: + logical_ports: + port_channels: + - port_channel_protocol: "LACP" + port_channel_name: "Port-channel1" + port_channel_members: + - "invalid_member_string" # Invalid: string instead of dictionary + +# Test missing required port_channel_interface_name +test_port_channel_interface_name_missing: + <<: *device_info + layer2_configuration: + logical_ports: + port_channels: + - port_channel_protocol: "LACP" + port_channel_name: "Port-channel1" + port_channel_members: + - # port_channel_interface_name is missing - should fail + port_channel_mode: "ACTIVE" + +# Test invalid port_channel_interface_name - integer instead of string +test_port_channel_interface_name_integer: + <<: *device_info + layer2_configuration: + logical_ports: + port_channels: + - port_channel_protocol: "LACP" + port_channel_name: "Port-channel1" + port_channel_members: + - port_channel_interface_name: 123 # Invalid: integer instead of string + port_channel_mode: "ACTIVE" + +# Test invalid port_channel_mode - invalid choice for LACP +test_port_channel_mode_invalid_lacp: + <<: *device_info + layer2_configuration: + logical_ports: + port_channels: + - port_channel_protocol: "LACP" + port_channel_name: "Port-channel1" + port_channel_members: + - port_channel_interface_name: "GigabitEthernet1/0/10" + port_channel_mode: "DESIRABLE" # Invalid: DESIRABLE not valid for LACP + +# Test invalid port_channel_mode - invalid choice for PAGP +test_port_channel_mode_invalid_pagp: + <<: *device_info + layer2_configuration: + logical_ports: + port_channels: + - port_channel_protocol: "PAGP" + port_channel_name: "Port-channel1" + port_channel_members: + - port_channel_interface_name: "GigabitEthernet1/0/10" + port_channel_mode: "ACTIVE" # Invalid: ACTIVE not valid for PAGP + +# Test invalid port_channel_mode - invalid choice for NONE +test_port_channel_mode_invalid_none: + <<: *device_info + layer2_configuration: + logical_ports: + port_channels: + - port_channel_protocol: "NONE" + port_channel_name: "Port-channel1" + port_channel_members: + - port_channel_interface_name: "GigabitEthernet1/0/10" + port_channel_mode: "ACTIVE" # Invalid: ACTIVE not valid for NONE + +# Test invalid port_channel_mode - integer instead of string +test_port_channel_mode_integer: + <<: *device_info + layer2_configuration: + logical_ports: + port_channels: + - port_channel_protocol: "LACP" + port_channel_name: "Port-channel1" + port_channel_members: + - port_channel_interface_name: "GigabitEthernet1/0/10" + port_channel_mode: 1 # Invalid: integer instead of string + +# Test invalid port_channel_port_priority - string instead of integer +test_port_channel_port_priority_string: + <<: *device_info + layer2_configuration: + logical_ports: + port_channels: + - port_channel_protocol: "LACP" + port_channel_name: "Port-channel1" + port_channel_members: + - port_channel_interface_name: "GigabitEthernet1/0/10" + port_channel_port_priority: "100" # Invalid: string instead of integer + +# Test invalid port_channel_port_priority - negative value for LACP 
+test_port_channel_port_priority_negative_lacp: + <<: *device_info + layer2_configuration: + logical_ports: + port_channels: + - port_channel_protocol: "LACP" + port_channel_name: "Port-channel1" + port_channel_members: + - port_channel_interface_name: "GigabitEthernet1/0/10" + port_channel_port_priority: -1 # Invalid: negative value for LACP + +# Test invalid port_channel_port_priority - exceeds maximum for LACP +test_port_channel_port_priority_max_lacp: + <<: *device_info + layer2_configuration: + logical_ports: + port_channels: + - port_channel_protocol: "LACP" + port_channel_name: "Port-channel1" + port_channel_members: + - port_channel_interface_name: "GigabitEthernet1/0/10" + port_channel_port_priority: 65536 # Invalid: exceeds maximum of 65535 for LACP + +# Test invalid port_channel_port_priority - exceeds maximum for PAGP +test_port_channel_port_priority_max_pagp: + <<: *device_info + layer2_configuration: + logical_ports: + port_channels: + - port_channel_protocol: "PAGP" + port_channel_name: "Port-channel1" + port_channel_members: + - port_channel_interface_name: "GigabitEthernet1/0/10" + port_channel_port_priority: 256 # Invalid: exceeds maximum of 255 for PAGP + +# Test invalid port_channel_rate - string instead of integer +test_port_channel_rate_string: + <<: *device_info + layer2_configuration: + logical_ports: + port_channels: + - port_channel_protocol: "LACP" + port_channel_name: "Port-channel1" + port_channel_members: + - port_channel_interface_name: "GigabitEthernet1/0/10" + port_channel_rate: "fast" # Invalid: string instead of integer + +# Test invalid port_channel_rate - invalid choice +test_port_channel_rate_invalid: + <<: *device_info + layer2_configuration: + logical_ports: + port_channels: + - port_channel_protocol: "LACP" + port_channel_name: "Port-channel1" + port_channel_members: + - port_channel_interface_name: "GigabitEthernet1/0/10" + port_channel_rate: 10 # Invalid: only 1 and 30 allowed + +# Test invalid port_channel_learn_method - invalid choice +test_port_channel_learn_method_invalid: + <<: *device_info + layer2_configuration: + logical_ports: + port_channels: + - port_channel_protocol: "PAGP" + port_channel_name: "Port-channel1" + port_channel_members: + - port_channel_interface_name: "GigabitEthernet1/0/10" + port_channel_learn_method: "INVALID_METHOD" # Invalid: only AGGREGATION_PORT, PHYSICAL_PORT allowed + +# Test invalid port_channel_learn_method - integer instead of string +test_port_channel_learn_method_integer: + <<: *device_info + layer2_configuration: + logical_ports: + port_channels: + - port_channel_protocol: "PAGP" + port_channel_name: "Port-channel1" + port_channel_members: + - port_channel_interface_name: "GigabitEthernet1/0/10" + port_channel_learn_method: 1 # Invalid: integer instead of string + +# Test port_channel_learn_method on non-PAGP protocol +test_port_channel_learn_method_non_pagp: + <<: *device_info + layer2_configuration: + logical_ports: + port_channels: + - port_channel_protocol: "LACP" + port_channel_name: "Port-channel1" + port_channel_members: + - port_channel_interface_name: "GigabitEthernet1/0/10" + port_channel_learn_method: "AGGREGATION_PORT" # Invalid: learn_method only applicable for PAGP + +# Test duplicate port_channel_names +test_duplicate_port_channel_names: + <<: *device_info + layer2_configuration: + logical_ports: + port_channels: + - port_channel_protocol: "LACP" + port_channel_name: "Port-channel1" + port_channel_members: + - port_channel_interface_name: "GigabitEthernet1/0/10" + - port_channel_protocol: 
"PAGP" + port_channel_name: "Port-channel1" # Invalid: duplicate name + port_channel_members: + - port_channel_interface_name: "GigabitEthernet1/0/11" + +# Test duplicate member interfaces across port channels +test_duplicate_member_interfaces: + <<: *device_info + layer2_configuration: + logical_ports: + port_channels: + - port_channel_protocol: "LACP" + port_channel_name: "Port-channel1" + port_channel_members: + - port_channel_interface_name: "GigabitEthernet1/0/10" + port_channel_mode: "ACTIVE" + - port_channel_protocol: "PAGP" + port_channel_name: "Port-channel2" + port_channel_members: + - port_channel_interface_name: "GigabitEthernet1/0/10" # Invalid: duplicate interface + port_channel_mode: "AUTO" + +# Test duplicate member interfaces within same port channel +test_duplicate_member_interfaces_same_channel: + <<: *device_info + layer2_configuration: + logical_ports: + port_channels: + - port_channel_protocol: "LACP" + port_channel_name: "Port-channel1" + port_channel_members: + - port_channel_interface_name: "GigabitEthernet1/0/10" + - port_channel_interface_name: "GigabitEthernet1/0/10" # Invalid: duplicate interface in same channel + +################################################################################################################### +# Positive Test Cases for Logical Ports + +############################################# +# POSITIVE TEST CASES - CREATE/UPDATE # +############################################# + +# Test logical ports configuration with global parameters only - auto enabled +test_logical_ports_create_global_auto_enabled: + <<: *device_info + layer2_configuration: + logical_ports: + port_channel_auto: true + port_channel_lacp_system_priority: 32768 + port_channel_load_balancing_method: "SRC_DST_IP" + +# Test logical ports configuration with global parameters only - auto disabled +test_logical_ports_create_global_auto_disabled: + <<: *device_info + layer2_configuration: + logical_ports: + port_channel_auto: false + port_channel_lacp_system_priority: 16384 + port_channel_load_balancing_method: "SRC_DST_MAC" + +# Test logical ports configuration with single LACP port channel - minimal +test_logical_ports_create_single_lacp_minimal: + <<: *device_info + layer2_configuration: + logical_ports: + port_channel_auto: false + port_channels: + - port_channel_protocol: "LACP" + port_channel_name: "Port-channel1" + port_channel_members: + - port_channel_interface_name: "GigabitEthernet1/0/10" + port_channel_mode: "ACTIVE" + +# Test logical ports configuration with single LACP port channel - comprehensive +test_logical_ports_create_single_lacp_comprehensive: + <<: *device_info + layer2_configuration: + logical_ports: + port_channel_auto: false + port_channel_lacp_system_priority: 4096 + port_channel_load_balancing_method: "SRC_DST_MIXED_IP_PORT" + port_channels: + - port_channel_protocol: "LACP" + port_channel_name: "Port-channel1" + port_channel_min_links: 2 + port_channel_members: + - port_channel_interface_name: "GigabitEthernet1/0/10" + port_channel_mode: "ACTIVE" + port_channel_port_priority: 100 + port_channel_rate: 1 + - port_channel_interface_name: "GigabitEthernet1/0/11" + port_channel_mode: "PASSIVE" + port_channel_port_priority: 200 + port_channel_rate: 30 + +# Test logical ports configuration with single PAGP port channel - minimal +test_logical_ports_create_single_pagp_minimal: + <<: *device_info + layer2_configuration: + logical_ports: + port_channel_auto: false + port_channels: + - port_channel_protocol: "PAGP" + port_channel_name: "Port-channel2" + 
port_channel_members: + - port_channel_interface_name: "GigabitEthernet1/0/13" + port_channel_mode: "DESIRABLE" + +# Test logical ports configuration with single PAGP port channel - comprehensive +test_logical_ports_create_single_pagp_comprehensive: + <<: *device_info + layer2_configuration: + logical_ports: + port_channel_auto: false + port_channel_load_balancing_method: "DST_MAC" + port_channels: + - port_channel_protocol: "PAGP" + port_channel_name: "Port-channel2" + port_channel_min_links: 3 + port_channel_members: + - port_channel_interface_name: "GigabitEthernet1/0/13" + port_channel_mode: "DESIRABLE" + port_channel_port_priority: 50 + port_channel_learn_method: "PHYSICAL_PORT" + - port_channel_interface_name: "GigabitEthernet1/0/14" + port_channel_mode: "AUTO" + port_channel_port_priority: 110 + port_channel_learn_method: "PHYSICAL_PORT" + - port_channel_interface_name: "GigabitEthernet1/0/15" + port_channel_mode: "AUTO" + port_channel_port_priority: 150 + port_channel_learn_method: "AGGREGATION_PORT" + +# Test logical ports configuration with single static port channel - minimal +test_logical_ports_create_single_static_minimal: + <<: *device_info + layer2_configuration: + logical_ports: + port_channel_auto: false + port_channels: + - port_channel_protocol: "NONE" + port_channel_name: "Port-channel3" + port_channel_members: + - port_channel_interface_name: "GigabitEthernet1/0/16" + +# Test logical ports configuration with single static port channel - comprehensive +test_logical_ports_create_single_static_comprehensive: + <<: *device_info + layer2_configuration: + logical_ports: + port_channel_auto: false + port_channels: + - port_channel_protocol: "NONE" + port_channel_name: "Port-channel3" + port_channel_min_links: 2 + port_channel_members: + - port_channel_interface_name: "GigabitEthernet1/0/16" + port_channel_mode: "ON" + - port_channel_interface_name: "GigabitEthernet1/1/1" + port_channel_mode: "ON" + +# Test logical ports configuration with multiple port channels - different protocols +test_logical_ports_create_multiple_different_protocols: + <<: *device_info + layer2_configuration: + logical_ports: + port_channel_auto: false + port_channel_lacp_system_priority: 8192 + port_channel_load_balancing_method: "SRC_MAC" + port_channels: + - port_channel_protocol: "LACP" + port_channel_name: "Port-channel4" + port_channel_min_links: 2 + port_channel_members: + - port_channel_interface_name: "GigabitEthernet1/0/19" + port_channel_mode: "ACTIVE" + port_channel_rate: 1 + - port_channel_interface_name: "GigabitEthernet1/0/20" + port_channel_mode: "ACTIVE" + port_channel_rate: 1 + - port_channel_protocol: "PAGP" + port_channel_name: "Port-channel5" + port_channel_min_links: 2 + port_channel_members: + - port_channel_interface_name: "GigabitEthernet1/0/21" + port_channel_mode: "DESIRABLE" + port_channel_learn_method: "AGGREGATION_PORT" + - port_channel_interface_name: "GigabitEthernet1/0/22" + port_channel_mode: "AUTO" + port_channel_learn_method: "PHYSICAL_PORT" + - port_channel_protocol: "NONE" + port_channel_name: "Port-channel6" + port_channel_min_links: 2 + port_channel_members: + - port_channel_interface_name: "GigabitEthernet1/0/23" + port_channel_mode: "ON" + - port_channel_interface_name: "GigabitEthernet1/0/24" + port_channel_mode: "ON" + +# Test logical ports configuration with all LACP modes +test_logical_ports_create_all_lacp_modes: + <<: *device_info + layer2_configuration: + logical_ports: + port_channel_auto: false + port_channels: + - port_channel_protocol: "LACP" + 
port_channel_name: "Port-channel7" + port_channel_members: + - port_channel_interface_name: "GigabitEthernet1/1/2" + port_channel_mode: "ACTIVE" + - port_channel_interface_name: "GigabitEthernet1/1/3" + port_channel_mode: "PASSIVE" + +# Test logical ports update - global parameters only +test_logical_ports_update_global_only: + <<: *device_info + layer2_configuration: + logical_ports: + port_channel_auto: false # Updated from previous + port_channel_lacp_system_priority: 8192 # Updated value + port_channel_load_balancing_method: "DST_MAC" # Updated method + +# Test logical ports update - change load balancing method only +test_logical_ports_update_load_balancing_only: + <<: *device_info + layer2_configuration: + logical_ports: + port_channel_load_balancing_method: "VLAN_DST_MIXED_IP_PORT" # Updated method only + +# Test logical ports update - change LACP system priority only +test_logical_ports_update_lacp_priority_only: + <<: *device_info + layer2_configuration: + logical_ports: + port_channel_lacp_system_priority: 1536 # Updated priority only diff --git a/tests/integration/ccc_wired_campus_automation_management/vars/vars_wca_test_feature_mld_snooping.yml b/tests/integration/ccc_wired_campus_automation_management/vars/vars_wca_test_feature_mld_snooping.yml new file mode 100644 index 0000000000..27a066376f --- /dev/null +++ b/tests/integration/ccc_wired_campus_automation_management/vars/vars_wca_test_feature_mld_snooping.yml @@ -0,0 +1,492 @@ +--- +# Common device information +device_info: &device_info + ip_address: "204.1.2.3" + device_collection_status_check: false + +################################################################################################################### +# Negative Validation Tests for MLD Snooping + +############################################# +# MLD Snooping Global Configuration Tests # +############################################# + +# Test invalid MLD snooping enabled - string instead of boolean +test_mld_global_enabled_string: + <<: *device_info + layer2_configuration: + mld_snooping: + enable_mld_snooping: "true" # Invalid: string instead of boolean + +# Test invalid MLD snooping enabled - integer instead of boolean +test_mld_global_enabled_integer: + <<: *device_info + layer2_configuration: + mld_snooping: + enable_mld_snooping: 1 # Invalid: integer instead of boolean + +# Test invalid MLD querier enabled - string instead of boolean +test_mld_global_querier_enabled_string: + <<: *device_info + layer2_configuration: + mld_snooping: + mld_snooping_querier: "false" # Invalid: string instead of boolean + +# Test invalid MLD querier enabled - integer instead of boolean +test_mld_global_querier_enabled_integer: + <<: *device_info + layer2_configuration: + mld_snooping: + mld_snooping_querier: 0 # Invalid: integer instead of boolean + +# Test invalid MLD querier address - not IPv6 format +test_mld_global_querier_address_invalid: + <<: *device_info + layer2_configuration: + mld_snooping: + mld_snooping_querier_address: "192.168.1.1" # Invalid: IPv4 instead of IPv6 + +# Test invalid MLD querier version - invalid choice +test_mld_global_querier_version_invalid: + <<: *device_info + layer2_configuration: + mld_snooping: + mld_snooping_querier_version: "VERSION_3" # Invalid: only VERSION_1 and VERSION_2 allowed + +# Test invalid MLD querier version - integer instead of string +test_mld_global_querier_version_integer: + <<: *device_info + layer2_configuration: + mld_snooping: + mld_snooping_querier_version: 2 # Invalid: integer instead of string + +# Test 
invalid MLD listener - string instead of boolean +test_mld_global_listener_string: + <<: *device_info + layer2_configuration: + mld_snooping: + mld_snooping_listener: "true" # Invalid: string instead of boolean + +# Test invalid MLD listener - integer instead of boolean +test_mld_global_listener_integer: + <<: *device_info + layer2_configuration: + mld_snooping: + mld_snooping_listener: 1 # Invalid: integer instead of boolean + +# Test invalid MLD query interval - string instead of integer +test_mld_global_query_interval_string: + <<: *device_info + layer2_configuration: + mld_snooping: + mld_snooping_querier_query_interval: "125" # Invalid: string instead of integer + +# Test invalid MLD query interval - boolean instead of integer +test_mld_global_query_interval_boolean: + <<: *device_info + layer2_configuration: + mld_snooping: + mld_snooping_querier_query_interval: true # Invalid: boolean instead of integer + +# Test invalid MLD query interval - below minimum value +test_mld_global_query_interval_min: + <<: *device_info + layer2_configuration: + mld_snooping: + mld_snooping_querier_query_interval: 0 # Invalid: below minimum of 1 + +# Test invalid MLD query interval - exceeds maximum value +test_mld_global_query_interval_max: + <<: *device_info + layer2_configuration: + mld_snooping: + mld_snooping_querier_query_interval: 18001 # Invalid: exceeds maximum of 18000 + +############################################# +# MLD Snooping VLAN Configuration Tests # +############################################# + +# Test invalid MLD VLAN ID - string instead of integer +test_mld_vlan_id_string: + <<: *device_info + layer2_configuration: + mld_snooping: + mld_snooping_vlans: + - mld_snooping_vlan_id: "98" # Invalid: string instead of integer + enable_mld_snooping: true + +# Test invalid MLD VLAN ID - negative value +test_mld_vlan_id_negative: + <<: *device_info + layer2_configuration: + mld_snooping: + mld_snooping_vlans: + - mld_snooping_vlan_id: -1 # Invalid: negative value + enable_mld_snooping: true + +# Test invalid MLD VLAN ID - exceeds maximum value +test_mld_vlan_id_max: + <<: *device_info + layer2_configuration: + mld_snooping: + mld_snooping_vlans: + - mld_snooping_vlan_id: 4095 # Invalid: exceeds maximum of 4094 + enable_mld_snooping: true + +# Test invalid MLD VLAN ID - zero value +test_mld_vlan_id_zero: + <<: *device_info + layer2_configuration: + mld_snooping: + mld_snooping_vlans: + - mld_snooping_vlan_id: 0 # Invalid: zero value + enable_mld_snooping: true + +# Test missing required MLD VLAN ID +test_mld_vlan_id_missing: + <<: *device_info + layer2_configuration: + mld_snooping: + mld_snooping_vlans: + - # mld_snooping_vlan_id is missing - should fail + enable_mld_snooping: true + +# Test invalid MLD VLAN enabled - string instead of boolean +test_mld_vlan_enabled_string: + <<: *device_info + layer2_configuration: + mld_snooping: + mld_snooping_vlans: + - mld_snooping_vlan_id: 98 + enable_mld_snooping: "true" # Invalid: string instead of boolean + +# Test invalid MLD VLAN enabled - integer instead of boolean +test_mld_vlan_enabled_integer: + <<: *device_info + layer2_configuration: + mld_snooping: + mld_snooping_vlans: + - mld_snooping_vlan_id: 99 + enable_mld_snooping: 1 # Invalid: integer instead of boolean + +# Test invalid MLD VLAN immediate leave - string instead of boolean +test_mld_vlan_immediate_leave_string: + <<: *device_info + layer2_configuration: + mld_snooping: + mld_snooping_vlans: + - mld_snooping_vlan_id: 202 + enable_mld_snooping: true + 
mld_snooping_enable_immediate_leave: "false" # Invalid: string instead of boolean + +# Test invalid MLD VLAN immediate leave - integer instead of boolean +test_mld_vlan_immediate_leave_integer: + <<: *device_info + layer2_configuration: + mld_snooping: + mld_snooping_vlans: + - mld_snooping_vlan_id: 400 + enable_mld_snooping: true + mld_snooping_enable_immediate_leave: 0 # Invalid: integer instead of boolean + +# Test invalid MLD VLAN querier enabled - string instead of boolean +test_mld_vlan_querier_enabled_string: + <<: *device_info + layer2_configuration: + mld_snooping: + mld_snooping_vlans: + - mld_snooping_vlan_id: 401 + enable_mld_snooping: true + mld_snooping_querier: "false" # Invalid: string instead of boolean + +# Test invalid MLD VLAN querier enabled - integer instead of boolean +test_mld_vlan_querier_enabled_integer: + <<: *device_info + layer2_configuration: + mld_snooping: + mld_snooping_vlans: + - mld_snooping_vlan_id: 402 + enable_mld_snooping: true + mld_snooping_querier: 0 # Invalid: integer instead of boolean + +# Test invalid MLD VLAN querier address - IPv4 instead of IPv6 +test_mld_vlan_querier_address_ipv4: + <<: *device_info + layer2_configuration: + mld_snooping: + mld_snooping_vlans: + - mld_snooping_vlan_id: 403 + enable_mld_snooping: true + mld_snooping_querier_address: "192.168.1.1" # Invalid: IPv4 instead of IPv6 + +# Test invalid MLD VLAN querier version - invalid choice +test_mld_vlan_querier_version_invalid: + <<: *device_info + layer2_configuration: + mld_snooping: + mld_snooping_vlans: + - mld_snooping_vlan_id: 99 + enable_mld_snooping: true + mld_snooping_querier_version: "VERSION_3" # Invalid: only VERSION_1 and VERSION_2 allowed + +# Test invalid MLD VLAN querier version - integer instead of string +test_mld_vlan_querier_version_integer: + <<: *device_info + layer2_configuration: + mld_snooping: + mld_snooping_vlans: + - mld_snooping_vlan_id: 202 + enable_mld_snooping: true + mld_snooping_querier_version: 1 # Invalid: integer instead of string + +# Test invalid MLD VLAN query interval - string instead of integer +test_mld_vlan_query_interval_string: + <<: *device_info + layer2_configuration: + mld_snooping: + mld_snooping_vlans: + - mld_snooping_vlan_id: 400 + enable_mld_snooping: true + mld_snooping_querier_query_interval: "125" # Invalid: string instead of integer + +# Test invalid MLD VLAN query interval - below minimum value +test_mld_vlan_query_interval_min: + <<: *device_info + layer2_configuration: + mld_snooping: + mld_snooping_vlans: + - mld_snooping_vlan_id: 401 + enable_mld_snooping: true + mld_snooping_querier_query_interval: 0 # Invalid: below minimum of 1 + +# Test invalid MLD VLAN query interval - exceeds maximum value +test_mld_vlan_query_interval_max: + <<: *device_info + layer2_configuration: + mld_snooping: + mld_snooping_vlans: + - mld_snooping_vlan_id: 402 + enable_mld_snooping: true + mld_snooping_querier_query_interval: 18001 # Invalid: exceeds maximum of 18000 + +# Test invalid MLD VLAN mrouter port list - string instead of list +test_mld_vlan_mrouter_port_list_string: + <<: *device_info + layer2_configuration: + mld_snooping: + mld_snooping_vlans: + - mld_snooping_vlan_id: 403 + enable_mld_snooping: true + mld_snooping_mrouter_port_list: "GigabitEthernet1/0/1" # Invalid: string instead of list + +############################################# +# Data Type and Structure Tests # +############################################# + +# Test invalid MLD snooping configuration - string instead of dictionary +test_mld_invalid_dict_type: + 
<<: *device_info + layer2_configuration: + mld_snooping: "invalid_mld_string" # Invalid: string instead of dictionary + +# Test invalid MLD snooping configuration - list instead of dictionary +test_mld_invalid_list_type: + <<: *device_info + layer2_configuration: + mld_snooping: ["invalid", "mld", "list"] # Invalid: list instead of dictionary + +# Test invalid MLD VLANs - string instead of list +test_mld_vlans_invalid_string: + <<: *device_info + layer2_configuration: + mld_snooping: + mld_snooping_vlans: "invalid_vlans_string" # Invalid: string instead of list + +# Test invalid MLD VLANs - dictionary instead of list +test_mld_vlans_invalid_dict: + <<: *device_info + layer2_configuration: + mld_snooping: + mld_snooping_vlans: { "invalid": "dict" } # Invalid: dictionary instead of list + +# Test invalid MLD VLAN item - string instead of dictionary +test_mld_vlan_invalid_dict_type: + <<: *device_info + layer2_configuration: + mld_snooping: + mld_snooping_vlans: + - "invalid_vlan_string" # Invalid: string instead of dictionary + +################################################################################################################### +# Positive Test Cases for MLD Snooping + +############################################# +# POSITIVE TEST CASES - CREATE # +############################################# + +# Test MLD snooping configuration with global parameters only +test_mld_create_global_only: + <<: *device_info + layer2_configuration: + mld_snooping: + enable_mld_snooping: true + mld_snooping_querier: false + mld_snooping_querier_address: "fe80::1" + mld_snooping_querier_version: "VERSION_2" + mld_snooping_listener: true + mld_snooping_querier_query_interval: 125 + mld_snooping_vlans: + - mld_snooping_vlan_id: 300 + mld_snooping_querier_address: "fe80::1" + + +# Test MLD snooping configuration with VLANs only +test_mld_create_vlans_only: + <<: *device_info + layer2_configuration: + mld_snooping: + mld_snooping_vlans: + - mld_snooping_vlan_id: 98 + enable_mld_snooping: true + mld_snooping_enable_immediate_leave: false + mld_snooping_querier: false + mld_snooping_querier_address: "fe80::403" + mld_snooping_querier_version: "VERSION_2" + mld_snooping_querier_query_interval: 125 + mld_snooping_mrouter_port_list: + ["GigabitEthernet1/0/1", "GigabitEthernet1/0/2"] + +# Test MLD snooping configuration with single VLAN +test_mld_create_single_vlan: + <<: *device_info + layer2_configuration: + mld_snooping: + mld_snooping_vlans: + - mld_snooping_vlan_id: 99 + enable_mld_snooping: true + +# Test MLD snooping configuration with global and VLANs +test_mld_create_global_and_vlans: + <<: *device_info + layer2_configuration: + mld_snooping: + enable_mld_snooping: true + mld_snooping_querier: true + mld_snooping_querier_address: "fe80::1" + mld_snooping_querier_version: "VERSION_1" + mld_snooping_listener: true + mld_snooping_querier_query_interval: 60 + mld_snooping_vlans: + - mld_snooping_vlan_id: 202 + enable_mld_snooping: true + mld_snooping_enable_immediate_leave: false + mld_snooping_querier: false + mld_snooping_querier_address: "fe80::202" + mld_snooping_querier_version: "VERSION_2" + mld_snooping_querier_query_interval: 125 + mld_snooping_mrouter_port_list: ["GigabitEthernet1/0/1"] + - mld_snooping_vlan_id: 400 + enable_mld_snooping: true + mld_snooping_enable_immediate_leave: true + mld_snooping_querier: true + mld_snooping_querier_address: "fe80::400" + mld_snooping_querier_version: "VERSION_1" + mld_snooping_querier_query_interval: 90 + mld_snooping_mrouter_port_list: + 
["GigabitEthernet1/0/3", "GigabitEthernet1/0/4"] + +# Test MLD snooping configuration with minimal VLAN parameters +test_mld_create_minimal_vlan: + <<: *device_info + layer2_configuration: + mld_snooping: + mld_snooping_vlans: + - mld_snooping_vlan_id: 401 + enable_mld_snooping: false + +# Test MLD snooping configuration with multiple VLANs - different settings +test_mld_create_multiple_vlans_different_settings: + <<: *device_info + layer2_configuration: + mld_snooping: + mld_snooping_vlans: + - mld_snooping_vlan_id: 401 + enable_mld_snooping: true + mld_snooping_enable_immediate_leave: false + mld_snooping_querier: false + mld_snooping_querier_query_interval: 60 + - mld_snooping_vlan_id: 402 + enable_mld_snooping: true + mld_snooping_enable_immediate_leave: true + mld_snooping_querier: true + mld_snooping_querier_query_interval: 125 + - mld_snooping_vlan_id: 403 + enable_mld_snooping: false + mld_snooping_enable_immediate_leave: false + mld_snooping_querier: false + mld_snooping_querier_query_interval: 250 + +############################################# +# POSITIVE TEST CASES - UPDATE # +############################################# + +# Test MLD snooping update - global parameters only +test_mld_update_global_only: + <<: *device_info + layer2_configuration: + mld_snooping: + enable_mld_snooping: true + mld_snooping_querier: true + mld_snooping_querier_address: "fe80::98" + mld_snooping_querier_version: "VERSION_1" + mld_snooping_listener: false + mld_snooping_querier_query_interval: 90 + +# Test MLD snooping update - modify existing VLANs +test_mld_update_modify_vlans: + <<: *device_info + layer2_configuration: + mld_snooping: + mld_snooping_vlans: + - mld_snooping_vlan_id: 202 + enable_mld_snooping: false # Changed from true + mld_snooping_enable_immediate_leave: true # Changed from false + mld_snooping_querier: true # Changed from false + mld_snooping_querier_query_interval: 180 # Changed from 125 + - mld_snooping_vlan_id: 400 + enable_mld_snooping: true + mld_snooping_enable_immediate_leave: false # Changed from true + mld_snooping_querier: false # Changed from true + mld_snooping_querier_query_interval: 240 # Changed from 90 + +# Test MLD snooping update - global and VLANs together +test_mld_update_global_and_vlans: + <<: *device_info + layer2_configuration: + mld_snooping: + enable_mld_snooping: true + mld_snooping_querier: false # Updated global querier + mld_snooping_querier_query_interval: 180 # Updated global query interval + mld_snooping_vlans: + - mld_snooping_vlan_id: 401 + enable_mld_snooping: true + mld_snooping_enable_immediate_leave: false + mld_snooping_querier: false + mld_snooping_querier_query_interval: 240 + - mld_snooping_vlan_id: 402 # Updated VLAN + enable_mld_snooping: true + mld_snooping_enable_immediate_leave: true + mld_snooping_querier: true + mld_snooping_querier_query_interval: 150 + +# Test MLD snooping update - modify single VLAN +test_mld_update_single_vlan: + <<: *device_info + layer2_configuration: + mld_snooping: + mld_snooping_vlans: + - mld_snooping_vlan_id: 403 + enable_mld_snooping: true + mld_snooping_enable_immediate_leave: true + mld_snooping_querier: true + mld_snooping_querier_query_interval: 300 diff --git a/tests/integration/ccc_wired_campus_automation_management/vars/vars_wca_test_feature_port_configuration.yml b/tests/integration/ccc_wired_campus_automation_management/vars/vars_wca_test_feature_port_configuration.yml new file mode 100644 index 0000000000..a3617e3d56 --- /dev/null +++ 
b/tests/integration/ccc_wired_campus_automation_management/vars/vars_wca_test_feature_port_configuration.yml @@ -0,0 +1,1766 @@ +# --- +# # Common device information +# device_info: &device_info +# ip_address: "204.1.2.3" +# device_collection_status_check: false + +# ################################################################################################################### +# # Negative Validation Tests for Port Configuration + +# ############################################# +# # Port Configuration Structure Tests # +# ############################################# + +# # Test invalid port configuration - string instead of list +# test_port_config_invalid_list_type: +# <<: *device_info +# layer2_configuration: +# port_configuration: "invalid_string" # Invalid: string instead of list + +# # Test invalid port configuration - dictionary instead of list +# test_port_config_invalid_dict_type: +# <<: *device_info +# layer2_configuration: +# port_configuration: {"invalid": "dict"} # Invalid: dictionary instead of list + +# # Test invalid port configuration item - string instead of dictionary +# test_port_config_item_invalid_dict_type: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - "invalid_item_string" # Invalid: string instead of dictionary + +# # Test missing interface_name - required field +# test_port_config_missing_interface_name: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - # interface_name is missing - should fail +# switchport_interface_config: +# switchport_description: "Test Port" + +# # Test invalid interface_name - integer instead of string +# test_port_config_interface_name_integer: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: 123 # Invalid: integer instead of string +# switchport_interface_config: +# switchport_description: "Test Port" + +# # Test invalid interface_name - empty string +# test_port_config_interface_name_empty: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "" # Invalid: empty string +# switchport_interface_config: +# switchport_description: "Test Port" + +# ############################################# +# # Switchport Interface Config Tests # +# ############################################# + +# # Test invalid switchport_interface_config - string instead of dict +# test_switchport_config_invalid_dict_type: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# switchport_interface_config: "invalid_string" # Invalid: string instead of dict + +# # Test invalid switchport_description - integer instead of string +# test_switchport_description_integer: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# switchport_interface_config: +# switchport_description: 123 # Invalid: integer instead of string + +# # Test invalid switchport_description - exceeds maximum length +# test_switchport_description_max_length: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# switchport_interface_config: +# switchport_description: "{{ 'x' * 231 }}" # Invalid: exceeds 230 chars + +# # Test invalid switchport_mode - invalid choice +# test_switchport_mode_invalid: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# switchport_interface_config: +# switchport_mode: "INVALID_MODE" # Invalid: not in choices 
+ +# # Test invalid switchport_mode - integer instead of string +# test_switchport_mode_integer: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# switchport_interface_config: +# switchport_mode: 1 # Invalid: integer instead of string + +# # Test invalid access_vlan - string instead of integer +# test_access_vlan_string: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# switchport_interface_config: +# access_vlan: "100" # Invalid: string instead of integer + +# # Test invalid access_vlan - negative value +# test_access_vlan_negative: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# switchport_interface_config: +# access_vlan: -1 # Invalid: negative value + +# # Test invalid access_vlan - exceeds maximum value +# test_access_vlan_max: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# switchport_interface_config: +# access_vlan: 4095 # Invalid: exceeds maximum of 4094 + +# # Test invalid access_vlan - zero value +# test_access_vlan_zero: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# switchport_interface_config: +# access_vlan: 0 # Invalid: zero value + +# # Test invalid admin_status - string instead of boolean +# test_admin_status_string: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# switchport_interface_config: +# admin_status: "true" # Invalid: string instead of boolean + +# # Test invalid admin_status - integer instead of boolean +# test_admin_status_integer: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# switchport_interface_config: +# admin_status: 1 # Invalid: integer instead of boolean + +# # Test invalid voice_vlan - string instead of integer +# test_voice_vlan_string: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# switchport_interface_config: +# voice_vlan: "200" # Invalid: string instead of integer + +# # Test invalid voice_vlan - negative value +# test_voice_vlan_negative: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# switchport_interface_config: +# voice_vlan: -1 # Invalid: negative value + +# # Test invalid voice_vlan - exceeds maximum value +# test_voice_vlan_max: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# switchport_interface_config: +# voice_vlan: 4095 # Invalid: exceeds maximum of 4094 + +# # Test invalid voice_vlan - zero value +# test_voice_vlan_zero: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# switchport_interface_config: +# voice_vlan: 0 # Invalid: zero value (outside range 1-4094) + +# # Test invalid allowed_vlans - string instead of list +# test_allowed_vlans_string: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# switchport_interface_config: +# allowed_vlans: "100,200,300" # Invalid: string instead of list + +# # Test invalid allowed_vlans - non-integer elements +# test_allowed_vlans_non_integer: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - 
interface_name: "GigabitEthernet1/0/1" +# switchport_interface_config: +# allowed_vlans: ["100", "200", "300"] # Invalid: strings instead of integers + +# # Test invalid allowed_vlans - out of range values +# test_allowed_vlans_out_of_range: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# switchport_interface_config: +# allowed_vlans: [0, 100, 4095] # Invalid: 0 and 4095 out of range + +# # Test invalid native_vlan_id - string instead of integer +# test_native_vlan_id_string: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# switchport_interface_config: +# native_vlan_id: "1" # Invalid: string instead of integer + +# # Test invalid native_vlan_id - negative value +# test_native_vlan_id_negative: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# switchport_interface_config: +# native_vlan_id: -1 # Invalid: negative value + +# # Test invalid native_vlan_id - exceeds maximum value +# test_native_vlan_id_max: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# switchport_interface_config: +# native_vlan_id: 4095 # Invalid: exceeds maximum of 4094 + +# # Test invalid native_vlan_id - zero value +# test_native_vlan_id_zero: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# switchport_interface_config: +# native_vlan_id: 0 # Invalid: zero value (outside range 1-4094) + +# ############################################# +# # VLAN Trunking Interface Config Tests # +# ############################################# + +# # Test invalid vlan_trunking_interface_config - string instead of dict +# test_vlan_trunking_config_invalid_dict_type: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# vlan_trunking_interface_config: "invalid_string" # Invalid: string instead of dict + +# # Test invalid dtp_negotiation - invalid choice +# test_dtp_negotiation_invalid: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# vlan_trunking_interface_config: +# enable_dtp_negotiation: "INVALID_MODE" # Invalid: not in choices + +# # Test invalid dtp_negotiation - integer instead of string +# test_dtp_negotiation_integer: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# vlan_trunking_interface_config: +# enable_dtp_negotiation: 1 # Invalid: integer instead of string + +# # Test invalid protected - string instead of boolean +# test_protected_string: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# vlan_trunking_interface_config: +# protected: "true" # Invalid: string instead of boolean + +# # Test invalid protected - integer instead of boolean +# test_protected_integer: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# vlan_trunking_interface_config: +# protected: 1 # Invalid: integer instead of boolean + +# # Test invalid pruning_vlan_ids - string instead of list +# test_pruning_vlan_ids_string: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# vlan_trunking_interface_config: +# pruning_vlan_ids: "100,200" # Invalid: string 
instead of list + +# # Test invalid pruning_vlan_ids - out of range values +# test_pruning_vlan_ids_out_of_range: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# vlan_trunking_interface_config: +# pruning_vlan_ids: [0, 100, 4095] # Invalid: 0 and 4095 out of range + +# ############################################# +# # 802.1X Interface Config Tests # +# ############################################# + +# # Test invalid dot1x_interface_config - string instead of dict +# test_dot1x_config_invalid_dict_type: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# dot1x_interface_config: "invalid_string" # Invalid: string instead of dict + +# # Test invalid authentication_order - string instead of list +# test_authentication_order_string: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# dot1x_interface_config: +# dot1x_interface_authentication_order: "DOT1X" # Invalid: string instead of list + +# # Test invalid authentication_order - invalid choice in list +# test_authentication_order_invalid_choice: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# dot1x_interface_config: +# dot1x_interface_authentication_order: ["DOT1X", "INVALID_METHOD"] # Invalid: INVALID_METHOD not in choices + +# # Test invalid authentication_order - non-string elements +# test_authentication_order_non_string: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# dot1x_interface_config: +# dot1x_interface_authentication_order: [1, 2, 3] # Invalid: integers instead of strings + +# # Test invalid authentication_order - exceeds maximum items +# test_authentication_order_max_items: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# dot1x_interface_config: +# dot1x_interface_authentication_order: ["DOT1X", "MAB", "WEBAUTH", "EXTRA"] # Invalid: exceeds max of 3 + +# # Invalid authentication mode choice +# test_invalid_authentication_mode: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# dot1x_interface_config: +# dot1x_interface_authentication_mode: "INVALID_MODE" # Should be OPEN or CLOSED + +# # Authentication mode as non-string +# test_authentication_mode_non_string: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# dot1x_interface_config: +# dot1x_interface_authentication_mode: 123 # Should be string + +# # Invalid PAE type choice +# test_invalid_pae_type: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# dot1x_interface_config: +# dot1x_interface_pae_type: "INVALID_PAE" # Should be NONE, AUTHENTICATOR, SUPPLICANT, BOTH + +# # Invalid control direction choice +# test_invalid_control_direction: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# dot1x_interface_config: +# dot1x_interface_control_direction: "INVALID_DIR" # Should be IN or BOTH + +# # Invalid host mode choice +# test_invalid_host_mode: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# dot1x_interface_config: +# dot1x_interface_host_mode: "INVALID_HOST" # Should be 
MULTI_AUTHENTICATION, MULTI_HOST, SINGLE_HOST, MULTI_DOMAIN + +# # Invalid port control choice +# test_invalid_port_control: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# dot1x_interface_config: +# dot1x_interface_port_control: "INVALID_CONTROL" # Should be AUTO, FORCE_AUTHORIZED, FORCE_UNAUTHORIZED + +# # Invalid inactivity timer - below range +# test_inactivity_timer_below_range: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# dot1x_interface_config: +# dot1x_interface_inactivity_timer: -1 # Should be 0-65535 + +# # Invalid inactivity timer - above range +# test_inactivity_timer_above_range: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# dot1x_interface_config: +# dot1x_interface_inactivity_timer: 70000 # Should be 0-65535 + +# # Invalid max reauth requests - below range +# test_max_reauth_below_range: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# dot1x_interface_config: +# dot1x_interface_max_reauth_requests: 0 # Should be 1-10 + +# # Invalid reauth timer - below range +# test_reauth_timer_below_range: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# dot1x_interface_config: +# dot1x_interface_reauth_timer: 0 # Should be 1-1073741823 + +# # Invalid tx period - below range +# test_tx_period_below_range: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# dot1x_interface_config: +# dot1x_interface_tx_period: 0 # Should be 1-65535 + +# # Boolean parameters as non-boolean +# test_boolean_as_string: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# dot1x_interface_config: +# dot1x_interface_inactivity_timer_from_server: "true" # Should be boolean + +# test_boolean_as_integer: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# dot1x_interface_config: +# dot1x_interface_reauthentication: 1 # Should be boolean + +# # Invalid priority - exceeds max items +# test_priority_max_items: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# dot1x_interface_config: +# dot1x_interface_priority: ["DOT1X", "MAB", "WEBAUTH", "EXTRA"] # Should be max 3 + +# # Invalid priority - invalid choice +# test_priority_invalid_choice: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# dot1x_interface_config: +# dot1x_interface_priority: ["DOT1X", "INVALID_PRIORITY"] # Invalid choice + + +# ############################################# +# # MAB Interface Config Tests # +# ############################################# + +# # Test invalid mab_interface_config - string instead of dict +# test_mab_config_invalid_dict_type: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# mab_interface_config: "invalid_string" # Invalid: string instead of dict + +# # Test invalid enable_mab - string instead of boolean +# test_enable_mab_string: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# mab_interface_config: +# enable_mab: "true" # Invalid: string instead of 
boolean + +# # Test invalid enable_mab - integer instead of boolean +# test_enable_mab_integer: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# mab_interface_config: +# enable_mab: 1 # Invalid: integer instead of boolean + +# ############################################# +# # STP Interface Config Tests # +# ############################################# + +# # Test invalid stp_interface_config - string instead of dict +# test_stp_interface_config_invalid_dict_type: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# stp_interface_config: "invalid_string" # Invalid: string instead of dict + +# # Test invalid stp_interface_portfast_mode - invalid choice +# test_stp_interface_portfast_mode_invalid: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# stp_interface_config: +# stp_interface_portfast_mode: "INVALID_MODE" # Invalid: not in choices + +# # Test invalid stp_interface_portfast_mode - integer instead of string +# test_stp_interface_portfast_mode_integer: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# stp_interface_config: +# stp_interface_portfast_mode: 1 # Invalid: integer instead of string + +# # Test invalid stp_interface_bpdu_filter - string instead of boolean +# test_stp_interface_bpdu_filter_string: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# stp_interface_config: +# stp_interface_bpdu_filter: "true" # Invalid: string instead of boolean + +# # Test invalid stp_interface_bpdu_guard - string instead of boolean +# test_stp_interface_bpdu_guard_string: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# stp_interface_config: +# stp_interface_bpdu_guard: "false" # Invalid: string instead of boolean + +# # Test invalid stp_interface_cost - string instead of integer +# test_stp_interface_cost_string: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# stp_interface_config: +# stp_interface_cost: "100" # Invalid: string instead of integer + +# # Test invalid stp_interface_cost - negative value +# test_stp_interface_cost_negative: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# stp_interface_config: +# stp_interface_cost: -1 # Invalid: negative value + +# # Test invalid stp_interface_cost - exceeds maximum value +# test_stp_interface_cost_max: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# stp_interface_config: +# stp_interface_cost: 20000001 # Invalid: exceeds maximum of 20000000 + +# # Test invalid stp_interface_guard - invalid choice +# test_stp_interface_guard_invalid: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# stp_interface_config: +# stp_interface_guard: "INVALID_GUARD" # Invalid: not in choices + +# # Test invalid stp_interface_priority - string instead of integer +# test_stp_interface_priority_string: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# stp_interface_config: +# stp_interface_priority: "128" # Invalid: string instead of integer + +# # Test invalid 
stp_interface_priority - negative value +# test_stp_interface_priority_negative: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# stp_interface_config: +# stp_interface_priority: -1 # Invalid: negative value + +# # Test invalid stp_interface_priority - exceeds maximum value +# test_stp_interface_priority_max: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# stp_interface_config: +# stp_interface_priority: 241 # Invalid: exceeds maximum of 240 + +# # Test invalid stp_interface_priority - not multiple of 16 +# test_stp_interface_priority_not_multiple: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# stp_interface_config: +# stp_interface_priority: 129 # Invalid: not multiple of 16 + +# # Test invalid stp_interface_per_vlan_cost - string instead of dict +# test_stp_interface_per_vlan_cost_invalid_dict: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# stp_interface_config: +# stp_interface_per_vlan_cost: "invalid_string" # Invalid: string instead of dict + +# # Test invalid stp_interface_per_vlan_cost priority - string instead of integer +# test_stp_interface_per_vlan_cost_priority_string: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# stp_interface_config: +# stp_interface_per_vlan_cost: +# priority: "100" # Invalid: string instead of integer +# vlan_ids: [100, 200] + +# # Test invalid stp_interface_per_vlan_cost vlan_ids - string instead of list +# test_stp_interface_per_vlan_cost_vlan_ids_string: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# stp_interface_config: +# stp_interface_per_vlan_cost: +# priority: 100 +# vlan_ids: "100,200" # Invalid: string instead of list + +# # Test invalid stp_interface_per_vlan_priority - string instead of dict +# test_stp_interface_per_vlan_priority_invalid_dict: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# stp_interface_config: +# stp_interface_per_vlan_priority: "invalid_string" # Invalid: string instead of dict + +# ############################################# +# # DHCP Snooping Interface Config Tests # +# ############################################# + +# # Test invalid dhcp_snooping_interface_config - string instead of dict +# test_dhcp_snooping_interface_config_invalid_dict_type: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# dhcp_snooping_interface_config: "invalid_string" # Invalid: string instead of dict + +# # Test invalid dhcp_snooping_interface_rate - string instead of integer +# test_dhcp_snooping_interface_rate_string: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# dhcp_snooping_interface_config: +# dhcp_snooping_interface_rate: "100" # Invalid: string instead of integer + +# # Test invalid dhcp_snooping_interface_rate - negative value +# test_dhcp_snooping_interface_rate_negative: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# dhcp_snooping_interface_config: +# dhcp_snooping_interface_rate: -1 # Invalid: negative value + +# # Test invalid dhcp_snooping_interface_rate - exceeds 
maximum value +# test_dhcp_snooping_interface_rate_max: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# dhcp_snooping_interface_config: +# dhcp_snooping_interface_rate: 2049 # Invalid: exceeds maximum of 2048 + +# # Test invalid dhcp_snooping_interface_trust - string instead of boolean +# test_dhcp_snooping_interface_trust_string: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# dhcp_snooping_interface_config: +# dhcp_snooping_interface_trust: "true" # Invalid: string instead of boolean + +# ############################################# +# # CDP Interface Config Tests # +# ############################################# + +# # Test invalid cdp_interface_config - string instead of dict +# test_cdp_interface_config_invalid_dict_type: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# cdp_interface_config: "invalid_string" # Invalid: string instead of dict + +# # Test invalid cdp_interface_admin_status - string instead of boolean +# test_cdp_interface_admin_status_string: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# cdp_interface_config: +# cdp_interface_admin_status: "true" # Invalid: string instead of boolean + +# # Test invalid cdp_interface_log_duplex_mismatch - string instead of boolean +# test_cdp_interface_log_duplex_mismatch_string: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# cdp_interface_config: +# cdp_interface_log_duplex_mismatch: "false" # Invalid: string instead of boolean + +# ############################################# +# # LLDP Interface Config Tests # +# ############################################# + +# # Test invalid lldp_interface_config - string instead of dict +# test_lldp_interface_config_invalid_dict_type: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# lldp_interface_config: "invalid_string" # Invalid: string instead of dict + +# # Test invalid lldp_interface_receive_transmit - invalid choice +# test_lldp_interface_receive_transmit_invalid: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# lldp_interface_config: +# lldp_interface_receive_transmit: "INVALID_MODE" # Invalid: not in choices + +# # Test invalid lldp_interface_receive_transmit - integer instead of string +# test_lldp_interface_receive_transmit_integer: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# lldp_interface_config: +# lldp_interface_receive_transmit: 1 # Invalid: integer instead of string + +# ############################################# +# # VTP Interface Config Tests # +# ############################################# + +# # Test invalid vtp_interface_config - string instead of dict +# test_vtp_interface_config_invalid_dict_type: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# vtp_interface_config: "invalid_string" # Invalid: string instead of dict + +# # Test invalid vtp_interface_admin_status - string instead of boolean +# test_vtp_interface_admin_status_string: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/1" +# vtp_interface_config: +# 
vtp_interface_admin_status: "true" # Invalid: string instead of boolean + +# ################################################################################################################### +# # Positive Validation Tests for Port Configuration + +# ############################################# +# # POSITIVE TEST CASES - CREATE # +# ############################################# + +# # Test port configuration CREATE - minimal switchport only +# test_port_config_create_minimal_switchport: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/1/1" +# switchport_interface_config: +# switchport_mode: "TRUNK" +# - interface_name: "GigabitEthernet1/1/2" +# switchport_interface_config: +# switchport_mode: "TRUNK" +# - interface_name: "GigabitEthernet1/1/3" +# switchport_interface_config: +# switchport_mode: "TRUNK" +# - interface_name: "GigabitEthernet1/0/10" +# switchport_interface_config: +# switchport_mode: "TRUNK" +# - interface_name: "GigabitEthernet1/0/11" +# switchport_interface_config: +# switchport_mode: "TRUNK" +# - interface_name: "GigabitEthernet1/0/12" +# switchport_interface_config: +# switchport_mode: "TRUNK" +# - interface_name: "GigabitEthernet1/0/13" +# switchport_interface_config: +# switchport_mode: "TRUNK" +# - interface_name: "GigabitEthernet1/0/14" +# switchport_interface_config: +# switchport_mode: "TRUNK" +# - interface_name: "GigabitEthernet1/0/15" +# switchport_interface_config: +# switchport_mode: "TRUNK" +# - interface_name: "GigabitEthernet1/0/11" +# switchport_interface_config: +# switchport_mode: "TRUNK" +# - interface_name: "GigabitEthernet1/0/11" +# switchport_interface_config: +# switchport_mode: "TRUNK" +# - interface_name: "GigabitEthernet1/0/11" +# switchport_interface_config: +# switchport_mode: "TRUNK" +# - interface_name: "GigabitEthernet1/0/11" +# switchport_interface_config: +# switchport_mode: "TRUNK" +# - interface_name: "GigabitEthernet1/0/11" +# switchport_interface_config: +# switchport_mode: "TRUNK" +# - interface_name: "GigabitEthernet1/0/11" +# switchport_interface_config: +# switchport_mode: "TRUNK" +# - interface_name: "GigabitEthernet1/0/11" +# switchport_interface_config: +# switchport_mode: "TRUNK" +# # Test port configuration CREATE - comprehensive switchport access +# test_port_config_create_comprehensive_switchport_access: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/1/2" +# switchport_interface_config: +# switchport_description: "Comprehensive Access Port for Sales Department" +# switchport_mode: "TRUNK" +# access_vlan: 98 +# voice_vlan: 99 +# admin_status: true + +# # Test port configuration CREATE - comprehensive switchport trunk +# test_port_config_create_comprehensive_switchport_trunk: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/1/3" +# switchport_interface_config: +# switchport_description: "Comprehensive Trunk Port for Inter-Switch Link" +# switchport_mode: "TRUNK" +# allowed_vlans: [98, 99, 400, 402, 202] +# native_vlan_id: 98 +# admin_status: true + +# # Test port configuration CREATE - switchport with vlan trunking +# test_port_config_create_switchport_with_trunking: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/10" +# switchport_interface_config: +# switchport_description: "Trunk Port with DTP Configuration" +# switchport_mode: "TRUNK" +# allowed_vlans: [98, 99, 400] +# native_vlan_id: 1 +# 
admin_status: true +# vlan_trunking_interface_config: +# enable_dtp_negotiation: false +# protected: false +# pruning_vlan_ids: [98, 99] + +# # Test port configuration CREATE - access port with 802.1X +# test_port_config_create_access_with_dot1x: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/11" +# switchport_interface_config: +# switchport_description: "Access Port with 802.1X Authentication" +# switchport_mode: "ACCESS" +# access_vlan: 400 +# admin_status: true +# dot1x_interface_config: +# dot1x_interface_authentication_order: ["DOT1X", "MAB"] + +# # Test port configuration CREATE - access port with MAB +# test_port_config_create_access_with_mab: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/12" +# switchport_interface_config: +# switchport_description: "Access Port with MAB Authentication" +# switchport_mode: "ACCESS" +# access_vlan: 402 +# admin_status: true +# mab_interface_config: +# enable_mab: true + +# # Test port configuration CREATE - access port with STP configuration +# test_port_config_create_access_with_stp: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/13" +# switchport_interface_config: +# switchport_description: "Access Port with STP Configuration" +# switchport_mode: "ACCESS" +# access_vlan: 202 +# admin_status: true +# stp_interface_config: +# stp_interface_portfast_mode: "EDGE" +# stp_interface_bpdu_guard: true +# stp_interface_bpdu_filter: false +# stp_interface_cost: 19 +# stp_interface_priority: 128 + +# # Test port configuration CREATE - access port with DHCP snooping +# test_port_config_create_access_with_dhcp_snooping: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/14" +# switchport_interface_config: +# switchport_description: "Access Port with DHCP Snooping" +# switchport_mode: "ACCESS" +# access_vlan: 401 +# admin_status: true +# dhcp_snooping_interface_config: +# dhcp_snooping_interface_rate: 50 +# dhcp_snooping_interface_trust: false + +# # Test port configuration CREATE - access port with CDP and LLDP +# test_port_config_create_access_with_cdp_lldp: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/15" +# switchport_interface_config: +# switchport_description: "Access Port with CDP and LLDP" +# switchport_mode: "ACCESS" +# access_vlan: 98 +# admin_status: true +# cdp_interface_config: +# cdp_interface_admin_status: true +# cdp_interface_log_duplex_mismatch: true +# lldp_interface_config: +# lldp_interface_receive_transmit: "TRANSMIT_AND_RECEIVE" + +# # Test port configuration CREATE - comprehensive all features single port +# test_port_config_create_comprehensive_all_features: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/16" +# switchport_interface_config: +# switchport_description: "Comprehensive Port with All Features" +# switchport_mode: "ACCESS" +# access_vlan: 99 +# voice_vlan: 400 +# admin_status: true +# vlan_trunking_interface_config: +# protected: true +# dot1x_interface_config: +# dot1x_interface_authentication_order: ["DOT1X", "MAB", "WEBAUTH"] +# mab_interface_config: +# enable_mab: true +# stp_interface_config: +# stp_interface_portfast_mode: "EDGE" +# stp_interface_bpdu_guard: true +# stp_interface_bpdu_filter: false +# stp_interface_cost: 100 +# stp_interface_guard: "ROOT" +# 
stp_interface_priority: 64 +# dhcp_snooping_interface_config: +# dhcp_snooping_interface_rate: 100 +# dhcp_snooping_interface_trust: false +# cdp_interface_config: +# cdp_interface_admin_status: true +# cdp_interface_log_duplex_mismatch: true +# lldp_interface_config: +# lldp_interface_receive_transmit: "TRANSMIT_AND_RECEIVE" +# vtp_interface_config: +# vtp_interface_admin_status: true + +# # Test port configuration CREATE - multiple ports different configurations +# test_port_config_create_multiple_ports_different: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/17" +# switchport_interface_config: +# switchport_description: "Access Port 1" +# switchport_mode: "ACCESS" +# access_vlan: 402 +# admin_status: true +# - interface_name: "GigabitEthernet1/0/18" +# switchport_interface_config: +# switchport_description: "Trunk Port 1" +# switchport_mode: "TRUNK" +# allowed_vlans: [98, 99, 202] +# native_vlan_id: 1 +# admin_status: true +# - interface_name: "GigabitEthernet1/0/19" +# switchport_interface_config: +# switchport_description: "Voice Port 1" +# switchport_mode: "ACCESS" +# access_vlan: 202 +# voice_vlan: 401 +# admin_status: true +# dot1x_interface_config: +# dot1x_interface_authentication_order: ["DOT1X"] + +# # Test port configuration CREATE - multiple ports same configuration +# test_port_config_create_multiple_ports_same: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/20" +# switchport_interface_config: +# switchport_description: "Standard Access Port 1" +# switchport_mode: "ACCESS" +# access_vlan: 400 +# admin_status: true +# stp_interface_config: +# stp_interface_portfast_mode: "EDGE" +# stp_interface_bpdu_guard: true +# - interface_name: "GigabitEthernet1/0/21" +# switchport_interface_config: +# switchport_description: "Standard Access Port 2" +# switchport_mode: "ACCESS" +# access_vlan: 400 +# admin_status: true +# stp_interface_config: +# stp_interface_portfast_mode: "EDGE" +# stp_interface_bpdu_guard: true + +# # Test port configuration CREATE - dynamic mode configurations +# test_port_config_create_dynamic_modes: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/22" +# switchport_interface_config: +# switchport_description: "Dynamic Auto Port" +# switchport_mode: "DYNAMIC_AUTO" +# admin_status: true +# - interface_name: "GigabitEthernet1/0/23" +# switchport_interface_config: +# switchport_description: "Dynamic Desirable Port" +# switchport_mode: "DYNAMIC_DESIRABLE" +# admin_status: true + +# ############################################# +# # POSITIVE TEST CASES - UPDATE # +# ############################################# + +# # Test port configuration UPDATE - modify switchport description +# test_port_config_update_description: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/1/1" +# switchport_interface_config: +# switchport_description: "Updated Minimal Access Port Description" + +# # Test port configuration UPDATE - change access VLAN +# test_port_config_update_access_vlan: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/1/2" +# switchport_interface_config: +# access_vlan: 402 # Changed from 98 + +# # Test port configuration UPDATE - change switchport mode from access to trunk +# test_port_config_update_mode_access_to_trunk: +# <<: *device_info +# layer2_configuration: +# 
port_configuration: +# - interface_name: "GigabitEthernet1/0/12" +# switchport_interface_config: +# switchport_description: "Changed to Trunk Port" +# switchport_mode: "TRUNK" # Changed from ACCESS +# allowed_vlans: [98, 99, 400] +# native_vlan_id: 1 + +# # Test port configuration UPDATE - change switchport mode from trunk to access +# test_port_config_update_mode_trunk_to_access: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/1/3" +# switchport_interface_config: +# switchport_description: "Changed to Access Port" +# switchport_mode: "ACCESS" # Changed from TRUNK +# access_vlan: 401 + +# # Test port configuration UPDATE - modify trunk allowed VLANs +# test_port_config_update_trunk_allowed_vlans: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/10" +# switchport_interface_config: +# allowed_vlans: [98, 99, 400, 402, 202] # Updated VLAN list + +# # Test port configuration UPDATE - add voice VLAN to access port +# test_port_config_update_add_voice_vlan: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/11" +# switchport_interface_config: +# voice_vlan: 401 # Added voice VLAN + +# # Test port configuration UPDATE - modify admin status +# test_port_config_update_admin_status: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/13" +# switchport_interface_config: +# admin_status: false # Changed from true + +# # Test port configuration UPDATE - add 802.1X to existing port +# test_port_config_update_add_dot1x: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/14" +# dot1x_interface_config: +# dot1x_interface_authentication_order: ["DOT1X", "MAB"] # Added 802.1X config + +# # Test port configuration UPDATE - modify STP settings +# test_port_config_update_stp_settings: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/13" +# stp_interface_config: +# stp_interface_portfast_mode: "DISABLE" # Changed from EDGE +# stp_interface_bpdu_guard: false # Changed from true +# stp_interface_cost: 200 # Changed from 19 + +# # Test port configuration UPDATE - modify DHCP snooping trust +# test_port_config_update_dhcp_snooping_trust: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/14" +# dhcp_snooping_interface_config: +# dhcp_snooping_interface_trust: true # Changed from false + +# # Test port configuration UPDATE - modify CDP and LLDP settings +# test_port_config_update_cdp_lldp: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/15" +# cdp_interface_config: +# cdp_interface_admin_status: false # Changed from true +# lldp_interface_config: +# lldp_interface_receive_transmit: "DISABLED" # Changed from TRANSMIT_AND_RECEIVE + +# # Test port configuration UPDATE - add multiple features to simple port +# test_port_config_update_add_multiple_features: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/1/1" +# switchport_interface_config: +# switchport_description: "Updated with Multiple Features" +# access_vlan: 202 # Updated VLAN +# stp_interface_config: +# stp_interface_portfast_mode: "EDGE" +# stp_interface_bpdu_guard: true +# dhcp_snooping_interface_config: +# dhcp_snooping_interface_rate: 75 +# 
dhcp_snooping_interface_trust: false + +# # Test port configuration UPDATE - comprehensive update all features +# test_port_config_update_comprehensive_all_features: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/16" +# switchport_interface_config: +# switchport_description: "Updated Comprehensive Port with All Features" +# access_vlan: 402 # Updated VLAN +# voice_vlan: 202 # Updated voice VLAN +# vlan_trunking_interface_config: +# protected: false # Changed from true +# dot1x_interface_config: +# dot1x_interface_authentication_order: ["MAB", "DOT1X"] # Changed order +# mab_interface_config: +# enable_mab: false # Changed from true +# stp_interface_config: +# stp_interface_portfast_mode: "DISABLE" # Changed from EDGE +# stp_interface_bpdu_guard: false # Changed from true +# stp_interface_cost: 200 # Changed from 100 +# stp_interface_guard: "LOOP" # Changed from ROOT +# stp_interface_priority: 32 # Changed from 64 +# dhcp_snooping_interface_config: +# dhcp_snooping_interface_rate: 200 # Changed from 100 +# dhcp_snooping_interface_trust: true # Changed from false +# cdp_interface_config: +# cdp_interface_admin_status: false # Changed from true +# lldp_interface_config: +# lldp_interface_receive_transmit: "RECEIVE_ONLY" # Changed from TRANSMIT_AND_RECEIVE +# vtp_interface_config: +# vtp_interface_admin_status: false # Changed from true + +# # Test port configuration UPDATE - modify multiple ports simultaneously +# test_port_config_update_multiple_ports: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/17" +# switchport_interface_config: +# switchport_description: "Updated Access Port 1" +# access_vlan: 98 # Changed from 402 +# - interface_name: "GigabitEthernet1/0/18" +# switchport_interface_config: +# switchport_description: "Updated Trunk Port 1" +# allowed_vlans: [400, 401, 402] # Updated VLAN list +# - interface_name: "GigabitEthernet1/0/19" +# switchport_interface_config: +# voice_vlan: 99 # Changed from 401 + +# ############################################# +# # POSITIVE TEST CASES - BOUNDARY # +# ############################################# + +# # Test port configuration BOUNDARY - minimum VLAN values +# test_port_config_boundary_min_vlans: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/10" +# switchport_interface_config: +# switchport_description: "Minimum VLAN Values Port" +# switchport_mode: "ACCESS" +# access_vlan: 1 # Minimum VLAN ID +# voice_vlan: 1 # Minimum VLAN ID +# admin_status: true + +# # Test port configuration BOUNDARY - maximum VLAN values (using available VLANs) +# test_port_config_boundary_max_vlans: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/11" +# switchport_interface_config: +# switchport_description: "Maximum VLAN Values Port" +# switchport_mode: "TRUNK" +# allowed_vlans: [402] # Highest available VLAN ID +# native_vlan_id: 402 # Highest available VLAN ID +# admin_status: true + +# # Test port configuration BOUNDARY - minimum STP cost +# test_port_config_boundary_min_stp_cost: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/12" +# switchport_interface_config: +# switchport_description: "Minimum STP Cost Port" +# switchport_mode: "ACCESS" +# access_vlan: 98 +# admin_status: true +# stp_interface_config: +# stp_interface_cost: 1 # Minimum STP cost + +# # Test 
port configuration BOUNDARY - maximum STP cost +# test_port_config_boundary_max_stp_cost: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/13" +# switchport_interface_config: +# switchport_description: "Maximum STP Cost Port" +# switchport_mode: "ACCESS" +# access_vlan: 99 +# admin_status: true +# stp_interface_config: +# stp_interface_cost: 20000000 # Maximum STP cost + +# # Test port configuration BOUNDARY - minimum STP priority +# test_port_config_boundary_min_stp_priority: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/14" +# switchport_interface_config: +# switchport_description: "Minimum STP Priority Port" +# switchport_mode: "ACCESS" +# access_vlan: 400 +# admin_status: true +# stp_interface_config: +# stp_interface_priority: 0 # Minimum STP priority + +# # Test port configuration BOUNDARY - maximum STP priority +# test_port_config_boundary_max_stp_priority: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/15" +# switchport_interface_config: +# switchport_description: "Maximum STP Priority Port" +# switchport_mode: "ACCESS" +# access_vlan: 402 +# admin_status: true +# stp_interface_config: +# stp_interface_priority: 240 # Maximum STP priority + +# # Test port configuration BOUNDARY - minimum DHCP snooping rate +# test_port_config_boundary_min_dhcp_rate: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/16" +# switchport_interface_config: +# switchport_description: "Minimum DHCP Rate Port" +# switchport_mode: "ACCESS" +# access_vlan: 202 +# admin_status: true +# dhcp_snooping_interface_config: +# dhcp_snooping_interface_rate: 1 # Minimum rate + +# # Test port configuration BOUNDARY - maximum DHCP snooping rate +# test_port_config_boundary_max_dhcp_rate: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/17" +# switchport_interface_config: +# switchport_description: "Maximum DHCP Rate Port" +# switchport_mode: "ACCESS" +# access_vlan: 401 +# admin_status: true +# dhcp_snooping_interface_config: +# dhcp_snooping_interface_rate: 2048 # Maximum rate + +# # Test port configuration BOUNDARY - maximum description length +# test_port_config_boundary_max_description_length: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/18" +# switchport_interface_config: +# switchport_description: "{{ 'A' * 230 }}" # Maximum 230 characters +# switchport_mode: "ACCESS" +# access_vlan: 98 +# admin_status: true + +# # Test port configuration BOUNDARY - maximum allowed VLANs list +# test_port_config_boundary_max_allowed_vlans: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/19" +# switchport_interface_config: +# switchport_description: "Maximum Allowed VLANs Port" +# switchport_mode: "TRUNK" +# allowed_vlans: [98, 99, 400, 402, 202, 401] # All available VLANs +# native_vlan_id: 1 +# admin_status: true + +# ############################################# +# # POSITIVE TEST CASES - SPECIAL # +# ############################################# + +# # Test port configuration SPECIAL - all switchport modes +# test_port_config_special_all_switchport_modes: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/20" +# switchport_interface_config: +# 
switchport_description: "Access Mode Port" +# switchport_mode: "ACCESS" +# access_vlan: 98 +# admin_status: true +# - interface_name: "GigabitEthernet1/0/21" +# switchport_interface_config: +# switchport_description: "Trunk Mode Port" +# switchport_mode: "TRUNK" +# allowed_vlans: [99, 400, 402] +# native_vlan_id: 1 +# admin_status: true +# - interface_name: "GigabitEthernet1/0/22" +# switchport_interface_config: +# switchport_description: "Dynamic Auto Mode Port" +# switchport_mode: "DYNAMIC_AUTO" +# admin_status: true +# - interface_name: "GigabitEthernet1/0/23" +# switchport_interface_config: +# switchport_description: "Dynamic Desirable Mode Port" +# switchport_mode: "DYNAMIC_DESIRABLE" +# admin_status: true +# - interface_name: "GigabitEthernet1/0/24" +# switchport_interface_config: +# switchport_description: "Dot1Q Tunnel Mode Port" +# switchport_mode: "DOT1Q_TUNNEL" +# admin_status: true + + +# # Test port configuration SPECIAL - all STP guard modes +# test_port_config_special_all_stp_guard_modes: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/11" +# switchport_interface_config: +# switchport_description: "STP Loop Guard" +# switchport_mode: "ACCESS" +# access_vlan: 402 +# stp_interface_config: +# stp_interface_guard: "LOOP" +# - interface_name: "GigabitEthernet1/0/12" +# switchport_interface_config: +# switchport_description: "STP Root Guard" +# switchport_mode: "ACCESS" +# access_vlan: 202 +# stp_interface_config: +# stp_interface_guard: "ROOT" +# - interface_name: "GigabitEthernet1/0/13" +# switchport_interface_config: +# switchport_description: "STP No Guard" +# switchport_mode: "ACCESS" +# access_vlan: 401 +# stp_interface_config: +# stp_interface_guard: "NONE" + +# # Test port configuration SPECIAL - all LLDP modes +# test_port_config_special_all_lldp_modes: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/14" +# switchport_interface_config: +# switchport_description: "LLDP Transmit Only" +# switchport_mode: "ACCESS" +# access_vlan: 98 +# lldp_interface_config: +# lldp_interface_receive_transmit: "TRANSMIT_ONLY" +# - interface_name: "GigabitEthernet1/0/15" +# switchport_interface_config: +# switchport_description: "LLDP Receive Only" +# switchport_mode: "ACCESS" +# access_vlan: 99 +# lldp_interface_config: +# lldp_interface_receive_transmit: "RECEIVE_ONLY" +# - interface_name: "GigabitEthernet1/0/16" +# switchport_interface_config: +# switchport_description: "LLDP Transmit and Receive" +# switchport_mode: "ACCESS" +# access_vlan: 400 +# lldp_interface_config: +# lldp_interface_receive_transmit: "TRANSMIT_AND_RECEIVE" +# - interface_name: "GigabitEthernet1/0/17" +# switchport_interface_config: +# switchport_description: "LLDP Disabled" +# switchport_mode: "ACCESS" +# access_vlan: 402 +# lldp_interface_config: +# lldp_interface_receive_transmit: "DISABLED" + +# # Test port configuration SPECIAL - all boolean combinations +# test_port_config_special_all_boolean_combinations: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/18" +# switchport_interface_config: +# switchport_description: "All True Booleans" +# switchport_mode: "ACCESS" +# access_vlan: 202 +# admin_status: true +# vlan_trunking_interface_config: +# protected: true +# mab_interface_config: +# enable_mab: true +# stp_interface_config: +# stp_interface_bpdu_filter: true +# stp_interface_bpdu_guard: true +# dhcp_snooping_interface_config: +# 
dhcp_snooping_interface_trust: true +# cdp_interface_config: +# cdp_interface_admin_status: true +# cdp_interface_log_duplex_mismatch: true +# vtp_interface_config: +# vtp_interface_admin_status: true +# - interface_name: "GigabitEthernet1/0/19" +# switchport_interface_config: +# switchport_description: "All False Booleans" +# switchport_mode: "ACCESS" +# access_vlan: 401 +# admin_status: false +# vlan_trunking_interface_config: +# protected: false +# mab_interface_config: +# enable_mab: false +# stp_interface_config: +# stp_interface_bpdu_filter: false +# stp_interface_bpdu_guard: false +# dhcp_snooping_interface_config: +# dhcp_snooping_interface_trust: false +# cdp_interface_config: +# cdp_interface_admin_status: false +# cdp_interface_log_duplex_mismatch: false +# vtp_interface_config: +# vtp_interface_admin_status: false + +# # Test port configuration SPECIAL - comprehensive large scale configuration +# test_port_config_special_large_scale: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/20" +# switchport_interface_config: +# switchport_description: "Large Scale Port 1" +# switchport_mode: "ACCESS" +# access_vlan: 98 +# - interface_name: "GigabitEthernet1/0/21" +# switchport_interface_config: +# switchport_description: "Large Scale Port 2" +# switchport_mode: "ACCESS" +# access_vlan: 99 +# - interface_name: "GigabitEthernet1/0/22" +# switchport_interface_config: +# switchport_description: "Large Scale Port 3" +# switchport_mode: "ACCESS" +# access_vlan: 400 +# - interface_name: "GigabitEthernet1/0/23" +# switchport_interface_config: +# switchport_description: "Large Scale Trunk 1" +# switchport_mode: "TRUNK" +# allowed_vlans: [98, 99, 400] +# - interface_name: "GigabitEthernet1/0/24" +# switchport_interface_config: +# switchport_description: "Large Scale Trunk 2" +# switchport_mode: "TRUNK" +# allowed_vlans: [402, 202, 401] +# - interface_name: "GigabitEthernet1/1/1" +# switchport_interface_config: +# switchport_description: "Large Scale Voice Port" +# switchport_mode: "ACCESS" +# access_vlan: 402 +# voice_vlan: 202 +# - interface_name: "GigabitEthernet1/1/2" +# switchport_interface_config: +# switchport_description: "Large Scale Comprehensive Port" +# switchport_mode: "ACCESS" +# access_vlan: 401 +# voice_vlan: 98 +# admin_status: true +# dot1x_interface_config: +# dot1x_interface_authentication_order: ["DOT1X", "MAB"] +# stp_interface_config: +# stp_interface_portfast_mode: "EDGE" +# stp_interface_bpdu_guard: true + + +# ############################################# +# # POSITIVE TEST CASES - DELETE # +# ############################################# + +# # Test port configuration DELETE - remove single feature (STP only) +# test_port_config_delete_single_feature_stp: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/13" +# stp_interface_config: {} # Remove STP configuration only + +# # Test port configuration DELETE - remove single feature (802.1X only) +# test_port_config_delete_single_feature_dot1x: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/11" +# dot1x_interface_config: {} # Remove 802.1X configuration only + +# # Test port configuration DELETE - remove single feature (MAB only) +# test_port_config_delete_single_feature_mab: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/12" +# mab_interface_config: {} # Remove MAB configuration only + 
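+# # Illustrative sketch: one way the DELETE cases above could be driven from an
+# # integration playbook. The task shape, the use of 'state: deleted', and the
+# # connection-parameter handling are assumptions modelled on the other
+# # workflow_manager playbooks in this collection, not options verified against
+# # this module's argument spec.
+# #
+# # - name: Remove only the MAB configuration from GigabitEthernet1/0/12
+# #   cisco.dnac.wired_campus_automation_workflow_manager:
+# #     # ...dnac_* connection parameters as in the other playbooks...
+# #     state: deleted
+# #     config:
+# #       - "{{ test_port_config_delete_single_feature_mab }}"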
+# # Test port configuration DELETE - remove single feature (DHCP Snooping only) +# test_port_config_delete_single_feature_dhcp_snooping: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/14" +# dhcp_snooping_interface_config: {} # Remove DHCP Snooping configuration only + +# # Test port configuration DELETE - remove multiple specific features +# test_port_config_delete_multiple_features: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/16" +# dot1x_interface_config: {} # Remove 802.1X configuration +# mab_interface_config: {} # Remove MAB configuration +# stp_interface_config: {} # Remove STP configuration +# dhcp_snooping_interface_config: {} # Remove DHCP Snooping configuration + +# # Test port configuration DELETE - remove all features except switchport (comprehensive port) +# test_port_config_delete_all_features_except_switchport: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/16" +# vlan_trunking_interface_config: {} +# dot1x_interface_config: {} +# mab_interface_config: {} +# stp_interface_config: {} +# dhcp_snooping_interface_config: {} +# cdp_interface_config: {} +# lldp_interface_config: {} +# vtp_interface_config: {} + +# # Test port configuration DELETE - remove CDP and LLDP features +# test_port_config_delete_cdp_lldp_features: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/15" +# cdp_interface_config: {} # Remove CDP configuration +# lldp_interface_config: {} # Remove LLDP configuration + +# # Test port configuration DELETE - remove VLAN trunking feature +# test_port_config_delete_vlan_trunking_feature: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/10" +# vlan_trunking_interface_config: {} # Remove VLAN trunking configuration + +# # Test port configuration DELETE - remove VTP feature +# test_port_config_delete_vtp_feature: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/16" +# vtp_interface_config: {} # Remove VTP configuration + +# # Test port configuration DELETE - remove features from multiple ports +# test_port_config_delete_features_multiple_ports: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/0/20" +# stp_interface_config: {} # Remove STP from port 20 +# - interface_name: "GigabitEthernet1/0/21" +# stp_interface_config: {} # Remove STP from port 21 +# - interface_name: "GigabitEthernet1/0/19" +# dot1x_interface_config: {} # Remove 802.1X from port 19 + +# # Test port configuration DELETE - remove complete interface configurations by interface name list +# test_port_config_delete_complete_interfaces: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/1/2" +# # No feature configs specified - removes all configuration for this interface +# - interface_name: "GigabitEthernet1/0/22" +# switchport_interface_config: +# switchport_description: "Large Scale Port 3" +# switchport_mode: "ACCESS" +# access_vlan: 400 +# - interface_name: "GigabitEthernet1/0/23" +# # No feature configs specified - removes all configuration for this interface + +# ############################################# +# # CLEANUP TEST CASES # +# ############################################# + +# # Test port configuration CLEANUP - 
final cleanup for all remaining test interfaces +# test_port_config_final_cleanup_all_interfaces: +# <<: *device_info +# layer2_configuration: +# port_configuration: +# - interface_name: "GigabitEthernet1/1/1" +# - interface_name: "GigabitEthernet1/1/2" +# - interface_name: "GigabitEthernet1/1/3" +# - interface_name: "GigabitEthernet1/0/10" +# - interface_name: "GigabitEthernet1/0/11" +# - interface_name: "GigabitEthernet1/0/12" +# - interface_name: "GigabitEthernet1/0/13" +# - interface_name: "GigabitEthernet1/0/14" +# - interface_name: "GigabitEthernet1/0/15" +# - interface_name: "GigabitEthernet1/0/16" +# - interface_name: "GigabitEthernet1/0/17" +# - interface_name: "GigabitEthernet1/0/18" +# - interface_name: "GigabitEthernet1/0/19" +# - interface_name: "GigabitEthernet1/0/20" +# - interface_name: "GigabitEthernet1/0/21" +# - interface_name: "GigabitEthernet1/0/22" +# - interface_name: "GigabitEthernet1/0/23" +# - interface_name: "GigabitEthernet1/0/24" diff --git a/tests/integration/ccc_wired_campus_automation_management/vars/vars_wca_test_feature_stp.yml b/tests/integration/ccc_wired_campus_automation_management/vars/vars_wca_test_feature_stp.yml new file mode 100644 index 0000000000..8e97cfa20a --- /dev/null +++ b/tests/integration/ccc_wired_campus_automation_management/vars/vars_wca_test_feature_stp.yml @@ -0,0 +1,659 @@ +--- +# Common device information +device_info: &device_info + ip_address: "204.1.2.3" + device_collection_status_check: false + +################################################################################################################### +# Negative Validation Tests for STP + +############################################# +# STP Global Configuration Tests +############################################# + +# Test invalid STP mode - invalid string value +test_stp_global_mode_invalid_string: + <<: *device_info + layer2_configuration: + stp: + stp_mode: "invalid_mode" # Invalid: not a valid STP mode + +# Test invalid STP mode - integer instead of string +test_stp_global_mode_invalid_integer: + <<: *device_info + layer2_configuration: + stp: + stp_mode: 123 # Invalid: integer instead of string + +# Test invalid STP portfast mode - invalid string value +test_stp_portfast_mode_invalid_string: + <<: *device_info + layer2_configuration: + stp: + stp_portfast_mode: "invalid_portfast_mode" # Invalid: not a valid portfast mode + +# Test invalid STP portfast mode - integer instead of string +test_stp_portfast_mode_invalid_integer: + <<: *device_info + layer2_configuration: + stp: + stp_portfast_mode: 123 # Invalid: integer instead of string + +# Test invalid STP BPDU guard - string instead of boolean +test_stp_bpdu_guard_invalid_string: + <<: *device_info + layer2_configuration: + stp: + stp_bpdu_guard: "true" # Invalid: string instead of boolean + +# Test invalid STP BPDU guard - integer instead of boolean +test_stp_bpdu_guard_invalid_integer: + <<: *device_info + layer2_configuration: + stp: + stp_bpdu_guard: 1 # Invalid: integer instead of boolean + +# Test invalid STP BPDU filter - string instead of boolean +test_stp_bpdu_filter_invalid_string: + <<: *device_info + layer2_configuration: + stp: + stp_bpdu_filter: "false" # Invalid: string instead of boolean + +# Test invalid STP BPDU filter - integer instead of boolean +test_stp_bpdu_filter_invalid_integer: + <<: *device_info + layer2_configuration: + stp: + stp_bpdu_filter: 0 # Invalid: integer instead of boolean + +# Test invalid STP backbonefast - string instead of boolean +test_stp_backbonefast_invalid_string: + 
<<: *device_info + layer2_configuration: + stp: + stp_backbonefast: "true" # Invalid: string instead of boolean + +# Test invalid STP backbonefast - integer instead of boolean +test_stp_backbonefast_invalid_integer: + <<: *device_info + layer2_configuration: + stp: + stp_backbonefast: 1 # Invalid: integer instead of boolean + +# Test invalid STP extended system ID - string instead of boolean +test_stp_extended_system_id_invalid_string: + <<: *device_info + layer2_configuration: + stp: + stp_extended_system_id: "true" # Invalid: string instead of boolean + +# Test invalid STP extended system ID - integer instead of boolean +test_stp_extended_system_id_invalid_integer: + <<: *device_info + layer2_configuration: + stp: + stp_extended_system_id: 1 # Invalid: integer instead of boolean + +# Test invalid STP logging - string instead of boolean +test_stp_logging_invalid_string: + <<: *device_info + layer2_configuration: + stp: + stp_logging: "false" # Invalid: string instead of boolean + +# Test invalid STP logging - integer instead of boolean +test_stp_logging_invalid_integer: + <<: *device_info + layer2_configuration: + stp: + stp_logging: 0 # Invalid: integer instead of boolean + +# Test invalid STP loopguard - string instead of boolean +test_stp_loopguard_invalid_string: + <<: *device_info + layer2_configuration: + stp: + stp_loopguard: "true" # Invalid: string instead of boolean + +# Test invalid STP loopguard - integer instead of boolean +test_stp_loopguard_invalid_integer: + <<: *device_info + layer2_configuration: + stp: + stp_loopguard: 1 # Invalid: integer instead of boolean + +# Test invalid STP transmit hold count - string instead of integer +test_stp_transmit_hold_count_invalid_string: + <<: *device_info + layer2_configuration: + stp: + stp_transmit_hold_count: "6" # Invalid: string instead of integer + +# Test invalid STP transmit hold count - below minimum range +test_stp_transmit_hold_count_below_min: + <<: *device_info + layer2_configuration: + stp: + stp_transmit_hold_count: 0 # Invalid: below minimum of 1 + +# Test invalid STP transmit hold count - above maximum range +test_stp_transmit_hold_count_above_max: + <<: *device_info + layer2_configuration: + stp: + stp_transmit_hold_count: 21 # Invalid: above maximum of 20 + +# Test invalid STP uplinkfast - string instead of boolean +test_stp_uplinkfast_invalid_string: + <<: *device_info + layer2_configuration: + stp: + stp_uplinkfast: "false" # Invalid: string instead of boolean + +# Test invalid STP uplinkfast - integer instead of boolean +test_stp_uplinkfast_invalid_integer: + <<: *device_info + layer2_configuration: + stp: + stp_uplinkfast: 0 # Invalid: integer instead of boolean + +# Test invalid STP uplinkfast max update rate - string instead of integer +test_stp_uplinkfast_max_update_rate_invalid_string: + <<: *device_info + layer2_configuration: + stp: + stp_uplinkfast_max_update_rate: "150" # Invalid: string instead of integer + +# Test invalid STP uplinkfast max update rate - below minimum range +test_stp_uplinkfast_max_update_rate_below_min: + <<: *device_info + layer2_configuration: + stp: + stp_uplinkfast_max_update_rate: -1 # Invalid: below minimum of 0 + +# Test invalid STP uplinkfast max update rate - above maximum range +test_stp_uplinkfast_max_update_rate_above_max: + <<: *device_info + layer2_configuration: + stp: + stp_uplinkfast_max_update_rate: 32001 # Invalid: above maximum of 32000 + +# Test invalid STP etherchannel guard - string instead of boolean +test_stp_etherchannel_guard_invalid_string: + <<: 
*device_info + layer2_configuration: + stp: + stp_etherchannel_guard: "true" # Invalid: string instead of boolean + +# Test invalid STP etherchannel guard - integer instead of boolean +test_stp_etherchannel_guard_invalid_integer: + <<: *device_info + layer2_configuration: + stp: + stp_etherchannel_guard: 1 # Invalid: integer instead of boolean + +############################################# +# STP Instance Configuration Tests +############################################# + +# Test invalid STP instance VLAN ID - string instead of integer +test_stp_instance_vlan_id_invalid_string: + <<: *device_info + layer2_configuration: + stp: + stp_instances: + - stp_instance_vlan_id: "98" # Invalid: string instead of integer + stp_instance_priority: 32768 + +# Test invalid STP instance VLAN ID - below minimum range +test_stp_instance_vlan_id_below_min: + <<: *device_info + layer2_configuration: + stp: + stp_instances: + - stp_instance_vlan_id: 0 # Invalid: below minimum of 1 + stp_instance_priority: 32768 + +# Test invalid STP instance VLAN ID - above maximum range +test_stp_instance_vlan_id_above_max: + <<: *device_info + layer2_configuration: + stp: + stp_instances: + - stp_instance_vlan_id: 4095 # Invalid: above maximum of 4094 + stp_instance_priority: 32768 + +# Test missing required STP instance VLAN ID +test_stp_instance_vlan_id_missing: + <<: *device_info + layer2_configuration: + stp: + stp_instances: + - # stp_instance_vlan_id is missing - should fail + stp_instance_priority: 32768 + +# Test invalid STP instance priority - string instead of integer +test_stp_instance_priority_invalid_string: + <<: *device_info + layer2_configuration: + stp: + stp_instances: + - stp_instance_vlan_id: 99 + stp_instance_priority: "32768" # Invalid: string instead of integer + +# Test invalid STP instance priority - below minimum range +test_stp_instance_priority_below_min: + <<: *device_info + layer2_configuration: + stp: + stp_instances: + - stp_instance_vlan_id: 400 + stp_instance_priority: -1 # Invalid: below minimum of 0 + +# Test invalid STP instance priority - above maximum range +test_stp_instance_priority_above_max: + <<: *device_info + layer2_configuration: + stp: + stp_instances: + - stp_instance_vlan_id: 401 + stp_instance_priority: 61441 # Invalid: above maximum of 61440 + +# Test invalid STP instance priority - not multiple of 4096 +test_stp_instance_priority_not_multiple: + <<: *device_info + layer2_configuration: + stp: + stp_instances: + - stp_instance_vlan_id: 402 + stp_instance_priority: 1000 # Invalid: not a multiple of 4096 + +# Test invalid STP instance enable_stp - string instead of boolean +test_stp_instance_enable_stp_invalid_string: + <<: *device_info + layer2_configuration: + stp: + stp_instances: + - stp_instance_vlan_id: 202 + enable_stp: "true" # Invalid: string instead of boolean + +# Test invalid STP instance enable_stp - integer instead of boolean +test_stp_instance_enable_stp_invalid_integer: + <<: *device_info + layer2_configuration: + stp: + stp_instances: + - stp_instance_vlan_id: 98 + enable_stp: 1 # Invalid: integer instead of boolean + +# Test invalid STP instance max age timer - string instead of integer +test_stp_instance_max_age_timer_invalid_string: + <<: *device_info + layer2_configuration: + stp: + stp_instances: + - stp_instance_vlan_id: 99 + stp_instance_max_age_timer: "20" # Invalid: string instead of integer + +# Test invalid STP instance max age timer - below minimum range +test_stp_instance_max_age_timer_below_min: + <<: *device_info + layer2_configuration: + 
stp: + stp_instances: + - stp_instance_vlan_id: 400 + stp_instance_max_age_timer: 5 # Invalid: below minimum of 6 + +# Test invalid STP instance max age timer - above maximum range +test_stp_instance_max_age_timer_above_max: + <<: *device_info + layer2_configuration: + stp: + stp_instances: + - stp_instance_vlan_id: 401 + stp_instance_max_age_timer: 41 # Invalid: above maximum of 40 + +# Test invalid STP instance hello interval timer - string instead of integer +test_stp_instance_hello_interval_timer_invalid_string: + <<: *device_info + layer2_configuration: + stp: + stp_instances: + - stp_instance_vlan_id: 402 + stp_instance_hello_interval_timer: "2" # Invalid: string instead of integer + +# Test invalid STP instance hello interval timer - below minimum range +test_stp_instance_hello_interval_timer_below_min: + <<: *device_info + layer2_configuration: + stp: + stp_instances: + - stp_instance_vlan_id: 202 + stp_instance_hello_interval_timer: 0 # Invalid: below minimum of 1 + +# Test invalid STP instance hello interval timer - above maximum range +test_stp_instance_hello_interval_timer_above_max: + <<: *device_info + layer2_configuration: + stp: + stp_instances: + - stp_instance_vlan_id: 98 + stp_instance_hello_interval_timer: 11 # Invalid: above maximum of 10 + +# Test invalid STP instance forward delay timer - string instead of integer +test_stp_instance_forward_delay_timer_invalid_string: + <<: *device_info + layer2_configuration: + stp: + stp_instances: + - stp_instance_vlan_id: 99 + stp_instance_forward_delay_timer: "15" # Invalid: string instead of integer + +# Test invalid STP instance forward delay timer - below minimum range +test_stp_instance_forward_delay_timer_below_min: + <<: *device_info + layer2_configuration: + stp: + stp_instances: + - stp_instance_vlan_id: 400 + stp_instance_forward_delay_timer: 3 # Invalid: below minimum of 4 + +# Test invalid STP instance forward delay timer - above maximum range +test_stp_instance_forward_delay_timer_above_max: + <<: *device_info + layer2_configuration: + stp: + stp_instances: + - stp_instance_vlan_id: 401 + stp_instance_forward_delay_timer: 31 # Invalid: above maximum of 30 + +############################################# +# Data Type and Structure Tests +############################################# + +# Test invalid STP configuration - string instead of dictionary +test_stp_invalid_dict_type: + <<: *device_info + layer2_configuration: + stp: "invalid_stp_string" # Invalid: string instead of dictionary + +# Test invalid STP instances - string instead of list +test_stp_instances_invalid_string: + <<: *device_info + layer2_configuration: + stp: + stp_instances: "invalid_instances_string" # Invalid: string instead of list + +# Test invalid STP instance item - string instead of dictionary +test_stp_instance_invalid_dict_type: + <<: *device_info + layer2_configuration: + stp: + stp_instances: + - "invalid_instance_string" # Invalid: string instead of dictionary + +################################################################################################################### +# Positive Validation Tests for STP + +############################################# +# POSITIVE TEST CASES - CREATE +############################################# + +# Test STP configuration with global parameters only +test_stp_create_global_only: + <<: *device_info + layer2_configuration: + stp: + stp_mode: "MST" + stp_portfast_mode: "ENABLE" + stp_bpdu_guard: true + stp_bpdu_filter: false + stp_backbonefast: true + stp_extended_system_id: true + stp_logging: 
true + stp_loopguard: false + stp_transmit_hold_count: 6 + stp_uplinkfast: false + stp_uplinkfast_max_update_rate: 150 + stp_etherchannel_guard: true + +# Test STP configuration with instances only +test_stp_create_instances_only: + <<: *device_info + layer2_configuration: + stp: + stp_instances: + - stp_instance_vlan_id: 98 + stp_instance_priority: 32768 + enable_stp: true + stp_instance_max_age_timer: 20 + stp_instance_hello_interval_timer: 2 + stp_instance_forward_delay_timer: 15 + - stp_instance_vlan_id: 99 + stp_instance_priority: 16384 + enable_stp: true + stp_instance_max_age_timer: 25 + stp_instance_hello_interval_timer: 3 + stp_instance_forward_delay_timer: 20 + +# Test STP configuration with single instance +test_stp_create_single_instance: + <<: *device_info + layer2_configuration: + stp: + stp_instances: + - stp_instance_vlan_id: 400 + stp_instance_priority: 8192 + enable_stp: true + +# Test STP configuration with global and instances +test_stp_create_global_and_instances: + <<: *device_info + layer2_configuration: + stp: + stp_mode: "RSTP" + stp_portfast_mode: "ENABLE" + stp_bpdu_guard: true + stp_bpdu_filter: false + stp_backbonefast: false + stp_extended_system_id: true + stp_logging: false + stp_loopguard: true + stp_transmit_hold_count: 10 + stp_uplinkfast: true + stp_uplinkfast_max_update_rate: 300 + stp_etherchannel_guard: true + stp_instances: + - stp_instance_vlan_id: 401 + stp_instance_priority: 4096 + enable_stp: true + stp_instance_max_age_timer: 18 + stp_instance_hello_interval_timer: 1 + stp_instance_forward_delay_timer: 12 + - stp_instance_vlan_id: 402 + stp_instance_priority: 61440 + enable_stp: false + stp_instance_max_age_timer: 30 + stp_instance_hello_interval_timer: 5 + stp_instance_forward_delay_timer: 25 + +# Test STP configuration with minimal global parameters +test_stp_create_minimal_global: + <<: *device_info + layer2_configuration: + stp: + stp_mode: "PVST" + +# Test STP configuration with minimal instance parameters +test_stp_create_minimal_instance: + <<: *device_info + layer2_configuration: + stp: + stp_instances: + - stp_instance_vlan_id: 202 + +############################################# +# POSITIVE TEST CASES - UPDATE +############################################# + +# Test STP update - global parameters only +test_stp_update_global_only: + <<: *device_info + layer2_configuration: + stp: + stp_mode: "MST" + stp_portfast_mode: "ENABLE" + stp_bpdu_guard: false + stp_bpdu_filter: true + stp_backbonefast: true + stp_extended_system_id: true + stp_logging: true + stp_loopguard: false + stp_transmit_hold_count: 15 + stp_uplinkfast: true + stp_uplinkfast_max_update_rate: 500 + stp_etherchannel_guard: false + +# Test STP update - add new instances +test_stp_update_add_instances: + <<: *device_info + layer2_configuration: + stp: + stp_instances: + - stp_instance_vlan_id: 98 + stp_instance_priority: 12288 + enable_stp: true + stp_instance_max_age_timer: 22 + stp_instance_hello_interval_timer: 3 + stp_instance_forward_delay_timer: 18 + - stp_instance_vlan_id: 99 + stp_instance_priority: 20480 + enable_stp: false + stp_instance_max_age_timer: 35 + stp_instance_hello_interval_timer: 7 + stp_instance_forward_delay_timer: 28 + +# Test STP update - modify existing instances +test_stp_update_modify_instances: + <<: *device_info + layer2_configuration: + stp: + stp_instances: + - stp_instance_vlan_id: 98 + stp_instance_priority: 8192 # Changed from 32768 + enable_stp: false # Changed from true + stp_instance_max_age_timer: 30 # Changed from 20 + 
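# Quick reference for the per-instance timer bounds exercised by the range tests in this
# file, compiled from the comments on those tests rather than from module documentation:
#   stp_instance_max_age_timer:        6-40
#   stp_instance_hello_interval_timer: 1-10
#   stp_instance_forward_delay_timer:  4-30
# (the remaining timer updates for instance 98 continue directly below)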
stp_instance_hello_interval_timer: 5 # Changed from 2 + stp_instance_forward_delay_timer: 25 # Changed from 15 + - stp_instance_vlan_id: 99 + stp_instance_priority: 4096 # Changed from 16384 + enable_stp: true # Keep same + stp_instance_max_age_timer: 15 # Changed from 25 + stp_instance_hello_interval_timer: 1 # Changed from 3 + stp_instance_forward_delay_timer: 10 # Changed from 20 + +# Test STP update - global and instances together +test_stp_update_global_and_instances: + <<: *device_info + layer2_configuration: + stp: + stp_mode: "PVST" # Changed from RSTP + stp_portfast_mode: "ENABLE" # Changed from EDGE + stp_bpdu_guard: false # Changed from true + stp_bpdu_filter: true # Changed from false + stp_backbonefast: true # Changed from false + stp_extended_system_id: true # Changed from true + stp_logging: true # Changed from false + stp_loopguard: false # Changed from true + stp_transmit_hold_count: 20 # Changed from 10 + stp_uplinkfast: false # Changed from true + stp_uplinkfast_max_update_rate: 1000 # Changed from 300 + stp_etherchannel_guard: false # Changed from true + stp_instances: + - stp_instance_vlan_id: 401 + stp_instance_priority: 16384 # Changed from 4096 + enable_stp: false # Changed from true + stp_instance_max_age_timer: 25 # Changed from 18 + stp_instance_hello_interval_timer: 4 # Changed from 1 + stp_instance_forward_delay_timer: 20 # Changed from 12 + - stp_instance_vlan_id: 202 # New instance + stp_instance_priority: 28672 + enable_stp: true + stp_instance_max_age_timer: 12 + stp_instance_hello_interval_timer: 2 + stp_instance_forward_delay_timer: 8 + +# Test STP update - change STP mode only +test_stp_update_change_mode: + <<: *device_info + layer2_configuration: + stp: + stp_mode: "MST" # Changed from previous mode + +# Test STP update - modify single instance +test_stp_update_single_instance: + <<: *device_info + layer2_configuration: + stp: + stp_instances: + - stp_instance_vlan_id: 400 + stp_instance_priority: 24576 # Changed from 8192 + enable_stp: false # Changed from true + +############################################# +# POSITIVE TEST CASES - DELETE +############################################# + +# Test STP delete - instances only (Type 3 - deletes only specified instances, preserves global) +test_stp_delete_instances_only: + <<: *device_info + layer2_configuration: + stp: + stp_instances: + - stp_instance_vlan_id: 98 # Delete this instance + - stp_instance_vlan_id: 99 # Delete this instance + +# Test STP delete - global and instances (Type 3 - deletes instances only, preserves global) +test_stp_delete_global_and_instances: + <<: *device_info + layer2_configuration: + stp: + stp_mode: "MST" # Global params ignored in delete state when instances present + stp_portfast_mode: "ENABLE" + stp_instances: + - stp_instance_vlan_id: 401 # Delete this instance + - stp_instance_vlan_id: 402 # Delete this instance + +# Test STP delete - global parameters only (Type 3 - resets global to defaults, deletes instances) +test_stp_delete_global_only: + <<: *device_info + layer2_configuration: + stp: + stp_mode: "PVST" # Global params provided but should reset to defaults in delete state + stp_portfast_mode: "ENABLE" + stp_bpdu_guard: true + +# Test STP delete - single instance (Type 3 - deletes only specified instance) +test_stp_delete_single_instance: + <<: *device_info + layer2_configuration: + stp: + stp_instances: + - stp_instance_vlan_id: 202 # Delete this instance only + +# Test STP delete - multiple specific instances +test_stp_delete_multiple_instances: + <<: 
*device_info + layer2_configuration: + stp: + stp_instances: + - stp_instance_vlan_id: 98 # Delete this instance + - stp_instance_vlan_id: 99 # Delete this instance + - stp_instance_vlan_id: 400 # Delete this instance + +# Test STP delete - empty configuration (Type 3 - resets global to defaults, deletes all instances) +test_stp_delete_empty: + <<: *device_info + layer2_configuration: + stp: {} diff --git a/tests/integration/ccc_wired_campus_automation_management/vars/vars_wca_test_feature_vlans.yml b/tests/integration/ccc_wired_campus_automation_management/vars/vars_wca_test_feature_vlans.yml new file mode 100644 index 0000000000..88629055d4 --- /dev/null +++ b/tests/integration/ccc_wired_campus_automation_management/vars/vars_wca_test_feature_vlans.yml @@ -0,0 +1,463 @@ +--- +# Common device information +device_info: &device_info + ip_address: "204.1.2.3" + device_collection_status_check: false + +################################################################################################################### +# Negative Validation Tests for VLANs + +############################################# +# VLAN ID Validation Tests +############################################# + +# Test VLAN ID below minimum (0) +test_vlan_id_below_min: + <<: *device_info + layer2_configuration: + vlans: + - vlan_id: 0 # Invalid: below minimum of 1 + vlan_name: "Invalid_VLAN_Below_Min" + vlan_admin_status: true + +# Test VLAN ID above maximum (4095) +test_vlan_id_above_max: + <<: *device_info + layer2_configuration: + vlans: + - vlan_id: 4095 # Invalid: above maximum of 4094 + vlan_name: "Invalid_VLAN_Above_Max" + vlan_admin_status: true + +# Test VLAN ID way above maximum +test_vlan_id_zero: + <<: *device_info + layer2_configuration: + vlans: + - vlan_id: 0 # Invalid: zero + vlan_name: "Invalid_VLAN_Zero" + +# Test negative VLAN ID +test_vlan_id_negative: + <<: *device_info + layer2_configuration: + vlans: + - vlan_id: -1 # Invalid: negative + vlan_name: "Invalid_VLAN_Negative" + vlan_admin_status: false + +# Test missing required VLAN ID +test_vlan_id_missing: + <<: *device_info + layer2_configuration: + vlans: + - # vlan_id is missing - should fail + vlan_name: "Missing_VLAN_ID" + vlan_admin_status: true + +############################################# +# VLAN Name Validation Tests +############################################# + +# Test VLAN name too long (129 characters - exceeds 128 limit) +test_vlan_name_too_long: + <<: *device_info + layer2_configuration: + vlans: + - vlan_id: 100 + vlan_name: "ThisVLANNameIsTooLongAndExceedsTheMaximumAllowedLengthOfOneHundredAndTwentyEightCharactersWhichWillCauseValidationFailureX" # 129 chars + vlan_admin_status: true + +# Test VLAN name exactly 128 characters (should be valid) +test_vlan_name_max_length: + <<: *device_info + layer2_configuration: + vlans: + - vlan_id: 101 + vlan_name: "ThisVLANNameIsExactlyOneHundredAndTwentyEightCharactersLongWhichIsTheMaximumAllowedLengthAndShouldPassValidationTest" # Exactly 128 chars + vlan_admin_status: true + +# Test empty VLAN name (should be allowed as it's not required) +test_vlan_name_empty: + <<: *device_info + layer2_configuration: + vlans: + - vlan_id: 102 + vlan_name: "" # Empty string - should be allowed + vlan_admin_status: true + +############################################# +# VLAN Admin Status Tests +############################################# + +# Test invalid admin status - string instead of boolean +test_vlan_admin_status_string: + <<: *device_info + layer2_configuration: + vlans: + - vlan_id: 103 + 
vlan_name: "Invalid_Admin_Status_String" + vlan_admin_status: "true" # Invalid: string instead of boolean + +# Test invalid admin status - integer instead of boolean +test_vlan_admin_status_integer: + <<: *device_info + layer2_configuration: + vlans: + - vlan_id: 104 + vlan_name: "Invalid_Admin_Status_Integer" + vlan_admin_status: 1 # Invalid: integer instead of boolean + +############################################# +# Data Type and Structure Tests +############################################# + +# Test invalid VLAN item - string instead of dictionary +test_vlan_invalid_dict_type: + <<: *device_info + layer2_configuration: + vlans: + - "invalid_vlan_string" # Invalid: string instead of dictionary + +# Test invalid VLAN structure - not a list +test_vlan_not_list: + <<: *device_info + layer2_configuration: + vlans: "not_a_list" # Invalid: string instead of list + +# Positive test case for VLANs +# # Test empty VLAN list (should be allowed) +# test_vlan_empty_list: +# <<: *device_info +# layer2_configuration: +# vlans: [] # Empty list - should be allowed + +############################################# +# Edge Case and Boundary Tests +############################################# + +# Test minimum valid VLAN ID (1) +test_vlan_id_min_valid: + <<: *device_info + layer2_configuration: + vlans: + - vlan_id: 1 # Valid: minimum allowed + vlan_name: "Valid_VLAN_Min" + vlan_admin_status: true + +# positive test case for VLANs +# # Test maximum valid VLAN ID (4094) +# test_vlan_id_max_valid: +# <<: *device_info +# layer2_configuration: +# vlans: +# - vlan_id: 4094 # Valid: maximum allowed +# vlan_name: "Valid_VLAN_Max" +# vlan_admin_status: false + +# Test multiple VLANs with one invalid (should fail on first invalid) +test_vlan_multiple_one_invalid: + <<: *device_info + layer2_configuration: + vlans: + - vlan_id: 200 # Valid + vlan_name: "Valid_VLAN_1" + vlan_admin_status: true + - vlan_id: 5000 # Invalid: above maximum + vlan_name: "Invalid_VLAN_2" + vlan_admin_status: true + - vlan_id: 300 # This shouldn't be reached due to validation failure + vlan_name: "Valid_VLAN_3" + vlan_admin_status: false + +############################################# +# Valid Configuration Tests +############################################# + +# Test valid VLAN with all parameters +test_vlan_all_params_valid: + <<: *device_info + layer2_configuration: + vlans: + - vlan_id: 500 + vlan_name: "Complete_Valid_VLAN" + vlan_admin_status: true + +# Test valid VLAN with minimal parameters (only required) +test_vlan_minimal_params: + <<: *device_info + layer2_configuration: + vlans: + - vlan_id: 501 # Only required parameter + +############################################# +# Advanced Edge Cases +############################################# + +# Test VLAN with special characters in name (should be allowed) +test_vlan_name_special_chars: + <<: *device_info + layer2_configuration: + vlans: + - vlan_id: 502 + vlan_name: "VLAN_With-Special.Chars_123" + vlan_admin_status: true + +# Test VLAN with numeric name (should be allowed) +test_vlan_name_numeric: + <<: *device_info + layer2_configuration: + vlans: + - vlan_id: 503 + vlan_name: "123456789" + vlan_admin_status: false + +# Test VLAN with space in name (should be allowed) +test_vlan_name_with_spaces: + <<: *device_info + layer2_configuration: + vlans: + - vlan_id: 504 + vlan_name: "VLAN With Spaces" + vlan_admin_status: true + +# Test reserved VLAN IDs (should be allowed by validation but may fail at device level) +test_vlan_reserved_ids: + <<: *device_info + 
layer2_configuration: + vlans: + - vlan_id: 1 # Default VLAN + vlan_name: "Default_VLAN" + vlan_admin_status: true + +# Test VLAN with null/None values +test_vlan_with_nulls: + <<: *device_info + layer2_configuration: + vlans: + - vlan_id: 505 + vlan_name: null # Should be allowed (treated as not provided) + vlan_admin_status: null # Should be allowed (treated as not provided) + +# Test single VLAN creation with admin disabled +test_vlan_create_single_disabled_1: + <<: *device_info + layer2_configuration: + vlans: + - vlan_id: 4003 + vlan_name: "Disabled_VLAN" + vlan_admin_status: false # is allowed for vlans between 2-1001 + +################################################################################################################### +# Positive Validation Tests for VLANs + +############################################# +# POSITIVE TEST CASES - CREATE +############################################# + +# Test single VLAN creation with minimal parameters +test_vlan_create_single_minimal: + <<: *device_info + layer2_configuration: + vlans: + - vlan_id: 4001 # Only required parameter + +# Test single VLAN creation with all parameters +test_vlan_create_single_all_params: + <<: *device_info + layer2_configuration: + vlans: + - vlan_id: 4002 + vlan_name: "Complete_VLAN_Creation" + vlan_admin_status: true + +test_vlan_create_single_disabled_2: + <<: *device_info + layer2_configuration: + vlans: + - vlan_id: 503 + vlan_name: "Disabled_VLAN" + vlan_admin_status: false # is allowed for vlans between 2-1001 + +# Test multiple VLANs creation - mixed configurations +test_vlan_create_multiple_mixed: + <<: *device_info + layer2_configuration: + vlans: + - vlan_id: 4004 + vlan_name: "Multi_VLAN_1" + vlan_admin_status: true + - vlan_id: 4005 # Minimal config + - vlan_id: 4006 + vlan_name: "Multi_VLAN_3" + vlan_admin_status: true + +# Test boundary VLAN IDs - minimum valid +test_vlan_create_boundary_min: + <<: *device_info + layer2_configuration: + vlans: + - vlan_id: 2 # Minimum practical VLAN (1 is default) + vlan_name: "Boundary_Min_VLAN" + vlan_admin_status: true + +# Test boundary VLAN IDs - maximum valid +test_vlan_create_boundary_max: + <<: *device_info + layer2_configuration: + vlans: + - vlan_id: 4094 # Maximum allowed VLAN + vlan_name: "Boundary_Max_VLAN" + vlan_admin_status: true + +# Test VLAN creation with special characters in name +test_vlan_create_special_chars: + <<: *device_info + layer2_configuration: + vlans: + - vlan_id: 4007 + vlan_name: "VLAN_With-Special.Chars_123" + vlan_admin_status: true + +# Test VLAN creation with numeric name +test_vlan_create_numeric_name: + <<: *device_info + layer2_configuration: + vlans: + - vlan_id: 4009 + vlan_name: "123456789" + vlan_admin_status: true + +############################################# +# POSITIVE TEST CASES - UPDATE +############################################# + +# Test single VLAN update - change name only +test_vlan_update_name_only: + <<: *device_info + layer2_configuration: + vlans: + - vlan_id: 4001 # Existing VLAN from create test + vlan_name: "Updated_VLAN_Name" + vlan_admin_status: true + +# Test single VLAN update - change both name and status +test_vlan_update_both_params: + <<: *device_info + layer2_configuration: + vlans: + - vlan_id: 503 # Existing VLAN from create test + vlan_name: "Fully_Updated_VLAN" # Changed name + vlan_admin_status: true # Changed status from false to true + +# Test multiple VLANs update +test_vlan_update_multiple: + <<: *device_info + layer2_configuration: + vlans: + - vlan_id: 4004 + vlan_name: 
"Updated_Multi_VLAN_1" + vlan_admin_status: true + - vlan_id: 4005 + vlan_name: "Updated_Multi_VLAN_2" + vlan_admin_status: true + - vlan_id: 4006 + vlan_name: "Updated_Multi_VLAN_3" + vlan_admin_status: true + +# Test VLAN update to enable disabled VLAN +test_vlan_update_enable_disabled: + <<: *device_info + layer2_configuration: + vlans: + - vlan_id: 2 # Was boundary min in creation + vlan_name: "Now_Enabled_VLAN" + vlan_admin_status: true + +# Test VLAN update with minimal params (should preserve existing name) +test_vlan_update_minimal: + <<: *device_info + layer2_configuration: + vlans: + - vlan_id: 4094 # Only specify ID and new status + vlan_name: "Now_Updated_Enabled_VLAN" + +# Test VLAN update with special characters +test_vlan_update_special_chars: + <<: *device_info + layer2_configuration: + vlans: + - vlan_id: 4007 + vlan_name: "Updated_VLAN_With-Special.Chars_456" + vlan_admin_status: true + +# Test VLAN update with numeric name +test_vlan_update_numeric_name: + <<: *device_info + layer2_configuration: + vlans: + - vlan_id: 4009 + vlan_name: "987654321" + vlan_admin_status: true + +############################################# +# POSITIVE TEST CASES - DELETE +############################################# + +# Test single VLAN deletion +test_vlan_delete_single: + <<: *device_info + layer2_configuration: + vlans: + - vlan_id: 4001 + +# Test multiple VLANs deletion +test_vlan_delete_multiple: + <<: *device_info + layer2_configuration: + vlans: + - vlan_id: 4002 + vlan_name: "Complete_VLAN_Creation" # Full config should be accepted during deletion + vlan_admin_status: true + - vlan_id: 503 + - vlan_id: 4004 + +# Test deletion of specific VLANs while keeping others +test_vlan_delete_selective: + <<: *device_info + layer2_configuration: + vlans: + - vlan_id: 4005 + - vlan_id: 4006 + +# Test deletion of boundary VLANs +test_vlan_delete_boundary: + <<: *device_info + layer2_configuration: + vlans: + - vlan_id: 2 # Minimum practical + - vlan_id: 4094 # Maximum allowed + +# Test deletion of VLANs with special characteristics +test_vlan_delete_special_chars: + <<: *device_info + layer2_configuration: + vlans: + - vlan_id: 4007 # VLAN with special chars + - vlan_id: 4009 # VLAN with numeric name + +test_vlan_delete_all: + <<: *device_info + layer2_configuration: + vlans: + - vlan_id: 4003 + - vlan_id: 4001 + - vlan_id: 4002 + - vlan_id: 4004 + - vlan_id: 4005 + - vlan_id: 4006 + - vlan_id: 2 + - vlan_id: 4094 + - vlan_id: 4007 + - vlan_id: 4009 + - vlan_id: 503 diff --git a/tests/integration/ccc_wired_campus_automation_management/vars/vars_wca_test_feature_vtp.yml b/tests/integration/ccc_wired_campus_automation_management/vars/vars_wca_test_feature_vtp.yml new file mode 100644 index 0000000000..b1153b0ba3 --- /dev/null +++ b/tests/integration/ccc_wired_campus_automation_management/vars/vars_wca_test_feature_vtp.yml @@ -0,0 +1,395 @@ +--- +# Common device information +device_info: &device_info + ip_address: "204.1.2.3" + device_collection_status_check: false + +################################################################################################################### +# Negative Validation Tests for VTP + +############################################# +# VTP Mode Tests +############################################# + +# Test invalid VTP mode - not in choices +test_vtp_mode_invalid_choice: + <<: *device_info + layer2_configuration: + vtp: + vtp_mode: "INVALID_MODE" # Invalid: not in [SERVER, CLIENT, TRANSPARENT, OFF] + +# Test invalid VTP mode - integer instead of string 
+test_vtp_mode_integer: + <<: *device_info + layer2_configuration: + vtp: + vtp_mode: 1 # Invalid: integer instead of string + +# Test invalid VTP mode - boolean instead of string +test_vtp_mode_boolean: + <<: *device_info + layer2_configuration: + vtp: + vtp_mode: true # Invalid: boolean instead of string + +############################################# +# VTP Version Tests +############################################# + +# Test invalid VTP version - not in choices +test_vtp_version_invalid_choice: + <<: *device_info + layer2_configuration: + vtp: + vtp_version: "VERSION_4" # Invalid: not in [VERSION_1, VERSION_2, VERSION_3] + +# Test invalid VTP version - integer instead of string +test_vtp_version_integer: + <<: *device_info + layer2_configuration: + vtp: + vtp_version: 2 # Invalid: integer instead of string + +# Test invalid VTP version - boolean instead of string +test_vtp_version_boolean: + <<: *device_info + layer2_configuration: + vtp: + vtp_version: false # Invalid: boolean instead of string + +############################################# +# VTP Domain Name Tests +############################################# + +# Test VTP domain name exceeds maximum length (33 characters) +test_vtp_domain_name_max_length: + <<: *device_info + layer2_configuration: + vtp: + vtp_domain_name: "ThisDomainNameIsTooLongForVTPAndExceeds32Characters" # Invalid: 53 chars > 32 + +# Test invalid VTP domain name - integer instead of string +test_vtp_domain_name_integer: + <<: *device_info + layer2_configuration: + vtp: + vtp_domain_name: 12345 # Invalid: integer instead of string + +# Test invalid VTP domain name - boolean instead of string +test_vtp_domain_name_boolean: + <<: *device_info + layer2_configuration: + vtp: + vtp_domain_name: true # Invalid: boolean instead of string + +############################################# +# VTP Domain Name Modification Tests +############################################# + +# Setup initial domain for modification restriction tests +test_vtp_setup_initial_domain: + <<: *device_info + layer2_configuration: + vtp: + vtp_mode: "TRANSPARENT" + vtp_domain_name: "InitialDomain" + +# Test VTP domain name modification - should fail once domain is set +test_vtp_domain_name_modification_attempt: + <<: *device_info + layer2_configuration: + vtp: + vtp_domain_name: "ModifiedDomain" # Invalid: attempting to change existing domain + +# Test VTP domain name empty string update - should fail +test_vtp_domain_name_empty_string: + <<: *device_info + layer2_configuration: + vtp: + vtp_domain_name: "" # Invalid: empty string not allowed for domain name + +############################################# +# VTP Pruning Tests +############################################# + +# Test invalid VTP pruning - string instead of boolean +test_vtp_pruning_string: + <<: *device_info + layer2_configuration: + vtp: + vtp_pruning: "true" # Invalid: string instead of boolean + +# Test invalid VTP pruning - integer instead of boolean +test_vtp_pruning_integer: + <<: *device_info + layer2_configuration: + vtp: + vtp_pruning: 1 # Invalid: integer instead of boolean + +############################################# +# VTP Configuration File Name Tests +############################################# + +# Test VTP configuration file name exceeds maximum length (245 characters) +test_vtp_config_file_name_max_length: + <<: *device_info + layer2_configuration: + vtp: + vtp_configuration_file_name: 
"flash:/this_is_an_extremely_long_filename_that_exceeds_the_maximum_allowed_length_for_vtp_configuration_file_names_and_should_cause_validation_to_fail_because_it_is_way_too_long_for_the_filesystem_to_handle_properly_and_efficiently_so_we_need_to_reject_it.dat" # Invalid: > 244 chars + +# Test invalid VTP configuration file name - integer instead of string +test_vtp_config_file_name_integer: + <<: *device_info + layer2_configuration: + vtp: + vtp_configuration_file_name: 12345 # Invalid: integer instead of string + +# Test invalid VTP configuration file name - boolean instead of string +test_vtp_config_file_name_boolean: + <<: *device_info + layer2_configuration: + vtp: + vtp_configuration_file_name: false # Invalid: boolean instead of string + +############################################# +# VTP Source Interface Tests +############################################# + +# Test invalid VTP source interface - integer instead of string +test_vtp_source_interface_integer: + <<: *device_info + layer2_configuration: + vtp: + vtp_source_interface: 1 # Invalid: integer instead of string + +# Test invalid VTP source interface - boolean instead of string +test_vtp_source_interface_boolean: + <<: *device_info + layer2_configuration: + vtp: + vtp_source_interface: true # Invalid: boolean instead of string + +############################################# +# Data Type and Structure Tests +############################################# + +# Test invalid VTP configuration - string instead of dictionary +test_vtp_invalid_dict_type: + <<: *device_info + layer2_configuration: + vtp: "invalid_vtp_string" # Invalid: string instead of dictionary + +# Test invalid VTP configuration - list instead of dictionary +test_vtp_invalid_list_type: + <<: *device_info + layer2_configuration: + vtp: ["invalid", "vtp", "list"] # Invalid: list instead of dictionary + +################################################################################################################### +# Positive Validation Tests for VTP + +############################################# +# POSITIVE TEST CASES - CREATE +############################################# + +# Test VTP configuration with mode only +test_vtp_create_mode_only: + <<: *device_info + layer2_configuration: + vtp: + vtp_mode: "OFF" + vtp_domain_name: "TestDomain" + vtp_source_interface: "Loopback0" + vtp_configuration_file_name: "flash:vtp_config_backup.dat" + +# Test VTP configuration with version only +test_vtp_create_version_only: + <<: *device_info + layer2_configuration: + vtp: + vtp_version: "VERSION_2" + +# Test VTP configuration with domain name only +test_vtp_create_domain_name_only: + <<: *device_info + layer2_configuration: + vtp: + vtp_domain_name: "TestDomain2" + +# Test VTP configuration with all parameters +test_vtp_create_all_params: + <<: *device_info + layer2_configuration: + vtp: + vtp_mode: "TRANSPARENT" + vtp_version: "VERSION_2" + vtp_domain_name: "ComprehensiveDomain" + vtp_pruning: false + vtp_configuration_file_name: "flash:vtp_config_backup.dat" + vtp_source_interface: "Loopback0" + +############################################# +# POSITIVE TEST CASES - UPDATE +############################################# + +# Test VTP update - change mode to CLIENT +test_vtp_update_mode_client: + <<: *device_info + layer2_configuration: + vtp: + vtp_mode: "OFF" + +# Test VTP update - change mode to TRANSPARENT +test_vtp_update_mode_transparent: + <<: *device_info + layer2_configuration: + vtp: + vtp_mode: "TRANSPARENT" + +# Test VTP update - change mode to OFF 
+test_vtp_update_mode_off: + <<: *device_info + layer2_configuration: + vtp: + vtp_mode: "OFF" + +# Test VTP update - change version to VERSION_1 +test_vtp_update_version_1: + <<: *device_info + layer2_configuration: + vtp: + vtp_version: "VERSION_1" + +# Test VTP update - change version to VERSION_2 +test_vtp_update_version_2: + <<: *device_info + layer2_configuration: + vtp: + vtp_version: "VERSION_2" + +# Test VTP update - change version to VERSION_3 +test_vtp_update_version_3: + <<: *device_info + layer2_configuration: + vtp: + vtp_version: "VERSION_3" + +# Test VTP update - enable pruning +test_vtp_update_enable_pruning: + <<: *device_info + layer2_configuration: + vtp: + vtp_pruning: false + vtp_source_interface: "GigabitEthernet1/1/1" + +# Test VTP update - disable pruning +test_vtp_update_disable_pruning: + <<: *device_info + layer2_configuration: + vtp: + vtp_pruning: false + vtp_configuration_file_name: "flash:updated_vtp_config.dat" + +# Test VTP update - update configuration file name +test_vtp_update_config_file_name: + <<: *device_info + layer2_configuration: + vtp: + vtp_configuration_file_name: "" + +# Test VTP update - update source interface +test_vtp_update_source_interface: + <<: *device_info + layer2_configuration: + vtp: + vtp_source_interface: "" + +############################################# +# POSITIVE TEST CASES - RESET +############################################# + +# Test VTP reset - empty configuration (deleted state resets to defaults) +test_vtp_reset_empty: + <<: *device_info + layer2_configuration: + vtp: {} + +# Test VTP reset - with existing parameters (deleted state resets to defaults) +test_vtp_reset_with_params: + <<: *device_info + layer2_configuration: + vtp: + vtp_mode: "SERVER" # Parameters provided but should reset to defaults in delete state + vtp_version: "VERSION_2" + vtp_domain_name: "TestDomain" + vtp_pruning: true + +############################################# +# Boundary Value Tests +############################################# + +# Test VTP with maximum length domain name (32 characters) +test_vtp_boundary_max_domain_name: + <<: *device_info + layer2_configuration: + vtp: + vtp_domain_name: "MaxLengthDomainNameThirtyTwoChar" # Exactly 32 characters + +# Test VTP with maximum length configuration file name (244 characters) +test_vtp_boundary_max_config_file_name: + <<: *device_info + layer2_configuration: + vtp: + vtp_configuration_file_name: "flash:/this_is_a_very_long_filename_that_reaches_exactly_244_characters_which_is_the_maximum_allowed_length_for_vtp_configuration_file_names_and_should_be_accepted_by_validation_since_it_meets_boundary_requirements_test.dat" # Exactly 244 characters + +# Test VTP with minimal configuration +test_vtp_boundary_minimal_config: + <<: *device_info + layer2_configuration: + vtp: + vtp_mode: "OFF" + +############################################# +# Special Configuration Tests +############################################# + +# Test VTP with version-specific configuration (VERSION_1 with specific features) +test_vtp_version_1_specific: + <<: *device_info + layer2_configuration: + vtp: + vtp_mode: "SERVER" + vtp_version: "VERSION_1" + vtp_domain_name: "V1Domain" + +# Test VTP with version-specific configuration (VERSION_2 with pruning) +test_vtp_version_2_with_pruning: + <<: *device_info + layer2_configuration: + vtp: + vtp_mode: "SERVER" + vtp_version: "VERSION_2" + vtp_domain_name: "V2Domain" + vtp_pruning: true + +# Test VTP with server mode comprehensive configuration 
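# The reset tests above target the deleted state; a matching illustrative task, with the
# same caveats as the merged-state sketch earlier (module name as introduced elsewhere in
# this change set, task shape assumed from the collection's other workflow-manager
# playbooks):
#   - name: Reset VTP to defaults (illustrative)
#     cisco.dnac.wired_campus_automation_workflow_manager:
#       dnac_host: "{{ dnac_host }}"
#       dnac_username: "{{ dnac_username }}"
#       dnac_password: "{{ dnac_password }}"
#       state: deleted
#       config:
#         - ip_address: "{{ test_vtp_reset_empty.ip_address }}"
#           layer2_configuration: "{{ test_vtp_reset_empty.layer2_configuration }}"
# (the comprehensive server-mode test that the preceding comment introduces follows)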
+test_vtp_server_mode_comprehensive: + <<: *device_info + layer2_configuration: + vtp: + vtp_mode: "SERVER" + vtp_version: "VERSION_3" + vtp_domain_name: "ServerDomain" + vtp_pruning: true + vtp_configuration_file_name: "flash:server_vtp_config.dat" + vtp_source_interface: "Loopback0" + +# Test VTP with transparent mode configuration +test_vtp_transparent_mode_config: + <<: *device_info + layer2_configuration: + vtp: + vtp_mode: "TRANSPARENT" + vtp_version: "VERSION_2" + vtp_domain_name: "TransparentDomain" + vtp_configuration_file_name: "flash:transparent_vtp.dat" diff --git a/tests/integration/ccc_wired_campus_automation_management/vars/vars_wired_campus_automation_workflow_management.yml b/tests/integration/ccc_wired_campus_automation_management/vars/vars_wired_campus_automation_workflow_management.yml new file mode 100644 index 0000000000..f354d625bb --- /dev/null +++ b/tests/integration/ccc_wired_campus_automation_management/vars/vars_wired_campus_automation_workflow_management.yml @@ -0,0 +1,635 @@ +--- +# Common device information +device_info: &device_info + ip_address: "204.1.2.3" + device_collection_status_check: false + +################################################################################################################### +# Comprehensive Positive Test Cases for All Features + +############################################# +# MERGED STATE - All Features Combined # +############################################# + +# Test comprehensive configuration with all supported features +test_comprehensive_merged_all_features: + <<: *device_info + layer2_configuration: + # VTP Configuration + vtp: + vtp_mode: "TRANSPARENT" + vtp_version: "VERSION_2" + vtp_domain_name: "ComprehensiveTestDomain" + vtp_pruning: false + vtp_configuration_file_name: "flash:comprehensive_vtp.dat" + vtp_source_interface: "Loopback0" + + # VLANs Configuration - These can use any VLAN IDs for testing + vlans: + - vlan_id: 4010 + vlan_name: "Test_VLAN_Merged_All" + vlan_admin_status: true + - vlan_id: 4011 + vlan_name: "Test_VLAN_Merged_All_2" + vlan_admin_status: true + - vlan_id: 4012 + vlan_admin_status: true + + # CDP Configuration + cdp: + cdp_admin_status: true + cdp_hold_time: 180 + cdp_timer: 60 + cdp_advertise_v2: true + cdp_log_duplex_mismatch: true + + # LLDP Configuration + lldp: + lldp_admin_status: true + lldp_hold_time: 220 + lldp_timer: 30 + lldp_reinitialization_delay: 3 + + # STP Configuration - Using available experimental VLANs + stp: + stp_mode: "MST" + stp_portfast_mode: "ENABLE" + stp_bpdu_guard: true + stp_bpdu_filter: false + stp_backbonefast: true + stp_extended_system_id: true + stp_logging: true + stp_loopguard: false + stp_transmit_hold_count: 8 + stp_uplinkfast: false + stp_uplinkfast_max_update_rate: 200 + stp_etherchannel_guard: true + stp_instances: + - stp_instance_vlan_id: 98 + stp_instance_priority: 32768 + enable_stp: true + stp_instance_max_age_timer: 20 + stp_instance_hello_interval_timer: 2 + stp_instance_forward_delay_timer: 15 + - stp_instance_vlan_id: 99 + stp_instance_priority: 28672 + enable_stp: true + + # DHCP Snooping Configuration - Using available experimental VLANs + dhcp_snooping: + dhcp_admin_status: true + dhcp_snooping_vlans: [98, 99, 202] + dhcp_snooping_glean: true + dhcp_snooping_database_agent_url: "tftp://192.168.1.100/comprehensive_dhcp.db" + dhcp_snooping_database_timeout: 600 + dhcp_snooping_database_write_delay: 300 + dhcp_snooping_proxy_bridge_vlans: [98, 99] + + # IGMP Snooping Configuration - Using available experimental VLANs and 
interfaces + igmp_snooping: + enable_igmp_snooping: true + igmp_snooping_querier: false + igmp_snooping_querier_address: "192.168.1.10" + igmp_snooping_querier_version: "VERSION_2" + igmp_snooping_querier_query_interval: 125 + igmp_snooping_vlans: + - igmp_snooping_vlan_id: 400 + enable_igmp_snooping: true + igmp_snooping_querier: false + igmp_snooping_querier_address: "192.168.1.11" + igmp_snooping_querier_version: "VERSION_2" + igmp_snooping_querier_query_interval: 125 + igmp_snooping_mrouter_port_list: ["GigabitEthernet1/0/10", "GigabitEthernet1/0/11"] + - igmp_snooping_vlan_id: 401 + enable_igmp_snooping: true + igmp_snooping_querier: true + igmp_snooping_querier_address: "192.168.1.12" + igmp_snooping_querier_version: "VERSION_3" + igmp_snooping_querier_query_interval: 90 + - igmp_snooping_vlan_id: 98 + enable_igmp_snooping: true + igmp_snooping_querier: false + igmp_snooping_querier_address: "192.168.1.13" # FIXED: Added valid IP + igmp_snooping_querier_version: "VERSION_2" + igmp_snooping_querier_query_interval: 125 + igmp_snooping_mrouter_port_list: ["GigabitEthernet1/0/1", "GigabitEthernet1/0/2"] + - igmp_snooping_vlan_id: 99 + enable_igmp_snooping: true + igmp_snooping_querier: false + igmp_snooping_querier_address: "192.168.1.14" # FIXED: Added valid IP + igmp_snooping_querier_version: "VERSION_2" + igmp_snooping_querier_query_interval: 125 + - igmp_snooping_vlan_id: 202 + enable_igmp_snooping: true + igmp_snooping_querier: false + igmp_snooping_querier_address: "192.168.1.15" # FIXED: Added valid IP + igmp_snooping_querier_version: "VERSION_2" + igmp_snooping_querier_query_interval: 125 + - igmp_snooping_vlan_id: 402 + enable_igmp_snooping: true + igmp_snooping_querier: false + igmp_snooping_querier_address: "192.168.1.16" # FIXED: Added valid IP + igmp_snooping_querier_version: "VERSION_2" + igmp_snooping_querier_query_interval: 125 + - igmp_snooping_vlan_id: 403 + enable_igmp_snooping: true + igmp_snooping_querier: false + igmp_snooping_querier_address: "192.168.1.17" # FIXED: Added valid IP + igmp_snooping_querier_version: "VERSION_2" + igmp_snooping_querier_query_interval: 125 + + # MLD Snooping Configuration - Using available experimental VLANs and interfaces + mld_snooping: + enable_mld_snooping: true + mld_snooping_querier: false + mld_snooping_querier_address: "fe80::1" + mld_snooping_querier_version: "VERSION_2" + mld_snooping_listener: true + mld_snooping_querier_query_interval: 125 + mld_snooping_vlans: + - mld_snooping_vlan_id: 402 + enable_mld_snooping: true + mld_snooping_enable_immediate_leave: false + mld_snooping_querier: false + mld_snooping_querier_address: "fe80::10" + mld_snooping_querier_version: "VERSION_2" + mld_snooping_querier_query_interval: 125 + mld_snooping_mrouter_port_list: ["GigabitEthernet1/0/12", "GigabitEthernet1/0/13"] + - mld_snooping_vlan_id: 403 + enable_mld_snooping: true + mld_snooping_enable_immediate_leave: true + mld_snooping_querier: true + mld_snooping_querier_address: "fe80::11" + mld_snooping_querier_version: "VERSION_1" + mld_snooping_querier_query_interval: 90 + - mld_snooping_vlan_id: 98 + enable_mld_snooping: true + mld_snooping_enable_immediate_leave: false + mld_snooping_querier: false + mld_snooping_querier_address: "fe80::13" + mld_snooping_querier_version: "VERSION_2" + mld_snooping_querier_query_interval: 125 + mld_snooping_mrouter_port_list: ["GigabitEthernet1/0/1", "GigabitEthernet1/0/2"] + - mld_snooping_vlan_id: 99 + enable_mld_snooping: true + mld_snooping_enable_immediate_leave: false + mld_snooping_querier: false 
+ mld_snooping_querier_address: "fe80::14" + mld_snooping_querier_version: "VERSION_2" + mld_snooping_querier_query_interval: 125 + - mld_snooping_vlan_id: 202 + enable_mld_snooping: true + mld_snooping_enable_immediate_leave: false + mld_snooping_querier: false + mld_snooping_querier_address: "fe80::15" + mld_snooping_querier_version: "VERSION_2" + mld_snooping_querier_query_interval: 125 + - mld_snooping_vlan_id: 300 + enable_mld_snooping: true + mld_snooping_enable_immediate_leave: false + mld_snooping_querier: false + mld_snooping_querier_address: "fe80::15" + mld_snooping_querier_version: "VERSION_2" + mld_snooping_querier_query_interval: 125 + - mld_snooping_vlan_id: 400 + enable_mld_snooping: true + mld_snooping_enable_immediate_leave: false + mld_snooping_querier: false + mld_snooping_querier_address: "fe80::16" + mld_snooping_querier_version: "VERSION_2" + mld_snooping_querier_query_interval: 125 + mld_snooping_mrouter_port_list: ["GigabitEthernet1/0/10", "GigabitEthernet1/0/11"] + - mld_snooping_vlan_id: 401 + enable_mld_snooping: true + mld_snooping_enable_immediate_leave: false + mld_snooping_querier: false + mld_snooping_querier_address: "fe80::17" + mld_snooping_querier_version: "VERSION_2" + mld_snooping_querier_query_interval: 125 + + # Authentication Configuration + authentication: + enable_dot1x_authentication: true + authentication_config_mode: "LEGACY" + + # Logical Ports Configuration - Using available interfaces + logical_ports: + port_channel_auto: false + port_channel_lacp_system_priority: 4096 + port_channel_load_balancing_method: "SRC_DST_MIXED_IP_PORT" + port_channels: + - port_channel_protocol: "LACP" + port_channel_name: "Port-channel10" + port_channel_min_links: 2 + port_channel_members: + - port_channel_interface_name: "GigabitEthernet1/0/14" + port_channel_mode: "ACTIVE" + port_channel_port_priority: 128 + port_channel_rate: 30 + - port_channel_interface_name: "GigabitEthernet1/0/15" + port_channel_mode: "ACTIVE" + port_channel_port_priority: 128 + port_channel_rate: 30 + - port_channel_protocol: "PAGP" + port_channel_name: "Port-channel11" + port_channel_members: + - port_channel_interface_name: "GigabitEthernet1/0/16" + port_channel_mode: "DESIRABLE" + port_channel_port_priority: 128 + port_channel_learn_method: "AGGREGATION_PORT" + + # Port Configuration - Using available interfaces and experimental VLANs + port_configuration: + - interface_name: "GigabitEthernet1/0/17" + switchport_interface_config: + switchport_description: "Comprehensive Test Port 1" + switchport_mode: "ACCESS" + access_vlan: 98 + admin_status: true + voice_vlan: 99 + vlan_trunking_interface_config: + enable_dtp_negotiation: false + protected: false + dot1x_interface_config: + dot1x_interface_authentication_order: ["DOT1X", "MAB"] + dot1x_interface_authentication_mode: "OPEN" + dot1x_interface_pae_type: "AUTHENTICATOR" + dot1x_interface_control_direction: "BOTH" + dot1x_interface_host_mode: "MULTI_AUTHENTICATION" + dot1x_interface_port_control: "AUTO" + dot1x_interface_inactivity_timer: 300 + dot1x_interface_max_reauth_requests: 3 + dot1x_interface_reauth_timer: 3600 + mab_interface_config: + mab_interface_enable: true + stp_interface_config: + stp_interface_enable_portfast: true + stp_interface_enable_bpdu_guard: true + stp_interface_enable_bpdu_filter: false + stp_interface_enable_root_guard: false + stp_interface_enable_loop_guard: false + stp_interface_port_priority: 128 + stp_interface_cost: 19 + dhcp_snooping_interface_config: + dhcp_snooping_interface_rate_limit: 100 + 
dhcp_snooping_interface_trust: true + cdp_interface_config: + cdp_interface_admin_status: true + cdp_interface_logging: true + lldp_interface_config: + lldp_interface_transmit: true + lldp_interface_receive: true + vtp_interface_config: + vtp_interface_admin_status: true + - interface_name: "GigabitEthernet1/0/18" + switchport_interface_config: + switchport_description: "Comprehensive Test Port 2" + switchport_mode: "TRUNK" + allowed_vlans: [202, 400, 401] + native_vlan_id: 202 + admin_status: true + vlan_trunking_interface_config: + enable_dtp_negotiation: true + protected: true + pruning_vlan_ids: [400, 401] + stp_interface_config: + stp_interface_enable_portfast: false + stp_interface_enable_bpdu_guard: false + stp_interface_enable_bpdu_filter: false + stp_interface_enable_root_guard: true + stp_interface_enable_loop_guard: true + stp_interface_port_priority: 64 + stp_interface_cost: 100 + +############################################# +# UPDATED STATE - All Features Modified # +############################################# + +# Test comprehensive parameter updates across all supported features +test_comprehensive_updated_all_features: + <<: *device_info + layer2_configuration: + # VLANs Configuration - Update existing VLANs with modified parameters + vlans: + - vlan_id: 4010 + vlan_name: "Test_VLAN_Updated_All" + vlan_admin_status: false # Changed from true + - vlan_id: 4011 + vlan_name: "Test_VLAN_Updated_All_2" + vlan_admin_status: false # Changed from true + - vlan_id: 4012 + vlan_name: "Test_VLAN_Updated_New_Name" # Added name + vlan_admin_status: true # Explicit true + + # CDP Configuration - Updated parameters + cdp: + cdp_admin_status: true + cdp_hold_time: 200 # Changed from 180 + cdp_timer: 90 # Changed from 60 + cdp_advertise_v2: false # Changed from true + cdp_log_duplex_mismatch: false # Changed from true + + # LLDP Configuration - Updated parameters + lldp: + lldp_admin_status: true + lldp_hold_time: 240 # Changed from 220 + lldp_timer: 45 # Changed from 30 + lldp_reinitialization_delay: 5 # Changed from 3 + + # STP Configuration - Updated parameters and instances + stp: + stp_mode: "RAPID_PVST" # CORRECTED: was "RAPID_PVST", should be "RSTP" + stp_portfast_mode: "DISABLE" # Changed from ENABLE + stp_bpdu_guard: false # Changed from true + stp_bpdu_filter: true # Changed from false + stp_backbonefast: false # Changed from true + stp_extended_system_id: true + stp_logging: false # Changed from true + stp_loopguard: true # Changed from false + stp_transmit_hold_count: 12 # Changed from 8 + stp_uplinkfast: true # Changed from false + stp_uplinkfast_max_update_rate: 150 # Changed from 200 + stp_etherchannel_guard: false # Changed from true + stp_instances: + - stp_instance_vlan_id: 98 + stp_instance_priority: 24576 # Changed from 32768 + enable_stp: true + stp_instance_max_age_timer: 25 # Changed from 20 + stp_instance_hello_interval_timer: 3 # Changed from 2 + stp_instance_forward_delay_timer: 20 # Changed from 15 + - stp_instance_vlan_id: 99 + stp_instance_priority: 20480 # Changed from 28672 + enable_stp: false # Changed from true + # Add new STP instance + - stp_instance_vlan_id: 202 + stp_instance_priority: 16384 + enable_stp: true + stp_instance_max_age_timer: 18 + stp_instance_hello_interval_timer: 2 + stp_instance_forward_delay_timer: 12 + + # VTP Configuration - Updated parameters + vtp: + vtp_mode: "CLIENT" # Changed from TRANSPARENT + vtp_version: "VERSION_3" # Changed from VERSION_2 + vtp_pruning: true # Changed from false + vtp_configuration_file_name: 
"flash:updated_vtp.dat" # Changed filename + vtp_source_interface: "Loopback1" # Changed from Loopback0 + + # DHCP Snooping Configuration - Updated parameters and VLANs + dhcp_snooping: + dhcp_admin_status: true + dhcp_snooping_vlans: [99, 202, 400] # Modified VLAN list + dhcp_snooping_glean: false # Changed from true + dhcp_snooping_database_agent_url: "tftp://192.168.1.200/updated_dhcp.db" # Changed URL + dhcp_snooping_database_timeout: 900 # Changed from 600 + dhcp_snooping_database_write_delay: 450 # Changed from 300 + dhcp_snooping_proxy_bridge_vlans: [99, 202] # Modified VLAN list + + # IGMP Snooping Configuration - Updated parameters and VLAN settings + igmp_snooping: + enable_igmp_snooping: true + igmp_snooping_querier: true # Changed from false + igmp_snooping_querier_address: "192.168.1.20" # Changed IP + igmp_snooping_querier_version: "VERSION_3" # Changed from VERSION_2 + igmp_snooping_querier_query_interval: 100 # Changed from 125 + igmp_snooping_vlans: + - igmp_snooping_vlan_id: 400 + enable_igmp_snooping: true + igmp_snooping_querier: true # Changed from false + igmp_snooping_querier_address: "192.168.1.21" # Changed IP + igmp_snooping_querier_version: "VERSION_3" # Changed from VERSION_2 + igmp_snooping_querier_query_interval: 110 # Changed from 125 + igmp_snooping_mrouter_port_list: ["GigabitEthernet1/0/19", "GigabitEthernet1/0/20"] # Changed ports + - igmp_snooping_vlan_id: 401 + enable_igmp_snooping: false # Changed from true + igmp_snooping_querier: false # Changed from true + igmp_snooping_querier_address: "192.168.1.22" # Changed IP + igmp_snooping_querier_version: "VERSION_2" # Changed from VERSION_3 + igmp_snooping_querier_query_interval: 80 # Changed from 90 + # Add new IGMP VLAN + - igmp_snooping_vlan_id: 402 + enable_igmp_snooping: true + igmp_snooping_querier: false + igmp_snooping_querier_address: "192.168.1.23" + igmp_snooping_querier_version: "VERSION_2" + igmp_snooping_querier_query_interval: 120 + igmp_snooping_mrouter_port_list: ["GigabitEthernet1/0/21"] + + # MLD Snooping Configuration - Updated parameters and VLAN settings + mld_snooping: + enable_mld_snooping: true + mld_snooping_querier: true # Changed from false + mld_snooping_querier_address: "fe80::2" # Changed IPv6 + mld_snooping_querier_version: "VERSION_1" # Changed from VERSION_2 + mld_snooping_listener: false # Changed from true + mld_snooping_querier_query_interval: 100 # Changed from 125 + mld_snooping_vlans: + - mld_snooping_vlan_id: 402 + enable_mld_snooping: false # Changed from true + mld_snooping_enable_immediate_leave: true # Changed from false + mld_snooping_querier: true # Changed from false + mld_snooping_querier_address: "fe80::20" # Changed IPv6 + mld_snooping_querier_version: "VERSION_1" # Changed from VERSION_2 + mld_snooping_querier_query_interval: 110 # Changed from 125 + mld_snooping_mrouter_port_list: ["GigabitEthernet1/0/22", "GigabitEthernet1/1/1"] # Changed ports + - mld_snooping_vlan_id: 403 + enable_mld_snooping: true + mld_snooping_enable_immediate_leave: false # Changed from true + mld_snooping_querier: false # Changed from true + mld_snooping_querier_address: "fe80::21" # Changed IPv6 + mld_snooping_querier_version: "VERSION_2" # Changed from VERSION_1 + mld_snooping_querier_query_interval: 70 # Changed from 90 + + # Authentication Configuration - Updated parameters + authentication: + enable_dot1x_authentication: false # Changed from true + authentication_config_mode: "LEGACY" # Changed from NEW_STYLE + + # Logical Ports Configuration - Updated parameters and new 
port channel + logical_ports: + port_channel_auto: true # Changed from false + port_channel_lacp_system_priority: 8192 # Changed from 4096 + port_channel_load_balancing_method: "SRC_DST_IP" # Changed method + port_channels: + - port_channel_protocol: "LACP" + port_channel_name: "Port-channel10" + port_channel_min_links: 1 # Changed from 2 + port_channel_members: + - port_channel_interface_name: "GigabitEthernet1/0/14" + port_channel_mode: "PASSIVE" # Changed from ACTIVE + port_channel_port_priority: 64 # Changed from 128 + port_channel_rate: 30 # CORRECTED: was 60, should be 30 (valid range) + - port_channel_interface_name: "GigabitEthernet1/0/15" + port_channel_mode: "PASSIVE" # Changed from ACTIVE + port_channel_port_priority: 64 # Changed from 128 + port_channel_rate: 30 # CORRECTED: was 60, should be 30 (valid range) + - port_channel_protocol: "PAGP" + port_channel_name: "Port-channel11" + port_channel_min_links: 1 + port_channel_members: + - port_channel_interface_name: "GigabitEthernet1/0/16" + port_channel_mode: "AUTO" # Changed from DESIRABLE + port_channel_port_priority: 64 # Changed from 128 + port_channel_learn_method: "PHYSICAL_PORT" # CORRECTED: was "INGRESS_PORT" + # Add new port channel + - port_channel_protocol: "LACP" + port_channel_name: "Port-channel12" + port_channel_min_links: 1 + port_channel_members: + - port_channel_interface_name: "GigabitEthernet1/1/2" + port_channel_mode: "ACTIVE" + port_channel_port_priority: 128 + port_channel_rate: 30 + + # Port Configuration - Updated interface parameters + port_configuration: + - interface_name: "GigabitEthernet1/0/17" + switchport_interface_config: + switchport_description: "Updated Test Port 1" # Changed description + switchport_mode: "TRUNK" # Changed from ACCESS + allowed_vlans: [98, 99, 202] # Changed to trunk config + native_vlan_id: 98 # Changed native VLAN + admin_status: false # Changed from true + vlan_trunking_interface_config: + enable_dtp_negotiation: true # CORRECTED: was "ON", changed to boolean + protected: true # Changed from false + pruning_vlan_ids: [99, 202] # Added pruning + dot1x_interface_config: + dot1x_interface_authentication_order: ["MAB", "DOT1X"] # Changed order + dot1x_interface_authentication_mode: "CLOSED" # Changed from OPEN + dot1x_interface_pae_type: "AUTHENTICATOR" + dot1x_interface_control_direction: "IN" # Changed from BOTH + dot1x_interface_host_mode: "SINGLE_HOST" # Changed from MULTI_AUTHENTICATION + dot1x_interface_port_control: "FORCE_AUTHORIZED" # Changed from AUTO + dot1x_interface_inactivity_timer: 600 # Changed from 300 + dot1x_interface_max_reauth_requests: 5 # Changed from 3 + dot1x_interface_reauth_timer: 7200 # Changed from 3600 + mab_interface_config: + mab_interface_enable: false # Changed from true + stp_interface_config: + stp_interface_enable_portfast: false # Changed from true + stp_interface_enable_bpdu_guard: false # Changed from true + stp_interface_enable_bpdu_filter: true # Changed from false + stp_interface_enable_root_guard: true # Changed from false + stp_interface_enable_loop_guard: true # Changed from false + stp_interface_port_priority: 64 # Changed from 128 + stp_interface_cost: 39 # Changed from 19 + dhcp_snooping_interface_config: + dhcp_snooping_interface_rate_limit: 200 # Changed from 100 + dhcp_snooping_interface_trust: false # Changed from true + cdp_interface_config: + cdp_interface_admin_status: false # Changed from true + cdp_interface_logging: false # Changed from true + lldp_interface_config: + lldp_interface_transmit: false # Changed from true + 
lldp_interface_receive: false # Changed from true + vtp_interface_config: + vtp_interface_admin_status: false # Changed from true + - interface_name: "GigabitEthernet1/0/18" + switchport_interface_config: + switchport_description: "Updated Test Port 2" # Changed description + switchport_mode: "ACCESS" # Changed from TRUNK + access_vlan: 400 # Changed to access config + admin_status: false # Changed from true + voice_vlan: 401 # Added voice VLAN + vlan_trunking_interface_config: + enable_dtp_negotiation: false # CORRECTED: was "AUTO", changed to boolean + protected: false # Changed from true + stp_interface_config: + stp_interface_enable_portfast: true # Changed from false + stp_interface_enable_bpdu_guard: true # Changed from false + stp_interface_enable_bpdu_filter: true # Changed from false + stp_interface_enable_root_guard: false # Changed from true + stp_interface_enable_loop_guard: false # Changed from true + stp_interface_port_priority: 32 # Changed from 64 + stp_interface_cost: 200 # Changed from 100 + # Add new interface configuration + - interface_name: "GigabitEthernet1/1/3" + switchport_interface_config: + switchport_description: "New Updated Test Port 3" + switchport_mode: "ACCESS" + access_vlan: 403 + admin_status: true + vlan_trunking_interface_config: + enable_dtp_negotiation: false # CORRECTED: was "AUTO", changed to boolean + protected: false + stp_interface_config: + stp_interface_enable_portfast: true + stp_interface_enable_bpdu_guard: true + stp_interface_enable_bpdu_filter: false + stp_interface_enable_root_guard: false + stp_interface_enable_loop_guard: false + stp_interface_port_priority: 128 + stp_interface_cost: 19 + +############################################# +# DELETED STATE - All Features Combined # +############################################# + +# Test comprehensive deletion with all supported features (Note: some features may not support delete) +test_comprehensive_deleted_all_features: + <<: *device_info + layer2_configuration: + # VLANs - supports delete + vlans: + - vlan_id: 4010 + - vlan_id: 4011 + - vlan_id: 4012 + + # CDP - supports delete (reset to defaults) + cdp: {} + + # LLDP - supports delete (reset to defaults) + lldp: {} + + # STP - supports delete (reset global + delete instances) + stp: + stp_instances: + - stp_instance_vlan_id: 98 + - stp_instance_vlan_id: 99 + - stp_instance_vlan_id: 202 + + # VTP - supports delete (reset to defaults) + vtp: {} + + # DHCP Snooping - supports delete (reset to defaults) + dhcp_snooping: {} + + # IGMP Snooping - supports delete (reset to defaults) + igmp_snooping: {} + + # MLD Snooping - supports delete (reset to defaults) + mld_snooping: {} + + # Authentication - supports delete (reset to defaults) + authentication: {} + + # Note: Logical Ports and Port Configuration typically don't support global delete + # They require specific interface/port channel targeting + +############################################# +# Cleanup Test Configuration # +############################################# + +# Complete cleanup configuration for all created test VLANs and features +test_complete_cleanup_all_features: + <<: *device_info + layer2_configuration: + # Cleanup all test VLANs + vlans: + - vlan_id: 4010 + - vlan_id: 4011 + - vlan_id: 4012 + + # Reset all protocols to defaults + cdp: {} + lldp: {} + vtp: {} + dhcp_snooping: {} + igmp_snooping: {} + mld_snooping: {} + authentication: {} + + # Reset STP to defaults and cleanup instances + stp: {} diff --git 
a/tests/unit/modules/dnac/fixtures/accesspoint_workflow_manager.json b/tests/unit/modules/dnac/fixtures/accesspoint_workflow_manager.json index 9b71e6f411..382e57398b 100644 --- a/tests/unit/modules/dnac/fixtures/accesspoint_workflow_manager.json +++ b/tests/unit/modules/dnac/fixtures/accesspoint_workflow_manager.json @@ -178,6 +178,7 @@ "ap_mode": "Local", "ap_name": "LTTS_Test_9124_T2", "eth_mac": "34:b8:83:15:7c:6c", + "provisioning_status": true, "failover_priority": "Low", "led_brightness_level": 8, "led_status": "Enabled", @@ -864,6 +865,7 @@ } ] }, + "get_membership_empty": [ { "site": { @@ -1103,5 +1105,303 @@ "siteId": "17301034-7715-4363-952f-3d290ea6ca59" } ] + }, + + "playbook_config_provision_new_positive": [ + { + "mac_address": "a4:88:73:d4:dd:80", + "management_ip_address": null, + "hostname": null, + "rf_profile": "LOW", + "site": { + "floor": { + "name": "FLOOR1", + "parent_name": "Global/USA/SAN JOSE/SJ_BLD23" + } + }, + "type": null + } + ], + + "get_device_detail_for_provision": { + "response": [{ + "type": "Cisco Catalyst 9120AXE Unified Access Point", + "up_time": "20:42:51.040", + "mac_address": "a4:88:73:d4:dd:80", + "device_support_level": "Supported", + "software_type": null, + "software_version": "17.15.4.18", + "serial_number": "FJC24391KBC", + "last_managed_resync_reasons": "", + "management_state": "Managed", + "pending_sync_requests_count": "0", + "reasons_for_device_resync": "", + "reasons_for_pending_sync_requests": "", + "inventory_status_detail": "NA", + "sync_requested_by_app": "", + "collection_interval": "NA", + "dns_resolved_management_address": "", + "last_updated": "2025-11-13 14:08:42", + "boot_date_time": null, + "ap_manager_interface_ip": "204.192.4.200", + "collection_status": "Managed", + "family": "Unified AP", + "hostname": "AP687D.B402.1614-AP-Test6", + "last_update_time": 1763042922702, + "location_name": null, + "management_ip_address": "204.192.106.2", + "platform_id": "C9120AXE-B", + "reachability_failure_reason": "NA", + "reachability_status": "Reachable", + "series": "Cisco Catalyst 9120AXE Series Unified Access Points", + "snmp_contact": "", + "snmp_location": "Global/USA/SAN JOSE/SJ_BLD23/FLOOR4", + "role_source": "AUTO", + "interface_count": "0", + "ap_ethernet_mac_address": "a4:88:73:ce:9b:b0", + "error_code": "null", + "error_description": null, + "last_device_resync_start_time": "", + "line_card_count": "0", + "line_card_id": "", + "managed_atleast_once": false, + "memory_size": "NA", + "tag_count": "0", + "tunnel_udp_port": null, + "uptime_seconds": 81363, + "vendor": "NA", + "waas_device_mode": null, + "associated_wlc_ip": "204.192.4.200", + "description": null, + "location": null, + "role": "ACCESS", + "instance_tenant_id": "68593aeecd0f400013b8604e", + "instance_uuid": "8954ff26-aa2d-4991-b2be-6462121ee710", + "id": "8954ff26-aa2d-4991-b2be-6462121ee710" + } + ] + }, + + "get_config_detail_for_provision": { + "mac_address": "a4:88:73:d4:dd:80", + "ap_name": "AP687D.B402.1614-AP-Test6", + "location": "Global/USA/SAN JOSE/SJ_BLD23/FLOOR2", + "led_status": "Enabled", + "led_brightness_level": 2, + "failover_priority": "Low", + "ap_mode": "Local", + "ap_height": 0.0, + "admin_status": "Enabled", + "primary_controller_name": "SJ-EWLC-1.cisco.local", + "primary_ip_address": "204.192.4.200", + "secondary_controller_name": "NY-EWLC-1.cisco.local", + "secondary_ip_address": "204.192.6.200", + "tertiary_controller_name": "Clear", + "tertiary_ip_address": "0.0.0.0", + "eth_mac": "a4:88:73:ce:9b:b0", + "provisioning_status": true, + 
"mesh_dtos": [], + "radio_dtos": [ + { + "bss_color": null, + "bss_color_assignment_mode": null, + "bss_color_radio_admin_status": null, + "bss_color_capable": false, + "if_type": 3, + "if_type_value": "Dual Radio", + "slot_id": 0, + "mac_address": "a4:88:73:d4:dd:80", + "admin_status": "Disabled", + "power_assignment_mode": "Global", + "powerlevel": 1, + "channel_assignment_mode": "Global", + "channel_number": 11, + "channel_width": null, + "antenna_pattern_name": null, + "antenna_angle": 0, + "antenna_elev_angle": 0, + "antenna_gain": 6, + "radio_role_assignment": "Auto", + "radio_band": null, + "clean_air_si": "Enabled", + "dual_radio_mode": null + }, + { + "bss_color": null, + "bss_color_assignment_mode": null, + "bss_color_radio_admin_status": null, + "bss_color_capable": false, + "if_type": 2, + "if_type_value": "5 GHz", + "slot_id": 1, + "mac_address": "a4:88:73:d4:dd:80", + "admin_status": "Enabled", + "power_assignment_mode": "Global", + "powerlevel": 3, + "channel_assignment_mode": "Global", + "channel_number": 36, + "channel_width": "40 MHz", + "antenna_pattern_name": null, + "antenna_angle": 0, + "antenna_elev_angle": 0, + "antenna_gain": 6, + "radio_role_assignment": null, + "radio_band": null, + "clean_air_si": "Enabled", + "dual_radio_mode": null + } + ], + "cable_length": 10, + "is_geolocation_supported": true, + "is_cable_length_supported": false, + "cable_length_supported": false, + "geolocation_supported": true + }, + + "get_site_for_provision": { + "response": [{ + "id": "4f3b43a9-b29b-43f8-8ca8-7c141efcdf95", + "siteHierarchyId": "73273999-4fde-4376-b071-25ebee51d155/0cc72385-0e00-4a5a-b11b-a9b79fe2abd1/18d688cb-e9ca-4a16-abdc-5923edadfb00/ca6442ab-00e7-4454-b52c-cba2137fa66f/4f3b43a9-b29b-43f8-8ca8-7c141efcdf95", + "parentId": "ca6442ab-00e7-4454-b52c-cba2137fa66f", + "name": "FLOOR1", + "nameHierarchy": "Global/USA/SAN JOSE/SJ_BLD23/FLOOR1", + "type": "floor", + "floorNumber": 1, + "rfModel": "Cubes And Walled Offices", + "width": 100.0, + "length": 100.0, + "height": 10.0, + "unitsOfMeasure": "feet" + }] + }, + + "get_device_for_site_for_provision": { + "response": [ + { + "deviceId": "52898352-c099-4869-8f18-9c94fe8a560e", + "siteId": "4f3b43a9-b29b-43f8-8ca8-7c141efcdf95", + "siteNameHierarchy": "Global/USA/SAN JOSE/SJ_BLD23/FLOOR1", + "siteType": "floor" + }, + { + "deviceId": "7427ea0a-627b-434d-aa59-ac8ea474f21c", + "siteId": "4f3b43a9-b29b-43f8-8ca8-7c141efcdf95", + "siteNameHierarchy": "Global/USA/SAN JOSE/SJ_BLD23/FLOOR1", + "siteType": "floor" + }, + { + "deviceId": "e5ef32dc-0897-43a8-b79d-a5f8c1e1c3b2", + "siteId": "4f3b43a9-b29b-43f8-8ca8-7c141efcdf95", + "siteNameHierarchy": "Global/USA/SAN JOSE/SJ_BLD23/FLOOR1", + "siteType": "floor" + } + ], + "version": "1.0" + }, + + "get_wlc_device_for_provision": { + "name": "SJ-EWLC-1.cisco.local", + "roles": [ + "Wireless Controller" + ], + "deviceManagementIpAddress": "204.192.4.200", + "siteNameHierarchy": "Global/USA/SAN JOSE/SJ_BLD23", + "status": "success", + "description": "Fabric device info successfully retrieved from sda fabric.", + "executionId": "8769a224-0315-4f6e-a48d-8941ae703f9a" + }, + + "get_deviceip_for_deviceid_provision": { + "response": [ + { + "type": "Cisco Catalyst 9120AXE Unified Access Point", + "upTime": "20:42:51.040", + "macAddress": "a4:88:73:d4:dd:80", + "deviceSupportLevel": "Supported", + "softwareType": null, + "softwareVersion": "17.15.4.18", + "serialNumber": "FJC24391KBC", + "lastManagedResyncReasons": "", + "managementState": "Managed", + "pendingSyncRequestsCount": "0", + 
"reasonsForDeviceResync": "", + "reasonsForPendingSyncRequests": "", + "inventoryStatusDetail": "NA", + "syncRequestedByApp": "", + "collectionInterval": "NA", + "dnsResolvedManagementAddress": "", + "lastUpdated": "2025-11-13 14:08:42", + "bootDateTime": null, + "apManagerInterfaceIp": "204.192.4.200", + "collectionStatus": "Managed", + "family": "Unified AP", + "hostname": "AP687D.B402.1614-AP-Test6", + "lastUpdateTime": 1763042922702, + "locationName": null, + "managementIpAddress": "204.192.106.2", + "platformId": "C9120AXE-B", + "reachabilityFailureReason": "NA", + "reachabilityStatus": "Reachable", + "series": "Cisco Catalyst 9120AXE Series Unified Access Points", + "snmpContact": "", + "snmpLocation": "Global/USA/SAN JOSE/SJ_BLD23/FLOOR4", + "roleSource": "AUTO", + "interfaceCount": "0", + "apEthernetMacAddress": "a4:88:73:ce:9b:b0", + "errorCode": "null", + "errorDescription": null, + "lastDeviceResyncStartTime": "", + "lineCardCount": "0", + "lineCardId": "", + "managedAtleastOnce": false, + "memorySize": "NA", + "tagCount": "0", + "tunnelUdpPort": null, + "uptimeSeconds": 81367, + "vendor": "NA", + "waasDeviceMode": null, + "associatedWlcIp": "204.192.4.200", + "description": null, + "location": null, + "role": "ACCESS", + "instanceTenantId": "68593aeecd0f400013b8604e", + "instanceUuid": "8954ff26-aa2d-4991-b2be-6462121ee710", + "id": "8954ff26-aa2d-4991-b2be-6462121ee710" + } + ], + "version": "1.0" + }, + + "assign_site_for_task_execution": { + "taskId": "019a7df3-c538-730e-bf0f-589468da2025", + "url": "/api/v1/task/019a7df3-c538-730e-bf0f-589468da2025" + }, + + "assign_device_to_site_for_provision": { + "response": { + "endTime": 1763049719304, + "status": "success", + "startTime": 1763049719096, + "resultLocation": "/dna/intent/api/v1/tasks/019a7df3-c538-730e-bf0f-589468da2025/detail", + "id": "019a7df3-c538-730e-bf0f-589468da2025" + }, + "version": "1.0" + }, + + "provision_site_for_task_execution": { + "taskId": "019a7df3-c7e3-7c6f-8f5b-a9de17278c79", + "url": "/api/v1/task/019a7df3-c7e3-7c6f-8f5b-a9de17278c79" + }, + + "task_details_for_provision": { + "response": { + "endTime": 1763049779573, + "lastUpdate": 1763049722553, + "status": "success", + "startTime": 1763049719779, + "resultLocation": "/dna/intent/api/v1/tasks/019a7df3-c7e3-7c6f-8f5b-a9de17278c79/detail", + "id": "019a7df3-c7e3-7c6f-8f5b-a9de17278c79" + }, + "version": "1.0" } } diff --git a/tests/unit/modules/dnac/fixtures/backup_and_restore_workflow_manager.json b/tests/unit/modules/dnac/fixtures/backup_and_restore_workflow_manager.json new file mode 100644 index 0000000000..02e9ce3a43 --- /dev/null +++ b/tests/unit/modules/dnac/fixtures/backup_and_restore_workflow_manager.json @@ -0,0 +1,11950 @@ +{ + "playbook_delete_schedule_backup": [ + { + "backup": [ + { + "name": "BACKUP25_07", + "scope": "CISCO_DNA_DATA_WITHOUT_ASSURANCE" + } + ] + } + ], + "get_all_backup_v1": { + "version": "2.0", + "response": [ + { + "name": "BACKUP25_07", + "scope": "CISCO_DNA_DATA_WITHOUT_ASSURANCE", + "versions": [ + "mks-upgrade:3.1.166", + "core-platform:0.10.205", + "managed-services-shared:0.10.19", + "gateway:0.10.12", + "core-addons:0.10.47", + "iam:5.4.47", + "harbor-ui:3.5.144", + "idm-ui-plugin:5.4.29", + "data-ingestion-infra:6.6.15", + "data-contextstore:6.6.10", + "data-analytics-infra:6.6.127", + "data-platform-pipeline-infra:6.6.126", + "data-store-management:6.6.45", + "data-platform-ui:6.6.101", + "cloud-connectivity-datahub:6.10.26", + "cloud-connectivity:6.10.13", + "dnacaap:6.9.43", + "dnac-search:6.9.3", + 
"system-management-operations:1.6.50", + "telemetry:4.7.20", + "cloud-connectivity-sense:7.0.7", + "digestor:7.0.5", + "rca-scripts:0.5.6", + "platform-refresh:1.3.69", + "catalyst-center-api-catalog:6.8.96", + "b2b-upgrade:2.732.65310", + "system-commons:2.732.65310", + "ise-bridge:3.150.1", + "ncp:2.732.65310", + "assurance-base-analytics:3.150.100202", + "appsvc-remediation:3.15.5", + "assurance:3.150.141", + "ai-network-analytics:4.0.32", + "ssa:2.732.1095118", + "aca:2.732.65310", + "group-based-policy-analytics:3.150.8", + "sda:2.732.65310", + "app-visibility-and-policy:2.732.117621", + "endpoint-analytics:1.11.1702", + "rogue-management:3.1.202", + "app-hosting:2.3.325072204", + "sea-app-package:2.732.685009", + "icap:2.732.65310", + "wide-area-bonjour:2.732.755006", + "sensor-automation:2.732.65310", + "sensor-assurance:3.150.140", + "disaster-recovery:2.732.365017", + "support-services:2.732.885097", + "multi-dnac-enablement:2.732.65310" + ], + "id": "c1090199-d006-4809-8027-125e0988dc84", + "size": 41852525848, + "createdDate": "2025-07-29T10:35:36Z", + "isBackupAvailable": true, + "isCompatible": true, + "context": { + "type": "ON_DEMAND", + "schedule": "" + }, + "storage": { + "type": "NFS", + "mountPath": "/data/external/nfs-e9f731e5-ea3c-512d-acef-c169bca98d04", + "id": "1fa840d8-ee9f-4e0d-a560-694c5e939680", + "name": "", + "host": "", + "serverPath": "" + }, + "status": "SUCCESS", + "endDate": "2025-07-29T10:50:44Z", + "createdBy": "Anonymous", + "duration": 908, + "releaseName": "uber-dnac", + "releaseDisplayName": "Cisco Catalyst Center", + "releaseVersion": "", + "releaseDisplayVersion": "3.1.5-75085", + "installedPackages": [ + { + "name": "mks-upgrade", + "displayName": "System", + "version": "3.1.166" + }, + { + "name": "core-platform", + "displayName": "Core Platform", + "version": "0.10.205" + }, + { + "name": "managed-services-shared", + "displayName": "Shared Managed Services", + "version": "0.10.19" + }, + { + "name": "gateway", + "displayName": "Catalyst Center Gateway Service", + "version": "0.10.12" + }, + { + "name": "core-addons", + "displayName": "System Addons", + "version": "0.10.47" + }, + { + "name": "iam", + "displayName": "Identity and Access Management", + "version": "5.4.47" + }, + { + "name": "harbor-ui", + "displayName": "Cisco Catalyst Center UI", + "version": "3.5.144" + }, + { + "name": "idm-ui-plugin", + "displayName": "Identity and Access Management - UI", + "version": "5.4.29" + }, + { + "name": "data-ingestion-infra", + "displayName": "Network Data Platform - Ingestion Infra", + "version": "6.6.15" + }, + { + "name": "data-contextstore", + "displayName": "Network Data Platform - Caching Infra", + "version": "6.6.10" + }, + { + "name": "data-analytics-infra", + "displayName": "Network Data Platform - Core", + "version": "6.6.127" + }, + { + "name": "data-platform-pipeline-infra", + "displayName": "Network Data Platform - Pipeline Infra", + "version": "6.6.126" + }, + { + "name": "data-store-management", + "displayName": "Network Data Platform - Storage Management", + "version": "6.6.45" + }, + { + "name": "data-platform-ui", + "displayName": "Network Data Platform - Manager", + "version": "6.6.101" + }, + { + "name": "cloud-connectivity-datahub", + "displayName": "DxHub Cloud Connectivity", + "version": "6.10.26" + }, + { + "name": "cloud-connectivity", + "displayName": "Cloud Connectivity", + "version": "6.10.13" + }, + { + "name": "dnacaap", + "displayName": "Cisco Catalyst Center Platform", + "version": "6.9.43" + }, + { + "name": 
"dnac-search", + "displayName": "Cisco Catalyst Center Global Search", + "version": "6.9.3" + }, + { + "name": "system-management-operations", + "displayName": "System Management Operations", + "version": "1.6.50" + }, + { + "name": "telemetry", + "displayName": "Telemetry", + "version": "4.7.20" + }, + { + "name": "cloud-connectivity-sense", + "displayName": "Cloud Connectivity - Contextual Content", + "version": "7.0.7" + }, + { + "name": "digestor", + "displayName": "Cloud Connectivity - Digestor", + "version": "7.0.5" + }, + { + "name": "rca-scripts", + "displayName": "RCA-Scripts Package", + "version": "0.5.6" + }, + { + "name": "platform-refresh", + "displayName": "Platform Refresh", + "version": "1.3.69" + }, + { + "name": "catalyst-center-api-catalog", + "displayName": "Catalyst Center API Catalog", + "version": "6.8.96" + }, + { + "name": "b2b-upgrade", + "displayName": "", + "version": "2.732.65310" + }, + { + "name": "system-commons", + "displayName": "System Commons", + "version": "2.732.65310" + }, + { + "name": "ise-bridge", + "displayName": "Cisco Identity Services Engine Bridge", + "version": "3.150.1" + }, + { + "name": "ncp", + "displayName": "Network Controller Platform", + "version": "2.732.65310" + }, + { + "name": "assurance-base-analytics", + "displayName": "Network Data Platform - Base Analytics", + "version": "3.150.100202" + }, + { + "name": "appsvc-remediation", + "displayName": "Application and Service Remediation", + "version": "3.15.5" + }, + { + "name": "assurance", + "displayName": "Assurance", + "version": "3.150.141" + }, + { + "name": "ai-network-analytics", + "displayName": "AI Network Analytics", + "version": "4.0.32" + }, + { + "name": "ssa", + "displayName": "Stealthwatch Security Analytics", + "version": "2.732.1095118" + }, + { + "name": "aca", + "displayName": "Access Control Application", + "version": "2.732.65310" + }, + { + "name": "group-based-policy-analytics", + "displayName": "Group Based Policy Analytics", + "version": "3.150.8" + }, + { + "name": "sda", + "displayName": "SD Access", + "version": "2.732.65310" + }, + { + "name": "app-visibility-and-policy", + "displayName": "Application Visibility and Policy", + "version": "2.732.117621" + }, + { + "name": "endpoint-analytics", + "displayName": "AI Endpoint Analytics", + "version": "1.11.1702" + }, + { + "name": "rogue-management", + "displayName": "Rogue and aWIPS", + "version": "3.1.202" + }, + { + "name": "app-hosting", + "displayName": "Application Hosting", + "version": "2.3.325072204" + }, + { + "name": "sea-app-package", + "displayName": "SEA App", + "version": "2.732.685009" + }, + { + "name": "icap", + "displayName": "Automation - Intelligent Capture", + "version": "2.732.65310" + }, + { + "name": "wide-area-bonjour", + "displayName": "Wide Area Bonjour", + "version": "2.732.755006" + }, + { + "name": "sensor-automation", + "displayName": "Automation - Sensor", + "version": "2.732.65310" + }, + { + "name": "sensor-assurance", + "displayName": "Assurance - Sensor", + "version": "3.150.140" + }, + { + "name": "disaster-recovery", + "displayName": "Disaster Recovery", + "version": "2.732.365017" + }, + { + "name": "support-services", + "displayName": "Support Services", + "version": "2.732.885097" + }, + { + "name": "multi-dnac-enablement", + "displayName": "Multiple Cisco Catalyst Center", + "version": "2.732.65310" + } + ], + "numberOfNodes": 1, + "productType": "DNAC", + "productVersion": "3.732.75085", + "internetProtocolVersion": "ipv4", + "fipsEnabled": false, + 
"compatibilityError": [ + + ] + } + ], + "page": { + "limit": 50, + "offset": 1, + "count": 1, + "sortBy": [ + { + "name": "startDate", + "order": "desc" + } + ] + }, + "filter": { + + } + }, + "delete_backup_v1": { + "version": "2.0", + "response": { + "taskId": "bfd9629c-ca87-455a-a313-54bd5d91aa26", + "url": "/dna/intent/api/v1/backupRestoreExecutions/bfd9629c-ca87-455a-a313-54bd5d91aa26" + } + }, + "get_backup_and_restore_execution2": { + "version": "2.0", + "response": { + "backupId": "c1090199-d006-4809-8027-125e0988dc84", + "id": "bfd9629c-ca87-455a-a313-54bd5d91aa26", + "jobType": "DELETE_BACKUP", + "status": "IN_PROGRESS", + "endDate": "1970-01-01T00:00:00.000Z", + "startDate": "2025-07-29T11:07:20.548Z", + "createdBy": "Anonymous", + "errorMessage": "", + "errorCode": "", + "scope": "CISCO_DNA_DATA_WITHOUT_ASSURANCE", + "isForceUpdate": false, + "updateMessage": "", + "failedTaskDetail": { + + }, + "systemErrorMessage": "", + "completedPercentage": 0, + "_metadata": null, + "tasks": [ + { + "id": "9594b76c-23c5-4c48-8f4d-fd5546f8fb43", + "taskName": "Trigger", + "status": "SUCCESS", + "endDate": "2025-07-29T11:07:20.549Z", + "startDate": "2025-07-29T11:07:20.549Z", + "message": "", + "failedTaskDetail": { + + } + }, + { + "id": "78ea9271-7dd7-4f32-9e88-22c0c83791f7", + "taskName": "initialize Values", + "status": "SUCCESS", + "endDate": "2025-07-29T11:07:21.334Z", + "startDate": "2025-07-29T11:07:21.235Z", + "message": "", + "failedTaskDetail": { + + } + }, + { + "id": "0d2658b1-2e66-4329-82ce-d28979dc70b7", + "taskName": "Validate Disk Reachability", + "status": "SUCCESS", + "endDate": "2025-07-29T11:07:21.649Z", + "startDate": "2025-07-29T11:07:21.335Z", + "message": "", + "failedTaskDetail": { + + } + }, + { + "id": "4fccf608-a17c-4ac6-8f3b-ec544ed5211d", + "taskName": "Get Backup", + "status": "SUCCESS", + "endDate": "2025-07-29T11:07:21.728Z", + "startDate": "2025-07-29T11:07:21.649Z", + "message": "", + "failedTaskDetail": { + + } + }, + { + "id": "cf1eb717-bf2e-4a51-af87-37ab158d2f55", + "taskName": "Validate Backup", + "status": "INPROGRESS", + "endDate": "", + "startDate": "2025-07-29T11:07:21.729Z", + "message": "", + "failedTaskDetail": { + + } + } + ] + } + }, + "get_backup_and_restore_execution3": { + "version": "2.0", + "response": { + "backupId": "c1090199-d006-4809-8027-125e0988dc84", + "id": "bfd9629c-ca87-455a-a313-54bd5d91aa26", + "jobType": "DELETE_BACKUP", + "status": "SUCCESS", + "endDate": "2025-07-29T11:07:52.871Z", + "startDate": "2025-07-29T11:07:20.548Z", + "createdBy": "Anonymous", + "errorMessage": "", + "errorCode": "", + "scope": "CISCO_DNA_DATA_WITHOUT_ASSURANCE", + "isForceUpdate": false, + "updateMessage": "", + "failedTaskDetail": { + + }, + "systemErrorMessage": "", + "completedPercentage": 0, + "_metadata": null, + "tasks": [ + { + "id": "f6315e33-4c2a-4b54-90e9-f8851551fd22", + "taskName": "Trigger", + "status": "SUCCESS", + "endDate": "2025-07-29T11:07:20.549Z", + "startDate": "2025-07-29T11:07:20.549Z", + "message": "", + "failedTaskDetail": { + + } + }, + { + "id": "9dd74432-12be-4169-bc45-f5806f20ab76", + "taskName": "initialize Values", + "status": "SUCCESS", + "endDate": "2025-07-29T11:07:21.334Z", + "startDate": "2025-07-29T11:07:21.235Z", + "message": "", + "failedTaskDetail": { + + } + }, + { + "id": "b7e4c820-500c-4ef5-bc1f-bce7ae6bb0a5", + "taskName": "Validate Disk Reachability", + "status": "SUCCESS", + "endDate": "2025-07-29T11:07:21.649Z", + "startDate": "2025-07-29T11:07:21.335Z", + "message": "", + "failedTaskDetail": { + + } 
+ }, + { + "id": "271bddf8-b064-4967-8e07-5f42f7ec8581", + "taskName": "Get Backup", + "status": "SUCCESS", + "endDate": "2025-07-29T11:07:21.728Z", + "startDate": "2025-07-29T11:07:21.649Z", + "message": "", + "failedTaskDetail": { + + } + }, + { + "id": "180be9e0-b422-493e-98de-62dce992b2d3", + "taskName": "Validate Backup", + "status": "SUCCESS", + "endDate": "2025-07-29T11:07:22.124Z", + "startDate": "2025-07-29T11:07:21.729Z", + "message": "", + "failedTaskDetail": { + + } + }, + { + "id": "a09b7d0b-708a-4d6b-9944-eb2ea10dfe03", + "taskName": "Prepare Delete", + "status": "SUCCESS", + "endDate": "2025-07-29T11:07:22.232Z", + "startDate": "2025-07-29T11:07:22.124Z", + "message": "", + "failedTaskDetail": { + + } + }, + { + "id": "d4d40af3-ce4b-4e75-9a5e-fb314a4aebb4", + "taskName": "Delete MKS / System Manager / Managed Service Backup", + "status": "SUCCESS", + "endDate": "2025-07-29T11:07:52.816Z", + "startDate": "2025-07-29T11:07:22.233Z", + "message": "", + "failedTaskDetail": { + + } + }, + { + "id": "e40dafe1-2cd0-4364-9198-51c1f0b2d055", + "taskName": "Delete Backup Metadata", + "status": "SUCCESS", + "endDate": "2025-07-29T11:07:52.856Z", + "startDate": "2025-07-29T11:07:52.842Z", + "message": "", + "failedTaskDetail": { + + } + } + ] + } + }, + "playbook_create_schedule_backup": [ + { + "backup": [ + { + "name": "BACKUP24_07", + "scope": "CISCO_DNA_DATA_WITHOUT_ASSURANCE" + } + ] + } + ], + "get_all_backup2": { + "version": "2.0", + "response": [ + { + "name": "BACKUP25_07", + "scope": "CISCO_DNA_DATA_WITHOUT_ASSURANCE", + "versions": [ + "mks-upgrade:3.1.166", + "core-platform:0.10.205", + "managed-services-shared:0.10.19", + "gateway:0.10.12", + "core-addons:0.10.47", + "iam:5.4.47", + "harbor-ui:3.5.144", + "idm-ui-plugin:5.4.29", + "data-ingestion-infra:6.6.15", + "data-contextstore:6.6.10", + "data-analytics-infra:6.6.127", + "data-platform-pipeline-infra:6.6.126", + "data-store-management:6.6.45", + "data-platform-ui:6.6.101", + "cloud-connectivity-datahub:6.10.26", + "cloud-connectivity:6.10.13", + "dnacaap:6.9.43", + "dnac-search:6.9.3", + "system-management-operations:1.6.50", + "telemetry:4.7.20", + "cloud-connectivity-sense:7.0.7", + "digestor:7.0.5", + "rca-scripts:0.5.6", + "platform-refresh:1.3.69", + "catalyst-center-api-catalog:6.8.96", + "b2b-upgrade:2.732.65310", + "system-commons:2.732.65310", + "ise-bridge:3.150.1", + "ncp:2.732.65310", + "assurance-base-analytics:3.150.100202", + "appsvc-remediation:3.15.5", + "assurance:3.150.141", + "ai-network-analytics:4.0.32", + "ssa:2.732.1095118", + "aca:2.732.65310", + "group-based-policy-analytics:3.150.8", + "sda:2.732.65310", + "app-visibility-and-policy:2.732.117621", + "endpoint-analytics:1.11.1702", + "rogue-management:3.1.202", + "app-hosting:2.3.325072204", + "sea-app-package:2.732.685009", + "icap:2.732.65310", + "wide-area-bonjour:2.732.755006", + "sensor-automation:2.732.65310", + "sensor-assurance:3.150.140", + "disaster-recovery:2.732.365017", + "support-services:2.732.885097", + "multi-dnac-enablement:2.732.65310" + ], + "id": "c10185e5-0783-4ae9-abea-8059a2cd3f94", + "size": 41856826480, + "createdDate": "2025-07-31T06:31:00Z", + "isBackupAvailable": true, + "isCompatible": true, + "context": { + "type": "ON_DEMAND", + "schedule": "" + }, + "storage": { + "type": "NFS", + "mountPath": "/data/external/nfs-e9f731e5-ea3c-512d-acef-c169bca98d04", + "id": "1fa840d8-ee9f-4e0d-a560-694c5e939680", + "name": "", + "host": "", + "serverPath": "" + }, + "status": "SUCCESS", + "endDate": "2025-07-31T06:46:09Z", + 
"createdBy": "Anonymous", + "duration": 909, + "releaseName": "uber-dnac", + "releaseDisplayName": "Cisco Catalyst Center", + "releaseVersion": "", + "releaseDisplayVersion": "3.1.5-75085", + "installedPackages": [ + { + "name": "mks-upgrade", + "displayName": "System", + "version": "3.1.166" + }, + { + "name": "core-platform", + "displayName": "Core Platform", + "version": "0.10.205" + }, + { + "name": "managed-services-shared", + "displayName": "Shared Managed Services", + "version": "0.10.19" + }, + { + "name": "gateway", + "displayName": "Catalyst Center Gateway Service", + "version": "0.10.12" + }, + { + "name": "core-addons", + "displayName": "System Addons", + "version": "0.10.47" + }, + { + "name": "iam", + "displayName": "Identity and Access Management", + "version": "5.4.47" + }, + { + "name": "harbor-ui", + "displayName": "Cisco Catalyst Center UI", + "version": "3.5.144" + }, + { + "name": "idm-ui-plugin", + "displayName": "Identity and Access Management - UI", + "version": "5.4.29" + }, + { + "name": "data-ingestion-infra", + "displayName": "Network Data Platform - Ingestion Infra", + "version": "6.6.15" + }, + { + "name": "data-contextstore", + "displayName": "Network Data Platform - Caching Infra", + "version": "6.6.10" + }, + { + "name": "data-analytics-infra", + "displayName": "Network Data Platform - Core", + "version": "6.6.127" + }, + { + "name": "data-platform-pipeline-infra", + "displayName": "Network Data Platform - Pipeline Infra", + "version": "6.6.126" + }, + { + "name": "data-store-management", + "displayName": "Network Data Platform - Storage Management", + "version": "6.6.45" + }, + { + "name": "data-platform-ui", + "displayName": "Network Data Platform - Manager", + "version": "6.6.101" + }, + { + "name": "cloud-connectivity-datahub", + "displayName": "DxHub Cloud Connectivity", + "version": "6.10.26" + }, + { + "name": "cloud-connectivity", + "displayName": "Cloud Connectivity", + "version": "6.10.13" + }, + { + "name": "dnacaap", + "displayName": "Cisco Catalyst Center Platform", + "version": "6.9.43" + }, + { + "name": "dnac-search", + "displayName": "Cisco Catalyst Center Global Search", + "version": "6.9.3" + }, + { + "name": "system-management-operations", + "displayName": "System Management Operations", + "version": "1.6.50" + }, + { + "name": "telemetry", + "displayName": "Telemetry", + "version": "4.7.20" + }, + { + "name": "cloud-connectivity-sense", + "displayName": "Cloud Connectivity - Contextual Content", + "version": "7.0.7" + }, + { + "name": "digestor", + "displayName": "Cloud Connectivity - Digestor", + "version": "7.0.5" + }, + { + "name": "rca-scripts", + "displayName": "RCA-Scripts Package", + "version": "0.5.6" + }, + { + "name": "platform-refresh", + "displayName": "Platform Refresh", + "version": "1.3.69" + }, + { + "name": "catalyst-center-api-catalog", + "displayName": "Catalyst Center API Catalog", + "version": "6.8.96" + }, + { + "name": "b2b-upgrade", + "displayName": "", + "version": "2.732.65310" + }, + { + "name": "system-commons", + "displayName": "System Commons", + "version": "2.732.65310" + }, + { + "name": "ise-bridge", + "displayName": "Cisco Identity Services Engine Bridge", + "version": "3.150.1" + }, + { + "name": "ncp", + "displayName": "Network Controller Platform", + "version": "2.732.65310" + }, + { + "name": "assurance-base-analytics", + "displayName": "Network Data Platform - Base Analytics", + "version": "3.150.100202" + }, + { + "name": "appsvc-remediation", + "displayName": "Application and Service 
Remediation", + "version": "3.15.5" + }, + { + "name": "assurance", + "displayName": "Assurance", + "version": "3.150.141" + }, + { + "name": "ai-network-analytics", + "displayName": "AI Network Analytics", + "version": "4.0.32" + }, + { + "name": "ssa", + "displayName": "Stealthwatch Security Analytics", + "version": "2.732.1095118" + }, + { + "name": "aca", + "displayName": "Access Control Application", + "version": "2.732.65310" + }, + { + "name": "group-based-policy-analytics", + "displayName": "Group Based Policy Analytics", + "version": "3.150.8" + }, + { + "name": "sda", + "displayName": "SD Access", + "version": "2.732.65310" + }, + { + "name": "app-visibility-and-policy", + "displayName": "Application Visibility and Policy", + "version": "2.732.117621" + }, + { + "name": "endpoint-analytics", + "displayName": "AI Endpoint Analytics", + "version": "1.11.1702" + }, + { + "name": "rogue-management", + "displayName": "Rogue and aWIPS", + "version": "3.1.202" + }, + { + "name": "app-hosting", + "displayName": "Application Hosting", + "version": "2.3.325072204" + }, + { + "name": "sea-app-package", + "displayName": "SEA App", + "version": "2.732.685009" + }, + { + "name": "icap", + "displayName": "Automation - Intelligent Capture", + "version": "2.732.65310" + }, + { + "name": "wide-area-bonjour", + "displayName": "Wide Area Bonjour", + "version": "2.732.755006" + }, + { + "name": "sensor-automation", + "displayName": "Automation - Sensor", + "version": "2.732.65310" + }, + { + "name": "sensor-assurance", + "displayName": "Assurance - Sensor", + "version": "3.150.140" + }, + { + "name": "disaster-recovery", + "displayName": "Disaster Recovery", + "version": "2.732.365017" + }, + { + "name": "support-services", + "displayName": "Support Services", + "version": "2.732.885097" + }, + { + "name": "multi-dnac-enablement", + "displayName": "Multiple Cisco Catalyst Center", + "version": "2.732.65310" + } + ], + "numberOfNodes": 1, + "productType": "DNAC", + "productVersion": "3.732.75085", + "internetProtocolVersion": "ipv4", + "fipsEnabled": false, + "compatibilityError": [ + + ] + } + ], + "page": { + "limit": 50, + "offset": 1, + "count": 1, + "sortBy": [ + { + "name": "startDate", + "order": "desc" + } + ] + }, + "filter": { + + } + }, + "create_backup1": { + "version": "2.0", + "response": { + "taskId": "4aa1e24b-db26-468c-9686-ad8b6d997385", + "url": "/dna/intent/api/v1/backupRestoreExecutions/4aa1e24b-db26-468c-9686-ad8b6d997385" + } + }, + "get_backup_and_restore_execution4": { + "version": "2.0", + "response": { + "backupId": "fcace3ff-02e5-4dcc-89a0-092823ffea22", + "id": "4aa1e24b-db26-468c-9686-ad8b6d997385", + "jobType": "CREATE_BACKUP", + "status": "IN_PROGRESS", + "endDate": "1970-01-01T00:00:00.000Z", + "startDate": "2025-08-01T10:09:20.251Z", + "createdBy": "Anonymous", + "errorMessage": "", + "errorCode": "", + "scope": "CISCO_DNA_DATA_WITHOUT_ASSURANCE", + "isForceUpdate": false, + "updateMessage": "", + "failedTaskDetail": { + + }, + "systemErrorMessage": "", + "completedPercentage": 0, + "_metadata": null, + "tasks": [ + { + "id": "92265ecd-708a-45ac-b611-4cc1e490254b", + "taskName": "Create Backup Metadata", + "status": "SUCCESS", + "endDate": "2025-08-01T10:09:20.289Z", + "startDate": "2025-08-01T10:09:20.252Z", + "message": "", + "failedTaskDetail": { + + } + }, + { + "id": "d1fe4abd-7652-4cd0-ba59-2164ce175fe7", + "taskName": "Validation", + "status": "INPROGRESS", + "endDate": "", + "startDate": "2025-08-01T10:09:20.289Z", + "message": "", + "failedTaskDetail": { + + 
} + } + ] + } + }, + "get_backup_and_restore_execution5": { + "version": "2.0", + "response": { + "backupId": "fcace3ff-02e5-4dcc-89a0-092823ffea22", + "id": "4aa1e24b-db26-468c-9686-ad8b6d997385", + "jobType": "CREATE_BACKUP", + "status": "SUCCESS", + "endDate": "2025-08-01T10:24:26.618Z", + "startDate": "2025-08-01T10:09:20.251Z", + "createdBy": "Anonymous", + "errorMessage": "", + "errorCode": "", + "scope": "CISCO_DNA_DATA_WITHOUT_ASSURANCE", + "isForceUpdate": false, + "updateMessage": "", + "failedTaskDetail": { + + }, + "systemErrorMessage": "", + "completedPercentage": 0, + "_metadata": null, + "tasks": [ + { + "id": "aaf6e7b8-fa1a-49c3-8d3d-cc5ac1b7c8da", + "taskName": "Create Backup Metadata", + "status": "SUCCESS", + "endDate": "2025-08-01T10:09:20.289Z", + "startDate": "2025-08-01T10:09:20.252Z", + "message": "", + "failedTaskDetail": { + + } + }, + { + "id": "157fd380-21d6-4946-a695-553ecc7c5f38", + "taskName": "Validation", + "status": "SUCCESS", + "endDate": "2025-08-01T10:09:20.971Z", + "startDate": "2025-08-01T10:09:20.289Z", + "message": "", + "failedTaskDetail": { + + } + }, + { + "id": "142efd7f-06e1-40a1-8bbe-d62a5ca6d1a7", + "taskName": "MKS Backup", + "status": "SUCCESS", + "endDate": "2025-08-01T10:09:51.152Z", + "startDate": "2025-08-01T10:09:20.971Z", + "message": "", + "failedTaskDetail": { + + } + }, + { + "id": "9f4d3cd9-ad11-4b26-8f79-ec1d865e51b2", + "taskName": "System Manager Backup", + "status": "SUCCESS", + "endDate": "2025-08-01T10:09:52.384Z", + "startDate": "2025-08-01T10:09:51.152Z", + "message": "", + "failedTaskDetail": { + + } + }, + { + "id": "f79f2ce7-a0e6-44a6-8f8d-ce16a5f33a08", + "taskName": "Managed Services Backup", + "status": "SUCCESS", + "endDate": "2025-08-01T10:24:25.756Z", + "startDate": "2025-08-01T10:09:52.387Z", + "message": "", + "failedTaskDetail": { + + } + }, + { + "id": "bd44ad79-875a-4cc5-aabc-06d482721763", + "taskName": "Admin Service Hook", + "status": "SUCCESS", + "endDate": "2025-08-01T10:24:26.372Z", + "startDate": "2025-08-01T10:24:25.764Z", + "message": "", + "failedTaskDetail": { + + } + }, + { + "id": "57ec353f-cdb1-4a9c-b282-a633db0bc73c", + "taskName": "Finalize Backup", + "status": "SUCCESS", + "endDate": "2025-08-01T10:24:26.471Z", + "startDate": "2025-08-01T10:24:26.383Z", + "message": "", + "failedTaskDetail": { + + } + } + ] + } + }, + "get_all_backup3": { + "version": "2.0", + "response": [ + { + "name": "BACKUP24_07", + "scope": "CISCO_DNA_DATA_WITHOUT_ASSURANCE", + "versions": [ + "mks-upgrade:3.1.166", + "core-platform:0.10.205", + "managed-services-shared:0.10.19", + "gateway:0.10.12", + "core-addons:0.10.47", + "iam:5.4.47", + "harbor-ui:3.5.144", + "idm-ui-plugin:5.4.29", + "data-ingestion-infra:6.6.15", + "data-contextstore:6.6.10", + "data-analytics-infra:6.6.127", + "data-platform-pipeline-infra:6.6.126", + "data-store-management:6.6.45", + "data-platform-ui:6.6.101", + "cloud-connectivity-datahub:6.10.26", + "cloud-connectivity:6.10.13", + "dnacaap:6.9.43", + "dnac-search:6.9.3", + "system-management-operations:1.6.50", + "telemetry:4.7.20", + "cloud-connectivity-sense:7.0.7", + "digestor:7.0.5", + "rca-scripts:0.5.6", + "platform-refresh:1.3.69", + "catalyst-center-api-catalog:6.8.96", + "b2b-upgrade:2.732.65310", + "system-commons:2.732.65310", + "ise-bridge:3.150.1", + "ncp:2.732.65310", + "assurance-base-analytics:3.150.100202", + "appsvc-remediation:3.15.5", + "assurance:3.150.141", + "ai-network-analytics:4.0.32", + "ssa:2.732.1095118", + "aca:2.732.65310", + 
"group-based-policy-analytics:3.150.8", + "sda:2.732.65310", + "app-visibility-and-policy:2.732.117621", + "endpoint-analytics:1.11.1702", + "rogue-management:3.1.202", + "app-hosting:2.3.325072204", + "sea-app-package:2.732.685009", + "icap:2.732.65310", + "wide-area-bonjour:2.732.755006", + "sensor-automation:2.732.65310", + "sensor-assurance:3.150.140", + "disaster-recovery:2.732.365017", + "support-services:2.732.885097", + "multi-dnac-enablement:2.732.65310" + ], + "id": "a6adee73-2d54-443b-b4c6-14c5bc5837b4", + "size": 0, + "createdDate": "2025-08-04T10:03:25Z", + "isBackupAvailable": false, + "isCompatible": false, + "context": { + "type": "ON_DEMAND", + "schedule": "" + }, + "storage": { + "type": "NFS", + "mountPath": "/data/external/nfs-e9f731e5-ea3c-512d-acef-c169bca98d04", + "id": "1fa840d8-ee9f-4e0d-a560-694c5e939680", + "name": "", + "host": "", + "serverPath": "" + }, + "status": "IN_PROGRESS", + "endDate": "0001-01-01T00:00:00Z", + "createdBy": "Anonymous", + "releaseName": "uber-dnac", + "releaseDisplayName": "Cisco Catalyst Center", + "releaseVersion": "", + "releaseDisplayVersion": "3.1.5-75085", + "installedPackages": [ + { + "name": "mks-upgrade", + "displayName": "System", + "version": "3.1.166" + }, + { + "name": "core-platform", + "displayName": "Core Platform", + "version": "0.10.205" + }, + { + "name": "managed-services-shared", + "displayName": "Shared Managed Services", + "version": "0.10.19" + }, + { + "name": "gateway", + "displayName": "Catalyst Center Gateway Service", + "version": "0.10.12" + }, + { + "name": "core-addons", + "displayName": "System Addons", + "version": "0.10.47" + }, + { + "name": "iam", + "displayName": "Identity and Access Management", + "version": "5.4.47" + }, + { + "name": "harbor-ui", + "displayName": "Cisco Catalyst Center UI", + "version": "3.5.144" + }, + { + "name": "idm-ui-plugin", + "displayName": "Identity and Access Management - UI", + "version": "5.4.29" + }, + { + "name": "data-ingestion-infra", + "displayName": "Network Data Platform - Ingestion Infra", + "version": "6.6.15" + }, + { + "name": "data-contextstore", + "displayName": "Network Data Platform - Caching Infra", + "version": "6.6.10" + }, + { + "name": "data-analytics-infra", + "displayName": "Network Data Platform - Core", + "version": "6.6.127" + }, + { + "name": "data-platform-pipeline-infra", + "displayName": "Network Data Platform - Pipeline Infra", + "version": "6.6.126" + }, + { + "name": "data-store-management", + "displayName": "Network Data Platform - Storage Management", + "version": "6.6.45" + }, + { + "name": "data-platform-ui", + "displayName": "Network Data Platform - Manager", + "version": "6.6.101" + }, + { + "name": "cloud-connectivity-datahub", + "displayName": "DxHub Cloud Connectivity", + "version": "6.10.26" + }, + { + "name": "cloud-connectivity", + "displayName": "Cloud Connectivity", + "version": "6.10.13" + }, + { + "name": "dnacaap", + "displayName": "Cisco Catalyst Center Platform", + "version": "6.9.43" + }, + { + "name": "dnac-search", + "displayName": "Cisco Catalyst Center Global Search", + "version": "6.9.3" + }, + { + "name": "system-management-operations", + "displayName": "System Management Operations", + "version": "1.6.50" + }, + { + "name": "telemetry", + "displayName": "Telemetry", + "version": "4.7.20" + }, + { + "name": "cloud-connectivity-sense", + "displayName": "Cloud Connectivity - Contextual Content", + "version": "7.0.7" + }, + { + "name": "digestor", + "displayName": "Cloud Connectivity - Digestor", + "version": 
"7.0.5" + }, + { + "name": "rca-scripts", + "displayName": "RCA-Scripts Package", + "version": "0.5.6" + }, + { + "name": "platform-refresh", + "displayName": "Platform Refresh", + "version": "1.3.69" + }, + { + "name": "catalyst-center-api-catalog", + "displayName": "Catalyst Center API Catalog", + "version": "6.8.96" + }, + { + "name": "b2b-upgrade", + "displayName": "", + "version": "2.732.65310" + }, + { + "name": "system-commons", + "displayName": "System Commons", + "version": "2.732.65310" + }, + { + "name": "ise-bridge", + "displayName": "Cisco Identity Services Engine Bridge", + "version": "3.150.1" + }, + { + "name": "ncp", + "displayName": "Network Controller Platform", + "version": "2.732.65310" + }, + { + "name": "assurance-base-analytics", + "displayName": "Network Data Platform - Base Analytics", + "version": "3.150.100202" + }, + { + "name": "appsvc-remediation", + "displayName": "Application and Service Remediation", + "version": "3.15.5" + }, + { + "name": "assurance", + "displayName": "Assurance", + "version": "3.150.141" + }, + { + "name": "ai-network-analytics", + "displayName": "AI Network Analytics", + "version": "4.0.32" + }, + { + "name": "ssa", + "displayName": "Stealthwatch Security Analytics", + "version": "2.732.1095118" + }, + { + "name": "aca", + "displayName": "Access Control Application", + "version": "2.732.65310" + }, + { + "name": "group-based-policy-analytics", + "displayName": "Group Based Policy Analytics", + "version": "3.150.8" + }, + { + "name": "sda", + "displayName": "SD Access", + "version": "2.732.65310" + }, + { + "name": "app-visibility-and-policy", + "displayName": "Application Visibility and Policy", + "version": "2.732.117621" + }, + { + "name": "endpoint-analytics", + "displayName": "AI Endpoint Analytics", + "version": "1.11.1702" + }, + { + "name": "rogue-management", + "displayName": "Rogue and aWIPS", + "version": "3.1.202" + }, + { + "name": "app-hosting", + "displayName": "Application Hosting", + "version": "2.3.325072204" + }, + { + "name": "sea-app-package", + "displayName": "SEA App", + "version": "2.732.685009" + }, + { + "name": "icap", + "displayName": "Automation - Intelligent Capture", + "version": "2.732.65310" + }, + { + "name": "wide-area-bonjour", + "displayName": "Wide Area Bonjour", + "version": "2.732.755006" + }, + { + "name": "sensor-automation", + "displayName": "Automation - Sensor", + "version": "2.732.65310" + }, + { + "name": "sensor-assurance", + "displayName": "Assurance - Sensor", + "version": "3.150.140" + }, + { + "name": "disaster-recovery", + "displayName": "Disaster Recovery", + "version": "2.732.365017" + }, + { + "name": "support-services", + "displayName": "Support Services", + "version": "2.732.885097" + }, + { + "name": "multi-dnac-enablement", + "displayName": "Multiple Cisco Catalyst Center", + "version": "2.732.65310" + } + ], + "numberOfNodes": 1, + "productType": "DNAC", + "productVersion": "3.732.75085", + "internetProtocolVersion": "ipv4", + "fipsEnabled": false, + "compatibilityError": [ + + ] + } + ], + "page": { + "limit": 50, + "offset": 1, + "count": 1, + "sortBy": [ + { + "name": "startDate", + "order": "desc" + } + ] + }, + "filter": { + + } + }, + "playbook_backup_alreadyexists": [ + { + "backup": [ + { + "name": "BACKUP24_07", + "scope": "CISCO_DNA_DATA_WITHOUT_ASSURANCE" + } + ] + } + ], + "get_all_backup4": { + "version": "2.0", + "response": [ + { + "name": "BACKUP24_07", + "scope": "CISCO_DNA_DATA_WITHOUT_ASSURANCE", + "versions": [ + "mks-upgrade:3.1.166", + 
"core-platform:0.10.205", + "managed-services-shared:0.10.19", + "gateway:0.10.12", + "core-addons:0.10.47", + "iam:5.4.47", + "harbor-ui:3.5.144", + "idm-ui-plugin:5.4.29", + "data-ingestion-infra:6.6.15", + "data-contextstore:6.6.10", + "data-analytics-infra:6.6.127", + "data-platform-pipeline-infra:6.6.126", + "data-store-management:6.6.45", + "data-platform-ui:6.6.101", + "cloud-connectivity-datahub:6.10.26", + "cloud-connectivity:6.10.13", + "dnacaap:6.9.43", + "dnac-search:6.9.3", + "system-management-operations:1.6.50", + "telemetry:4.7.20", + "cloud-connectivity-sense:7.0.7", + "digestor:7.0.5", + "rca-scripts:0.5.6", + "platform-refresh:1.3.69", + "catalyst-center-api-catalog:6.8.96", + "b2b-upgrade:2.732.65310", + "system-commons:2.732.65310", + "ise-bridge:3.150.1", + "ncp:2.732.65310", + "assurance-base-analytics:3.150.100202", + "appsvc-remediation:3.15.5", + "assurance:3.150.141", + "ai-network-analytics:4.0.32", + "ssa:2.732.1095118", + "aca:2.732.65310", + "group-based-policy-analytics:3.150.8", + "sda:2.732.65310", + "app-visibility-and-policy:2.732.117621", + "endpoint-analytics:1.11.1702", + "rogue-management:3.1.202", + "app-hosting:2.3.325072204", + "sea-app-package:2.732.685009", + "icap:2.732.65310", + "wide-area-bonjour:2.732.755006", + "sensor-automation:2.732.65310", + "sensor-assurance:3.150.140", + "disaster-recovery:2.732.365017", + "support-services:2.732.885097", + "multi-dnac-enablement:2.732.65310" + ], + "id": "a6adee73-2d54-443b-b4c6-14c5bc5837b4", + "size": 41869391641, + "createdDate": "2025-08-04T10:03:25Z", + "isBackupAvailable": true, + "isCompatible": true, + "context": { + "type": "ON_DEMAND", + "schedule": "" + }, + "storage": { + "type": "NFS", + "mountPath": "/data/external/nfs-e9f731e5-ea3c-512d-acef-c169bca98d04", + "id": "1fa840d8-ee9f-4e0d-a560-694c5e939680", + "name": "", + "host": "", + "serverPath": "" + }, + "status": "SUCCESS", + "endDate": "2025-08-04T10:18:33Z", + "createdBy": "Anonymous", + "duration": 908, + "releaseName": "uber-dnac", + "releaseDisplayName": "Cisco Catalyst Center", + "releaseVersion": "", + "releaseDisplayVersion": "3.1.5-75085", + "installedPackages": [ + { + "name": "mks-upgrade", + "displayName": "System", + "version": "3.1.166" + }, + { + "name": "core-platform", + "displayName": "Core Platform", + "version": "0.10.205" + }, + { + "name": "managed-services-shared", + "displayName": "Shared Managed Services", + "version": "0.10.19" + }, + { + "name": "gateway", + "displayName": "Catalyst Center Gateway Service", + "version": "0.10.12" + }, + { + "name": "core-addons", + "displayName": "System Addons", + "version": "0.10.47" + }, + { + "name": "iam", + "displayName": "Identity and Access Management", + "version": "5.4.47" + }, + { + "name": "harbor-ui", + "displayName": "Cisco Catalyst Center UI", + "version": "3.5.144" + }, + { + "name": "idm-ui-plugin", + "displayName": "Identity and Access Management - UI", + "version": "5.4.29" + }, + { + "name": "data-ingestion-infra", + "displayName": "Network Data Platform - Ingestion Infra", + "version": "6.6.15" + }, + { + "name": "data-contextstore", + "displayName": "Network Data Platform - Caching Infra", + "version": "6.6.10" + }, + { + "name": "data-analytics-infra", + "displayName": "Network Data Platform - Core", + "version": "6.6.127" + }, + { + "name": "data-platform-pipeline-infra", + "displayName": "Network Data Platform - Pipeline Infra", + "version": "6.6.126" + }, + { + "name": "data-store-management", + "displayName": "Network Data Platform - Storage 
Management", + "version": "6.6.45" + }, + { + "name": "data-platform-ui", + "displayName": "Network Data Platform - Manager", + "version": "6.6.101" + }, + { + "name": "cloud-connectivity-datahub", + "displayName": "DxHub Cloud Connectivity", + "version": "6.10.26" + }, + { + "name": "cloud-connectivity", + "displayName": "Cloud Connectivity", + "version": "6.10.13" + }, + { + "name": "dnacaap", + "displayName": "Cisco Catalyst Center Platform", + "version": "6.9.43" + }, + { + "name": "dnac-search", + "displayName": "Cisco Catalyst Center Global Search", + "version": "6.9.3" + }, + { + "name": "system-management-operations", + "displayName": "System Management Operations", + "version": "1.6.50" + }, + { + "name": "telemetry", + "displayName": "Telemetry", + "version": "4.7.20" + }, + { + "name": "cloud-connectivity-sense", + "displayName": "Cloud Connectivity - Contextual Content", + "version": "7.0.7" + }, + { + "name": "digestor", + "displayName": "Cloud Connectivity - Digestor", + "version": "7.0.5" + }, + { + "name": "rca-scripts", + "displayName": "RCA-Scripts Package", + "version": "0.5.6" + }, + { + "name": "platform-refresh", + "displayName": "Platform Refresh", + "version": "1.3.69" + }, + { + "name": "catalyst-center-api-catalog", + "displayName": "Catalyst Center API Catalog", + "version": "6.8.96" + }, + { + "name": "b2b-upgrade", + "displayName": "", + "version": "2.732.65310" + }, + { + "name": "system-commons", + "displayName": "System Commons", + "version": "2.732.65310" + }, + { + "name": "ise-bridge", + "displayName": "Cisco Identity Services Engine Bridge", + "version": "3.150.1" + }, + { + "name": "ncp", + "displayName": "Network Controller Platform", + "version": "2.732.65310" + }, + { + "name": "assurance-base-analytics", + "displayName": "Network Data Platform - Base Analytics", + "version": "3.150.100202" + }, + { + "name": "appsvc-remediation", + "displayName": "Application and Service Remediation", + "version": "3.15.5" + }, + { + "name": "assurance", + "displayName": "Assurance", + "version": "3.150.141" + }, + { + "name": "ai-network-analytics", + "displayName": "AI Network Analytics", + "version": "4.0.32" + }, + { + "name": "ssa", + "displayName": "Stealthwatch Security Analytics", + "version": "2.732.1095118" + }, + { + "name": "aca", + "displayName": "Access Control Application", + "version": "2.732.65310" + }, + { + "name": "group-based-policy-analytics", + "displayName": "Group Based Policy Analytics", + "version": "3.150.8" + }, + { + "name": "sda", + "displayName": "SD Access", + "version": "2.732.65310" + }, + { + "name": "app-visibility-and-policy", + "displayName": "Application Visibility and Policy", + "version": "2.732.117621" + }, + { + "name": "endpoint-analytics", + "displayName": "AI Endpoint Analytics", + "version": "1.11.1702" + }, + { + "name": "rogue-management", + "displayName": "Rogue and aWIPS", + "version": "3.1.202" + }, + { + "name": "app-hosting", + "displayName": "Application Hosting", + "version": "2.3.325072204" + }, + { + "name": "sea-app-package", + "displayName": "SEA App", + "version": "2.732.685009" + }, + { + "name": "icap", + "displayName": "Automation - Intelligent Capture", + "version": "2.732.65310" + }, + { + "name": "wide-area-bonjour", + "displayName": "Wide Area Bonjour", + "version": "2.732.755006" + }, + { + "name": "sensor-automation", + "displayName": "Automation - Sensor", + "version": "2.732.65310" + }, + { + "name": "sensor-assurance", + "displayName": "Assurance - Sensor", + "version": "3.150.140" + }, + { + 
"name": "disaster-recovery", + "displayName": "Disaster Recovery", + "version": "2.732.365017" + }, + { + "name": "support-services", + "displayName": "Support Services", + "version": "2.732.885097" + }, + { + "name": "multi-dnac-enablement", + "displayName": "Multiple Cisco Catalyst Center", + "version": "2.732.65310" + } + ], + "numberOfNodes": 1, + "productType": "DNAC", + "productVersion": "3.732.75085", + "internetProtocolVersion": "ipv4", + "fipsEnabled": false, + "compatibilityError": [ + + ] + } + ], + "page": { + "limit": 50, + "offset": 1, + "count": 1, + "sortBy": [ + { + "name": "startDate", + "order": "desc" + } + ] + }, + "filter": { + + } + }, + "get_all_backup5": { + "version": "2.0", + "response": [ + { + "name": "BACKUP24_07", + "scope": "CISCO_DNA_DATA_WITHOUT_ASSURANCE", + "versions": [ + "mks-upgrade:3.1.166", + "core-platform:0.10.205", + "managed-services-shared:0.10.19", + "gateway:0.10.12", + "core-addons:0.10.47", + "iam:5.4.47", + "harbor-ui:3.5.144", + "idm-ui-plugin:5.4.29", + "data-ingestion-infra:6.6.15", + "data-contextstore:6.6.10", + "data-analytics-infra:6.6.127", + "data-platform-pipeline-infra:6.6.126", + "data-store-management:6.6.45", + "data-platform-ui:6.6.101", + "cloud-connectivity-datahub:6.10.26", + "cloud-connectivity:6.10.13", + "dnacaap:6.9.43", + "dnac-search:6.9.3", + "system-management-operations:1.6.50", + "telemetry:4.7.20", + "cloud-connectivity-sense:7.0.7", + "digestor:7.0.5", + "rca-scripts:0.5.6", + "platform-refresh:1.3.69", + "catalyst-center-api-catalog:6.8.96", + "b2b-upgrade:2.732.65310", + "system-commons:2.732.65310", + "ise-bridge:3.150.1", + "ncp:2.732.65310", + "assurance-base-analytics:3.150.100202", + "appsvc-remediation:3.15.5", + "assurance:3.150.141", + "ai-network-analytics:4.0.32", + "ssa:2.732.1095118", + "aca:2.732.65310", + "group-based-policy-analytics:3.150.8", + "sda:2.732.65310", + "app-visibility-and-policy:2.732.117621", + "endpoint-analytics:1.11.1702", + "rogue-management:3.1.202", + "app-hosting:2.3.325072204", + "sea-app-package:2.732.685009", + "icap:2.732.65310", + "wide-area-bonjour:2.732.755006", + "sensor-automation:2.732.65310", + "sensor-assurance:3.150.140", + "disaster-recovery:2.732.365017", + "support-services:2.732.885097", + "multi-dnac-enablement:2.732.65310" + ], + "id": "a6adee73-2d54-443b-b4c6-14c5bc5837b4", + "size": 41869391641, + "createdDate": "2025-08-04T10:03:25Z", + "isBackupAvailable": true, + "isCompatible": true, + "context": { + "type": "ON_DEMAND", + "schedule": "" + }, + "storage": { + "type": "NFS", + "mountPath": "/data/external/nfs-e9f731e5-ea3c-512d-acef-c169bca98d04", + "id": "1fa840d8-ee9f-4e0d-a560-694c5e939680", + "name": "", + "host": "", + "serverPath": "" + }, + "status": "SUCCESS", + "endDate": "2025-08-04T10:18:33Z", + "createdBy": "Anonymous", + "duration": 908, + "releaseName": "uber-dnac", + "releaseDisplayName": "Cisco Catalyst Center", + "releaseVersion": "", + "releaseDisplayVersion": "3.1.5-75085", + "installedPackages": [ + { + "name": "mks-upgrade", + "displayName": "System", + "version": "3.1.166" + }, + { + "name": "core-platform", + "displayName": "Core Platform", + "version": "0.10.205" + }, + { + "name": "managed-services-shared", + "displayName": "Shared Managed Services", + "version": "0.10.19" + }, + { + "name": "gateway", + "displayName": "Catalyst Center Gateway Service", + "version": "0.10.12" + }, + { + "name": "core-addons", + "displayName": "System Addons", + "version": "0.10.47" + }, + { + "name": "iam", + "displayName": "Identity and 
Access Management", + "version": "5.4.47" + }, + { + "name": "harbor-ui", + "displayName": "Cisco Catalyst Center UI", + "version": "3.5.144" + }, + { + "name": "idm-ui-plugin", + "displayName": "Identity and Access Management - UI", + "version": "5.4.29" + }, + { + "name": "data-ingestion-infra", + "displayName": "Network Data Platform - Ingestion Infra", + "version": "6.6.15" + }, + { + "name": "data-contextstore", + "displayName": "Network Data Platform - Caching Infra", + "version": "6.6.10" + }, + { + "name": "data-analytics-infra", + "displayName": "Network Data Platform - Core", + "version": "6.6.127" + }, + { + "name": "data-platform-pipeline-infra", + "displayName": "Network Data Platform - Pipeline Infra", + "version": "6.6.126" + }, + { + "name": "data-store-management", + "displayName": "Network Data Platform - Storage Management", + "version": "6.6.45" + }, + { + "name": "data-platform-ui", + "displayName": "Network Data Platform - Manager", + "version": "6.6.101" + }, + { + "name": "cloud-connectivity-datahub", + "displayName": "DxHub Cloud Connectivity", + "version": "6.10.26" + }, + { + "name": "cloud-connectivity", + "displayName": "Cloud Connectivity", + "version": "6.10.13" + }, + { + "name": "dnacaap", + "displayName": "Cisco Catalyst Center Platform", + "version": "6.9.43" + }, + { + "name": "dnac-search", + "displayName": "Cisco Catalyst Center Global Search", + "version": "6.9.3" + }, + { + "name": "system-management-operations", + "displayName": "System Management Operations", + "version": "1.6.50" + }, + { + "name": "telemetry", + "displayName": "Telemetry", + "version": "4.7.20" + }, + { + "name": "cloud-connectivity-sense", + "displayName": "Cloud Connectivity - Contextual Content", + "version": "7.0.7" + }, + { + "name": "digestor", + "displayName": "Cloud Connectivity - Digestor", + "version": "7.0.5" + }, + { + "name": "rca-scripts", + "displayName": "RCA-Scripts Package", + "version": "0.5.6" + }, + { + "name": "platform-refresh", + "displayName": "Platform Refresh", + "version": "1.3.69" + }, + { + "name": "catalyst-center-api-catalog", + "displayName": "Catalyst Center API Catalog", + "version": "6.8.96" + }, + { + "name": "b2b-upgrade", + "displayName": "", + "version": "2.732.65310" + }, + { + "name": "system-commons", + "displayName": "System Commons", + "version": "2.732.65310" + }, + { + "name": "ise-bridge", + "displayName": "Cisco Identity Services Engine Bridge", + "version": "3.150.1" + }, + { + "name": "ncp", + "displayName": "Network Controller Platform", + "version": "2.732.65310" + }, + { + "name": "assurance-base-analytics", + "displayName": "Network Data Platform - Base Analytics", + "version": "3.150.100202" + }, + { + "name": "appsvc-remediation", + "displayName": "Application and Service Remediation", + "version": "3.15.5" + }, + { + "name": "assurance", + "displayName": "Assurance", + "version": "3.150.141" + }, + { + "name": "ai-network-analytics", + "displayName": "AI Network Analytics", + "version": "4.0.32" + }, + { + "name": "ssa", + "displayName": "Stealthwatch Security Analytics", + "version": "2.732.1095118" + }, + { + "name": "aca", + "displayName": "Access Control Application", + "version": "2.732.65310" + }, + { + "name": "group-based-policy-analytics", + "displayName": "Group Based Policy Analytics", + "version": "3.150.8" + }, + { + "name": "sda", + "displayName": "SD Access", + "version": "2.732.65310" + }, + { + "name": "app-visibility-and-policy", + "displayName": "Application Visibility and Policy", + "version": 
"2.732.117621" + }, + { + "name": "endpoint-analytics", + "displayName": "AI Endpoint Analytics", + "version": "1.11.1702" + }, + { + "name": "rogue-management", + "displayName": "Rogue and aWIPS", + "version": "3.1.202" + }, + { + "name": "app-hosting", + "displayName": "Application Hosting", + "version": "2.3.325072204" + }, + { + "name": "sea-app-package", + "displayName": "SEA App", + "version": "2.732.685009" + }, + { + "name": "icap", + "displayName": "Automation - Intelligent Capture", + "version": "2.732.65310" + }, + { + "name": "wide-area-bonjour", + "displayName": "Wide Area Bonjour", + "version": "2.732.755006" + }, + { + "name": "sensor-automation", + "displayName": "Automation - Sensor", + "version": "2.732.65310" + }, + { + "name": "sensor-assurance", + "displayName": "Assurance - Sensor", + "version": "3.150.140" + }, + { + "name": "disaster-recovery", + "displayName": "Disaster Recovery", + "version": "2.732.365017" + }, + { + "name": "support-services", + "displayName": "Support Services", + "version": "2.732.885097" + }, + { + "name": "multi-dnac-enablement", + "displayName": "Multiple Cisco Catalyst Center", + "version": "2.732.65310" + } + ], + "numberOfNodes": 1, + "productType": "DNAC", + "productVersion": "3.732.75085", + "internetProtocolVersion": "ipv4", + "fipsEnabled": false, + "compatibilityError": [ + + ] + } + ], + "page": { + "limit": 50, + "offset": 1, + "count": 1, + "sortBy": [ + { + "name": "startDate", + "order": "desc" + } + ] + }, + "filter": { + + } + }, + "playbook_nfs_config_alreadyexists": [ + { + "nfs_configuration": [ + { + "nfs_port": 2049, + "nfs_portmapper_port": 111, + "nfs_version": "nfs4", + "server_ip": "172.27.17.90", + "source_path": "/home/nfsshare/backups/TB19" + } + ] + } + ], + "get_all_n_f_s_configurations1": { + "version": "2.0", + "response": [ + { + "id": "1e9d1dfd-7683-45df-b976-11f2989d36b7", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB20" + }, + "status": { + "destinationPath": "/data/external/nfs-06423689-51b1-5852-b886-47e1cca30364", + "state": "UnHealthy", + "subResourceState": "172.23.9.219=NFS mount point could not be mounted on the node", + "unhealthyNodes": [ + "172.23.9.219" + ] + } + }, + { + "id": "8b737087-5995-40fd-8c78-1f3422958e13", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB18" + }, + "status": { + "destinationPath": "/data/external/nfs-013c9812-7359-57bb-88b0-53d01e4122ff", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + }, + { + "id": "92412366-361d-4595-9f43-cb1244cfc0c3", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB19" + }, + "status": { + "destinationPath": "/data/external/nfs-0e1cfb41-59f4-52d8-9895-f5c784d5596e", + "state": "UnHealthy", + "subResourceState": "172.23.9.219=NFS mount point could not be mounted on the node", + "unhealthyNodes": [ + "172.23.9.219" + ] + } + }, + { + "id": "c2f70608-3864-4457-9247-ce63841b386f", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB17" + }, + "status": { + "destinationPath": 
"/data/external/nfs-e9f731e5-ea3c-512d-acef-c169bca98d04", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + } + ] + }, + "get_all_n_f_s_configurations2": { + "version": "2.0", + "response": [ + { + "id": "1e9d1dfd-7683-45df-b976-11f2989d36b7", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB20" + }, + "status": { + "destinationPath": "/data/external/nfs-06423689-51b1-5852-b886-47e1cca30364", + "state": "UnHealthy", + "subResourceState": "172.23.9.219=NFS mount point could not be mounted on the node", + "unhealthyNodes": [ + "172.23.9.219" + ] + } + }, + { + "id": "8b737087-5995-40fd-8c78-1f3422958e13", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB18" + }, + "status": { + "destinationPath": "/data/external/nfs-013c9812-7359-57bb-88b0-53d01e4122ff", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + }, + { + "id": "92412366-361d-4595-9f43-cb1244cfc0c3", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB19" + }, + "status": { + "destinationPath": "/data/external/nfs-0e1cfb41-59f4-52d8-9895-f5c784d5596e", + "state": "UnHealthy", + "subResourceState": "172.23.9.219=NFS mount point could not be mounted on the node", + "unhealthyNodes": [ + "172.23.9.219" + ] + } + }, + { + "id": "c2f70608-3864-4457-9247-ce63841b386f", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB17" + }, + "status": { + "destinationPath": "/data/external/nfs-e9f731e5-ea3c-512d-acef-c169bca98d04", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + } + ] + }, + "playbook_backup_config_alreadyexists": [ + { + "backup_storage_configuration": [ + { + "data_retention_period": 53, + "encryption_passphrase": "Karthick@zigzag333", + "nfs_details": { + "nfs_port": 2049, + "nfs_portmapper_port": 111, + "nfs_version": "nfs4", + "server_ip": "172.27.17.90", + "source_path": "/home/nfsshare/backups/TB17" + }, + "server_type": "NFS" + } + ] + } + ], + "get_backup_configuration1": { + "version": "2.0", + "response": { + "type": "NFS", + "dataRetention": 53, + "mountPath": "/data/external/nfs-e9f731e5-ea3c-512d-acef-c169bca98d04", + "id": "1fa840d8-ee9f-4e0d-a560-694c5e939680", + "isEncryptionPassPhraseAvailable": true + } + }, + "get_all_n_f_s_configurations_v1": { + "version": "2.0", + "response": [ + { + "id": "1e9d1dfd-7683-45df-b976-11f2989d36b7", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB20" + }, + "status": { + "destinationPath": "/data/external/nfs-06423689-51b1-5852-b886-47e1cca30364", + "state": "UnHealthy", + "subResourceState": "172.23.9.219=NFS mount point could not be mounted on the node", + "unhealthyNodes": [ + "172.23.9.219" + ] + } + }, + { + "id": "8b737087-5995-40fd-8c78-1f3422958e13", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB18" + }, + "status": { + "destinationPath": 
"/data/external/nfs-013c9812-7359-57bb-88b0-53d01e4122ff", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + }, + { + "id": "92412366-361d-4595-9f43-cb1244cfc0c3", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB19" + }, + "status": { + "destinationPath": "/data/external/nfs-0e1cfb41-59f4-52d8-9895-f5c784d5596e", + "state": "UnHealthy", + "subResourceState": "172.23.9.219=NFS mount point could not be mounted on the node", + "unhealthyNodes": [ + "172.23.9.219" + ] + } + }, + { + "id": "c2f70608-3864-4457-9247-ce63841b386f", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB17" + }, + "status": { + "destinationPath": "/data/external/nfs-e9f731e5-ea3c-512d-acef-c169bca98d04", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + } + ] + }, + "get_backup_configuration2": { + "version": "2.0", + "response": { + "type": "NFS", + "dataRetention": 53, + "mountPath": "/data/external/nfs-e9f731e5-ea3c-512d-acef-c169bca98d04", + "id": "1fa840d8-ee9f-4e0d-a560-694c5e939680", + "isEncryptionPassPhraseAvailable": true + } + }, + "playbook_nfs_config_delete": [ + { + "nfs_configuration": [ + { + "nfs_port": 2049, + "nfs_portmapper_port": 111, + "nfs_version": "nfs4", + "server_ip": "172.27.17.90", + "source_path": "/home/nfsshare/backups/TB19" + } + ] + } + ], + "get_all_n_f_s_configurations3": { + "version": "2.0", + "response": [ + { + "id": "8b737087-5995-40fd-8c78-1f3422958e13", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB18" + }, + "status": { + "destinationPath": "/data/external/nfs-013c9812-7359-57bb-88b0-53d01e4122ff", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + }, + { + "id": "c2f70608-3864-4457-9247-ce63841b386f", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB17" + }, + "status": { + "destinationPath": "/data/external/nfs-e9f731e5-ea3c-512d-acef-c169bca98d04", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + } + ] + }, + "get_all_n_f_s_configurations4": { + "version": "2.0", + "response": [ + { + "id": "8b737087-5995-40fd-8c78-1f3422958e13", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB18" + }, + "status": { + "destinationPath": "/data/external/nfs-013c9812-7359-57bb-88b0-53d01e4122ff", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + }, + { + "id": "c2f70608-3864-4457-9247-ce63841b386f", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB17" + }, + "status": { + "destinationPath": "/data/external/nfs-e9f731e5-ea3c-512d-acef-c169bca98d04", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + } + ] + }, + "playbook_backup_schedule_alreadydeleted": [ + { + "backup": [ + { + "name": "BACKUP24_07", + "scope": "CISCO_DNA_DATA_WITHOUT_ASSURANCE" + } + ] + } + ], + "get_all_backup9": { + 
"version": "2.0", + "response": [ + + ], + "page": { + "limit": 50, + "offset": 1, + "count": 0, + "sortBy": [ + { + "name": "startDate", + "order": "desc" + } + ] + }, + "filter": { + + } + }, + "get_all_backup10": { + "version": "2.0", + "response": [ + + ], + "page": { + "limit": 50, + "offset": 1, + "count": 0, + "sortBy": [ + { + "name": "startDate", + "order": "desc" + } + ] + }, + "filter": { + + } + }, + "playbook_backup_configuration_exception_dataretention_period": [ + { + "backup_storage_configuration": [ + { + "data_retention_period": 61, + "encryption_passphrase": "Karthick@zigzag333", + "nfs_details": { + "nfs_port": 2049, + "nfs_portmapper_port": 111, + "nfs_version": "nfs4", + "server_ip": "172.27.17.90", + "source_path": "/home/nfsshare/backups/TB18" + }, + "server_type": "NFS" + } + ] + } + ], + "get_backup_configuration5": { + "version": "2.0", + "response": { + "type": "NFS", + "dataRetention": 53, + "mountPath": "/data/external/nfs-e9f731e5-ea3c-512d-acef-c169bca98d04", + "id": "1fa840d8-ee9f-4e0d-a560-694c5e939680", + "isEncryptionPassPhraseAvailable": true + } + }, + "get_all_n_f_s_configurations7": { + "version": "2.0", + "response": [ + { + "id": "1c31871f-c24b-42b9-9b26-3b0c4656300a", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB19" + }, + "status": { + "destinationPath": "/data/external/nfs-0e1cfb41-59f4-52d8-9895-f5c784d5596e", + "state": "UnHealthy", + "subResourceState": "172.23.9.219=NFS mount point could not be mounted on the node", + "unhealthyNodes": [ + "172.23.9.219" + ] + } + }, + { + "id": "8b737087-5995-40fd-8c78-1f3422958e13", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB18" + }, + "status": { + "destinationPath": "/data/external/nfs-013c9812-7359-57bb-88b0-53d01e4122ff", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + }, + { + "id": "c2f70608-3864-4457-9247-ce63841b386f", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB17" + }, + "status": { + "destinationPath": "/data/external/nfs-e9f731e5-ea3c-512d-acef-c169bca98d04", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + } + ] + }, + "playbook_negative_scenario1": [ + { + "backup": [ + { + "name": "BACKUP25_07" + } + ] + } + ], + "get_all_backup6": { + "version": "2.0", + "response": [ + { + "name": "BACKUP24_07", + "scope": "CISCO_DNA_DATA_WITHOUT_ASSURANCE", + "versions": [ + "mks-upgrade:3.1.166", + "core-platform:0.10.205", + "managed-services-shared:0.10.19", + "gateway:0.10.12", + "core-addons:0.10.47", + "iam:5.4.47", + "harbor-ui:3.5.144", + "idm-ui-plugin:5.4.29", + "data-ingestion-infra:6.6.15", + "data-contextstore:6.6.10", + "data-analytics-infra:6.6.127", + "data-platform-pipeline-infra:6.6.126", + "data-store-management:6.6.45", + "data-platform-ui:6.6.101", + "cloud-connectivity-datahub:6.10.26", + "cloud-connectivity:6.10.13", + "dnacaap:6.9.43", + "dnac-search:6.9.3", + "system-management-operations:1.6.50", + "telemetry:4.7.20", + "cloud-connectivity-sense:7.0.7", + "digestor:7.0.5", + "rca-scripts:0.5.6", + "platform-refresh:1.3.69", + "catalyst-center-api-catalog:6.8.96", + "b2b-upgrade:2.732.65310", + "system-commons:2.732.65310", + 
"ise-bridge:3.150.1", + "ncp:2.732.65310", + "assurance-base-analytics:3.150.100202", + "appsvc-remediation:3.15.5", + "assurance:3.150.141", + "ai-network-analytics:4.0.32", + "ssa:2.732.1095118", + "aca:2.732.65310", + "group-based-policy-analytics:3.150.8", + "sda:2.732.65310", + "app-visibility-and-policy:2.732.117621", + "endpoint-analytics:1.11.1702", + "rogue-management:3.1.202", + "app-hosting:2.3.325072204", + "sea-app-package:2.732.685009", + "icap:2.732.65310", + "wide-area-bonjour:2.732.755006", + "sensor-automation:2.732.65310", + "sensor-assurance:3.150.140", + "disaster-recovery:2.732.365017", + "support-services:2.732.885097", + "multi-dnac-enablement:2.732.65310" + ], + "id": "7dcb4431-ba36-4170-b175-f230c3518e13", + "size": 41869614093, + "createdDate": "2025-08-04T10:56:57Z", + "isBackupAvailable": true, + "isCompatible": true, + "context": { + "type": "ON_DEMAND", + "schedule": "" + }, + "storage": { + "type": "NFS", + "mountPath": "/data/external/nfs-e9f731e5-ea3c-512d-acef-c169bca98d04", + "id": "1fa840d8-ee9f-4e0d-a560-694c5e939680", + "name": "", + "host": "", + "serverPath": "" + }, + "status": "SUCCESS", + "endDate": "2025-08-04T11:12:04Z", + "createdBy": "Anonymous", + "duration": 907, + "releaseName": "uber-dnac", + "releaseDisplayName": "Cisco Catalyst Center", + "releaseVersion": "", + "releaseDisplayVersion": "3.1.5-75085", + "installedPackages": [ + { + "name": "mks-upgrade", + "displayName": "System", + "version": "3.1.166" + }, + { + "name": "core-platform", + "displayName": "Core Platform", + "version": "0.10.205" + }, + { + "name": "managed-services-shared", + "displayName": "Shared Managed Services", + "version": "0.10.19" + }, + { + "name": "gateway", + "displayName": "Catalyst Center Gateway Service", + "version": "0.10.12" + }, + { + "name": "core-addons", + "displayName": "System Addons", + "version": "0.10.47" + }, + { + "name": "iam", + "displayName": "Identity and Access Management", + "version": "5.4.47" + }, + { + "name": "harbor-ui", + "displayName": "Cisco Catalyst Center UI", + "version": "3.5.144" + }, + { + "name": "idm-ui-plugin", + "displayName": "Identity and Access Management - UI", + "version": "5.4.29" + }, + { + "name": "data-ingestion-infra", + "displayName": "Network Data Platform - Ingestion Infra", + "version": "6.6.15" + }, + { + "name": "data-contextstore", + "displayName": "Network Data Platform - Caching Infra", + "version": "6.6.10" + }, + { + "name": "data-analytics-infra", + "displayName": "Network Data Platform - Core", + "version": "6.6.127" + }, + { + "name": "data-platform-pipeline-infra", + "displayName": "Network Data Platform - Pipeline Infra", + "version": "6.6.126" + }, + { + "name": "data-store-management", + "displayName": "Network Data Platform - Storage Management", + "version": "6.6.45" + }, + { + "name": "data-platform-ui", + "displayName": "Network Data Platform - Manager", + "version": "6.6.101" + }, + { + "name": "cloud-connectivity-datahub", + "displayName": "DxHub Cloud Connectivity", + "version": "6.10.26" + }, + { + "name": "cloud-connectivity", + "displayName": "Cloud Connectivity", + "version": "6.10.13" + }, + { + "name": "dnacaap", + "displayName": "Cisco Catalyst Center Platform", + "version": "6.9.43" + }, + { + "name": "dnac-search", + "displayName": "Cisco Catalyst Center Global Search", + "version": "6.9.3" + }, + { + "name": "system-management-operations", + "displayName": "System Management Operations", + "version": "1.6.50" + }, + { + "name": "telemetry", + "displayName": "Telemetry", + 
"version": "4.7.20" + }, + { + "name": "cloud-connectivity-sense", + "displayName": "Cloud Connectivity - Contextual Content", + "version": "7.0.7" + }, + { + "name": "digestor", + "displayName": "Cloud Connectivity - Digestor", + "version": "7.0.5" + }, + { + "name": "rca-scripts", + "displayName": "RCA-Scripts Package", + "version": "0.5.6" + }, + { + "name": "platform-refresh", + "displayName": "Platform Refresh", + "version": "1.3.69" + }, + { + "name": "catalyst-center-api-catalog", + "displayName": "Catalyst Center API Catalog", + "version": "6.8.96" + }, + { + "name": "b2b-upgrade", + "displayName": "", + "version": "2.732.65310" + }, + { + "name": "system-commons", + "displayName": "System Commons", + "version": "2.732.65310" + }, + { + "name": "ise-bridge", + "displayName": "Cisco Identity Services Engine Bridge", + "version": "3.150.1" + }, + { + "name": "ncp", + "displayName": "Network Controller Platform", + "version": "2.732.65310" + }, + { + "name": "assurance-base-analytics", + "displayName": "Network Data Platform - Base Analytics", + "version": "3.150.100202" + }, + { + "name": "appsvc-remediation", + "displayName": "Application and Service Remediation", + "version": "3.15.5" + }, + { + "name": "assurance", + "displayName": "Assurance", + "version": "3.150.141" + }, + { + "name": "ai-network-analytics", + "displayName": "AI Network Analytics", + "version": "4.0.32" + }, + { + "name": "ssa", + "displayName": "Stealthwatch Security Analytics", + "version": "2.732.1095118" + }, + { + "name": "aca", + "displayName": "Access Control Application", + "version": "2.732.65310" + }, + { + "name": "group-based-policy-analytics", + "displayName": "Group Based Policy Analytics", + "version": "3.150.8" + }, + { + "name": "sda", + "displayName": "SD Access", + "version": "2.732.65310" + }, + { + "name": "app-visibility-and-policy", + "displayName": "Application Visibility and Policy", + "version": "2.732.117621" + }, + { + "name": "endpoint-analytics", + "displayName": "AI Endpoint Analytics", + "version": "1.11.1702" + }, + { + "name": "rogue-management", + "displayName": "Rogue and aWIPS", + "version": "3.1.202" + }, + { + "name": "app-hosting", + "displayName": "Application Hosting", + "version": "2.3.325072204" + }, + { + "name": "sea-app-package", + "displayName": "SEA App", + "version": "2.732.685009" + }, + { + "name": "icap", + "displayName": "Automation - Intelligent Capture", + "version": "2.732.65310" + }, + { + "name": "wide-area-bonjour", + "displayName": "Wide Area Bonjour", + "version": "2.732.755006" + }, + { + "name": "sensor-automation", + "displayName": "Automation - Sensor", + "version": "2.732.65310" + }, + { + "name": "sensor-assurance", + "displayName": "Assurance - Sensor", + "version": "3.150.140" + }, + { + "name": "disaster-recovery", + "displayName": "Disaster Recovery", + "version": "2.732.365017" + }, + { + "name": "support-services", + "displayName": "Support Services", + "version": "2.732.885097" + }, + { + "name": "multi-dnac-enablement", + "displayName": "Multiple Cisco Catalyst Center", + "version": "2.732.65310" + } + ], + "numberOfNodes": 1, + "productType": "DNAC", + "productVersion": "3.732.75085", + "internetProtocolVersion": "ipv4", + "fipsEnabled": false, + "compatibilityError": [ + + ] + } + ], + "page": { + "limit": 50, + "offset": 1, + "count": 1, + "sortBy": [ + { + "name": "startDate", + "order": "desc" + } + ] + }, + "filter": { + + } + }, + "playbook_negative_scenario2": [ + { + "backup": [ + { + "name": "BACKUP25_07" + } + ] + } + ], + 
"playbook_negative_scenario3": [ + { + "restore_operations": [ + { + "encryption_passphrase": "Karthick@", + "name": "BACKUP25_07" + } + ] + } + ], + "get_all_backup7": { + "version": "2.0", + "response": [ + { + "name": "BACKUP24_07", + "scope": "CISCO_DNA_DATA_WITHOUT_ASSURANCE", + "versions": [ + "mks-upgrade:3.1.166", + "core-platform:0.10.205", + "managed-services-shared:0.10.19", + "gateway:0.10.12", + "core-addons:0.10.47", + "iam:5.4.47", + "harbor-ui:3.5.144", + "idm-ui-plugin:5.4.29", + "data-ingestion-infra:6.6.15", + "data-contextstore:6.6.10", + "data-analytics-infra:6.6.127", + "data-platform-pipeline-infra:6.6.126", + "data-store-management:6.6.45", + "data-platform-ui:6.6.101", + "cloud-connectivity-datahub:6.10.26", + "cloud-connectivity:6.10.13", + "dnacaap:6.9.43", + "dnac-search:6.9.3", + "system-management-operations:1.6.50", + "telemetry:4.7.20", + "cloud-connectivity-sense:7.0.7", + "digestor:7.0.5", + "rca-scripts:0.5.6", + "platform-refresh:1.3.69", + "catalyst-center-api-catalog:6.8.96", + "b2b-upgrade:2.732.65310", + "system-commons:2.732.65310", + "ise-bridge:3.150.1", + "ncp:2.732.65310", + "assurance-base-analytics:3.150.100202", + "appsvc-remediation:3.15.5", + "assurance:3.150.141", + "ai-network-analytics:4.0.32", + "ssa:2.732.1095118", + "aca:2.732.65310", + "group-based-policy-analytics:3.150.8", + "sda:2.732.65310", + "app-visibility-and-policy:2.732.117621", + "endpoint-analytics:1.11.1702", + "rogue-management:3.1.202", + "app-hosting:2.3.325072204", + "sea-app-package:2.732.685009", + "icap:2.732.65310", + "wide-area-bonjour:2.732.755006", + "sensor-automation:2.732.65310", + "sensor-assurance:3.150.140", + "disaster-recovery:2.732.365017", + "support-services:2.732.885097", + "multi-dnac-enablement:2.732.65310" + ], + "id": "7dcb4431-ba36-4170-b175-f230c3518e13", + "size": 41869614093, + "createdDate": "2025-08-04T10:56:57Z", + "isBackupAvailable": true, + "isCompatible": true, + "context": { + "type": "ON_DEMAND", + "schedule": "" + }, + "storage": { + "type": "NFS", + "mountPath": "/data/external/nfs-e9f731e5-ea3c-512d-acef-c169bca98d04", + "id": "1fa840d8-ee9f-4e0d-a560-694c5e939680", + "name": "", + "host": "", + "serverPath": "" + }, + "status": "SUCCESS", + "endDate": "2025-08-04T11:12:04Z", + "createdBy": "Anonymous", + "duration": 907, + "releaseName": "uber-dnac", + "releaseDisplayName": "Cisco Catalyst Center", + "releaseVersion": "", + "releaseDisplayVersion": "3.1.5-75085", + "installedPackages": [ + { + "name": "mks-upgrade", + "displayName": "System", + "version": "3.1.166" + }, + { + "name": "core-platform", + "displayName": "Core Platform", + "version": "0.10.205" + }, + { + "name": "managed-services-shared", + "displayName": "Shared Managed Services", + "version": "0.10.19" + }, + { + "name": "gateway", + "displayName": "Catalyst Center Gateway Service", + "version": "0.10.12" + }, + { + "name": "core-addons", + "displayName": "System Addons", + "version": "0.10.47" + }, + { + "name": "iam", + "displayName": "Identity and Access Management", + "version": "5.4.47" + }, + { + "name": "harbor-ui", + "displayName": "Cisco Catalyst Center UI", + "version": "3.5.144" + }, + { + "name": "idm-ui-plugin", + "displayName": "Identity and Access Management - UI", + "version": "5.4.29" + }, + { + "name": "data-ingestion-infra", + "displayName": "Network Data Platform - Ingestion Infra", + "version": "6.6.15" + }, + { + "name": "data-contextstore", + "displayName": "Network Data Platform - Caching Infra", + "version": "6.6.10" + }, + { + "name": 
"data-analytics-infra", + "displayName": "Network Data Platform - Core", + "version": "6.6.127" + }, + { + "name": "data-platform-pipeline-infra", + "displayName": "Network Data Platform - Pipeline Infra", + "version": "6.6.126" + }, + { + "name": "data-store-management", + "displayName": "Network Data Platform - Storage Management", + "version": "6.6.45" + }, + { + "name": "data-platform-ui", + "displayName": "Network Data Platform - Manager", + "version": "6.6.101" + }, + { + "name": "cloud-connectivity-datahub", + "displayName": "DxHub Cloud Connectivity", + "version": "6.10.26" + }, + { + "name": "cloud-connectivity", + "displayName": "Cloud Connectivity", + "version": "6.10.13" + }, + { + "name": "dnacaap", + "displayName": "Cisco Catalyst Center Platform", + "version": "6.9.43" + }, + { + "name": "dnac-search", + "displayName": "Cisco Catalyst Center Global Search", + "version": "6.9.3" + }, + { + "name": "system-management-operations", + "displayName": "System Management Operations", + "version": "1.6.50" + }, + { + "name": "telemetry", + "displayName": "Telemetry", + "version": "4.7.20" + }, + { + "name": "cloud-connectivity-sense", + "displayName": "Cloud Connectivity - Contextual Content", + "version": "7.0.7" + }, + { + "name": "digestor", + "displayName": "Cloud Connectivity - Digestor", + "version": "7.0.5" + }, + { + "name": "rca-scripts", + "displayName": "RCA-Scripts Package", + "version": "0.5.6" + }, + { + "name": "platform-refresh", + "displayName": "Platform Refresh", + "version": "1.3.69" + }, + { + "name": "catalyst-center-api-catalog", + "displayName": "Catalyst Center API Catalog", + "version": "6.8.96" + }, + { + "name": "b2b-upgrade", + "displayName": "", + "version": "2.732.65310" + }, + { + "name": "system-commons", + "displayName": "System Commons", + "version": "2.732.65310" + }, + { + "name": "ise-bridge", + "displayName": "Cisco Identity Services Engine Bridge", + "version": "3.150.1" + }, + { + "name": "ncp", + "displayName": "Network Controller Platform", + "version": "2.732.65310" + }, + { + "name": "assurance-base-analytics", + "displayName": "Network Data Platform - Base Analytics", + "version": "3.150.100202" + }, + { + "name": "appsvc-remediation", + "displayName": "Application and Service Remediation", + "version": "3.15.5" + }, + { + "name": "assurance", + "displayName": "Assurance", + "version": "3.150.141" + }, + { + "name": "ai-network-analytics", + "displayName": "AI Network Analytics", + "version": "4.0.32" + }, + { + "name": "ssa", + "displayName": "Stealthwatch Security Analytics", + "version": "2.732.1095118" + }, + { + "name": "aca", + "displayName": "Access Control Application", + "version": "2.732.65310" + }, + { + "name": "group-based-policy-analytics", + "displayName": "Group Based Policy Analytics", + "version": "3.150.8" + }, + { + "name": "sda", + "displayName": "SD Access", + "version": "2.732.65310" + }, + { + "name": "app-visibility-and-policy", + "displayName": "Application Visibility and Policy", + "version": "2.732.117621" + }, + { + "name": "endpoint-analytics", + "displayName": "AI Endpoint Analytics", + "version": "1.11.1702" + }, + { + "name": "rogue-management", + "displayName": "Rogue and aWIPS", + "version": "3.1.202" + }, + { + "name": "app-hosting", + "displayName": "Application Hosting", + "version": "2.3.325072204" + }, + { + "name": "sea-app-package", + "displayName": "SEA App", + "version": "2.732.685009" + }, + { + "name": "icap", + "displayName": "Automation - Intelligent Capture", + "version": "2.732.65310" + }, 
+ { + "name": "wide-area-bonjour", + "displayName": "Wide Area Bonjour", + "version": "2.732.755006" + }, + { + "name": "sensor-automation", + "displayName": "Automation - Sensor", + "version": "2.732.65310" + }, + { + "name": "sensor-assurance", + "displayName": "Assurance - Sensor", + "version": "3.150.140" + }, + { + "name": "disaster-recovery", + "displayName": "Disaster Recovery", + "version": "2.732.365017" + }, + { + "name": "support-services", + "displayName": "Support Services", + "version": "2.732.885097" + }, + { + "name": "multi-dnac-enablement", + "displayName": "Multiple Cisco Catalyst Center", + "version": "2.732.65310" + } + ], + "numberOfNodes": 1, + "productType": "DNAC", + "productVersion": "3.732.75085", + "internetProtocolVersion": "ipv4", + "fipsEnabled": false, + "compatibilityError": [ + + ] + } + ], + "page": { + "limit": 50, + "offset": 1, + "count": 1, + "sortBy": [ + { + "name": "startDate", + "order": "desc" + } + ] + }, + "filter": { + + } + }, + "playbook_negative_scenario4": [ + { + "nfs_configuration": [ + { + "nfs_port": 2049, + "nfs_portmapper_port": 111, + "nfs_version": "nfs4", + "server_ip": "172.27.17.90" + } + ] + } + ], + "get_all_n_f_s_configurations": { + "version": "2.0", + "response": [ + { + "id": "8b737087-5995-40fd-8c78-1f3422958e13", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB18" + }, + "status": { + "destinationPath": "/data/external/nfs-013c9812-7359-57bb-88b0-53d01e4122ff", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + }, + { + "id": "c2f70608-3864-4457-9247-ce63841b386f", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB17" + }, + "status": { + "destinationPath": "/data/external/nfs-e9f731e5-ea3c-512d-acef-c169bca98d04", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + } + ] + }, + "playbook_negative_scenario5": [ + { + "backup_storage_configuration": [ + { + "data_retention_period": 53, + "encryption_passphrase": "Karthick@zigzag333", + "nfs_details": { + "nfs_port": 2049, + "nfs_portmapper_port": 111, + "nfs_version": "nfs4", + "server_ip": "172.27.17.90", + "source_path": "/home/nfsshare/backups/TB19" + }, + "server_type": "NFS" + } + ] + } + ], + "get_backup_configuration_v1": { + "version": "2.0", + "response": { + "type": "NFS", + "dataRetention": 53, + "mountPath": "/data/external/nfs-e9f731e5-ea3c-512d-acef-c169bca98d04", + "id": "1fa840d8-ee9f-4e0d-a560-694c5e939680", + "isEncryptionPassPhraseAvailable": true + } + }, + "get_all_n_f_s_configurations5": { + "version": "2.0", + "response": [ + { + "id": "1c31871f-c24b-42b9-9b26-3b0c4656300a", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB19" + }, + "status": { + "destinationPath": "/data/external/nfs-0e1cfb41-59f4-52d8-9895-f5c784d5596e", + "state": "UnHealthy", + "subResourceState": "172.23.9.219=NFS mount point could not be mounted on the node", + "unhealthyNodes": [ + "172.23.9.219" + ] + } + }, + { + "id": "8b737087-5995-40fd-8c78-1f3422958e13", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB18" + }, + "status": { + 
"destinationPath": "/data/external/nfs-013c9812-7359-57bb-88b0-53d01e4122ff", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + }, + { + "id": "c2f70608-3864-4457-9247-ce63841b386f", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB17" + }, + "status": { + "destinationPath": "/data/external/nfs-e9f731e5-ea3c-512d-acef-c169bca98d04", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + } + ] + }, + "playbook_negative_scenario6": [ + { + "restore_operations": [ + { + "name": "BACKUP25_07" + } + ] + } + ], + "playbook_negative_scenario7": [ + { + "restore_operations": [ + { + "encryption_passphrase": "Karthick@", + "name": "BACKUP24_07" + } + ] + } + ], + "get_all_backup1": { + "version": "2.0", + "response": [ + { + "name": "BACKUP24_07", + "scope": "CISCO_DNA_DATA_WITHOUT_ASSURANCE", + "versions": [ + "mks-upgrade:3.1.166", + "core-platform:0.10.205", + "managed-services-shared:0.10.19", + "gateway:0.10.12", + "core-addons:0.10.47", + "iam:5.4.47", + "harbor-ui:3.5.144", + "idm-ui-plugin:5.4.29", + "data-ingestion-infra:6.6.15", + "data-contextstore:6.6.10", + "data-analytics-infra:6.6.127", + "data-platform-pipeline-infra:6.6.126", + "data-store-management:6.6.45", + "data-platform-ui:6.6.101", + "cloud-connectivity-datahub:6.10.26", + "cloud-connectivity:6.10.13", + "dnacaap:6.9.43", + "dnac-search:6.9.3", + "system-management-operations:1.6.50", + "telemetry:4.7.20", + "cloud-connectivity-sense:7.0.7", + "digestor:7.0.5", + "rca-scripts:0.5.6", + "platform-refresh:1.3.69", + "catalyst-center-api-catalog:6.8.96", + "b2b-upgrade:2.732.65310", + "system-commons:2.732.65310", + "ise-bridge:3.150.1", + "ncp:2.732.65310", + "assurance-base-analytics:3.150.100202", + "appsvc-remediation:3.15.5", + "assurance:3.150.141", + "ai-network-analytics:4.0.32", + "ssa:2.732.1095118", + "aca:2.732.65310", + "group-based-policy-analytics:3.150.8", + "sda:2.732.65310", + "app-visibility-and-policy:2.732.117621", + "endpoint-analytics:1.11.1702", + "rogue-management:3.1.202", + "app-hosting:2.3.325072204", + "sea-app-package:2.732.685009", + "icap:2.732.65310", + "wide-area-bonjour:2.732.755006", + "sensor-automation:2.732.65310", + "sensor-assurance:3.150.140", + "disaster-recovery:2.732.365017", + "support-services:2.732.885097", + "multi-dnac-enablement:2.732.65310" + ], + "id": "7dcb4431-ba36-4170-b175-f230c3518e13", + "size": 41869614093, + "createdDate": "2025-08-04T10:56:57Z", + "isBackupAvailable": true, + "isCompatible": true, + "context": { + "type": "ON_DEMAND", + "schedule": "" + }, + "storage": { + "type": "NFS", + "mountPath": "/data/external/nfs-e9f731e5-ea3c-512d-acef-c169bca98d04", + "id": "1fa840d8-ee9f-4e0d-a560-694c5e939680", + "name": "", + "host": "", + "serverPath": "" + }, + "status": "SUCCESS", + "endDate": "2025-08-04T11:12:04Z", + "createdBy": "Anonymous", + "duration": 907, + "releaseName": "uber-dnac", + "releaseDisplayName": "Cisco Catalyst Center", + "releaseVersion": "", + "releaseDisplayVersion": "3.1.5-75085", + "installedPackages": [ + { + "name": "mks-upgrade", + "displayName": "System", + "version": "3.1.166" + }, + { + "name": "core-platform", + "displayName": "Core Platform", + "version": "0.10.205" + }, + { + "name": "managed-services-shared", + "displayName": "Shared Managed Services", + "version": "0.10.19" + }, + { + "name": "gateway", + "displayName": "Catalyst Center Gateway Service", + 
"version": "0.10.12" + }, + { + "name": "core-addons", + "displayName": "System Addons", + "version": "0.10.47" + }, + { + "name": "iam", + "displayName": "Identity and Access Management", + "version": "5.4.47" + }, + { + "name": "harbor-ui", + "displayName": "Cisco Catalyst Center UI", + "version": "3.5.144" + }, + { + "name": "idm-ui-plugin", + "displayName": "Identity and Access Management - UI", + "version": "5.4.29" + }, + { + "name": "data-ingestion-infra", + "displayName": "Network Data Platform - Ingestion Infra", + "version": "6.6.15" + }, + { + "name": "data-contextstore", + "displayName": "Network Data Platform - Caching Infra", + "version": "6.6.10" + }, + { + "name": "data-analytics-infra", + "displayName": "Network Data Platform - Core", + "version": "6.6.127" + }, + { + "name": "data-platform-pipeline-infra", + "displayName": "Network Data Platform - Pipeline Infra", + "version": "6.6.126" + }, + { + "name": "data-store-management", + "displayName": "Network Data Platform - Storage Management", + "version": "6.6.45" + }, + { + "name": "data-platform-ui", + "displayName": "Network Data Platform - Manager", + "version": "6.6.101" + }, + { + "name": "cloud-connectivity-datahub", + "displayName": "DxHub Cloud Connectivity", + "version": "6.10.26" + }, + { + "name": "cloud-connectivity", + "displayName": "Cloud Connectivity", + "version": "6.10.13" + }, + { + "name": "dnacaap", + "displayName": "Cisco Catalyst Center Platform", + "version": "6.9.43" + }, + { + "name": "dnac-search", + "displayName": "Cisco Catalyst Center Global Search", + "version": "6.9.3" + }, + { + "name": "system-management-operations", + "displayName": "System Management Operations", + "version": "1.6.50" + }, + { + "name": "telemetry", + "displayName": "Telemetry", + "version": "4.7.20" + }, + { + "name": "cloud-connectivity-sense", + "displayName": "Cloud Connectivity - Contextual Content", + "version": "7.0.7" + }, + { + "name": "digestor", + "displayName": "Cloud Connectivity - Digestor", + "version": "7.0.5" + }, + { + "name": "rca-scripts", + "displayName": "RCA-Scripts Package", + "version": "0.5.6" + }, + { + "name": "platform-refresh", + "displayName": "Platform Refresh", + "version": "1.3.69" + }, + { + "name": "catalyst-center-api-catalog", + "displayName": "Catalyst Center API Catalog", + "version": "6.8.96" + }, + { + "name": "b2b-upgrade", + "displayName": "", + "version": "2.732.65310" + }, + { + "name": "system-commons", + "displayName": "System Commons", + "version": "2.732.65310" + }, + { + "name": "ise-bridge", + "displayName": "Cisco Identity Services Engine Bridge", + "version": "3.150.1" + }, + { + "name": "ncp", + "displayName": "Network Controller Platform", + "version": "2.732.65310" + }, + { + "name": "assurance-base-analytics", + "displayName": "Network Data Platform - Base Analytics", + "version": "3.150.100202" + }, + { + "name": "appsvc-remediation", + "displayName": "Application and Service Remediation", + "version": "3.15.5" + }, + { + "name": "assurance", + "displayName": "Assurance", + "version": "3.150.141" + }, + { + "name": "ai-network-analytics", + "displayName": "AI Network Analytics", + "version": "4.0.32" + }, + { + "name": "ssa", + "displayName": "Stealthwatch Security Analytics", + "version": "2.732.1095118" + }, + { + "name": "aca", + "displayName": "Access Control Application", + "version": "2.732.65310" + }, + { + "name": "group-based-policy-analytics", + "displayName": "Group Based Policy Analytics", + "version": "3.150.8" + }, + { + "name": "sda", + 
"displayName": "SD Access", + "version": "2.732.65310" + }, + { + "name": "app-visibility-and-policy", + "displayName": "Application Visibility and Policy", + "version": "2.732.117621" + }, + { + "name": "endpoint-analytics", + "displayName": "AI Endpoint Analytics", + "version": "1.11.1702" + }, + { + "name": "rogue-management", + "displayName": "Rogue and aWIPS", + "version": "3.1.202" + }, + { + "name": "app-hosting", + "displayName": "Application Hosting", + "version": "2.3.325072204" + }, + { + "name": "sea-app-package", + "displayName": "SEA App", + "version": "2.732.685009" + }, + { + "name": "icap", + "displayName": "Automation - Intelligent Capture", + "version": "2.732.65310" + }, + { + "name": "wide-area-bonjour", + "displayName": "Wide Area Bonjour", + "version": "2.732.755006" + }, + { + "name": "sensor-automation", + "displayName": "Automation - Sensor", + "version": "2.732.65310" + }, + { + "name": "sensor-assurance", + "displayName": "Assurance - Sensor", + "version": "3.150.140" + }, + { + "name": "disaster-recovery", + "displayName": "Disaster Recovery", + "version": "2.732.365017" + }, + { + "name": "support-services", + "displayName": "Support Services", + "version": "2.732.885097" + }, + { + "name": "multi-dnac-enablement", + "displayName": "Multiple Cisco Catalyst Center", + "version": "2.732.65310" + } + ], + "numberOfNodes": 1, + "productType": "DNAC", + "productVersion": "3.732.75085", + "internetProtocolVersion": "ipv4", + "fipsEnabled": false, + "compatibilityError": [ + + ] + } + ], + "page": { + "limit": 50, + "offset": 1, + "count": 1, + "sortBy": [ + { + "name": "startDate", + "order": "desc" + } + ] + }, + "filter": { + + } + }, + "get_backup_configuration3": { + "version": "2.0", + "response": { + "type": "NFS", + "dataRetention": 53, + "mountPath": "/data/external/nfs-e9f731e5-ea3c-512d-acef-c169bca98d04", + "id": "1fa840d8-ee9f-4e0d-a560-694c5e939680", + "isEncryptionPassPhraseAvailable": true + } + }, + "playbook_negative_scenario8": [ + { + "backup": [ + { + "scope": "CISCO_DNA_DATA_WITHOUT_ASSURANCE" + } + ] + } + ], + "get_all_backup8": { + "version": "2.0", + "response": [ + { + "name": "BACKUP24_07", + "scope": "CISCO_DNA_DATA_WITHOUT_ASSURANCE", + "versions": [ + "mks-upgrade:3.1.166", + "core-platform:0.10.205", + "managed-services-shared:0.10.19", + "gateway:0.10.12", + "core-addons:0.10.47", + "iam:5.4.47", + "harbor-ui:3.5.144", + "idm-ui-plugin:5.4.29", + "data-ingestion-infra:6.6.15", + "data-contextstore:6.6.10", + "data-analytics-infra:6.6.127", + "data-platform-pipeline-infra:6.6.126", + "data-store-management:6.6.45", + "data-platform-ui:6.6.101", + "cloud-connectivity-datahub:6.10.26", + "cloud-connectivity:6.10.13", + "dnacaap:6.9.43", + "dnac-search:6.9.3", + "system-management-operations:1.6.50", + "telemetry:4.7.20", + "cloud-connectivity-sense:7.0.7", + "digestor:7.0.5", + "rca-scripts:0.5.6", + "platform-refresh:1.3.69", + "catalyst-center-api-catalog:6.8.96", + "b2b-upgrade:2.732.65310", + "system-commons:2.732.65310", + "ise-bridge:3.150.1", + "ncp:2.732.65310", + "assurance-base-analytics:3.150.100202", + "appsvc-remediation:3.15.5", + "assurance:3.150.141", + "ai-network-analytics:4.0.32", + "ssa:2.732.1095118", + "aca:2.732.65310", + "group-based-policy-analytics:3.150.8", + "sda:2.732.65310", + "app-visibility-and-policy:2.732.117621", + "endpoint-analytics:1.11.1702", + "rogue-management:3.1.202", + "app-hosting:2.3.325072204", + "sea-app-package:2.732.685009", + "icap:2.732.65310", + "wide-area-bonjour:2.732.755006", + 
"sensor-automation:2.732.65310", + "sensor-assurance:3.150.140", + "disaster-recovery:2.732.365017", + "support-services:2.732.885097", + "multi-dnac-enablement:2.732.65310" + ], + "id": "7dcb4431-ba36-4170-b175-f230c3518e13", + "size": 41869614093, + "createdDate": "2025-08-04T10:56:57Z", + "isBackupAvailable": true, + "isCompatible": true, + "context": { + "type": "ON_DEMAND", + "schedule": "" + }, + "storage": { + "type": "NFS", + "mountPath": "/data/external/nfs-e9f731e5-ea3c-512d-acef-c169bca98d04", + "id": "1fa840d8-ee9f-4e0d-a560-694c5e939680", + "name": "", + "host": "", + "serverPath": "" + }, + "status": "SUCCESS", + "endDate": "2025-08-04T11:12:04Z", + "createdBy": "Anonymous", + "duration": 907, + "releaseName": "uber-dnac", + "releaseDisplayName": "Cisco Catalyst Center", + "releaseVersion": "", + "releaseDisplayVersion": "3.1.5-75085", + "installedPackages": [ + { + "name": "mks-upgrade", + "displayName": "System", + "version": "3.1.166" + }, + { + "name": "core-platform", + "displayName": "Core Platform", + "version": "0.10.205" + }, + { + "name": "managed-services-shared", + "displayName": "Shared Managed Services", + "version": "0.10.19" + }, + { + "name": "gateway", + "displayName": "Catalyst Center Gateway Service", + "version": "0.10.12" + }, + { + "name": "core-addons", + "displayName": "System Addons", + "version": "0.10.47" + }, + { + "name": "iam", + "displayName": "Identity and Access Management", + "version": "5.4.47" + }, + { + "name": "harbor-ui", + "displayName": "Cisco Catalyst Center UI", + "version": "3.5.144" + }, + { + "name": "idm-ui-plugin", + "displayName": "Identity and Access Management - UI", + "version": "5.4.29" + }, + { + "name": "data-ingestion-infra", + "displayName": "Network Data Platform - Ingestion Infra", + "version": "6.6.15" + }, + { + "name": "data-contextstore", + "displayName": "Network Data Platform - Caching Infra", + "version": "6.6.10" + }, + { + "name": "data-analytics-infra", + "displayName": "Network Data Platform - Core", + "version": "6.6.127" + }, + { + "name": "data-platform-pipeline-infra", + "displayName": "Network Data Platform - Pipeline Infra", + "version": "6.6.126" + }, + { + "name": "data-store-management", + "displayName": "Network Data Platform - Storage Management", + "version": "6.6.45" + }, + { + "name": "data-platform-ui", + "displayName": "Network Data Platform - Manager", + "version": "6.6.101" + }, + { + "name": "cloud-connectivity-datahub", + "displayName": "DxHub Cloud Connectivity", + "version": "6.10.26" + }, + { + "name": "cloud-connectivity", + "displayName": "Cloud Connectivity", + "version": "6.10.13" + }, + { + "name": "dnacaap", + "displayName": "Cisco Catalyst Center Platform", + "version": "6.9.43" + }, + { + "name": "dnac-search", + "displayName": "Cisco Catalyst Center Global Search", + "version": "6.9.3" + }, + { + "name": "system-management-operations", + "displayName": "System Management Operations", + "version": "1.6.50" + }, + { + "name": "telemetry", + "displayName": "Telemetry", + "version": "4.7.20" + }, + { + "name": "cloud-connectivity-sense", + "displayName": "Cloud Connectivity - Contextual Content", + "version": "7.0.7" + }, + { + "name": "digestor", + "displayName": "Cloud Connectivity - Digestor", + "version": "7.0.5" + }, + { + "name": "rca-scripts", + "displayName": "RCA-Scripts Package", + "version": "0.5.6" + }, + { + "name": "platform-refresh", + "displayName": "Platform Refresh", + "version": "1.3.69" + }, + { + "name": "catalyst-center-api-catalog", + "displayName": 
"Catalyst Center API Catalog", + "version": "6.8.96" + }, + { + "name": "b2b-upgrade", + "displayName": "", + "version": "2.732.65310" + }, + { + "name": "system-commons", + "displayName": "System Commons", + "version": "2.732.65310" + }, + { + "name": "ise-bridge", + "displayName": "Cisco Identity Services Engine Bridge", + "version": "3.150.1" + }, + { + "name": "ncp", + "displayName": "Network Controller Platform", + "version": "2.732.65310" + }, + { + "name": "assurance-base-analytics", + "displayName": "Network Data Platform - Base Analytics", + "version": "3.150.100202" + }, + { + "name": "appsvc-remediation", + "displayName": "Application and Service Remediation", + "version": "3.15.5" + }, + { + "name": "assurance", + "displayName": "Assurance", + "version": "3.150.141" + }, + { + "name": "ai-network-analytics", + "displayName": "AI Network Analytics", + "version": "4.0.32" + }, + { + "name": "ssa", + "displayName": "Stealthwatch Security Analytics", + "version": "2.732.1095118" + }, + { + "name": "aca", + "displayName": "Access Control Application", + "version": "2.732.65310" + }, + { + "name": "group-based-policy-analytics", + "displayName": "Group Based Policy Analytics", + "version": "3.150.8" + }, + { + "name": "sda", + "displayName": "SD Access", + "version": "2.732.65310" + }, + { + "name": "app-visibility-and-policy", + "displayName": "Application Visibility and Policy", + "version": "2.732.117621" + }, + { + "name": "endpoint-analytics", + "displayName": "AI Endpoint Analytics", + "version": "1.11.1702" + }, + { + "name": "rogue-management", + "displayName": "Rogue and aWIPS", + "version": "3.1.202" + }, + { + "name": "app-hosting", + "displayName": "Application Hosting", + "version": "2.3.325072204" + }, + { + "name": "sea-app-package", + "displayName": "SEA App", + "version": "2.732.685009" + }, + { + "name": "icap", + "displayName": "Automation - Intelligent Capture", + "version": "2.732.65310" + }, + { + "name": "wide-area-bonjour", + "displayName": "Wide Area Bonjour", + "version": "2.732.755006" + }, + { + "name": "sensor-automation", + "displayName": "Automation - Sensor", + "version": "2.732.65310" + }, + { + "name": "sensor-assurance", + "displayName": "Assurance - Sensor", + "version": "3.150.140" + }, + { + "name": "disaster-recovery", + "displayName": "Disaster Recovery", + "version": "2.732.365017" + }, + { + "name": "support-services", + "displayName": "Support Services", + "version": "2.732.885097" + }, + { + "name": "multi-dnac-enablement", + "displayName": "Multiple Cisco Catalyst Center", + "version": "2.732.65310" + } + ], + "numberOfNodes": 1, + "productType": "DNAC", + "productVersion": "3.732.75085", + "internetProtocolVersion": "ipv4", + "fipsEnabled": false, + "compatibilityError": [ + + ] + } + ], + "page": { + "limit": 50, + "offset": 1, + "count": 1, + "sortBy": [ + { + "name": "startDate", + "order": "desc" + } + ] + }, + "filter": { + + } + }, + "playbook_negative_scenario9": [ + { + "nfs_configuration": [ + { + "server_ip": "172.27.17.90" + } + ] + } + ], + "get_all_n_f_s_configurations6": { + "version": "2.0", + "response": [ + { + "id": "1c31871f-c24b-42b9-9b26-3b0c4656300a", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB19" + }, + "status": { + "destinationPath": "/data/external/nfs-0e1cfb41-59f4-52d8-9895-f5c784d5596e", + "state": "UnHealthy", + "subResourceState": "172.23.9.219=NFS mount point could not be mounted 
on the node", + "unhealthyNodes": [ + "172.23.9.219" + ] + } + }, + { + "id": "8b737087-5995-40fd-8c78-1f3422958e13", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB18" + }, + "status": { + "destinationPath": "/data/external/nfs-013c9812-7359-57bb-88b0-53d01e4122ff", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + }, + { + "id": "c2f70608-3864-4457-9247-ce63841b386f", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB17" + }, + "status": { + "destinationPath": "/data/external/nfs-e9f731e5-ea3c-512d-acef-c169bca98d04", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + } + ] + }, + "playbook_negative_scenario10": [ + { + "backup": [ + { + "name": "BACKUP24/07", + "scope": "CISCO_DNA_DATA_WITHOUT_ASSURANCE" + } + ] + } + ], + "get_all_backup11": { + "version": "2.0", + "response": [ + + ], + "page": { + "limit": 50, + "offset": 1, + "count": 0, + "sortBy": [ + { + "name": "startDate", + "order": "desc" + } + ] + }, + "filter": { + + } + }, + "playbook_negative_scenario11": [ + { + "backup_storage_configuration": [ + { + "data_retention_period": 53, + "encryption_passphrase": "Karthick@zigzag333", + "nfs_details": { + "nfs_port": 2049, + "nfs_portmapper_port": 111, + "nfs_version": "nfs4", + "source_path": "/home/nfsshare/backups/TB19" + }, + "server_type": "NFS" + } + ] + } + ], + "get_backup_configuration4": { + "version": "2.0", + "response": { + "type": "NFS", + "dataRetention": 53, + "mountPath": "/data/external/nfs-e9f731e5-ea3c-512d-acef-c169bca98d04", + "id": "1fa840d8-ee9f-4e0d-a560-694c5e939680", + "isEncryptionPassPhraseAvailable": true + } + }, + "playbook_negative_scenario12": [ + { + "backup_configurations": [ + { + "data_retention_period": 53, + "encryption_passphrase": "Karthick@zigzag333", + "nfs_details": { + "nfs_port": 2049, + "nfs_portmapper_port": 111, + "nfs_version": "nfs4", + "source_path": "/home/nfsshare/backups/TB19" + }, + "server_type": "NFS" + } + ] + } + ], + "playbook_negative_scenario13": [ + { + "backup_storage_configuration": { + "data_retention_period": 53, + "encryption_passphrase": "Karthick@zigzag333", + "nfs_details": { + "nfs_port": 2049, + "nfs_portmapper_port": 111, + "nfs_version": "nfs4", + "server_ip": "172.27.17.90", + "source_path": "/home/nfsshare/backups/TB19" + }, + "server_type": "NFS" + } + } + ], + "playbook_create_nfs_config": [ + { + "nfs_configuration": [ + { + "nfs_port": 2049, + "nfs_portmapper_port": 111, + "nfs_version": "nfs4", + "server_ip": "172.27.17.90", + "source_path": "/home/nfsshare/backups/TB22" + } + ] + } + ], + "get_all_n_f_s_configurations8": { + "version": "2.0", + "response": [ + { + "id": "8b737087-5995-40fd-8c78-1f3422958e13", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB18" + }, + "status": { + "destinationPath": "/data/external/nfs-013c9812-7359-57bb-88b0-53d01e4122ff", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + }, + { + "id": "c2f70608-3864-4457-9247-ce63841b386f", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB17" + }, + 
"status": { + "destinationPath": "/data/external/nfs-e9f731e5-ea3c-512d-acef-c169bca98d04", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + } + ] + }, + "create_n_f_s_configuration": null, + "get_all_n_f_s_configurations9": { + "version": "2.0", + "response": [ + { + "id": "8b737087-5995-40fd-8c78-1f3422958e13", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB18" + }, + "status": { + "destinationPath": "/data/external/nfs-013c9812-7359-57bb-88b0-53d01e4122ff", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + }, + { + "id": "c2f70608-3864-4457-9247-ce63841b386f", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB17" + }, + "status": { + "destinationPath": "/data/external/nfs-e9f731e5-ea3c-512d-acef-c169bca98d04", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + }, + { + "id": "eff7ff94-00e1-49ed-a703-e5a4ce1445d9", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB22" + }, + "status": { + "destinationPath": "/data/external/nfs-68f9d1cf-acf0-53da-9676-ba325f65c637", + "state": "UnHealthy", + "subResourceState": "172.23.9.219=Validation of configuration in progress", + "unhealthyNodes": [ + "172.23.9.219" + ] + } + } + ] + }, + "playbook_update_backup_config": [ + { + "backup_storage_configuration": [ + { + "data_retention_period": 51, + "encryption_passphrase": "Karthick@zigzag33333", + "nfs_details": { + "nfs_port": 2049, + "nfs_portmapper_port": 111, + "nfs_version": "nfs4", + "server_ip": "172.27.17.90", + "source_path": "/home/nfsshare/backups/TB18" + }, + "server_type": "NFS" + } + ] + } + ], + "get_all_n_f_s_configurations10": { + "version": "2.0", + "response": [ + { + "id": "597b3978-4255-481e-9b76-cc97500a5e18", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB19" + }, + "status": { + "destinationPath": "/data/external/nfs-0e1cfb41-59f4-52d8-9895-f5c784d5596e", + "state": "UnHealthy", + "subResourceState": "172.23.9.219=NFS mount point could not be mounted on the node", + "unhealthyNodes": [ + "172.23.9.219" + ] + } + }, + { + "id": "cfa6d1f6-5a78-4272-892a-db77d443a497", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB18" + }, + "status": { + "destinationPath": "/data/external/nfs-013c9812-7359-57bb-88b0-53d01e4122ff", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + } + ] + }, + "get_backup_configuration": { + "version": "2.0", + "response": { + "type": "NFS", + "dataRetention": 52, + "mountPath": "/data/external/nfs-013c9812-7359-57bb-88b0-53d01e4122ff", + "id": "5dfb56b0-3dd6-4ae2-8244-535d06618afb", + "isEncryptionPassPhraseAvailable": true + } + }, + "get_all_n_f_s_configurations11": { + "version": "2.0", + "response": [ + { + "id": "597b3978-4255-481e-9b76-cc97500a5e18", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB19" + }, 
+ "status": { + "destinationPath": "/data/external/nfs-0e1cfb41-59f4-52d8-9895-f5c784d5596e", + "state": "UnHealthy", + "subResourceState": "172.23.9.219=NFS mount point could not be mounted on the node", + "unhealthyNodes": [ + "172.23.9.219" + ] + } + }, + { + "id": "cfa6d1f6-5a78-4272-892a-db77d443a497", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB18" + }, + "status": { + "destinationPath": "/data/external/nfs-013c9812-7359-57bb-88b0-53d01e4122ff", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + } + ] + }, + "get_all_n_f_s_configurations12": { + "version": "2.0", + "response": [ + { + "id": "597b3978-4255-481e-9b76-cc97500a5e18", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB19" + }, + "status": { + "destinationPath": "/data/external/nfs-0e1cfb41-59f4-52d8-9895-f5c784d5596e", + "state": "UnHealthy", + "subResourceState": "172.23.9.219=NFS mount point could not be mounted on the node", + "unhealthyNodes": [ + "172.23.9.219" + ] + } + }, + { + "id": "cfa6d1f6-5a78-4272-892a-db77d443a497", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB18" + }, + "status": { + "destinationPath": "/data/external/nfs-013c9812-7359-57bb-88b0-53d01e4122ff", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + } + ] + }, + "create_backup_configuration": { + "version": "2.0", + "response": { + "message": "Backup configuration created successfully." 
+ } + }, + "get_all_n_f_s_configurations31": { + "version": "2.0", + "response": [ + { + "id": "597b3978-4255-481e-9b76-cc97500a5e18", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB19" + }, + "status": { + "destinationPath": "/data/external/nfs-0e1cfb41-59f4-52d8-9895-f5c784d5596e", + "state": "UnHealthy", + "subResourceState": "172.23.9.219=NFS mount point could not be mounted on the node", + "unhealthyNodes": [ + "172.23.9.219" + ] + } + }, + { + "id": "cfa6d1f6-5a78-4272-892a-db77d443a497", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB18" + }, + "status": { + "destinationPath": "/data/external/nfs-013c9812-7359-57bb-88b0-53d01e4122ff", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + } + ] + }, + "get_backup_configuration7": { + "version": "2.0", + "response": { + "type": "NFS", + "dataRetention": 51, + "mountPath": "/data/external/nfs-013c9812-7359-57bb-88b0-53d01e4122ff", + "id": "5dfb56b0-3dd6-4ae2-8244-535d06618afb", + "isEncryptionPassPhraseAvailable": true + } + }, + "playbook_backup_config_alreadyexists1": [ + { + "backup_storage_configuration": [ + { + "data_retention_period": 51, + "encryption_passphrase": "Karthick@zigzag33333", + "nfs_details": { + "nfs_port": 2049, + "nfs_portmapper_port": 111, + "nfs_version": "nfs4", + "server_ip": "172.27.17.90", + "source_path": "/home/nfsshare/backups/TB18" + }, + "server_type": "NFS" + } + ] + } +], + "get_all_n_f_s_configurations13": { + "version": "2.0", + "response": [ + { + "id": "597b3978-4255-481e-9b76-cc97500a5e18", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB19" + }, + "status": { + "destinationPath": "/data/external/nfs-0e1cfb41-59f4-52d8-9895-f5c784d5596e", + "state": "UnHealthy", + "subResourceState": "172.23.9.219=NFS mount point could not be mounted on the node", + "unhealthyNodes": [ + "172.23.9.219" + ] + } + }, + { + "id": "cfa6d1f6-5a78-4272-892a-db77d443a497", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB18" + }, + "status": { + "destinationPath": "/data/external/nfs-013c9812-7359-57bb-88b0-53d01e4122ff", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + } + ] + }, + "get_backup_configuration8": { + "version": "2.0", + "response": { + "type": "NFS", + "dataRetention": 51, + "mountPath": "/data/external/nfs-013c9812-7359-57bb-88b0-53d01e4122ff", + "id": "5dfb56b0-3dd6-4ae2-8244-535d06618afb", + "isEncryptionPassPhraseAvailable": true + } + }, + "get_all_n_f_s_configurations14": { + "version": "2.0", + "response": [ + { + "id": "597b3978-4255-481e-9b76-cc97500a5e18", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB19" + }, + "status": { + "destinationPath": "/data/external/nfs-0e1cfb41-59f4-52d8-9895-f5c784d5596e", + "state": "UnHealthy", + "subResourceState": "172.23.9.219=NFS mount point could not be mounted on the node", + "unhealthyNodes": [ + "172.23.9.219" + ] + } + }, + { + "id": "cfa6d1f6-5a78-4272-892a-db77d443a497", + "spec": { 
+ "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB18" + }, + "status": { + "destinationPath": "/data/external/nfs-013c9812-7359-57bb-88b0-53d01e4122ff", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + } + ] + }, + "get_all_n_f_s_configurations15": { + "version": "2.0", + "response": [ + { + "id": "597b3978-4255-481e-9b76-cc97500a5e18", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB19" + }, + "status": { + "destinationPath": "/data/external/nfs-0e1cfb41-59f4-52d8-9895-f5c784d5596e", + "state": "UnHealthy", + "subResourceState": "172.23.9.219=NFS mount point could not be mounted on the node", + "unhealthyNodes": [ + "172.23.9.219" + ] + } + }, + { + "id": "cfa6d1f6-5a78-4272-892a-db77d443a497", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB18" + }, + "status": { + "destinationPath": "/data/external/nfs-013c9812-7359-57bb-88b0-53d01e4122ff", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + } + ] + }, + "get_all_n_f_s_configurations32": { + "version": "2.0", + "response": [ + { + "id": "597b3978-4255-481e-9b76-cc97500a5e18", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB19" + }, + "status": { + "destinationPath": "/data/external/nfs-0e1cfb41-59f4-52d8-9895-f5c784d5596e", + "state": "UnHealthy", + "subResourceState": "172.23.9.219=NFS mount point could not be mounted on the node", + "unhealthyNodes": [ + "172.23.9.219" + ] + } + }, + { + "id": "cfa6d1f6-5a78-4272-892a-db77d443a497", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB18" + }, + "status": { + "destinationPath": "/data/external/nfs-013c9812-7359-57bb-88b0-53d01e4122ff", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + } + ] + }, + "get_backup_configuration9": { + "version": "2.0", + "response": { + "type": "NFS", + "dataRetention": 51, + "mountPath": "/data/external/nfs-013c9812-7359-57bb-88b0-53d01e4122ff", + "id": "5dfb56b0-3dd6-4ae2-8244-535d06618afb", + "isEncryptionPassPhraseAvailable": true + } + }, + + "playbook_backup_config_password_exception": [ + { + "backup_storage_configuration": [ + { + "data_retention_period": 9, + "encryption_passphrase": "Karthick@zigza", + "nfs_details": { + "nfs_port": 2049, + "nfs_portmapper_port": 111, + "nfs_version": "nfs4", + "server_ip": "172.27.17.90", + "source_path": "/home/nfsshare/backups/TB18" + }, + "server_type": "NFS" + } + ] + } + ], + "get_all_n_f_s_configurations16": { + "version": "2.0", + "response": [ + { + "id": "2c3b1518-12c9-4ae1-abe6-3ee858fac515", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB21" + }, + "status": { + "destinationPath": "/data/external/nfs-17a13b08-f346-5694-9ad6-6a32620cec1f", + "state": "UnHealthy", + "subResourceState": "172.23.9.219=NFS mount point could not be mounted on the node", + "unhealthyNodes": [ + "172.23.9.219" + ] + } + }, 
+ { + "id": "4f09c3d7-ba64-49e1-ac2d-a27e80ac63df", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB18" + }, + "status": { + "destinationPath": "/data/external/nfs-013c9812-7359-57bb-88b0-53d01e4122ff", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + }, + { + "id": "7c2f408d-c218-431e-aab6-95a65970737f", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB17" + }, + "status": { + "destinationPath": "/data/external/nfs-e9f731e5-ea3c-512d-acef-c169bca98d04", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + } + ] + }, + "get_backup_configuration10": { + "version": "2.0", + "response": { + "type": "NFS", + "dataRetention": 9, + "mountPath": "/data/external/nfs-e9f731e5-ea3c-512d-acef-c169bca98d04", + "id": "1fa840d8-ee9f-4e0d-a560-694c5e939680", + "isEncryptionPassPhraseAvailable": true + } + }, + "get_all_n_f_s_configurations17": { + "version": "2.0", + "response": [ + { + "id": "2c3b1518-12c9-4ae1-abe6-3ee858fac515", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB21" + }, + "status": { + "destinationPath": "/data/external/nfs-17a13b08-f346-5694-9ad6-6a32620cec1f", + "state": "UnHealthy", + "subResourceState": "172.23.9.219=NFS mount point could not be mounted on the node", + "unhealthyNodes": [ + "172.23.9.219" + ] + } + }, + { + "id": "4f09c3d7-ba64-49e1-ac2d-a27e80ac63df", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB18" + }, + "status": { + "destinationPath": "/data/external/nfs-013c9812-7359-57bb-88b0-53d01e4122ff", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + }, + { + "id": "7c2f408d-c218-431e-aab6-95a65970737f", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB17" + }, + "status": { + "destinationPath": "/data/external/nfs-e9f731e5-ea3c-512d-acef-c169bca98d04", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + } + ] + }, + + "playbook_negative_scenario14": [ + { + "restore_operations": [ + { + "names": "BACKUP24_07" + } + ] + } + ], + "get_all_n_f_s_configurations20": { + "version": "2.0", + "response": [ + { + "id": "2c3b1518-12c9-4ae1-abe6-3ee858fac515", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB21" + }, + "status": { + "destinationPath": "/data/external/nfs-17a13b08-f346-5694-9ad6-6a32620cec1f", + "state": "UnHealthy", + "subResourceState": "172.23.9.219=NFS mount point could not be mounted on the node", + "unhealthyNodes": [ + "172.23.9.219" + ] + } + }, + { + "id": "4f09c3d7-ba64-49e1-ac2d-a27e80ac63df", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB18" + }, + "status": { + "destinationPath": "/data/external/nfs-013c9812-7359-57bb-88b0-53d01e4122ff", + "state": "Healthy", + "subResourceState": 
"", + "unhealthyNodes": null + } + }, + { + "id": "6a1490a6-4ca5-42b8-8a3f-9a352430b9fc", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB22" + }, + "status": { + "destinationPath": "/data/external/nfs-68f9d1cf-acf0-53da-9676-ba325f65c637", + "state": "UnHealthy", + "subResourceState": "172.23.9.219=NFS mount point could not be mounted on the node", + "unhealthyNodes": [ + "172.23.9.219" + ] + } + }, + { + "id": "7c2f408d-c218-431e-aab6-95a65970737f", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB17" + }, + "status": { + "destinationPath": "/data/external/nfs-e9f731e5-ea3c-512d-acef-c169bca98d04", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + } + ] + }, + "playbook_restore_exception": [ + { + "restore_operations": [ + { + "encryption_passphrase": "Karthick@", + "name": "BACKUP24_07" + } + ] + } + ], + "get_all_n_f_s_configurations21": { + "version": "2.0", + "response": [ + { + "id": "2c3b1518-12c9-4ae1-abe6-3ee858fac515", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB21" + }, + "status": { + "destinationPath": "/data/external/nfs-17a13b08-f346-5694-9ad6-6a32620cec1f", + "state": "UnHealthy", + "subResourceState": "172.23.9.219=NFS mount point could not be mounted on the node", + "unhealthyNodes": [ + "172.23.9.219" + ] + } + }, + { + "id": "4f09c3d7-ba64-49e1-ac2d-a27e80ac63df", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB18" + }, + "status": { + "destinationPath": "/data/external/nfs-013c9812-7359-57bb-88b0-53d01e4122ff", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + }, + { + "id": "6a1490a6-4ca5-42b8-8a3f-9a352430b9fc", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB22" + }, + "status": { + "destinationPath": "/data/external/nfs-68f9d1cf-acf0-53da-9676-ba325f65c637", + "state": "UnHealthy", + "subResourceState": "172.23.9.219=NFS mount point could not be mounted on the node", + "unhealthyNodes": [ + "172.23.9.219" + ] + } + }, + { + "id": "7c2f408d-c218-431e-aab6-95a65970737f", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB17" + }, + "status": { + "destinationPath": "/data/external/nfs-e9f731e5-ea3c-512d-acef-c169bca98d04", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + } + ] + }, + "get_all_backup": { + "version": "2.0", + "response": [ + { + "name": "BACKUP24_07", + "scope": "CISCO_DNA_DATA_WITHOUT_ASSURANCE", + "versions": [ + "mks-upgrade:3.1.166", + "core-platform:0.10.205", + "managed-services-shared:0.10.19", + "gateway:0.10.12", + "core-addons:0.10.47", + "iam:5.4.47", + "harbor-ui:3.5.144", + "idm-ui-plugin:5.4.29", + "data-ingestion-infra:6.6.15", + "data-contextstore:6.6.10", + "data-analytics-infra:6.6.127", + "data-platform-pipeline-infra:6.6.126", + "data-store-management:6.6.45", + "data-platform-ui:6.6.101", + 
"cloud-connectivity-datahub:6.10.26", + "cloud-connectivity:6.10.13", + "dnacaap:6.9.43", + "dnac-search:6.9.3", + "system-management-operations:1.6.50", + "telemetry:4.7.20", + "cloud-connectivity-sense:7.0.7", + "digestor:7.0.5", + "rca-scripts:0.5.6", + "platform-refresh:1.3.69", + "catalyst-center-api-catalog:6.8.96", + "b2b-upgrade:2.732.65310", + "system-commons:2.732.65310", + "ise-bridge:3.150.1", + "ncp:2.732.65310", + "assurance-base-analytics:3.150.100202", + "appsvc-remediation:3.15.5", + "assurance:3.150.141", + "ai-network-analytics:4.0.32", + "ssa:2.732.1095118", + "aca:2.732.65310", + "group-based-policy-analytics:3.150.8", + "sda:2.732.65310", + "app-visibility-and-policy:2.732.117621", + "endpoint-analytics:1.11.1702", + "rogue-management:3.1.202", + "app-hosting:2.3.325072204", + "sea-app-package:2.732.685009", + "icap:2.732.65310", + "wide-area-bonjour:2.732.755006", + "sensor-automation:2.732.65310", + "sensor-assurance:3.150.140", + "disaster-recovery:2.732.365017", + "support-services:2.732.885097", + "multi-dnac-enablement:2.732.65310" + ], + "id": "6ac1a2f0-20a7-42cc-aae8-1120cdb73128", + "size": 41873023765, + "createdDate": "2025-08-05T09:45:48Z", + "isBackupAvailable": true, + "isCompatible": true, + "context": { + "type": "ON_DEMAND", + "schedule": "" + }, + "storage": { + "type": "NFS", + "mountPath": "/data/external/nfs-e9f731e5-ea3c-512d-acef-c169bca98d04", + "id": "1fa840d8-ee9f-4e0d-a560-694c5e939680", + "name": "", + "host": "", + "serverPath": "" + }, + "status": "SUCCESS", + "endDate": "2025-08-05T10:01:25Z", + "createdBy": "Anonymous", + "duration": 937, + "releaseName": "uber-dnac", + "releaseDisplayName": "Cisco Catalyst Center", + "releaseVersion": "", + "releaseDisplayVersion": "3.1.5-75085", + "installedPackages": [ + { + "name": "mks-upgrade", + "displayName": "System", + "version": "3.1.166" + }, + { + "name": "core-platform", + "displayName": "Core Platform", + "version": "0.10.205" + }, + { + "name": "managed-services-shared", + "displayName": "Shared Managed Services", + "version": "0.10.19" + }, + { + "name": "gateway", + "displayName": "Catalyst Center Gateway Service", + "version": "0.10.12" + }, + { + "name": "core-addons", + "displayName": "System Addons", + "version": "0.10.47" + }, + { + "name": "iam", + "displayName": "Identity and Access Management", + "version": "5.4.47" + }, + { + "name": "harbor-ui", + "displayName": "Cisco Catalyst Center UI", + "version": "3.5.144" + }, + { + "name": "idm-ui-plugin", + "displayName": "Identity and Access Management - UI", + "version": "5.4.29" + }, + { + "name": "data-ingestion-infra", + "displayName": "Network Data Platform - Ingestion Infra", + "version": "6.6.15" + }, + { + "name": "data-contextstore", + "displayName": "Network Data Platform - Caching Infra", + "version": "6.6.10" + }, + { + "name": "data-analytics-infra", + "displayName": "Network Data Platform - Core", + "version": "6.6.127" + }, + { + "name": "data-platform-pipeline-infra", + "displayName": "Network Data Platform - Pipeline Infra", + "version": "6.6.126" + }, + { + "name": "data-store-management", + "displayName": "Network Data Platform - Storage Management", + "version": "6.6.45" + }, + { + "name": "data-platform-ui", + "displayName": "Network Data Platform - Manager", + "version": "6.6.101" + }, + { + "name": "cloud-connectivity-datahub", + "displayName": "DxHub Cloud Connectivity", + "version": "6.10.26" + }, + { + "name": "cloud-connectivity", + "displayName": "Cloud Connectivity", + "version": "6.10.13" + }, + { + 
"name": "dnacaap", + "displayName": "Cisco Catalyst Center Platform", + "version": "6.9.43" + }, + { + "name": "dnac-search", + "displayName": "Cisco Catalyst Center Global Search", + "version": "6.9.3" + }, + { + "name": "system-management-operations", + "displayName": "System Management Operations", + "version": "1.6.50" + }, + { + "name": "telemetry", + "displayName": "Telemetry", + "version": "4.7.20" + }, + { + "name": "cloud-connectivity-sense", + "displayName": "Cloud Connectivity - Contextual Content", + "version": "7.0.7" + }, + { + "name": "digestor", + "displayName": "Cloud Connectivity - Digestor", + "version": "7.0.5" + }, + { + "name": "rca-scripts", + "displayName": "RCA-Scripts Package", + "version": "0.5.6" + }, + { + "name": "platform-refresh", + "displayName": "Platform Refresh", + "version": "1.3.69" + }, + { + "name": "catalyst-center-api-catalog", + "displayName": "Catalyst Center API Catalog", + "version": "6.8.96" + }, + { + "name": "b2b-upgrade", + "displayName": "", + "version": "2.732.65310" + }, + { + "name": "system-commons", + "displayName": "System Commons", + "version": "2.732.65310" + }, + { + "name": "ise-bridge", + "displayName": "Cisco Identity Services Engine Bridge", + "version": "3.150.1" + }, + { + "name": "ncp", + "displayName": "Network Controller Platform", + "version": "2.732.65310" + }, + { + "name": "assurance-base-analytics", + "displayName": "Network Data Platform - Base Analytics", + "version": "3.150.100202" + }, + { + "name": "appsvc-remediation", + "displayName": "Application and Service Remediation", + "version": "3.15.5" + }, + { + "name": "assurance", + "displayName": "Assurance", + "version": "3.150.141" + }, + { + "name": "ai-network-analytics", + "displayName": "AI Network Analytics", + "version": "4.0.32" + }, + { + "name": "ssa", + "displayName": "Stealthwatch Security Analytics", + "version": "2.732.1095118" + }, + { + "name": "aca", + "displayName": "Access Control Application", + "version": "2.732.65310" + }, + { + "name": "group-based-policy-analytics", + "displayName": "Group Based Policy Analytics", + "version": "3.150.8" + }, + { + "name": "sda", + "displayName": "SD Access", + "version": "2.732.65310" + }, + { + "name": "app-visibility-and-policy", + "displayName": "Application Visibility and Policy", + "version": "2.732.117621" + }, + { + "name": "endpoint-analytics", + "displayName": "AI Endpoint Analytics", + "version": "1.11.1702" + }, + { + "name": "rogue-management", + "displayName": "Rogue and aWIPS", + "version": "3.1.202" + }, + { + "name": "app-hosting", + "displayName": "Application Hosting", + "version": "2.3.325072204" + }, + { + "name": "sea-app-package", + "displayName": "SEA App", + "version": "2.732.685009" + }, + { + "name": "icap", + "displayName": "Automation - Intelligent Capture", + "version": "2.732.65310" + }, + { + "name": "wide-area-bonjour", + "displayName": "Wide Area Bonjour", + "version": "2.732.755006" + }, + { + "name": "sensor-automation", + "displayName": "Automation - Sensor", + "version": "2.732.65310" + }, + { + "name": "sensor-assurance", + "displayName": "Assurance - Sensor", + "version": "3.150.140" + }, + { + "name": "disaster-recovery", + "displayName": "Disaster Recovery", + "version": "2.732.365017" + }, + { + "name": "support-services", + "displayName": "Support Services", + "version": "2.732.885097" + }, + { + "name": "multi-dnac-enablement", + "displayName": "Multiple Cisco Catalyst Center", + "version": "2.732.65310" + } + ], + "numberOfNodes": 1, + "productType": "DNAC", + 
"productVersion": "3.732.75085", + "internetProtocolVersion": "ipv4", + "fipsEnabled": false, + "compatibilityError": [ + + ] + } + ], + "page": { + "limit": 50, + "offset": 1, + "count": 1, + "sortBy": [ + { + "name": "startDate", + "order": "desc" + } + ] + }, + "filter": { + + } + }, + "playbook_nfs_delete": [ + { + "nfs_configuration": [ + { + "nfs_port": 2049, + "nfs_portmapper_port": 111, + "nfs_version": "nfs4", + "server_ip": "172.27.17.90", + "source_path": "/home/nfsshare/backups/TB22" + } + ] + } + ], + "get_all_n_f_s_configurations22": { + "version": "2.0", + "response": [ + { + "id": "2c3b1518-12c9-4ae1-abe6-3ee858fac515", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB21" + }, + "status": { + "destinationPath": "/data/external/nfs-17a13b08-f346-5694-9ad6-6a32620cec1f", + "state": "UnHealthy", + "subResourceState": "172.23.9.219=NFS mount point could not be mounted on the node", + "unhealthyNodes": [ + "172.23.9.219" + ] + } + }, + { + "id": "4f09c3d7-ba64-49e1-ac2d-a27e80ac63df", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB18" + }, + "status": { + "destinationPath": "/data/external/nfs-013c9812-7359-57bb-88b0-53d01e4122ff", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + }, + { + "id": "6a1490a6-4ca5-42b8-8a3f-9a352430b9fc", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB22" + }, + "status": { + "destinationPath": "/data/external/nfs-68f9d1cf-acf0-53da-9676-ba325f65c637", + "state": "UnHealthy", + "subResourceState": "172.23.9.219=NFS mount point could not be mounted on the node", + "unhealthyNodes": [ + "172.23.9.219" + ] + } + }, + { + "id": "7c2f408d-c218-431e-aab6-95a65970737f", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB17" + }, + "status": { + "destinationPath": "/data/external/nfs-e9f731e5-ea3c-512d-acef-c169bca98d04", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + } + ] + }, + "delete_n_f_s_configuration": {"version": "2.0", "response": {"message": "Delete NFS configuration submitted successfully."}}, + "get_all_n_f_s_configurations23": { + "version": "2.0", + "response": [ + { + "id": "2c3b1518-12c9-4ae1-abe6-3ee858fac515", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB21" + }, + "status": { + "destinationPath": "/data/external/nfs-17a13b08-f346-5694-9ad6-6a32620cec1f", + "state": "UnHealthy", + "subResourceState": "172.23.9.219=NFS mount point could not be mounted on the node", + "unhealthyNodes": [ + "172.23.9.219" + ] + } + }, + { + "id": "4f09c3d7-ba64-49e1-ac2d-a27e80ac63df", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB18" + }, + "status": { + "destinationPath": "/data/external/nfs-013c9812-7359-57bb-88b0-53d01e4122ff", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + }, + { + "id": 
"6a1490a6-4ca5-42b8-8a3f-9a352430b9fc", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB22" + }, + "status": { + "destinationPath": "/data/external/nfs-68f9d1cf-acf0-53da-9676-ba325f65c637", + "state": "UnHealthy", + "subResourceState": "172.23.9.219=NFS mount point could not be mounted on the node", + "unhealthyNodes": [ + "172.23.9.219" + ] + } + }, + { + "id": "7c2f408d-c218-431e-aab6-95a65970737f", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB17" + }, + "status": { + "destinationPath": "/data/external/nfs-e9f731e5-ea3c-512d-acef-c169bca98d04", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + } + ] + }, + "playbook_backup_schedule_alreadydeleted1": [ + { + "backup": [ + { + "name": "BACKUP25_07", + "scope": "CISCO_DNA_DATA_WITHOUT_ASSURANCE" + } + ] + } + ], + "get_all_n_f_s_configurations24": { + "version": "2.0", + "response": [ + { + "id": "2c3b1518-12c9-4ae1-abe6-3ee858fac515", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB21" + }, + "status": { + "destinationPath": "/data/external/nfs-17a13b08-f346-5694-9ad6-6a32620cec1f", + "state": "UnHealthy", + "subResourceState": "172.23.9.219=NFS mount point could not be mounted on the node", + "unhealthyNodes": [ + "172.23.9.219" + ] + } + }, + { + "id": "4f09c3d7-ba64-49e1-ac2d-a27e80ac63df", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB18" + }, + "status": { + "destinationPath": "/data/external/nfs-013c9812-7359-57bb-88b0-53d01e4122ff", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + }, + { + "id": "7c2f408d-c218-431e-aab6-95a65970737f", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB17" + }, + "status": { + "destinationPath": "/data/external/nfs-e9f731e5-ea3c-512d-acef-c169bca98d04", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + } + ] + }, + "get_all_backup12": { + "version": "2.0", + "response": [ + { + "name": "BACKUP24_07", + "scope": "CISCO_DNA_DATA_WITHOUT_ASSURANCE", + "versions": [ + "mks-upgrade:3.1.166", + "core-platform:0.10.205", + "managed-services-shared:0.10.19", + "gateway:0.10.12", + "core-addons:0.10.47", + "iam:5.4.47", + "harbor-ui:3.5.144", + "idm-ui-plugin:5.4.29", + "data-ingestion-infra:6.6.15", + "data-contextstore:6.6.10", + "data-analytics-infra:6.6.127", + "data-platform-pipeline-infra:6.6.126", + "data-store-management:6.6.45", + "data-platform-ui:6.6.101", + "cloud-connectivity-datahub:6.10.26", + "cloud-connectivity:6.10.13", + "dnacaap:6.9.43", + "dnac-search:6.9.3", + "system-management-operations:1.6.50", + "telemetry:4.7.20", + "cloud-connectivity-sense:7.0.7", + "digestor:7.0.5", + "rca-scripts:0.5.6", + "platform-refresh:1.3.69", + "catalyst-center-api-catalog:6.8.96", + "b2b-upgrade:2.732.65310", + "system-commons:2.732.65310", + "ise-bridge:3.150.1", + "ncp:2.732.65310", + "assurance-base-analytics:3.150.100202", + "appsvc-remediation:3.15.5", + "assurance:3.150.141", + 
"ai-network-analytics:4.0.32", + "ssa:2.732.1095118", + "aca:2.732.65310", + "group-based-policy-analytics:3.150.8", + "sda:2.732.65310", + "app-visibility-and-policy:2.732.117621", + "endpoint-analytics:1.11.1702", + "rogue-management:3.1.202", + "app-hosting:2.3.325072204", + "sea-app-package:2.732.685009", + "icap:2.732.65310", + "wide-area-bonjour:2.732.755006", + "sensor-automation:2.732.65310", + "sensor-assurance:3.150.140", + "disaster-recovery:2.732.365017", + "support-services:2.732.885097", + "multi-dnac-enablement:2.732.65310" + ], + "id": "6ac1a2f0-20a7-42cc-aae8-1120cdb73128", + "size": 41873023765, + "createdDate": "2025-08-05T09:45:48Z", + "isBackupAvailable": true, + "isCompatible": true, + "context": { + "type": "ON_DEMAND", + "schedule": "" + }, + "storage": { + "type": "NFS", + "mountPath": "/data/external/nfs-e9f731e5-ea3c-512d-acef-c169bca98d04", + "id": "1fa840d8-ee9f-4e0d-a560-694c5e939680", + "name": "", + "host": "", + "serverPath": "" + }, + "status": "SUCCESS", + "endDate": "2025-08-05T10:01:25Z", + "createdBy": "Anonymous", + "duration": 937, + "releaseName": "uber-dnac", + "releaseDisplayName": "Cisco Catalyst Center", + "releaseVersion": "", + "releaseDisplayVersion": "3.1.5-75085", + "installedPackages": [ + { + "name": "mks-upgrade", + "displayName": "System", + "version": "3.1.166" + }, + { + "name": "core-platform", + "displayName": "Core Platform", + "version": "0.10.205" + }, + { + "name": "managed-services-shared", + "displayName": "Shared Managed Services", + "version": "0.10.19" + }, + { + "name": "gateway", + "displayName": "Catalyst Center Gateway Service", + "version": "0.10.12" + }, + { + "name": "core-addons", + "displayName": "System Addons", + "version": "0.10.47" + }, + { + "name": "iam", + "displayName": "Identity and Access Management", + "version": "5.4.47" + }, + { + "name": "harbor-ui", + "displayName": "Cisco Catalyst Center UI", + "version": "3.5.144" + }, + { + "name": "idm-ui-plugin", + "displayName": "Identity and Access Management - UI", + "version": "5.4.29" + }, + { + "name": "data-ingestion-infra", + "displayName": "Network Data Platform - Ingestion Infra", + "version": "6.6.15" + }, + { + "name": "data-contextstore", + "displayName": "Network Data Platform - Caching Infra", + "version": "6.6.10" + }, + { + "name": "data-analytics-infra", + "displayName": "Network Data Platform - Core", + "version": "6.6.127" + }, + { + "name": "data-platform-pipeline-infra", + "displayName": "Network Data Platform - Pipeline Infra", + "version": "6.6.126" + }, + { + "name": "data-store-management", + "displayName": "Network Data Platform - Storage Management", + "version": "6.6.45" + }, + { + "name": "data-platform-ui", + "displayName": "Network Data Platform - Manager", + "version": "6.6.101" + }, + { + "name": "cloud-connectivity-datahub", + "displayName": "DxHub Cloud Connectivity", + "version": "6.10.26" + }, + { + "name": "cloud-connectivity", + "displayName": "Cloud Connectivity", + "version": "6.10.13" + }, + { + "name": "dnacaap", + "displayName": "Cisco Catalyst Center Platform", + "version": "6.9.43" + }, + { + "name": "dnac-search", + "displayName": "Cisco Catalyst Center Global Search", + "version": "6.9.3" + }, + { + "name": "system-management-operations", + "displayName": "System Management Operations", + "version": "1.6.50" + }, + { + "name": "telemetry", + "displayName": "Telemetry", + "version": "4.7.20" + }, + { + "name": "cloud-connectivity-sense", + "displayName": "Cloud Connectivity - Contextual Content", + "version": 
"7.0.7" + }, + { + "name": "digestor", + "displayName": "Cloud Connectivity - Digestor", + "version": "7.0.5" + }, + { + "name": "rca-scripts", + "displayName": "RCA-Scripts Package", + "version": "0.5.6" + }, + { + "name": "platform-refresh", + "displayName": "Platform Refresh", + "version": "1.3.69" + }, + { + "name": "catalyst-center-api-catalog", + "displayName": "Catalyst Center API Catalog", + "version": "6.8.96" + }, + { + "name": "b2b-upgrade", + "displayName": "", + "version": "2.732.65310" + }, + { + "name": "system-commons", + "displayName": "System Commons", + "version": "2.732.65310" + }, + { + "name": "ise-bridge", + "displayName": "Cisco Identity Services Engine Bridge", + "version": "3.150.1" + }, + { + "name": "ncp", + "displayName": "Network Controller Platform", + "version": "2.732.65310" + }, + { + "name": "assurance-base-analytics", + "displayName": "Network Data Platform - Base Analytics", + "version": "3.150.100202" + }, + { + "name": "appsvc-remediation", + "displayName": "Application and Service Remediation", + "version": "3.15.5" + }, + { + "name": "assurance", + "displayName": "Assurance", + "version": "3.150.141" + }, + { + "name": "ai-network-analytics", + "displayName": "AI Network Analytics", + "version": "4.0.32" + }, + { + "name": "ssa", + "displayName": "Stealthwatch Security Analytics", + "version": "2.732.1095118" + }, + { + "name": "aca", + "displayName": "Access Control Application", + "version": "2.732.65310" + }, + { + "name": "group-based-policy-analytics", + "displayName": "Group Based Policy Analytics", + "version": "3.150.8" + }, + { + "name": "sda", + "displayName": "SD Access", + "version": "2.732.65310" + }, + { + "name": "app-visibility-and-policy", + "displayName": "Application Visibility and Policy", + "version": "2.732.117621" + }, + { + "name": "endpoint-analytics", + "displayName": "AI Endpoint Analytics", + "version": "1.11.1702" + }, + { + "name": "rogue-management", + "displayName": "Rogue and aWIPS", + "version": "3.1.202" + }, + { + "name": "app-hosting", + "displayName": "Application Hosting", + "version": "2.3.325072204" + }, + { + "name": "sea-app-package", + "displayName": "SEA App", + "version": "2.732.685009" + }, + { + "name": "icap", + "displayName": "Automation - Intelligent Capture", + "version": "2.732.65310" + }, + { + "name": "wide-area-bonjour", + "displayName": "Wide Area Bonjour", + "version": "2.732.755006" + }, + { + "name": "sensor-automation", + "displayName": "Automation - Sensor", + "version": "2.732.65310" + }, + { + "name": "sensor-assurance", + "displayName": "Assurance - Sensor", + "version": "3.150.140" + }, + { + "name": "disaster-recovery", + "displayName": "Disaster Recovery", + "version": "2.732.365017" + }, + { + "name": "support-services", + "displayName": "Support Services", + "version": "2.732.885097" + }, + { + "name": "multi-dnac-enablement", + "displayName": "Multiple Cisco Catalyst Center", + "version": "2.732.65310" + } + ], + "numberOfNodes": 1, + "productType": "DNAC", + "productVersion": "3.732.75085", + "internetProtocolVersion": "ipv4", + "fipsEnabled": false, + "compatibilityError": [ + + ] + } + ], + "page": { + "limit": 50, + "offset": 1, + "count": 1, + "sortBy": [ + { + "name": "startDate", + "order": "desc" + } + ] + }, + "filter": { + + } + }, + "get_all_n_f_s_configurations25": { + "version": "2.0", + "response": [ + { + "id": "2c3b1518-12c9-4ae1-abe6-3ee858fac515", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + 
"serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB21" + }, + "status": { + "destinationPath": "/data/external/nfs-17a13b08-f346-5694-9ad6-6a32620cec1f", + "state": "UnHealthy", + "subResourceState": "172.23.9.219=NFS mount point could not be mounted on the node", + "unhealthyNodes": [ + "172.23.9.219" + ] + } + }, + { + "id": "4f09c3d7-ba64-49e1-ac2d-a27e80ac63df", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB18" + }, + "status": { + "destinationPath": "/data/external/nfs-013c9812-7359-57bb-88b0-53d01e4122ff", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + }, + { + "id": "7c2f408d-c218-431e-aab6-95a65970737f", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB17" + }, + "status": { + "destinationPath": "/data/external/nfs-e9f731e5-ea3c-512d-acef-c169bca98d04", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + } + ] + }, + "get_all_backup13": { + "version": "2.0", + "response": [ + { + "name": "BACKUP24_07", + "scope": "CISCO_DNA_DATA_WITHOUT_ASSURANCE", + "versions": [ + "mks-upgrade:3.1.166", + "core-platform:0.10.205", + "managed-services-shared:0.10.19", + "gateway:0.10.12", + "core-addons:0.10.47", + "iam:5.4.47", + "harbor-ui:3.5.144", + "idm-ui-plugin:5.4.29", + "data-ingestion-infra:6.6.15", + "data-contextstore:6.6.10", + "data-analytics-infra:6.6.127", + "data-platform-pipeline-infra:6.6.126", + "data-store-management:6.6.45", + "data-platform-ui:6.6.101", + "cloud-connectivity-datahub:6.10.26", + "cloud-connectivity:6.10.13", + "dnacaap:6.9.43", + "dnac-search:6.9.3", + "system-management-operations:1.6.50", + "telemetry:4.7.20", + "cloud-connectivity-sense:7.0.7", + "digestor:7.0.5", + "rca-scripts:0.5.6", + "platform-refresh:1.3.69", + "catalyst-center-api-catalog:6.8.96", + "b2b-upgrade:2.732.65310", + "system-commons:2.732.65310", + "ise-bridge:3.150.1", + "ncp:2.732.65310", + "assurance-base-analytics:3.150.100202", + "appsvc-remediation:3.15.5", + "assurance:3.150.141", + "ai-network-analytics:4.0.32", + "ssa:2.732.1095118", + "aca:2.732.65310", + "group-based-policy-analytics:3.150.8", + "sda:2.732.65310", + "app-visibility-and-policy:2.732.117621", + "endpoint-analytics:1.11.1702", + "rogue-management:3.1.202", + "app-hosting:2.3.325072204", + "sea-app-package:2.732.685009", + "icap:2.732.65310", + "wide-area-bonjour:2.732.755006", + "sensor-automation:2.732.65310", + "sensor-assurance:3.150.140", + "disaster-recovery:2.732.365017", + "support-services:2.732.885097", + "multi-dnac-enablement:2.732.65310" + ], + "id": "6ac1a2f0-20a7-42cc-aae8-1120cdb73128", + "size": 41873023765, + "createdDate": "2025-08-05T09:45:48Z", + "isBackupAvailable": true, + "isCompatible": true, + "context": { + "type": "ON_DEMAND", + "schedule": "" + }, + "storage": { + "type": "NFS", + "mountPath": "/data/external/nfs-e9f731e5-ea3c-512d-acef-c169bca98d04", + "id": "1fa840d8-ee9f-4e0d-a560-694c5e939680", + "name": "", + "host": "", + "serverPath": "" + }, + "status": "SUCCESS", + "endDate": "2025-08-05T10:01:25Z", + "createdBy": "Anonymous", + "duration": 937, + "releaseName": "uber-dnac", + "releaseDisplayName": "Cisco Catalyst Center", + "releaseVersion": "", + "releaseDisplayVersion": "3.1.5-75085", + "installedPackages": [ + { + "name": "mks-upgrade", + "displayName": 
"System", + "version": "3.1.166" + }, + { + "name": "core-platform", + "displayName": "Core Platform", + "version": "0.10.205" + }, + { + "name": "managed-services-shared", + "displayName": "Shared Managed Services", + "version": "0.10.19" + }, + { + "name": "gateway", + "displayName": "Catalyst Center Gateway Service", + "version": "0.10.12" + }, + { + "name": "core-addons", + "displayName": "System Addons", + "version": "0.10.47" + }, + { + "name": "iam", + "displayName": "Identity and Access Management", + "version": "5.4.47" + }, + { + "name": "harbor-ui", + "displayName": "Cisco Catalyst Center UI", + "version": "3.5.144" + }, + { + "name": "idm-ui-plugin", + "displayName": "Identity and Access Management - UI", + "version": "5.4.29" + }, + { + "name": "data-ingestion-infra", + "displayName": "Network Data Platform - Ingestion Infra", + "version": "6.6.15" + }, + { + "name": "data-contextstore", + "displayName": "Network Data Platform - Caching Infra", + "version": "6.6.10" + }, + { + "name": "data-analytics-infra", + "displayName": "Network Data Platform - Core", + "version": "6.6.127" + }, + { + "name": "data-platform-pipeline-infra", + "displayName": "Network Data Platform - Pipeline Infra", + "version": "6.6.126" + }, + { + "name": "data-store-management", + "displayName": "Network Data Platform - Storage Management", + "version": "6.6.45" + }, + { + "name": "data-platform-ui", + "displayName": "Network Data Platform - Manager", + "version": "6.6.101" + }, + { + "name": "cloud-connectivity-datahub", + "displayName": "DxHub Cloud Connectivity", + "version": "6.10.26" + }, + { + "name": "cloud-connectivity", + "displayName": "Cloud Connectivity", + "version": "6.10.13" + }, + { + "name": "dnacaap", + "displayName": "Cisco Catalyst Center Platform", + "version": "6.9.43" + }, + { + "name": "dnac-search", + "displayName": "Cisco Catalyst Center Global Search", + "version": "6.9.3" + }, + { + "name": "system-management-operations", + "displayName": "System Management Operations", + "version": "1.6.50" + }, + { + "name": "telemetry", + "displayName": "Telemetry", + "version": "4.7.20" + }, + { + "name": "cloud-connectivity-sense", + "displayName": "Cloud Connectivity - Contextual Content", + "version": "7.0.7" + }, + { + "name": "digestor", + "displayName": "Cloud Connectivity - Digestor", + "version": "7.0.5" + }, + { + "name": "rca-scripts", + "displayName": "RCA-Scripts Package", + "version": "0.5.6" + }, + { + "name": "platform-refresh", + "displayName": "Platform Refresh", + "version": "1.3.69" + }, + { + "name": "catalyst-center-api-catalog", + "displayName": "Catalyst Center API Catalog", + "version": "6.8.96" + }, + { + "name": "b2b-upgrade", + "displayName": "", + "version": "2.732.65310" + }, + { + "name": "system-commons", + "displayName": "System Commons", + "version": "2.732.65310" + }, + { + "name": "ise-bridge", + "displayName": "Cisco Identity Services Engine Bridge", + "version": "3.150.1" + }, + { + "name": "ncp", + "displayName": "Network Controller Platform", + "version": "2.732.65310" + }, + { + "name": "assurance-base-analytics", + "displayName": "Network Data Platform - Base Analytics", + "version": "3.150.100202" + }, + { + "name": "appsvc-remediation", + "displayName": "Application and Service Remediation", + "version": "3.15.5" + }, + { + "name": "assurance", + "displayName": "Assurance", + "version": "3.150.141" + }, + { + "name": "ai-network-analytics", + "displayName": "AI Network Analytics", + "version": "4.0.32" + }, + { + "name": "ssa", + "displayName": 
"Stealthwatch Security Analytics", + "version": "2.732.1095118" + }, + { + "name": "aca", + "displayName": "Access Control Application", + "version": "2.732.65310" + }, + { + "name": "group-based-policy-analytics", + "displayName": "Group Based Policy Analytics", + "version": "3.150.8" + }, + { + "name": "sda", + "displayName": "SD Access", + "version": "2.732.65310" + }, + { + "name": "app-visibility-and-policy", + "displayName": "Application Visibility and Policy", + "version": "2.732.117621" + }, + { + "name": "endpoint-analytics", + "displayName": "AI Endpoint Analytics", + "version": "1.11.1702" + }, + { + "name": "rogue-management", + "displayName": "Rogue and aWIPS", + "version": "3.1.202" + }, + { + "name": "app-hosting", + "displayName": "Application Hosting", + "version": "2.3.325072204" + }, + { + "name": "sea-app-package", + "displayName": "SEA App", + "version": "2.732.685009" + }, + { + "name": "icap", + "displayName": "Automation - Intelligent Capture", + "version": "2.732.65310" + }, + { + "name": "wide-area-bonjour", + "displayName": "Wide Area Bonjour", + "version": "2.732.755006" + }, + { + "name": "sensor-automation", + "displayName": "Automation - Sensor", + "version": "2.732.65310" + }, + { + "name": "sensor-assurance", + "displayName": "Assurance - Sensor", + "version": "3.150.140" + }, + { + "name": "disaster-recovery", + "displayName": "Disaster Recovery", + "version": "2.732.365017" + }, + { + "name": "support-services", + "displayName": "Support Services", + "version": "2.732.885097" + }, + { + "name": "multi-dnac-enablement", + "displayName": "Multiple Cisco Catalyst Center", + "version": "2.732.65310" + } + ], + "numberOfNodes": 1, + "productType": "DNAC", + "productVersion": "3.732.75085", + "internetProtocolVersion": "ipv4", + "fipsEnabled": false, + "compatibilityError": [ + + ] + } + ], + "page": { + "limit": 50, + "offset": 1, + "count": 1, + "sortBy": [ + { + "name": "startDate", + "order": "desc" + } + ] + }, + "filter": { + + } + }, + "playbook_delete_backup_schedule": [ + { + "backup": [ + { + "name": "BACKUP24_07", + "scope": "CISCO_DNA_DATA_WITHOUT_ASSURANCE" + } + ] + } + ], + "get_all_n_f_s_configurations26": { + "version": "2.0", + "response": [ + { + "id": "2c3b1518-12c9-4ae1-abe6-3ee858fac515", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB21" + }, + "status": { + "destinationPath": "/data/external/nfs-17a13b08-f346-5694-9ad6-6a32620cec1f", + "state": "UnHealthy", + "subResourceState": "172.23.9.219=NFS mount point could not be mounted on the node", + "unhealthyNodes": [ + "172.23.9.219" + ] + } + }, + { + "id": "4f09c3d7-ba64-49e1-ac2d-a27e80ac63df", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB18" + }, + "status": { + "destinationPath": "/data/external/nfs-013c9812-7359-57bb-88b0-53d01e4122ff", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + }, + { + "id": "7c2f408d-c218-431e-aab6-95a65970737f", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB17" + }, + "status": { + "destinationPath": "/data/external/nfs-e9f731e5-ea3c-512d-acef-c169bca98d04", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + } + ] + }, + 
"get_all_backup14": { + "version": "2.0", + "response": [ + { + "name": "BACKUP24_07", + "scope": "CISCO_DNA_DATA_WITHOUT_ASSURANCE", + "versions": [ + "mks-upgrade:3.1.166", + "core-platform:0.10.205", + "managed-services-shared:0.10.19", + "gateway:0.10.12", + "core-addons:0.10.47", + "iam:5.4.47", + "harbor-ui:3.5.144", + "idm-ui-plugin:5.4.29", + "data-ingestion-infra:6.6.15", + "data-contextstore:6.6.10", + "data-analytics-infra:6.6.127", + "data-platform-pipeline-infra:6.6.126", + "data-store-management:6.6.45", + "data-platform-ui:6.6.101", + "cloud-connectivity-datahub:6.10.26", + "cloud-connectivity:6.10.13", + "dnacaap:6.9.43", + "dnac-search:6.9.3", + "system-management-operations:1.6.50", + "telemetry:4.7.20", + "cloud-connectivity-sense:7.0.7", + "digestor:7.0.5", + "rca-scripts:0.5.6", + "platform-refresh:1.3.69", + "catalyst-center-api-catalog:6.8.96", + "b2b-upgrade:2.732.65310", + "system-commons:2.732.65310", + "ise-bridge:3.150.1", + "ncp:2.732.65310", + "assurance-base-analytics:3.150.100202", + "appsvc-remediation:3.15.5", + "assurance:3.150.141", + "ai-network-analytics:4.0.32", + "ssa:2.732.1095118", + "aca:2.732.65310", + "group-based-policy-analytics:3.150.8", + "sda:2.732.65310", + "app-visibility-and-policy:2.732.117621", + "endpoint-analytics:1.11.1702", + "rogue-management:3.1.202", + "app-hosting:2.3.325072204", + "sea-app-package:2.732.685009", + "icap:2.732.65310", + "wide-area-bonjour:2.732.755006", + "sensor-automation:2.732.65310", + "sensor-assurance:3.150.140", + "disaster-recovery:2.732.365017", + "support-services:2.732.885097", + "multi-dnac-enablement:2.732.65310" + ], + "id": "6ac1a2f0-20a7-42cc-aae8-1120cdb73128", + "size": 41873023765, + "createdDate": "2025-08-05T09:45:48Z", + "isBackupAvailable": true, + "isCompatible": true, + "context": { + "type": "ON_DEMAND", + "schedule": "" + }, + "storage": { + "type": "NFS", + "mountPath": "/data/external/nfs-e9f731e5-ea3c-512d-acef-c169bca98d04", + "id": "1fa840d8-ee9f-4e0d-a560-694c5e939680", + "name": "", + "host": "", + "serverPath": "" + }, + "status": "SUCCESS", + "endDate": "2025-08-05T10:01:25Z", + "createdBy": "Anonymous", + "duration": 937, + "releaseName": "uber-dnac", + "releaseDisplayName": "Cisco Catalyst Center", + "releaseVersion": "", + "releaseDisplayVersion": "3.1.5-75085", + "installedPackages": [ + { + "name": "mks-upgrade", + "displayName": "System", + "version": "3.1.166" + }, + { + "name": "core-platform", + "displayName": "Core Platform", + "version": "0.10.205" + }, + { + "name": "managed-services-shared", + "displayName": "Shared Managed Services", + "version": "0.10.19" + }, + { + "name": "gateway", + "displayName": "Catalyst Center Gateway Service", + "version": "0.10.12" + }, + { + "name": "core-addons", + "displayName": "System Addons", + "version": "0.10.47" + }, + { + "name": "iam", + "displayName": "Identity and Access Management", + "version": "5.4.47" + }, + { + "name": "harbor-ui", + "displayName": "Cisco Catalyst Center UI", + "version": "3.5.144" + }, + { + "name": "idm-ui-plugin", + "displayName": "Identity and Access Management - UI", + "version": "5.4.29" + }, + { + "name": "data-ingestion-infra", + "displayName": "Network Data Platform - Ingestion Infra", + "version": "6.6.15" + }, + { + "name": "data-contextstore", + "displayName": "Network Data Platform - Caching Infra", + "version": "6.6.10" + }, + { + "name": "data-analytics-infra", + "displayName": "Network Data Platform - Core", + "version": "6.6.127" + }, + { + "name": "data-platform-pipeline-infra", + 
"displayName": "Network Data Platform - Pipeline Infra", + "version": "6.6.126" + }, + { + "name": "data-store-management", + "displayName": "Network Data Platform - Storage Management", + "version": "6.6.45" + }, + { + "name": "data-platform-ui", + "displayName": "Network Data Platform - Manager", + "version": "6.6.101" + }, + { + "name": "cloud-connectivity-datahub", + "displayName": "DxHub Cloud Connectivity", + "version": "6.10.26" + }, + { + "name": "cloud-connectivity", + "displayName": "Cloud Connectivity", + "version": "6.10.13" + }, + { + "name": "dnacaap", + "displayName": "Cisco Catalyst Center Platform", + "version": "6.9.43" + }, + { + "name": "dnac-search", + "displayName": "Cisco Catalyst Center Global Search", + "version": "6.9.3" + }, + { + "name": "system-management-operations", + "displayName": "System Management Operations", + "version": "1.6.50" + }, + { + "name": "telemetry", + "displayName": "Telemetry", + "version": "4.7.20" + }, + { + "name": "cloud-connectivity-sense", + "displayName": "Cloud Connectivity - Contextual Content", + "version": "7.0.7" + }, + { + "name": "digestor", + "displayName": "Cloud Connectivity - Digestor", + "version": "7.0.5" + }, + { + "name": "rca-scripts", + "displayName": "RCA-Scripts Package", + "version": "0.5.6" + }, + { + "name": "platform-refresh", + "displayName": "Platform Refresh", + "version": "1.3.69" + }, + { + "name": "catalyst-center-api-catalog", + "displayName": "Catalyst Center API Catalog", + "version": "6.8.96" + }, + { + "name": "b2b-upgrade", + "displayName": "", + "version": "2.732.65310" + }, + { + "name": "system-commons", + "displayName": "System Commons", + "version": "2.732.65310" + }, + { + "name": "ise-bridge", + "displayName": "Cisco Identity Services Engine Bridge", + "version": "3.150.1" + }, + { + "name": "ncp", + "displayName": "Network Controller Platform", + "version": "2.732.65310" + }, + { + "name": "assurance-base-analytics", + "displayName": "Network Data Platform - Base Analytics", + "version": "3.150.100202" + }, + { + "name": "appsvc-remediation", + "displayName": "Application and Service Remediation", + "version": "3.15.5" + }, + { + "name": "assurance", + "displayName": "Assurance", + "version": "3.150.141" + }, + { + "name": "ai-network-analytics", + "displayName": "AI Network Analytics", + "version": "4.0.32" + }, + { + "name": "ssa", + "displayName": "Stealthwatch Security Analytics", + "version": "2.732.1095118" + }, + { + "name": "aca", + "displayName": "Access Control Application", + "version": "2.732.65310" + }, + { + "name": "group-based-policy-analytics", + "displayName": "Group Based Policy Analytics", + "version": "3.150.8" + }, + { + "name": "sda", + "displayName": "SD Access", + "version": "2.732.65310" + }, + { + "name": "app-visibility-and-policy", + "displayName": "Application Visibility and Policy", + "version": "2.732.117621" + }, + { + "name": "endpoint-analytics", + "displayName": "AI Endpoint Analytics", + "version": "1.11.1702" + }, + { + "name": "rogue-management", + "displayName": "Rogue and aWIPS", + "version": "3.1.202" + }, + { + "name": "app-hosting", + "displayName": "Application Hosting", + "version": "2.3.325072204" + }, + { + "name": "sea-app-package", + "displayName": "SEA App", + "version": "2.732.685009" + }, + { + "name": "icap", + "displayName": "Automation - Intelligent Capture", + "version": "2.732.65310" + }, + { + "name": "wide-area-bonjour", + "displayName": "Wide Area Bonjour", + "version": "2.732.755006" + }, + { + "name": "sensor-automation", + 
"displayName": "Automation - Sensor", + "version": "2.732.65310" + }, + { + "name": "sensor-assurance", + "displayName": "Assurance - Sensor", + "version": "3.150.140" + }, + { + "name": "disaster-recovery", + "displayName": "Disaster Recovery", + "version": "2.732.365017" + }, + { + "name": "support-services", + "displayName": "Support Services", + "version": "2.732.885097" + }, + { + "name": "multi-dnac-enablement", + "displayName": "Multiple Cisco Catalyst Center", + "version": "2.732.65310" + } + ], + "numberOfNodes": 1, + "productType": "DNAC", + "productVersion": "3.732.75085", + "internetProtocolVersion": "ipv4", + "fipsEnabled": false, + "compatibilityError": [ + + ] + } + ], + "page": { + "limit": 50, + "offset": 1, + "count": 1, + "sortBy": [ + { + "name": "startDate", + "order": "desc" + } + ] + }, + "filter": { + + } + }, + "delete_backup": { + "version": "2.0", + "response": { + "taskId": "4c83b96b-a852-4e11-8e0a-0716cb9a036c", + "url": "/dna/intent/api/v1/backupRestoreExecutions/4c83b96b-a852-4e11-8e0a-0716cb9a036c" + } + }, + "get_backup_and_restore_execution": { + "version": "2.0", + "response": { + "backupId": "6ac1a2f0-20a7-42cc-aae8-1120cdb73128", + "id": "4c83b96b-a852-4e11-8e0a-0716cb9a036c", + "jobType": "DELETE_BACKUP", + "status": "IN_PROGRESS", + "endDate": "1970-01-01T00:00:00.000Z", + "startDate": "2025-08-23T02:28:10.856Z", + "createdBy": "Anonymous", + "errorMessage": "", + "errorCode": "", + "scope": "CISCO_DNA_DATA_WITHOUT_ASSURANCE", + "isForceUpdate": false, + "updateMessage": "", + "failedTaskDetail": { + + }, + "systemErrorMessage": "", + "completedPercentage": 0, + "_metadata": null, + "tasks": [ + { + "id": "634b2292-11ac-4fa4-98b2-c71ea674e6b9", + "taskName": "Trigger", + "status": "SUCCESS", + "endDate": "2025-08-23T02:28:10.859Z", + "startDate": "2025-08-23T02:28:10.859Z", + "message": "", + "failedTaskDetail": { + + } + }, + { + "id": "6e6892a8-49ce-4c05-91f7-5aeea029e6c9", + "taskName": "initialize Values", + "status": "SUCCESS", + "endDate": "2025-08-23T02:28:11.075Z", + "startDate": "2025-08-23T02:28:11.061Z", + "message": "", + "failedTaskDetail": { + + } + }, + { + "id": "d0cd3142-774d-495f-ae16-a8b1860668ec", + "taskName": "Validate Disk Reachability", + "status": "SUCCESS", + "endDate": "2025-08-23T02:28:11.135Z", + "startDate": "2025-08-23T02:28:11.075Z", + "message": "", + "failedTaskDetail": { + + } + }, + { + "id": "6e02ebb2-fc6e-4917-9448-1850f29c0683", + "taskName": "Get Backup", + "status": "SUCCESS", + "endDate": "2025-08-23T02:28:11.144Z", + "startDate": "2025-08-23T02:28:11.135Z", + "message": "", + "failedTaskDetail": { + + } + }, + { + "id": "7d68bae8-b9cc-4e8f-8505-662460dc4218", + "taskName": "Validate Backup", + "status": "SUCCESS", + "endDate": "2025-08-23T02:28:11.244Z", + "startDate": "2025-08-23T02:28:11.144Z", + "message": "", + "failedTaskDetail": { + + } + }, + { + "id": "3d3ac83b-1c38-4496-b3c3-2617bd0b1e20", + "taskName": "Prepare Delete", + "status": "INPROGRESS", + "endDate": "", + "startDate": "2025-08-23T02:28:11.244Z", + "message": "", + "failedTaskDetail": { + + } + } + ] + } + }, + "get_backup_and_restore_execution1": { + "version": "2.0", + "response": { + "backupId": "6ac1a2f0-20a7-42cc-aae8-1120cdb73128", + "id": "4c83b96b-a852-4e11-8e0a-0716cb9a036c", + "jobType": "DELETE_BACKUP", + "status": "SUCCESS", + "endDate": "2025-08-23T02:28:42.033Z", + "startDate": "2025-08-23T02:28:10.856Z", + "createdBy": "Anonymous", + "errorMessage": "", + "errorCode": "", + "scope": "CISCO_DNA_DATA_WITHOUT_ASSURANCE", + 
"isForceUpdate": false, + "updateMessage": "", + "failedTaskDetail": { + + }, + "systemErrorMessage": "", + "completedPercentage": 0, + "_metadata": null, + "tasks": [ + { + "id": "c1c12e9b-e1b9-4022-a738-8ecec84f73d2", + "taskName": "Trigger", + "status": "SUCCESS", + "endDate": "2025-08-23T02:28:10.859Z", + "startDate": "2025-08-23T02:28:10.859Z", + "message": "", + "failedTaskDetail": { + + } + }, + { + "id": "0c57c245-343c-43d0-a9f0-a9c2ad0ccd94", + "taskName": "initialize Values", + "status": "SUCCESS", + "endDate": "2025-08-23T02:28:11.075Z", + "startDate": "2025-08-23T02:28:11.061Z", + "message": "", + "failedTaskDetail": { + + } + }, + { + "id": "f90bdae7-5a5d-42e0-ba6e-1beded885b97", + "taskName": "Validate Disk Reachability", + "status": "SUCCESS", + "endDate": "2025-08-23T02:28:11.135Z", + "startDate": "2025-08-23T02:28:11.075Z", + "message": "", + "failedTaskDetail": { + + } + }, + { + "id": "868d403f-9db8-4d96-939d-77337f2f21ba", + "taskName": "Get Backup", + "status": "SUCCESS", + "endDate": "2025-08-23T02:28:11.144Z", + "startDate": "2025-08-23T02:28:11.135Z", + "message": "", + "failedTaskDetail": { + + } + }, + { + "id": "edc21a0a-bf59-443a-93fa-9b2a05152602", + "taskName": "Validate Backup", + "status": "SUCCESS", + "endDate": "2025-08-23T02:28:11.244Z", + "startDate": "2025-08-23T02:28:11.144Z", + "message": "", + "failedTaskDetail": { + + } + }, + { + "id": "0c240a90-0f47-4a77-9216-0dda9d34aefa", + "taskName": "Prepare Delete", + "status": "SUCCESS", + "endDate": "2025-08-23T02:28:11.333Z", + "startDate": "2025-08-23T02:28:11.244Z", + "message": "", + "failedTaskDetail": { + + } + }, + { + "id": "0a3ed9fb-3237-4447-9fb4-b0caab2b0c8d", + "taskName": "Delete MKS / System Manager / Managed Service Backup", + "status": "SUCCESS", + "endDate": "2025-08-23T02:28:41.985Z", + "startDate": "2025-08-23T02:28:11.333Z", + "message": "", + "failedTaskDetail": { + + } + }, + { + "id": "cf2a772d-1908-4f6c-8b40-36733db275f0", + "taskName": "Delete Backup Metadata", + "status": "SUCCESS", + "endDate": "2025-08-23T02:28:42.021Z", + "startDate": "2025-08-23T02:28:42.008Z", + "message": "", + "failedTaskDetail": { + + } + } + ] + } + }, + "get_all_n_f_s_configurations27": { + "version": "2.0", + "response": [ + { + "id": "2c3b1518-12c9-4ae1-abe6-3ee858fac515", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB21" + }, + "status": { + "destinationPath": "/data/external/nfs-17a13b08-f346-5694-9ad6-6a32620cec1f", + "state": "UnHealthy", + "subResourceState": "172.23.9.219=NFS mount point could not be mounted on the node", + "unhealthyNodes": [ + "172.23.9.219" + ] + } + }, + { + "id": "4f09c3d7-ba64-49e1-ac2d-a27e80ac63df", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB18" + }, + "status": { + "destinationPath": "/data/external/nfs-013c9812-7359-57bb-88b0-53d01e4122ff", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + }, + { + "id": "7c2f408d-c218-431e-aab6-95a65970737f", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB17" + }, + "status": { + "destinationPath": "/data/external/nfs-e9f731e5-ea3c-512d-acef-c169bca98d04", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } 
+ } + ] + }, + "get_all_backup15": { + "version": "2.0", + "response": [ + + ], + "page": { + "limit": 50, + "offset": 1, + "count": 0, + "sortBy": [ + { + "name": "startDate", + "order": "desc" + } + ] + }, + "filter": { + + } + }, + "playbook_backup_schedule_alreadyexists": [ + { + "backup": [ + { + "name": "BACKUP24_07", + "scope": "CISCO_DNA_DATA_WITHOUT_ASSURANCE" + } + ] + } + ], + "get_all_n_f_s_configurations28": { + "version": "2.0", + "response": [ + { + "id": "2c3b1518-12c9-4ae1-abe6-3ee858fac515", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB21" + }, + "status": { + "destinationPath": "/data/external/nfs-17a13b08-f346-5694-9ad6-6a32620cec1f", + "state": "UnHealthy", + "subResourceState": "172.23.9.219=NFS mount point could not be mounted on the node", + "unhealthyNodes": [ + "172.23.9.219" + ] + } + }, + { + "id": "4f09c3d7-ba64-49e1-ac2d-a27e80ac63df", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB18" + }, + "status": { + "destinationPath": "/data/external/nfs-013c9812-7359-57bb-88b0-53d01e4122ff", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + }, + { + "id": "7c2f408d-c218-431e-aab6-95a65970737f", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB17" + }, + "status": { + "destinationPath": "/data/external/nfs-e9f731e5-ea3c-512d-acef-c169bca98d04", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + } + ] + }, + "get_all_backup16": { + "version": "2.0", + "response": [ + { + "name": "BACKUP24_07", + "scope": "CISCO_DNA_DATA_WITHOUT_ASSURANCE", + "versions": [ + "mks-upgrade:3.1.171", + "core-platform:0.10.210", + "managed-services-shared:0.10.19", + "gateway:0.10.8-1", + "core-addons:0.10.48", + "iam:5.4.47", + "harbor-ui:3.5.144", + "idm-ui-plugin:5.4.29", + "data-ingestion-infra:6.6.15", + "data-contextstore:6.6.11", + "data-analytics-infra:6.6.128", + "data-platform-pipeline-infra:6.6.131", + "data-store-management:6.6.45", + "data-platform-ui:6.6.101", + "cloud-connectivity-datahub:6.10.29", + "cloud-connectivity:6.10.14", + "dnacaap:6.9.46", + "dnac-search:6.9.3", + "system-management-operations:1.6.51", + "telemetry:4.7.20", + "cloud-connectivity-sense:7.0.7", + "digestor:7.0.5", + "rca-scripts:0.5.6", + "platform-refresh:1.3.88", + "catalyst-center-api-catalog:6.8.98", + "b2b-upgrade:2.732.65395", + "system-commons:2.732.65395", + "ise-bridge:3.150.2", + "ncp:2.732.65395", + "assurance-base-analytics:3.150.100240", + "appsvc-remediation:3.15.5", + "assurance:3.150.160", + "ai-network-analytics:4.0.33", + "ssa:2.732.1095138", + "aca:2.732.65395", + "group-based-policy-analytics:3.150.10", + "sda:2.732.65395", + "app-visibility-and-policy:2.732.117639", + "endpoint-analytics:1.11.1708", + "rogue-management:3.1.203", + "app-hosting:2.3.325081309", + "app-orchestrator:2.732.685041", + "icap:2.732.65395", + "wide-area-bonjour:2.732.755006", + "sensor-automation:2.732.65395", + "sensor-assurance:3.150.151", + "disaster-recovery:2.732.365024", + "support-services:2.732.885097", + "umbrella-app:2.732.595003", + "multi-dnac-enablement:2.732.65395" + ], + "id": "73f0f48a-76dc-4bc1-98d2-5e9df17c0ba9", + "size": 41908510658, + "createdDate": 
"2025-08-23T02:42:20Z", + "isBackupAvailable": true, + "isCompatible": true, + "context": { + "type": "ON_DEMAND", + "schedule": "" + }, + "storage": { + "type": "NFS", + "mountPath": "/data/external/nfs-e9f731e5-ea3c-512d-acef-c169bca98d04", + "id": "1fa840d8-ee9f-4e0d-a560-694c5e939680", + "name": "", + "host": "", + "serverPath": "" + }, + "status": "SUCCESS", + "endDate": "2025-08-23T02:57:56Z", + "createdBy": "Anonymous", + "duration": 936, + "releaseName": "uber-dnac", + "releaseDisplayName": "Cisco Catalyst Center", + "releaseVersion": "", + "releaseDisplayVersion": "3.1.5-75130", + "installedPackages": [ + { + "name": "mks-upgrade", + "displayName": "System", + "version": "3.1.171" + }, + { + "name": "core-platform", + "displayName": "Core Platform", + "version": "0.10.210" + }, + { + "name": "managed-services-shared", + "displayName": "Shared Managed Services", + "version": "0.10.19" + }, + { + "name": "gateway", + "displayName": "Catalyst Center Gateway Service", + "version": "0.10.8-1" + }, + { + "name": "core-addons", + "displayName": "System Addons", + "version": "0.10.48" + }, + { + "name": "iam", + "displayName": "Identity and Access Management", + "version": "5.4.47" + }, + { + "name": "harbor-ui", + "displayName": "Cisco Catalyst Center UI", + "version": "3.5.144" + }, + { + "name": "idm-ui-plugin", + "displayName": "Identity and Access Management - UI", + "version": "5.4.29" + }, + { + "name": "data-ingestion-infra", + "displayName": "Network Data Platform - Ingestion Infra", + "version": "6.6.15" + }, + { + "name": "data-contextstore", + "displayName": "Network Data Platform - Caching Infra", + "version": "6.6.11" + }, + { + "name": "data-analytics-infra", + "displayName": "Network Data Platform - Core", + "version": "6.6.128" + }, + { + "name": "data-platform-pipeline-infra", + "displayName": "Network Data Platform - Pipeline Infra", + "version": "6.6.131" + }, + { + "name": "data-store-management", + "displayName": "Network Data Platform - Storage Management", + "version": "6.6.45" + }, + { + "name": "data-platform-ui", + "displayName": "Network Data Platform - Manager", + "version": "6.6.101" + }, + { + "name": "cloud-connectivity-datahub", + "displayName": "DxHub Cloud Connectivity", + "version": "6.10.29" + }, + { + "name": "cloud-connectivity", + "displayName": "Cloud Connectivity", + "version": "6.10.14" + }, + { + "name": "dnacaap", + "displayName": "Cisco Catalyst Center Platform", + "version": "6.9.46" + }, + { + "name": "dnac-search", + "displayName": "Cisco Catalyst Center Global Search", + "version": "6.9.3" + }, + { + "name": "system-management-operations", + "displayName": "System Management Operations", + "version": "1.6.51" + }, + { + "name": "telemetry", + "displayName": "Telemetry", + "version": "4.7.20" + }, + { + "name": "cloud-connectivity-sense", + "displayName": "Cloud Connectivity - Contextual Content", + "version": "7.0.7" + }, + { + "name": "digestor", + "displayName": "Cloud Connectivity - Digestor", + "version": "7.0.5" + }, + { + "name": "rca-scripts", + "displayName": "RCA-Scripts Package", + "version": "0.5.6" + }, + { + "name": "platform-refresh", + "displayName": "Platform Upgrade", + "version": "1.3.88" + }, + { + "name": "catalyst-center-api-catalog", + "displayName": "Catalyst Center API Catalog", + "version": "6.8.98" + }, + { + "name": "b2b-upgrade", + "displayName": "", + "version": "2.732.65395" + }, + { + "name": "system-commons", + "displayName": "System Commons", + "version": "2.732.65395" + }, + { + "name": "ise-bridge", + 
"displayName": "Cisco Identity Services Engine Bridge", + "version": "3.150.2" + }, + { + "name": "ncp", + "displayName": "Network Controller Platform", + "version": "2.732.65395" + }, + { + "name": "assurance-base-analytics", + "displayName": "Network Data Platform - Base Analytics", + "version": "3.150.100240" + }, + { + "name": "appsvc-remediation", + "displayName": "Application and Service Remediation", + "version": "3.15.5" + }, + { + "name": "assurance", + "displayName": "Assurance", + "version": "3.150.160" + }, + { + "name": "ai-network-analytics", + "displayName": "AI Network Analytics", + "version": "4.0.33" + }, + { + "name": "ssa", + "displayName": "Stealthwatch Security Analytics", + "version": "2.732.1095138" + }, + { + "name": "aca", + "displayName": "Access Control Application", + "version": "2.732.65395" + }, + { + "name": "group-based-policy-analytics", + "displayName": "Group Based Policy Analytics", + "version": "3.150.10" + }, + { + "name": "sda", + "displayName": "SD Access", + "version": "2.732.65395" + }, + { + "name": "app-visibility-and-policy", + "displayName": "Application Visibility and Policy", + "version": "2.732.117639" + }, + { + "name": "endpoint-analytics", + "displayName": "AI Endpoint Analytics", + "version": "1.11.1708" + }, + { + "name": "rogue-management", + "displayName": "Rogue and aWIPS", + "version": "3.1.203" + }, + { + "name": "app-hosting", + "displayName": "Application Hosting", + "version": "2.3.325081309" + }, + { + "name": "app-orchestrator", + "displayName": "Industrial App Orchestrator", + "version": "2.732.685041" + }, + { + "name": "icap", + "displayName": "Automation - Intelligent Capture", + "version": "2.732.65395" + }, + { + "name": "wide-area-bonjour", + "displayName": "Wide Area Bonjour", + "version": "2.732.755006" + }, + { + "name": "sensor-automation", + "displayName": "Automation - Sensor", + "version": "2.732.65395" + }, + { + "name": "sensor-assurance", + "displayName": "Assurance - Sensor", + "version": "3.150.151" + }, + { + "name": "disaster-recovery", + "displayName": "Disaster Recovery", + "version": "2.732.365024" + }, + { + "name": "support-services", + "displayName": "Support Services", + "version": "2.732.885097" + }, + { + "name": "umbrella-app", + "displayName": "Umbrella App", + "version": "2.732.595003" + }, + { + "name": "multi-dnac-enablement", + "displayName": "Multiple Cisco Catalyst Center", + "version": "2.732.65395" + } + ], + "numberOfNodes": 1, + "productType": "DNAC", + "productVersion": "3.732.75130", + "internetProtocolVersion": "ipv4", + "fipsEnabled": false, + "compatibilityError": [ + + ] + } + ], + "page": { + "limit": 50, + "offset": 1, + "count": 1, + "sortBy": [ + { + "name": "startDate", + "order": "desc" + } + ] + }, + "filter": { + + } + }, + "get_all_n_f_s_configurations29": { + "version": "2.0", + "response": [ + { + "id": "2c3b1518-12c9-4ae1-abe6-3ee858fac515", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB21" + }, + "status": { + "destinationPath": "/data/external/nfs-17a13b08-f346-5694-9ad6-6a32620cec1f", + "state": "UnHealthy", + "subResourceState": "172.23.9.219=NFS mount point could not be mounted on the node", + "unhealthyNodes": [ + "172.23.9.219" + ] + } + }, + { + "id": "4f09c3d7-ba64-49e1-ac2d-a27e80ac63df", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": 
"/home/nfsshare/backups/TB18" + }, + "status": { + "destinationPath": "/data/external/nfs-013c9812-7359-57bb-88b0-53d01e4122ff", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + }, + { + "id": "7c2f408d-c218-431e-aab6-95a65970737f", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB17" + }, + "status": { + "destinationPath": "/data/external/nfs-e9f731e5-ea3c-512d-acef-c169bca98d04", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + } + ] + }, + "get_all_backup17": { + "version": "2.0", + "response": [ + { + "name": "BACKUP24_07", + "scope": "CISCO_DNA_DATA_WITHOUT_ASSURANCE", + "versions": [ + "mks-upgrade:3.1.171", + "core-platform:0.10.210", + "managed-services-shared:0.10.19", + "gateway:0.10.8-1", + "core-addons:0.10.48", + "iam:5.4.47", + "harbor-ui:3.5.144", + "idm-ui-plugin:5.4.29", + "data-ingestion-infra:6.6.15", + "data-contextstore:6.6.11", + "data-analytics-infra:6.6.128", + "data-platform-pipeline-infra:6.6.131", + "data-store-management:6.6.45", + "data-platform-ui:6.6.101", + "cloud-connectivity-datahub:6.10.29", + "cloud-connectivity:6.10.14", + "dnacaap:6.9.46", + "dnac-search:6.9.3", + "system-management-operations:1.6.51", + "telemetry:4.7.20", + "cloud-connectivity-sense:7.0.7", + "digestor:7.0.5", + "rca-scripts:0.5.6", + "platform-refresh:1.3.88", + "catalyst-center-api-catalog:6.8.98", + "b2b-upgrade:2.732.65395", + "system-commons:2.732.65395", + "ise-bridge:3.150.2", + "ncp:2.732.65395", + "assurance-base-analytics:3.150.100240", + "appsvc-remediation:3.15.5", + "assurance:3.150.160", + "ai-network-analytics:4.0.33", + "ssa:2.732.1095138", + "aca:2.732.65395", + "group-based-policy-analytics:3.150.10", + "sda:2.732.65395", + "app-visibility-and-policy:2.732.117639", + "endpoint-analytics:1.11.1708", + "rogue-management:3.1.203", + "app-hosting:2.3.325081309", + "app-orchestrator:2.732.685041", + "icap:2.732.65395", + "wide-area-bonjour:2.732.755006", + "sensor-automation:2.732.65395", + "sensor-assurance:3.150.151", + "disaster-recovery:2.732.365024", + "support-services:2.732.885097", + "umbrella-app:2.732.595003", + "multi-dnac-enablement:2.732.65395" + ], + "id": "73f0f48a-76dc-4bc1-98d2-5e9df17c0ba9", + "size": 41908510658, + "createdDate": "2025-08-23T02:42:20Z", + "isBackupAvailable": true, + "isCompatible": true, + "context": { + "type": "ON_DEMAND", + "schedule": "" + }, + "storage": { + "type": "NFS", + "mountPath": "/data/external/nfs-e9f731e5-ea3c-512d-acef-c169bca98d04", + "id": "1fa840d8-ee9f-4e0d-a560-694c5e939680", + "name": "", + "host": "", + "serverPath": "" + }, + "status": "SUCCESS", + "endDate": "2025-08-23T02:57:56Z", + "createdBy": "Anonymous", + "duration": 936, + "releaseName": "uber-dnac", + "releaseDisplayName": "Cisco Catalyst Center", + "releaseVersion": "", + "releaseDisplayVersion": "3.1.5-75130", + "installedPackages": [ + { + "name": "mks-upgrade", + "displayName": "System", + "version": "3.1.171" + }, + { + "name": "core-platform", + "displayName": "Core Platform", + "version": "0.10.210" + }, + { + "name": "managed-services-shared", + "displayName": "Shared Managed Services", + "version": "0.10.19" + }, + { + "name": "gateway", + "displayName": "Catalyst Center Gateway Service", + "version": "0.10.8-1" + }, + { + "name": "core-addons", + "displayName": "System Addons", + "version": "0.10.48" + }, + { + "name": "iam", + "displayName": "Identity and 
Access Management", + "version": "5.4.47" + }, + { + "name": "harbor-ui", + "displayName": "Cisco Catalyst Center UI", + "version": "3.5.144" + }, + { + "name": "idm-ui-plugin", + "displayName": "Identity and Access Management - UI", + "version": "5.4.29" + }, + { + "name": "data-ingestion-infra", + "displayName": "Network Data Platform - Ingestion Infra", + "version": "6.6.15" + }, + { + "name": "data-contextstore", + "displayName": "Network Data Platform - Caching Infra", + "version": "6.6.11" + }, + { + "name": "data-analytics-infra", + "displayName": "Network Data Platform - Core", + "version": "6.6.128" + }, + { + "name": "data-platform-pipeline-infra", + "displayName": "Network Data Platform - Pipeline Infra", + "version": "6.6.131" + }, + { + "name": "data-store-management", + "displayName": "Network Data Platform - Storage Management", + "version": "6.6.45" + }, + { + "name": "data-platform-ui", + "displayName": "Network Data Platform - Manager", + "version": "6.6.101" + }, + { + "name": "cloud-connectivity-datahub", + "displayName": "DxHub Cloud Connectivity", + "version": "6.10.29" + }, + { + "name": "cloud-connectivity", + "displayName": "Cloud Connectivity", + "version": "6.10.14" + }, + { + "name": "dnacaap", + "displayName": "Cisco Catalyst Center Platform", + "version": "6.9.46" + }, + { + "name": "dnac-search", + "displayName": "Cisco Catalyst Center Global Search", + "version": "6.9.3" + }, + { + "name": "system-management-operations", + "displayName": "System Management Operations", + "version": "1.6.51" + }, + { + "name": "telemetry", + "displayName": "Telemetry", + "version": "4.7.20" + }, + { + "name": "cloud-connectivity-sense", + "displayName": "Cloud Connectivity - Contextual Content", + "version": "7.0.7" + }, + { + "name": "digestor", + "displayName": "Cloud Connectivity - Digestor", + "version": "7.0.5" + }, + { + "name": "rca-scripts", + "displayName": "RCA-Scripts Package", + "version": "0.5.6" + }, + { + "name": "platform-refresh", + "displayName": "Platform Upgrade", + "version": "1.3.88" + }, + { + "name": "catalyst-center-api-catalog", + "displayName": "Catalyst Center API Catalog", + "version": "6.8.98" + }, + { + "name": "b2b-upgrade", + "displayName": "", + "version": "2.732.65395" + }, + { + "name": "system-commons", + "displayName": "System Commons", + "version": "2.732.65395" + }, + { + "name": "ise-bridge", + "displayName": "Cisco Identity Services Engine Bridge", + "version": "3.150.2" + }, + { + "name": "ncp", + "displayName": "Network Controller Platform", + "version": "2.732.65395" + }, + { + "name": "assurance-base-analytics", + "displayName": "Network Data Platform - Base Analytics", + "version": "3.150.100240" + }, + { + "name": "appsvc-remediation", + "displayName": "Application and Service Remediation", + "version": "3.15.5" + }, + { + "name": "assurance", + "displayName": "Assurance", + "version": "3.150.160" + }, + { + "name": "ai-network-analytics", + "displayName": "AI Network Analytics", + "version": "4.0.33" + }, + { + "name": "ssa", + "displayName": "Stealthwatch Security Analytics", + "version": "2.732.1095138" + }, + { + "name": "aca", + "displayName": "Access Control Application", + "version": "2.732.65395" + }, + { + "name": "group-based-policy-analytics", + "displayName": "Group Based Policy Analytics", + "version": "3.150.10" + }, + { + "name": "sda", + "displayName": "SD Access", + "version": "2.732.65395" + }, + { + "name": "app-visibility-and-policy", + "displayName": "Application Visibility and Policy", + "version": 
"2.732.117639" + }, + { + "name": "endpoint-analytics", + "displayName": "AI Endpoint Analytics", + "version": "1.11.1708" + }, + { + "name": "rogue-management", + "displayName": "Rogue and aWIPS", + "version": "3.1.203" + }, + { + "name": "app-hosting", + "displayName": "Application Hosting", + "version": "2.3.325081309" + }, + { + "name": "app-orchestrator", + "displayName": "Industrial App Orchestrator", + "version": "2.732.685041" + }, + { + "name": "icap", + "displayName": "Automation - Intelligent Capture", + "version": "2.732.65395" + }, + { + "name": "wide-area-bonjour", + "displayName": "Wide Area Bonjour", + "version": "2.732.755006" + }, + { + "name": "sensor-automation", + "displayName": "Automation - Sensor", + "version": "2.732.65395" + }, + { + "name": "sensor-assurance", + "displayName": "Assurance - Sensor", + "version": "3.150.151" + }, + { + "name": "disaster-recovery", + "displayName": "Disaster Recovery", + "version": "2.732.365024" + }, + { + "name": "support-services", + "displayName": "Support Services", + "version": "2.732.885097" + }, + { + "name": "umbrella-app", + "displayName": "Umbrella App", + "version": "2.732.595003" + }, + { + "name": "multi-dnac-enablement", + "displayName": "Multiple Cisco Catalyst Center", + "version": "2.732.65395" + } + ], + "numberOfNodes": 1, + "productType": "DNAC", + "productVersion": "3.732.75130", + "internetProtocolVersion": "ipv4", + "fipsEnabled": false, + "compatibilityError": [ + + ] + } + ], + "page": { + "limit": 50, + "offset": 1, + "count": 1, + "sortBy": [ + { + "name": "startDate", + "order": "desc" + } + ] + }, + "filter": { + + } + }, + + "playbook_backup_retention_days": [ + { + "backup": [ + { + "backup_retention_days": 1 + } + ] + } +], +"get_all_n_f_s_configurations33": { + "version": "2.0", + "response": [ + { + "id": "089acacc-72bc-4305-9937-fb1f856206f5", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB25" + }, + "status": { + "destinationPath": "/data/external/nfs-5e800996-e8e8-56cc-b755-a6d76ca5e0b4", + "state": "UnHealthy", + "subResourceState": "172.23.9.219=NFS mount point could not be mounted on the node", + "unhealthyNodes": [ + "172.23.9.219" + ] + } + }, + { + "id": "2e69163c-8db8-4206-8c1c-f83272b07a0d", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB17" + }, + "status": { + "destinationPath": "/data/external/nfs-e9f731e5-ea3c-512d-acef-c169bca98d04", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + }, + { + "id": "2ec88c43-c554-455c-a63a-7b8400fae245", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB18" + }, + "status": { + "destinationPath": "/data/external/nfs-013c9812-7359-57bb-88b0-53d01e4122ff", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + }, + { + "id": "58c02bda-36f8-4fbd-b7f3-91232dbbc4d4", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB23" + }, + "status": { + "destinationPath": "/data/external/nfs-086061ac-e99a-52d1-ac1a-3ca805da35b1", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + }, + { + "id": 
"8cce7e5b-4f76-4c98-ac43-1d25b5dc5a56", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB24" + }, + "status": { + "destinationPath": "/data/external/nfs-2b4a348d-f4b3-5214-926d-5b96b44c4068", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + } + ] +}, +"get_all_backup33": { + "version": "2.0", + "response": [ + { + "name": "BACKUP_Without_Assurance", + "scope": "CISCO_DNA_DATA_WITHOUT_ASSURANCE", + "versions": [ + "mks-upgrade:3.1.277", + "core-platform:0.10.525", + "managed-services-shared:0.10.504", + "gateway:0.10.8-3", + "core-addons:0.10.502", + "iam:5.4.504", + "harbor-ui:3.5.504", + "idm-ui-plugin:5.4.502", + "data-ingestion-infra:6.6.502", + "data-contextstore:6.6.503", + "data-analytics-infra:6.6.502", + "data-platform-pipeline-infra:6.6.504", + "data-store-management:6.6.503", + "data-platform-ui:6.6.101", + "cloud-connectivity-datahub:6.10.502", + "cloud-connectivity:6.10.502", + "dnacaap:6.9.507", + "dnac-search:6.9.4", + "system-management-operations:1.6.507", + "telemetry:4.7.502", + "cloud-connectivity-sense:7.0.9", + "digestor:7.0.5", + "rca-scripts:0.5.7", + "platform-refresh:1.4.28", + "catalyst-center-api-catalog:6.8.504", + "b2b-upgrade:2.734.65213", + "system-commons:2.734.65213", + "ise-bridge:3.160.30", + "ncp:2.734.65213", + "assurance-base-analytics:3.160.10076", + "appsvc-remediation:3.16.16", + "assurance:3.160.139", + "ai-network-analytics:4.0.35", + "ssa:2.734.1095065", + "aca:2.734.65213", + "group-based-policy-analytics:3.160.1", + "sda:2.734.65213", + "app-visibility-and-policy:2.734.117570", + "endpoint-analytics:3.160.19", + "rogue-management:3.1.404", + "app-hosting:2.4.125100107", + "app-orchestrator:2.734.685028", + "icap:2.734.65213", + "wide-area-bonjour:2.734.755004", + "sensor-automation:2.734.65213", + "sensor-assurance:3.160.121", + "disaster-recovery:2.734.365013", + "support-services:2.734.885074", + "multi-dnac-enablement:2.734.65213" + ], + "id": "bce6aa1a-26c7-40bc-b591-6cd15cad831a", + "size": 11043152974, + "createdDate": "2025-10-30T01:24:36Z", + "isBackupAvailable": true, + "isCompatible": true, + "context": { + "type": "ON_DEMAND", + "schedule": "" + }, + "storage": { + "type": "NFS", + "mountPath": "/data/external/nfs-086061ac-e99a-52d1-ac1a-3ca805da35b1", + "id": "5dfb56b0-3dd6-4ae2-8244-535d06618afb", + "name": "", + "host": "", + "serverPath": "" + }, + "status": "SUCCESS", + "endDate": "2025-10-30T01:32:42Z", + "createdBy": "Anonymous", + "duration": 486, + "releaseName": "uber-dnac", + "releaseDisplayName": "Cisco Catalyst Center", + "releaseVersion": "", + "releaseDisplayVersion": "3.1.6-75089", + "installedPackages": [ + { + "name": "mks-upgrade", + "displayName": "System Infrastructure", + "version": "3.1.277" + }, + { + "name": "core-platform", + "displayName": "Core Platform", + "version": "0.10.525" + }, + { + "name": "managed-services-shared", + "displayName": "Shared Managed Services", + "version": "0.10.504" + }, + { + "name": "gateway", + "displayName": "Gateway Service", + "version": "0.10.8-3" + }, + { + "name": "core-addons", + "displayName": "System Addons", + "version": "0.10.502" + }, + { + "name": "iam", + "displayName": "Identity and Access Management", + "version": "5.4.504" + }, + { + "name": "harbor-ui", + "displayName": "User Interface Platform", + "version": "3.5.504" + }, + { + "name": "idm-ui-plugin", + "displayName": "Identity and Access Management User 
Interface", + "version": "5.4.502" + }, + { + "name": "data-ingestion-infra", + "displayName": "Network Data Platform Data Ingestion", + "version": "6.6.502" + }, + { + "name": "data-contextstore", + "displayName": "Network Data Platform Caching", + "version": "6.6.503" + }, + { + "name": "data-analytics-infra", + "displayName": "Network Data Platform Core", + "version": "6.6.502" + }, + { + "name": "data-platform-pipeline-infra", + "displayName": "Network Data Platform Pipeline", + "version": "6.6.504" + }, + { + "name": "data-store-management", + "displayName": "Network Data Platform Storage Management", + "version": "6.6.503" + }, + { + "name": "data-platform-ui", + "displayName": "Network Data Platform Management User Interface", + "version": "6.6.101" + }, + { + "name": "cloud-connectivity-datahub", + "displayName": "DxHub Cloud Connectivity", + "version": "6.10.502" + }, + { + "name": "cloud-connectivity", + "displayName": "Cloud Connectivity", + "version": "6.10.502" + }, + { + "name": "dnacaap", + "displayName": "Platform", + "version": "6.9.507" + }, + { + "name": "dnac-search", + "displayName": "Global Search", + "version": "6.9.4" + }, + { + "name": "system-management-operations", + "displayName": "System Management", + "version": "1.6.507" + }, + { + "name": "telemetry", + "displayName": "Connected Telemetry", + "version": "4.7.502" + }, + { + "name": "cloud-connectivity-sense", + "displayName": "Cloud Connectivity Sense", + "version": "7.0.9" + }, + { + "name": "digestor", + "displayName": "Cloud Connectivity Communication", + "version": "7.0.5" + }, + { + "name": "rca-scripts", + "displayName": "RCA Collector", + "version": "0.5.7" + }, + { + "name": "platform-refresh", + "displayName": "Catalyst Center 3.x Upgrade", + "version": "1.4.28" + }, + { + "name": "catalyst-center-api-catalog", + "displayName": "API Catalog", + "version": "6.8.504" + }, + { + "name": "b2b-upgrade", + "displayName": "", + "version": "2.734.65213" + }, + { + "name": "system-commons", + "displayName": "System Commons", + "version": "2.734.65213" + }, + { + "name": "ise-bridge", + "displayName": "Cisco Identity Services Engine Bridge", + "version": "3.160.30" + }, + { + "name": "ncp", + "displayName": "Network Controller Platform", + "version": "2.734.65213" + }, + { + "name": "assurance-base-analytics", + "displayName": "Network Data Platform - Base Analytics", + "version": "3.160.10076" + }, + { + "name": "appsvc-remediation", + "displayName": "Application and Service Remediation", + "version": "3.16.16" + }, + { + "name": "assurance", + "displayName": "Assurance", + "version": "3.160.139" + }, + { + "name": "ai-network-analytics", + "displayName": "AI Network Analytics", + "version": "4.0.35" + }, + { + "name": "ssa", + "displayName": "Stealthwatch Security Analytics", + "version": "2.734.1095065" + }, + { + "name": "aca", + "displayName": "Access Control Application", + "version": "2.734.65213" + }, + { + "name": "group-based-policy-analytics", + "displayName": "Group Based Policy Analytics", + "version": "3.160.1" + }, + { + "name": "sda", + "displayName": "SD Access", + "version": "2.734.65213" + }, + { + "name": "app-visibility-and-policy", + "displayName": "Application Visibility and Policy", + "version": "2.734.117570" + }, + { + "name": "endpoint-analytics", + "displayName": "AI Endpoint Analytics", + "version": "3.160.19" + }, + { + "name": "rogue-management", + "displayName": "Rogue and aWIPS", + "version": "3.1.404" + }, + { + "name": "app-hosting", + "displayName": "Application Hosting", + 
"version": "2.4.125100107" + }, + { + "name": "app-orchestrator", + "displayName": "Industrial App Orchestrator", + "version": "2.734.685028" + }, + { + "name": "icap", + "displayName": "Automation Intelligent Capture", + "version": "2.734.65213" + }, + { + "name": "wide-area-bonjour", + "displayName": "Wide Area Bonjour", + "version": "2.734.755004" + }, + { + "name": "sensor-automation", + "displayName": "Automation Sensor", + "version": "2.734.65213" + }, + { + "name": "sensor-assurance", + "displayName": "Assurance Sensor", + "version": "3.160.121" + }, + { + "name": "disaster-recovery", + "displayName": "Disaster Recovery", + "version": "2.734.365013" + }, + { + "name": "support-services", + "displayName": "Support Services", + "version": "2.734.885074" + }, + { + "name": "multi-dnac-enablement", + "displayName": "Multiple Instance Enablement", + "version": "2.734.65213" + } + ], + "numberOfNodes": 1, + "productType": "DNAC", + "productVersion": "3.734.75089", + "internetProtocolVersion": "ipv4", + "fipsEnabled": false, + "compatibilityError": [ + + ] + } + ], + "page": { + "limit": 50, + "offset": 1, + "count": 1, + "sortBy": [ + { + "name": "startDate", + "order": "desc" + } + ] + }, + "filter": { + + } +}, +"delete_backup30": { + "version": "2.0", + "response": { + "taskId": "c3c79fdd-4505-416b-b544-789f3828b8cc", + "url": "/dna/intent/api/v1/backupRestoreExecutions/c3c79fdd-4505-416b-b544-789f3828b8cc" + } +}, +"get_backup_and_restore_execution50": { + "version": "2.0", + "response": { + "backupId": "bce6aa1a-26c7-40bc-b591-6cd15cad831a", + "id": "c3c79fdd-4505-416b-b544-789f3828b8cc", + "jobType": "DELETE_BACKUP", + "status": "IN_PROGRESS", + "endDate": "1970-01-01T00:00:00.000Z", + "startDate": "2025-10-31T05:02:15.877Z", + "createdBy": "Anonymous", + "errorMessage": "", + "errorCode": "", + "scope": "CISCO_DNA_DATA_WITHOUT_ASSURANCE", + "isForceUpdate": false, + "updateMessage": "", + "failedTaskDetail": { + + }, + "systemErrorMessage": "", + "completedPercentage": 0, + "_metadata": null, + "tasks": [ + { + "id": "d93ea39d-a3b6-4ccb-84fd-642862f02077", + "taskName": "Trigger", + "status": "SUCCESS", + "endDate": "2025-10-31T05:02:15.877Z", + "startDate": "2025-10-31T05:02:15.877Z", + "message": "", + "failedTaskDetail": { + + } + }, + { + "id": "18e1eebc-4ca7-4c38-9465-0045c932b5d8", + "taskName": "initialize Values", + "status": "SUCCESS", + "endDate": "2025-10-31T05:02:16.793Z", + "startDate": "2025-10-31T05:02:16.713Z", + "message": "", + "failedTaskDetail": { + + } + }, + { + "id": "a55adf3b-894f-4133-b8ae-998f979f2ab2", + "taskName": "Validate Disk Reachability", + "status": "SUCCESS", + "endDate": "2025-10-31T05:02:17.106Z", + "startDate": "2025-10-31T05:02:16.794Z", + "message": "", + "failedTaskDetail": { + + } + }, + { + "id": "b4e785b5-3ed6-416a-a922-f1c677690276", + "taskName": "Get Backup", + "status": "SUCCESS", + "endDate": "2025-10-31T05:02:17.205Z", + "startDate": "2025-10-31T05:02:17.106Z", + "message": "", + "failedTaskDetail": { + + } + }, + { + "id": "adee05f4-61f6-4870-b842-271fe430b427", + "taskName": "Validate Backup", + "status": "SUCCESS", + "endDate": "2025-10-31T05:02:18.805Z", + "startDate": "2025-10-31T05:02:17.205Z", + "message": "", + "failedTaskDetail": { + + } + }, + { + "id": "df146a7d-5f55-4c78-b28a-1c15c61b0f92", + "taskName": "Prepare Delete", + "status": "SUCCESS", + "endDate": "2025-10-31T05:02:18.994Z", + "startDate": "2025-10-31T05:02:18.805Z", + "message": "", + "failedTaskDetail": { + + } + }, + { + "id": 
"2def360c-db84-42c7-bece-152895636d08", + "taskName": "Delete MKS / System Manager / Managed Service Backup", + "status": "INPROGRESS", + "endDate": "", + "startDate": "2025-10-31T05:02:18.994Z", + "message": "", + "failedTaskDetail": { + + } + } + ] + } +}, +"get_backup_and_restore_execution51": { + "version": "2.0", + "response": { + "backupId": "bce6aa1a-26c7-40bc-b591-6cd15cad831a", + "id": "c3c79fdd-4505-416b-b544-789f3828b8cc", + "jobType": "DELETE_BACKUP", + "status": "SUCCESS", + "endDate": "2025-10-31T05:02:50.122Z", + "startDate": "2025-10-31T05:02:15.877Z", + "createdBy": "Anonymous", + "errorMessage": "", + "errorCode": "", + "scope": "CISCO_DNA_DATA_WITHOUT_ASSURANCE", + "isForceUpdate": false, + "updateMessage": "", + "failedTaskDetail": { + + }, + "systemErrorMessage": "", + "completedPercentage": 0, + "_metadata": null, + "tasks": [ + { + "id": "7e5e44b0-c22b-4c86-b465-e91660e7880a", + "taskName": "Trigger", + "status": "SUCCESS", + "endDate": "2025-10-31T05:02:15.877Z", + "startDate": "2025-10-31T05:02:15.877Z", + "message": "", + "failedTaskDetail": { + + } + }, + { + "id": "6c6e58bc-9016-4d0a-a79c-8a3b3bf9bc75", + "taskName": "initialize Values", + "status": "SUCCESS", + "endDate": "2025-10-31T05:02:16.793Z", + "startDate": "2025-10-31T05:02:16.713Z", + "message": "", + "failedTaskDetail": { + + } + }, + { + "id": "220ada93-5445-41b9-be22-431dfb7d69a6", + "taskName": "Validate Disk Reachability", + "status": "SUCCESS", + "endDate": "2025-10-31T05:02:17.106Z", + "startDate": "2025-10-31T05:02:16.794Z", + "message": "", + "failedTaskDetail": { + + } + }, + { + "id": "c96a27a1-23ea-4d1a-8419-b9c46f81a7e5", + "taskName": "Get Backup", + "status": "SUCCESS", + "endDate": "2025-10-31T05:02:17.205Z", + "startDate": "2025-10-31T05:02:17.106Z", + "message": "", + "failedTaskDetail": { + + } + }, + { + "id": "c950dce9-1135-4ede-9b4d-ea17950c1788", + "taskName": "Validate Backup", + "status": "SUCCESS", + "endDate": "2025-10-31T05:02:18.805Z", + "startDate": "2025-10-31T05:02:17.205Z", + "message": "", + "failedTaskDetail": { + + } + }, + { + "id": "60a44515-f823-4510-8878-6acd51413038", + "taskName": "Prepare Delete", + "status": "SUCCESS", + "endDate": "2025-10-31T05:02:18.994Z", + "startDate": "2025-10-31T05:02:18.805Z", + "message": "", + "failedTaskDetail": { + + } + }, + { + "id": "dca8b9fd-d092-4a23-aaa4-0591ffa5e9a2", + "taskName": "Delete MKS / System Manager / Managed Service Backup", + "status": "SUCCESS", + "endDate": "2025-10-31T05:02:50.052Z", + "startDate": "2025-10-31T05:02:18.994Z", + "message": "", + "failedTaskDetail": { + + } + }, + { + "id": "6860fe33-be99-451d-816a-d4b4fcbe7453", + "taskName": "Delete Backup Metadata", + "status": "SUCCESS", + "endDate": "2025-10-31T05:02:50.106Z", + "startDate": "2025-10-31T05:02:50.075Z", + "message": "", + "failedTaskDetail": { + + } + } + ] + } +}, +"get_all_n_f_s_configurations34": { + "version": "2.0", + "response": [ + { + "id": "089acacc-72bc-4305-9937-fb1f856206f5", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB25" + }, + "status": { + "destinationPath": "/data/external/nfs-5e800996-e8e8-56cc-b755-a6d76ca5e0b4", + "state": "UnHealthy", + "subResourceState": "172.23.9.219=NFS mount point could not be mounted on the node", + "unhealthyNodes": [ + "172.23.9.219" + ] + } + }, + { + "id": "2e69163c-8db8-4206-8c1c-f83272b07a0d", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + 
"portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB17" + }, + "status": { + "destinationPath": "/data/external/nfs-e9f731e5-ea3c-512d-acef-c169bca98d04", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + }, + { + "id": "2ec88c43-c554-455c-a63a-7b8400fae245", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB18" + }, + "status": { + "destinationPath": "/data/external/nfs-013c9812-7359-57bb-88b0-53d01e4122ff", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + }, + { + "id": "58c02bda-36f8-4fbd-b7f3-91232dbbc4d4", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB23" + }, + "status": { + "destinationPath": "/data/external/nfs-086061ac-e99a-52d1-ac1a-3ca805da35b1", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + }, + { + "id": "8cce7e5b-4f76-4c98-ac43-1d25b5dc5a56", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB24" + }, + "status": { + "destinationPath": "/data/external/nfs-2b4a348d-f4b3-5214-926d-5b96b44c4068", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + } + ] +}, +"get_all_backup34": { + "version": "2.0", + "response": [ + + ], + "page": { + "limit": 50, + "offset": 1, + "count": 0, + "sortBy": [ + { + "name": "startDate", + "order": "desc" + } + ] + }, + "filter": { + + } +}, + + "playbook_delete_all_backup": [ + { + "backup": [ + { + "delete_all_backup": true + } + ] + } + ], + "get_all_n_f_s_configurations35": { + "version": "2.0", + "response": [ + { + "id": "3648a5e6-f9d9-42fb-a66f-32beec8387e6", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB17" + }, + "status": { + "destinationPath": "/data/external/nfs-e9f731e5-ea3c-512d-acef-c169bca98d04", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + }, + { + "id": "7b960aea-b2ef-4f19-969b-aa16eb608223", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB30" + }, + "status": { + "destinationPath": "/data/external/nfs-b42b9005-d788-596e-a2e7-cb9d89f7466e", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + }, + { + "id": "9aa86409-69ff-4e5c-8a7b-68b8d40fa142", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "10.195.189.95", + "serverType": "NFS", + "sourcePath": "/data/nfsshare/iac" + }, + "status": { + "destinationPath": "/data/external/nfs-13fdd4b5-e0bf-54a3-92a1-e296ecda3e0c", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + }, + { + "id": "d9ceef12-5979-4a73-8772-d72989206d71", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB18" + }, + "status": { + "destinationPath": "/data/external/nfs-013c9812-7359-57bb-88b0-53d01e4122ff", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": 
null + } + } + ] + }, + "get_all_backup35": { + "version": "2.0", + "response": [ + { + "name": "BACKUP03_10_20251003_165205", + "scope": "CISCO_DNA_DATA_WITHOUT_ASSURANCE", + "versions": [ + "mks-upgrade:3.1.59", + "core-platform:0.10.186", + "managed-services-shared:0.10.18", + "gateway:0.10.8-1", + "core-addons:0.10.40", + "iam:5.4.39", + "harbor-ui:3.5.141", + "idm-ui-plugin:5.4.24", + "data-ingestion-infra:6.6.15", + "data-contextstore:6.6.10", + "data-analytics-infra:6.6.123", + "data-platform-pipeline-infra:6.6.123", + "data-store-management:6.6.41", + "data-platform-ui:6.6.101", + "cloud-connectivity-datahub:6.10.22", + "cloud-connectivity:6.10.10", + "dnacaap:6.9.41", + "dnac-search:6.9.3", + "system-management-operations:1.6.42", + "telemetry:4.7.17", + "cloud-connectivity-sense:7.0.7", + "digestor:7.0.4", + "rca-scripts:0.5.6", + "platform-refresh:1.3.31", + "catalyst-center-api-catalog:6.8.91", + "b2b-upgrade:2.732.65192", + "system-commons:2.732.65192", + "ise-bridge:3.130.14", + "ncp:2.732.65192", + "assurance-base-analytics:3.150.100154", + "appsvc-remediation:3.15.0", + "assurance:3.150.111", + "ai-network-analytics:4.0.30", + "ssa:2.732.1095081", + "aca:2.732.65192", + "group-based-policy-analytics:3.150.1", + "sda:2.732.65192", + "app-visibility-and-policy:2.732.117578", + "endpoint-analytics:1.11.1540", + "rogue-management:3.1.200", + "app-hosting:2.3.225061205", + "sea-app-package:2.732.685005", + "icap:2.732.65192", + "sensor-automation:2.732.65192", + "sensor-assurance:3.150.109", + "disaster-recovery:2.732.365010", + "support-services:2.730.885298", + "multi-dnac-enablement:2.732.65192" + ], + "id": "1efb2b97-7245-48c2-b307-63c5c8fbd34b", + "size": 0, + "createdDate": "2025-10-03T11:22:05Z", + "isBackupAvailable": false, + "isCompatible": false, + "context": { + "type": "ON_DEMAND", + "schedule": "" + }, + "storage": { + "type": "NFS", + "mountPath": "/data/external/nfs-013c9812-7359-57bb-88b0-53d01e4122ff", + "id": "ff131865-4f07-43f6-a320-71540b24b37d", + "name": "", + "host": "", + "serverPath": "" + }, + "status": "FAILED", + "endDate": "2025-10-03T11:23:09Z", + "createdBy": "Anonymous", + "duration": 64, + "releaseName": "uber-dnac", + "releaseDisplayName": "Cisco Catalyst Center", + "releaseVersion": "", + "releaseDisplayVersion": "3.1.5-75038", + "installedPackages": [ + { + "name": "mks-upgrade", + "displayName": "System", + "version": "3.1.59" + }, + { + "name": "core-platform", + "displayName": "Core Platform", + "version": "0.10.186" + }, + { + "name": "managed-services-shared", + "displayName": "Shared Managed Services", + "version": "0.10.18" + }, + { + "name": "gateway", + "displayName": "Catalyst Center Gateway Service", + "version": "0.10.8-1" + }, + { + "name": "core-addons", + "displayName": "System Addons", + "version": "0.10.40" + }, + { + "name": "iam", + "displayName": "Identity and Access Management", + "version": "5.4.39" + }, + { + "name": "harbor-ui", + "displayName": "Cisco Catalyst Center UI", + "version": "3.5.141" + }, + { + "name": "idm-ui-plugin", + "displayName": "Identity and Access Management - UI", + "version": "5.4.24" + }, + { + "name": "data-ingestion-infra", + "displayName": "Network Data Platform - Ingestion Infra", + "version": "6.6.15" + }, + { + "name": "data-contextstore", + "displayName": "Network Data Platform - Caching Infra", + "version": "6.6.10" + }, + { + "name": "data-analytics-infra", + "displayName": "Network Data Platform - Core", + "version": "6.6.123" + }, + { + "name": "data-platform-pipeline-infra", + 
"displayName": "Network Data Platform - Pipeline Infra", + "version": "6.6.123" + }, + { + "name": "data-store-management", + "displayName": "Network Data Platform - Storage Management", + "version": "6.6.41" + }, + { + "name": "data-platform-ui", + "displayName": "Network Data Platform - Manager", + "version": "6.6.101" + }, + { + "name": "cloud-connectivity-datahub", + "displayName": "DxHub Cloud Connectivity", + "version": "6.10.22" + }, + { + "name": "cloud-connectivity", + "displayName": "Cloud Connectivity", + "version": "6.10.10" + }, + { + "name": "dnacaap", + "displayName": "Cisco Catalyst Center Platform", + "version": "6.9.41" + }, + { + "name": "dnac-search", + "displayName": "Cisco Catalyst Center Global Search", + "version": "6.9.3" + }, + { + "name": "system-management-operations", + "displayName": "System Management Operations", + "version": "1.6.42" + }, + { + "name": "telemetry", + "displayName": "Telemetry", + "version": "4.7.17" + }, + { + "name": "cloud-connectivity-sense", + "displayName": "Cloud Connectivity - Contextual Content", + "version": "7.0.7" + }, + { + "name": "digestor", + "displayName": "Cloud Connectivity - Digestor", + "version": "7.0.4" + }, + { + "name": "rca-scripts", + "displayName": "RCA-Scripts Package", + "version": "0.5.6" + }, + { + "name": "platform-refresh", + "displayName": "Platform Refresh", + "version": "1.3.31" + }, + { + "name": "catalyst-center-api-catalog", + "displayName": "Catalyst Center API Catalog", + "version": "6.8.91" + }, + { + "name": "b2b-upgrade", + "displayName": "", + "version": "2.732.65192" + }, + { + "name": "system-commons", + "displayName": "System Commons", + "version": "2.732.65192" + }, + { + "name": "ise-bridge", + "displayName": "Cisco Identity Services Engine Bridge", + "version": "3.130.14" + }, + { + "name": "ncp", + "displayName": "Network Controller Platform", + "version": "2.732.65192" + }, + { + "name": "assurance-base-analytics", + "displayName": "Network Data Platform - Base Analytics", + "version": "3.150.100154" + }, + { + "name": "appsvc-remediation", + "displayName": "Application and Service Remediation", + "version": "3.15.0" + }, + { + "name": "assurance", + "displayName": "Assurance", + "version": "3.150.111" + }, + { + "name": "ai-network-analytics", + "displayName": "AI Network Analytics", + "version": "4.0.30" + }, + { + "name": "ssa", + "displayName": "Stealthwatch Security Analytics", + "version": "2.732.1095081" + }, + { + "name": "aca", + "displayName": "Access Control Application", + "version": "2.732.65192" + }, + { + "name": "group-based-policy-analytics", + "displayName": "Group Based Policy Analytics", + "version": "3.150.1" + }, + { + "name": "sda", + "displayName": "SD Access", + "version": "2.732.65192" + }, + { + "name": "app-visibility-and-policy", + "displayName": "Application Visibility and Policy", + "version": "2.732.117578" + }, + { + "name": "endpoint-analytics", + "displayName": "AI Endpoint Analytics", + "version": "1.11.1540" + }, + { + "name": "rogue-management", + "displayName": "Rogue and aWIPS", + "version": "3.1.200" + }, + { + "name": "app-hosting", + "displayName": "Application Hosting", + "version": "2.3.225061205" + }, + { + "name": "sea-app-package", + "displayName": "SEA App", + "version": "2.732.685005" + }, + { + "name": "icap", + "displayName": "Automation - Intelligent Capture", + "version": "2.732.65192" + }, + { + "name": "sensor-automation", + "displayName": "Automation - Sensor", + "version": "2.732.65192" + }, + { + "name": "sensor-assurance", + 
"displayName": "Assurance - Sensor", + "version": "3.150.109" + }, + { + "name": "disaster-recovery", + "displayName": "Disaster Recovery", + "version": "2.732.365010" + }, + { + "name": "support-services", + "displayName": "Support Services", + "version": "2.730.885298" + }, + { + "name": "multi-dnac-enablement", + "displayName": "Multiple Cisco Catalyst Center", + "version": "2.732.65192" + } + ], + "numberOfNodes": 1, + "productType": "DNAC", + "productVersion": "3.732.75038", + "internetProtocolVersion": "ipv4", + "fipsEnabled": false, + "compatibilityError": [ + { + "endDate": "2025-10-03T11:23:36.405Z", + "startDate": "2025-10-03T11:23:36.404Z", + "serviceName": "System", + "namespace": "", + "response": { + "error": "backups with status FAILED are incompatible.", + "status": "not ok" + } + } + ] + } + ], + "page": { + "limit": 50, + "offset": 1, + "count": 1, + "sortBy": [ + { + "name": "startDate", + "order": "desc" + } + ] + }, + "filter": { + + } + }, + "delete_backup35": { + "version": "2.0", + "response": { + "taskId": "5d4d3a9f-b4c7-4bee-af70-3cea42bdc13e", + "url": "/dna/intent/api/v1/backupRestoreExecutions/5d4d3a9f-b4c7-4bee-af70-3cea42bdc13e" + } + }, + "get_backup_and_restore_execution35": { + "version": "2.0", + "response": { + "backupId": "1efb2b97-7245-48c2-b307-63c5c8fbd34b", + "id": "5d4d3a9f-b4c7-4bee-af70-3cea42bdc13e", + "jobType": "DELETE_BACKUP", + "status": "IN_PROGRESS", + "endDate": "1970-01-01T00:00:00.000Z", + "startDate": "2025-10-03T12:01:53.656Z", + "createdBy": "Anonymous", + "errorMessage": "", + "errorCode": "", + "scope": "CISCO_DNA_DATA_WITHOUT_ASSURANCE", + "isForceUpdate": false, + "updateMessage": "", + "failedTaskDetail": { + + }, + "systemErrorMessage": "", + "completedPercentage": 0, + "_metadata": null, + "tasks": [ + { + "id": "534bb2a1-40c3-4d7e-9b7f-2b221c43e345", + "taskName": "initialize Values", + "status": "SUCCESS", + "endDate": "2025-10-03T12:01:53.668Z", + "startDate": "2025-10-03T12:01:53.657Z", + "message": "", + "failedTaskDetail": { + + } + }, + { + "id": "f353f6ba-2ac6-47eb-8944-0b47ad675313", + "taskName": "Trigger", + "status": "SUCCESS", + "endDate": "2025-10-03T12:01:53.657Z", + "startDate": "2025-10-03T12:01:53.657Z", + "message": "", + "failedTaskDetail": { + + } + }, + { + "id": "73f5e2ce-a341-42e6-b5eb-bbd500b5ae07", + "taskName": "Validate Disk Reachability", + "status": "SUCCESS", + "endDate": "2025-10-03T12:01:53.694Z", + "startDate": "2025-10-03T12:01:53.668Z", + "message": "", + "failedTaskDetail": { + + } + }, + { + "id": "b0e71e37-50fe-47e8-a41d-2068fe717301", + "taskName": "Get Backup", + "status": "SUCCESS", + "endDate": "2025-10-03T12:01:53.699Z", + "startDate": "2025-10-03T12:01:53.694Z", + "message": "", + "failedTaskDetail": { + + } + }, + { + "id": "987a9b5a-55ab-4da3-83e1-c650f7445056", + "taskName": "Validate Backup", + "status": "SUCCESS", + "endDate": "2025-10-03T12:01:53.727Z", + "startDate": "2025-10-03T12:01:53.699Z", + "message": "", + "failedTaskDetail": { + + } + }, + { + "id": "582bf84a-8af7-4108-b2b8-77a63c8562fb", + "taskName": "Prepare Delete", + "status": "SUCCESS", + "endDate": "2025-10-03T12:01:53.772Z", + "startDate": "2025-10-03T12:01:53.727Z", + "message": "", + "failedTaskDetail": { + + } + }, + { + "id": "6e240cca-6ea5-4a71-8634-2b54f756d5df", + "taskName": "Delete MKS / System Manager / Managed Service Backup", + "status": "INPROGRESS", + "endDate": "", + "startDate": "2025-10-03T12:01:53.772Z", + "message": "", + "failedTaskDetail": { + + } + } + ] + } + }, + 
"get_backup_and_restore_execution36": { + "version": "2.0", + "response": { + "backupId": "1efb2b97-7245-48c2-b307-63c5c8fbd34b", + "id": "5d4d3a9f-b4c7-4bee-af70-3cea42bdc13e", + "jobType": "DELETE_BACKUP", + "status": "SUCCESS", + "endDate": "2025-10-03T12:02:23.943Z", + "startDate": "2025-10-03T12:01:53.656Z", + "createdBy": "Anonymous", + "errorMessage": "", + "errorCode": "", + "scope": "CISCO_DNA_DATA_WITHOUT_ASSURANCE", + "isForceUpdate": false, + "updateMessage": "", + "failedTaskDetail": { + + }, + "systemErrorMessage": "", + "completedPercentage": 0, + "_metadata": null, + "tasks": [ + { + "id": "b8f05096-992c-4fb3-a4fe-b3fb96387731", + "taskName": "initialize Values", + "status": "SUCCESS", + "endDate": "2025-10-03T12:01:53.668Z", + "startDate": "2025-10-03T12:01:53.657Z", + "message": "", + "failedTaskDetail": { + + } + }, + { + "id": "6adcd054-921b-4539-8513-0777e2477061", + "taskName": "Trigger", + "status": "SUCCESS", + "endDate": "2025-10-03T12:01:53.657Z", + "startDate": "2025-10-03T12:01:53.657Z", + "message": "", + "failedTaskDetail": { + + } + }, + { + "id": "32423a11-f364-427d-bb27-cb19b1cd2149", + "taskName": "Validate Disk Reachability", + "status": "SUCCESS", + "endDate": "2025-10-03T12:01:53.694Z", + "startDate": "2025-10-03T12:01:53.668Z", + "message": "", + "failedTaskDetail": { + + } + }, + { + "id": "b25a4804-c09e-45da-a9d8-57543d54d2d9", + "taskName": "Get Backup", + "status": "SUCCESS", + "endDate": "2025-10-03T12:01:53.699Z", + "startDate": "2025-10-03T12:01:53.694Z", + "message": "", + "failedTaskDetail": { + + } + }, + { + "id": "7a64cf30-e084-4096-835a-ab854d19893c", + "taskName": "Validate Backup", + "status": "SUCCESS", + "endDate": "2025-10-03T12:01:53.727Z", + "startDate": "2025-10-03T12:01:53.699Z", + "message": "", + "failedTaskDetail": { + + } + }, + { + "id": "dec90474-ce43-4502-ae69-a6db3e0860aa", + "taskName": "Prepare Delete", + "status": "SUCCESS", + "endDate": "2025-10-03T12:01:53.772Z", + "startDate": "2025-10-03T12:01:53.727Z", + "message": "", + "failedTaskDetail": { + + } + }, + { + "id": "3636912f-1a13-4684-a3a4-a1869f04ed68", + "taskName": "Delete MKS / System Manager / Managed Service Backup", + "status": "SUCCESS", + "endDate": "2025-10-03T12:02:23.930Z", + "startDate": "2025-10-03T12:01:53.772Z", + "message": "", + "failedTaskDetail": { + + } + }, + { + "id": "4ba0d668-4288-4c5f-a26d-8b38a8af9815", + "taskName": "Delete Backup Metadata", + "status": "SUCCESS", + "endDate": "2025-10-03T12:02:23.939Z", + "startDate": "2025-10-03T12:02:23.936Z", + "message": "", + "failedTaskDetail": { + + } + } + ] + } + }, + "get_all_n_f_s_configurations36": { + "version": "2.0", + "response": [ + { + "id": "3648a5e6-f9d9-42fb-a66f-32beec8387e6", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB17" + }, + "status": { + "destinationPath": "/data/external/nfs-e9f731e5-ea3c-512d-acef-c169bca98d04", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + }, + { + "id": "7b960aea-b2ef-4f19-969b-aa16eb608223", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB30" + }, + "status": { + "destinationPath": "/data/external/nfs-b42b9005-d788-596e-a2e7-cb9d89f7466e", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + }, + { + "id": "9aa86409-69ff-4e5c-8a7b-68b8d40fa142", 
+ "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "10.195.189.95", + "serverType": "NFS", + "sourcePath": "/data/nfsshare/iac" + }, + "status": { + "destinationPath": "/data/external/nfs-13fdd4b5-e0bf-54a3-92a1-e296ecda3e0c", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + }, + { + "id": "d9ceef12-5979-4a73-8772-d72989206d71", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB18" + }, + "status": { + "destinationPath": "/data/external/nfs-013c9812-7359-57bb-88b0-53d01e4122ff", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + } + ] + }, + "get_all_backup36": { + "version": "2.0", + "response": [ + + ], + "page": { + "limit": 50, + "offset": 1, + "count": 0, + "sortBy": [ + { + "name": "startDate", + "order": "desc" + } + ] + }, + "filter": { + + } + }, + + "playbook_no_backup_todelete": [ + { + "backup": [ + { + "delete_all_backup": true + } + ] + } + ], + "get_all_n_f_s_configurations37": { + "version": "2.0", + "response": [ + { + "id": "3648a5e6-f9d9-42fb-a66f-32beec8387e6", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB17" + }, + "status": { + "destinationPath": "/data/external/nfs-e9f731e5-ea3c-512d-acef-c169bca98d04", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + }, + { + "id": "7b960aea-b2ef-4f19-969b-aa16eb608223", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB30" + }, + "status": { + "destinationPath": "/data/external/nfs-b42b9005-d788-596e-a2e7-cb9d89f7466e", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + }, + { + "id": "9aa86409-69ff-4e5c-8a7b-68b8d40fa142", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "10.195.189.95", + "serverType": "NFS", + "sourcePath": "/data/nfsshare/iac" + }, + "status": { + "destinationPath": "/data/external/nfs-13fdd4b5-e0bf-54a3-92a1-e296ecda3e0c", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + }, + { + "id": "d9ceef12-5979-4a73-8772-d72989206d71", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB18" + }, + "status": { + "destinationPath": "/data/external/nfs-013c9812-7359-57bb-88b0-53d01e4122ff", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + } + ] + }, + "get_all_backup37": { + "version": "2.0", + "response": [ + + ], + "page": { + "limit": 50, + "offset": 1, + "count": 0, + "sortBy": [ + { + "name": "startDate", + "order": "desc" + } + ] + }, + "filter": { + + } + }, + "get_all_n_f_s_configurations38": { + "version": "2.0", + "response": [ + { + "id": "3648a5e6-f9d9-42fb-a66f-32beec8387e6", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB17" + }, + "status": { + "destinationPath": "/data/external/nfs-e9f731e5-ea3c-512d-acef-c169bca98d04", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + }, + { + "id": 
"7b960aea-b2ef-4f19-969b-aa16eb608223", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB30" + }, + "status": { + "destinationPath": "/data/external/nfs-b42b9005-d788-596e-a2e7-cb9d89f7466e", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + }, + { + "id": "9aa86409-69ff-4e5c-8a7b-68b8d40fa142", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "10.195.189.95", + "serverType": "NFS", + "sourcePath": "/data/nfsshare/iac" + }, + "status": { + "destinationPath": "/data/external/nfs-13fdd4b5-e0bf-54a3-92a1-e296ecda3e0c", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + }, + { + "id": "d9ceef12-5979-4a73-8772-d72989206d71", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB18" + }, + "status": { + "destinationPath": "/data/external/nfs-013c9812-7359-57bb-88b0-53d01e4122ff", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + } + ] + }, + "get_all_backup38": { + "version": "2.0", + "response": [ + + ], + "page": { + "limit": 50, + "offset": 1, + "count": 0, + "sortBy": [ + { + "name": "startDate", + "order": "desc" + } + ] + }, + "filter": { + + } + }, + + + "playbook_generate_new_backup": [ + { + "backup": [ + { + "generate_new_backup": true, + "name": "BACKUP05_10", + "scope": "CISCO_DNA_DATA_WITHOUT_ASSURANCE" + } + ] + } + ], + "get_all_n_f_s_configurations39": { + "version": "2.0", + "response": [ + { + "id": "3648a5e6-f9d9-42fb-a66f-32beec8387e6", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB17" + }, + "status": { + "destinationPath": "/data/external/nfs-e9f731e5-ea3c-512d-acef-c169bca98d04", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + }, + { + "id": "7b960aea-b2ef-4f19-969b-aa16eb608223", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB30" + }, + "status": { + "destinationPath": "/data/external/nfs-b42b9005-d788-596e-a2e7-cb9d89f7466e", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + }, + { + "id": "9aa86409-69ff-4e5c-8a7b-68b8d40fa142", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "10.195.189.95", + "serverType": "NFS", + "sourcePath": "/data/nfsshare/iac" + }, + "status": { + "destinationPath": "/data/external/nfs-13fdd4b5-e0bf-54a3-92a1-e296ecda3e0c", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + }, + { + "id": "d9ceef12-5979-4a73-8772-d72989206d71", + "spec": { + "nfsPort": 2049, + "nfsVersion": "nfs4", + "portMapperPort": 111, + "server": "172.27.17.90", + "serverType": "NFS", + "sourcePath": "/home/nfsshare/backups/TB18" + }, + "status": { + "destinationPath": "/data/external/nfs-013c9812-7359-57bb-88b0-53d01e4122ff", + "state": "Healthy", + "subResourceState": "", + "unhealthyNodes": null + } + } + ] + }, + "get_all_backup39": { + "version": "2.0", + "response": [ + + ], + "page": { + "limit": 50, + "offset": 1, + "count": 0, + "sortBy": [ + { + "name": "startDate", + "order": "desc" + } + ] + }, + "filter": { + + } + }, + 
"create_backup": { + "version": "2.0", + "response": { + "taskId": "9b32befa-2a8a-458e-a65c-9225ebada422", + "url": "/dna/intent/api/v1/backupRestoreExecutions/9b32befa-2a8a-458e-a65c-9225ebada422" + } + }, + "get_backup_and_restore_execution39": { + "version": "2.0", + "response": { + "backupId": "7f62ec6b-d932-4641-bceb-96cd8c08afb3", + "id": "9b32befa-2a8a-458e-a65c-9225ebada422", + "jobType": "CREATE_BACKUP", + "status": "IN_PROGRESS", + "endDate": "1970-01-01T00:00:00.000Z", + "startDate": "2025-10-06T05:33:28.309Z", + "createdBy": "Anonymous", + "errorMessage": "", + "errorCode": "", + "scope": "CISCO_DNA_DATA_WITHOUT_ASSURANCE", + "isForceUpdate": false, + "updateMessage": "", + "failedTaskDetail": { + + }, + "systemErrorMessage": "", + "completedPercentage": 0, + "_metadata": null, + "tasks": [ + { + "id": "4a55a4d4-7a1d-4b16-939a-a345c507ccc4", + "taskName": "Create Backup Metadata", + "status": "SUCCESS", + "endDate": "2025-10-06T05:33:28.977Z", + "startDate": "2025-10-06T05:33:28.875Z", + "message": "", + "failedTaskDetail": { + + } + }, + { + "id": "7d8fee47-06da-47ed-9a8e-5bee9be7dbba", + "taskName": "Validation", + "status": "INPROGRESS", + "endDate": "", + "startDate": "2025-10-06T05:33:28.977Z", + "message": "", + "failedTaskDetail": { + + } + } + ] + } + }, + "get_backup_and_restore_execution40": { + "version": "2.0", + "response": { + "backupId": "7f62ec6b-d932-4641-bceb-96cd8c08afb3", + "id": "9b32befa-2a8a-458e-a65c-9225ebada422", + "jobType": "CREATE_BACKUP", + "status": "FAILED", + "endDate": "2025-10-06T05:34:35.183Z", + "startDate": "2025-10-06T05:33:28.309Z", + "createdBy": "Anonymous", + "errorMessage": "replicatedfs:app-hosting- backup failed. Error: backup for backupId 6d7f33c8-c5e7-4c90-bdd5-63ee6fa199e6 failed due to backup job failure", + "errorCode": "MANAGED-SERVICE#CREATE_BACKUP_STATUS_FAILED_replicatedfs", + "scope": "CISCO_DNA_DATA_WITHOUT_ASSURANCE", + "isForceUpdate": false, + "updateMessage": "", + "failedTaskDetail": { + "bapiExecutionAttempt": 1, + "endTime": "Mon Oct 06 05:34:32 GMT 2025", + "endTimeEpoch": 1759728872295, + "errorCode": "MANAGED-SERVICE#CREATE_BACKUP_STATUS_FAILED_replicatedfs", + "errorMessage": "replicatedfs:app-hosting- backup failed. Error: backup for backupId 6d7f33c8-c5e7-4c90-bdd5-63ee6fa199e6 failed due to backup job failure", + "memoryConsumed": 0.296875, + "mockApplied": false, + "mockTypeApplied": "NO_MOCK", + "startTime": "Mon Oct 06 05:34:32 GMT 2025", + "startTimeEpoch": 1759728872284, + "status": "FAILURE", + "systemErrorDetails": { + "errorCode": "500", + "errorMessage": "9b32befa-2a8a-458e-a65c-9225ebada422 | 9d2679be-0684-458a-8815-88621da4ce50 | 41a4-a8ef-4b7a-8196 : ScriptProcessor Execution Failed with error: Exception: replicatedfs:app-hosting- backup failed. Error: backup for backupId 6d7f33c8-c5e7-4c90-bdd5-63ee6fa199e6 failed due to backup job failure in