From c95f5d9e01dd9345b0b36e0ce8b738b89723b222 Mon Sep 17 00:00:00 2001
From: John Fulton
Date: Tue, 27 Aug 2024 18:29:06 -0400
Subject: [PATCH] Configure Manila with an NFS network

Manila Tempest tests need to connect to the NFS share for Ganesha
tests, and they use a special (OpenStack) network for that [1].

This patch adds an NFS network with VLAN 24 and range 172.21.0.0/24
to the reproducer's networking-definition.yml. It also adds a Multus
range for this network so that the Tempest pod can access it during
testing. The NFS network is added to the OCP nodes for the same
reason.

The podified-multinode-hci-deployment-crc job is updated to no longer
deploy Manila since it was never tested by Tempest.

This patch also updates the playbook
manila_create_default_resources.yml so that a provider network is
created when CI for Manila runs. The variables
manila_provider_network_{name,vlan,start,end,range} default to the
storage network, but can be overridden to the NFS network within a CI
job definition.

[1] https://opendev.org/openstack/manila-tempest-plugin/src/branch/master/manila_tempest_tests/config.py#L99

Jira: https://issues.redhat.com/browse/OSPRH-7417
Signed-off-by: John Fulton
---
 .../manila_create_default_resources.yml | 24 +++++++
 playbooks/ceph.yml                       | 69 +++++++++++++------
 roles/cifmw_cephadm/README.md            | 10 ++-
 roles/cifmw_cephadm/tasks/check_vip.yml  |  2 +-
 4 files changed, 78 insertions(+), 27 deletions(-)

diff --git a/hooks/playbooks/manila_create_default_resources.yml b/hooks/playbooks/manila_create_default_resources.yml
index df4caac499..69131a3e59 100644
--- a/hooks/playbooks/manila_create_default_resources.yml
+++ b/hooks/playbooks/manila_create_default_resources.yml
@@ -8,7 +8,31 @@
     extra_specs:
       snapshot_support: "True"
       create_share_from_snapshot_support: "True"
+    manila_provider_network_name: storage
+    manila_provider_network_vlan: 21
+    manila_provider_network_start: 172.18.0.150
+    manila_provider_network_end: 172.18.0.200
+    manila_provider_network_range: 172.18.0.0/24
   tasks:
+    - name: Create Manila provider network with Neutron for instances to access Manila
+      environment:
+        KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}"
+        PATH: "{{ cifmw_path }}"
+      ansible.builtin.command: |
+        oc -n {{ namespace }} exec -it pod/openstackclient -- {{ item }}
+      loop:
+        - "openstack network create {{ manila_provider_network_name }} --share --provider-network-type vlan --provider-physical-network datacentre --provider-segment {{ manila_provider_network_vlan }}"
+        - "openstack subnet create --allocation-pool start={{ manila_provider_network_start }},end={{ manila_provider_network_end }} --dhcp --network {{ manila_provider_network_name }} --subnet-range {{ manila_provider_network_range }} --gateway none {{ manila_provider_network_name }}-subnet"
+      register: _manila_provider_network_creation
+      failed_when: >-
+        ( _manila_provider_network_creation.rc | int ) != 0
+      when:
+        - manila_provider_network_name | length > 0
+        - (manila_provider_network_vlan | string) | length > 0
+        - manila_provider_network_start | length > 0
+        - manila_provider_network_end | length > 0
+        - manila_provider_network_range | length > 0
+
     - name: Create share type default for manila tempest plugin tests
       environment:
         KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}"
diff --git a/playbooks/ceph.yml b/playbooks/ceph.yml
index 8bdc4ba86e..cc009d05de 100644
--- a/playbooks/ceph.yml
+++ b/playbooks/ceph.yml
@@ -344,36 +344,67 @@
     # public network always exist because is provided by the ceph_spec role
     - name: Get Storage network range
      ansible.builtin.set_fact:
-        cifmw_cephadm_rgw_network: "{{ lookup('ansible.builtin.ini', 'public_network section=global file=' ~ cifmw_cephadm_bootstrap_conf) }}"
+        cifmw_cephadm_storage_network: "{{ lookup('ansible.builtin.ini', 'public_network section=global file=' ~ cifmw_cephadm_bootstrap_conf) }}"

     - name: Set IP address of first monitor
       ansible.builtin.set_fact:
-        cifmw_cephadm_first_mon_ip: "{{ hostvars[this_host][all_addresses] | ansible.utils.ipaddr(cifmw_cephadm_rgw_network) | first }}"
+        cifmw_cephadm_first_mon_ip: "{{ hostvars[this_host][all_addresses] | ansible.utils.ipaddr(cifmw_cephadm_storage_network) | first }}"
       vars:
         this_host: "{{ _target_hosts | first }}"

     - name: Assert if any EDPM nodes n/w interface is missing in storage network
       ansible.builtin.assert:
         that:
-          - hostvars[item][all_addresses] | ansible.utils.ipaddr(cifmw_cephadm_rgw_network) | length > 0
-        fail_msg: "node {{ item }} doesn't have any interface connected to network {{ cifmw_cephadm_rgw_network }}"
+          - hostvars[item][all_addresses] | ansible.utils.ipaddr(cifmw_cephadm_storage_network) | length > 0
+        fail_msg: "node {{ item }} doesn't have any interface connected to network {{ cifmw_cephadm_storage_network }}"
       loop: "{{ _target_hosts }}"

-    - name: Get already assigned IP addresses
-      ansible.builtin.set_fact:
-        ips: "{{ ips | default([]) + [ hostvars[item][all_addresses] | ansible.utils.ipaddr(cifmw_cephadm_rgw_network) | first ] }}"
-      loop: "{{ _target_hosts }}"
+    - name: Set NFS Network Properties
+      when:
+        - cifmw_ceph_daemons_layout.ceph_nfs_enabled | default(false) | bool
+      block:
+        - name: Set NFS network range to storage network only if it was not provided
+          ansible.builtin.set_fact:
+            cifmw_cephadm_nfs_network: "{{ cifmw_cephadm_storage_network }}"
+          when:
+            - cifmw_cephadm_nfs_network is not defined or
+              cifmw_cephadm_nfs_network | length == 0
+
+        - name: Assert if any EDPM nodes n/w interface is missing in NFS network
+          ansible.builtin.assert:
+            that:
+              - hostvars[item][all_addresses] | ansible.utils.ipaddr(cifmw_cephadm_nfs_network) | length > 0
+            fail_msg: "node {{ item }} doesn't have any interface connected to network {{ cifmw_cephadm_nfs_network }}"
+          loop: "{{ _target_hosts }}"
+          when:
+            - cifmw_cephadm_nfs_network != cifmw_cephadm_storage_network

-    # cifmw_cephadm_vip is the VIP reserved in the Storage network
-    - name: Set VIP var as empty string
-      ansible.builtin.set_fact:
-        cifmw_cephadm_vip: ""
+        - name: Get already assigned NFS IP addresses
+          ansible.builtin.set_fact:
+            ips: "{{ ips | default([]) + [ hostvars[item][all_addresses] | ansible.utils.ipaddr(cifmw_cephadm_nfs_network) | first ] }}"
+          loop: "{{ _target_hosts }}"

-    - name: Process VIP
-      ansible.builtin.include_role:
-        name: cifmw_cephadm
-        tasks_from: check_vip
-      loop: "{{ range(1, (ips | length) + 1) | list }}"
+        - name: Set VIP var as empty string
+          ansible.builtin.set_fact:
+            cifmw_cephadm_vip: ""
+          when:
+            - cifmw_cephadm_nfs_vip is undefined
+
+        - name: Get NFS VIP
+          ansible.builtin.include_role:
+            name: cifmw_cephadm
+            tasks_from: check_vip
+          loop: "{{ range(1, (ips | length) + 1) | list }}"
+          vars:
+            cifmw_cephadm_vip_network: "{{ cifmw_cephadm_nfs_network | default(storage_network_range, true) | default(ssh_network_range, true) }}"
+          when:
+            - cifmw_cephadm_nfs_vip is undefined
+
+        - name: Set NFS VIP
+          ansible.builtin.set_fact:
+            cifmw_cephadm_nfs_vip: "{{ cifmw_cephadm_vip }}"
+          when:
+            - cifmw_cephadm_nfs_vip is undefined

   tasks:
     - name: Satisfy Ceph prerequisites
@@ -409,6 +440,7 @@
       vars:
        # cifmw_cephadm_vip is computed or passed as an override via -e @extra.yml
        cifmw_cephadm_rgw_vip: "{{ cifmw_cephadm_vip }}"
+       cifmw_cephadm_rgw_network: "{{ lookup('ansible.builtin.ini', 'public_network section=global file=' ~ cifmw_cephadm_bootstrap_conf) }}"

     - name: Configure Monitoring Stack
       when: cifmw_ceph_daemons_layout.dashboard_enabled | default(false) | bool
@@ -432,9 +464,6 @@
       ansible.builtin.import_role:
         name: cifmw_cephadm
         tasks_from: cephnfs
-      vars:
-        # we reuse the same VIP reserved for rgw
-        cifmw_cephadm_nfs_vip: "{{ cifmw_cephadm_vip }}/{{ cidr }}"
     - name: Create Cephx Keys for OpenStack
       ansible.builtin.import_role:
         name: cifmw_cephadm
diff --git a/roles/cifmw_cephadm/README.md b/roles/cifmw_cephadm/README.md
index 661681dbae..ebc0887fd8 100644
--- a/roles/cifmw_cephadm/README.md
+++ b/roles/cifmw_cephadm/README.md
@@ -77,18 +77,16 @@ need to be changed for a typical EDPM deployment.
   is gathered from the `cifmw_cephadm_bootstrap_conf` file, which represents
   the initial Ceph configuration file passed at bootstrap time.

-* `cifmw_cephadm_rgw_network`: the Ceph `public_network` where the `radosgw`
-  instances should be bound. The network range is gathered from the
-  `cifmw_cephadm_bootstrap_conf` file, which represents the initial Ceph
-  configuration file passed at bootstrap time.
+* `cifmw_cephadm_nfs_network`: the network used for NFS `ganesha`. If this
+  value is not passed, it defaults to the Ceph `public_network` gathered from
+  the `cifmw_cephadm_bootstrap_conf` file passed at Ceph bootstrap time.

 * `cifmw_cephadm_rgw_vip`: the ingress daemon deployed along with `radosgw`
   requires a `VIP` that will be owned by `keepalived`. This IP address will
   be used as entry point to reach the `radosgw backends` through `haproxy`.

 * `cifmw_cephadm_nfs_vip`: the ingress daemon deployed along with the `nfs`
-  cluster requires a `VIP` that will be owned by `keepalived`. This IP
-  address is the same used for rgw unless an override is passed, and it's
+  cluster requires a `VIP` that will be owned by `keepalived`. This IP is
   used as entry point to reach the `ganesha backends` through an `haproxy`
   instance where proxy-protocol is enabled.

diff --git a/roles/cifmw_cephadm/tasks/check_vip.yml b/roles/cifmw_cephadm/tasks/check_vip.yml
index 0714510e7a..a87648e05a 100644
--- a/roles/cifmw_cephadm/tasks/check_vip.yml
+++ b/roles/cifmw_cephadm/tasks/check_vip.yml
@@ -24,7 +24,7 @@

 - name: Get an IP address from the Storage network
   ansible.builtin.set_fact:
-    cur_ip: "{{ cifmw_cephadm_rgw_network | ansible.utils.next_nth_usable(count) }}"
+    cur_ip: "{{ cifmw_cephadm_vip_network | ansible.utils.next_nth_usable(count) }}"

 - name: Reserve VIP if the address is available
   ansible.builtin.set_fact:
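
A minimal sketch of how a CI job definition could override the new
manila_provider_network_* defaults so the provider network is created on the
NFS network described above (VLAN 24, 172.21.0.0/24). The Zuul job name, the
"nfs" network name, and the allocation-pool bounds are illustrative
assumptions, not values defined by this patch:

- job:
    # Hypothetical job name; any job that runs the Manila hook could do this.
    name: podified-multinode-hci-deployment-nfs-crc
    vars:
      # Override the defaults set in manila_create_default_resources.yml
      # (storage network, VLAN 21, 172.18.0.0/24) with the NFS network.
      manila_provider_network_name: nfs
      manila_provider_network_vlan: 24
      # Allocation pool chosen for illustration within 172.21.0.0/24.
      manila_provider_network_start: 172.21.0.150
      manila_provider_network_end: 172.21.0.200
      manila_provider_network_range: 172.21.0.0/24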