Init: mediaserver

2023-02-08 12:13:28 +01:00
parent 848bc9739c
commit f7c23d4ba9
31914 changed files with 6175775 additions and 0 deletions


@@ -0,0 +1,894 @@
{
"files": [
{
"name": ".",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": ".github",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": ".github/workflows",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": ".github/workflows/main.yml",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "1570daa88a95178fb4c4e731108d3912bfa6b10d4dd3de5c6d5c3d873bdd895c",
"format": 1
},
{
"name": ".gitignore",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "4abf762bdb90c9851c19f580dcda19b234668662b73068a9c0da33c07a2dfe15",
"format": 1
},
{
"name": "LICENSE",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "8b1ba204bb69a0ade2bfcf65ef294a920f6bb361b317dba43c7ef29d96332b9b",
"format": 1
},
{
"name": "changelogs",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": "changelogs/changelog.yaml",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "e90f40339e1d84e6d20c3e224db0a8c5a273605f36b2f9993f6967ed6c1bf769",
"format": 1
},
{
"name": "docs",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": "docs/place_holder",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
"format": 1
},
{
"name": "meta",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": "meta/runtime.yml",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "f5614008729520e63806d0dbb04f177ad75b25ba1aead1bb32585b45b1199d69",
"format": 1
},
{
"name": "meta/execution-environment.yml",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "82854d0088f5a33247495393b516cea47b8c522131c0af4b7be755d75107af3d",
"format": 1
},
{
"name": "playbooks",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": "playbooks/generic_ansible_sample.yaml",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "4d804261c8c28d3a39afe649c255f983cdfd92b0b09eef320e046c10c60c7d84",
"format": 1
},
{
"name": "playbooks/generic_info.yml",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "0e6347a4ddc84cc8daf6d73f6c9a26cd10b1d9a12d6ef971a1e888a448303b27",
"format": 1
},
{
"name": "playbooks/create_GMCV_in_CG.yml",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "69b5d3f520619991ac8e75d5140bd6a5a46720e114e8f7afff2c41d5c90c11be",
"format": 1
},
{
"name": "playbooks/initial_setup_system_complete.yml",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "ea539e79717b3515133f0c41aa8228289cd136edecc0f0fe541702a09d663bee",
"format": 1
},
{
"name": "playbooks/map_volume_to_host.yml",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "3380e97a8e0fdc785e73fd3aec82199db7e388c1e714f0fda36bec2e87ce2811",
"format": 1
},
{
"name": "playbooks/volume_migrate.yml",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "6992e1442f6691b982492f8cc8daca45a6a84973c60667248924aabc9daa9b08",
"format": 1
},
{
"name": "playbooks/volumegrp_create.yml",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "d58341d383558e50798b3c140087079d254e10614e22d4165b600470565c4109",
"format": 1
},
{
"name": "plugins",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": "plugins/README.md",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "c82ee692702ec1dd604cdbc38ff252114e5204e1b0627045a66c9451e7a918ac",
"format": 1
},
{
"name": "plugins/module_utils",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": "plugins/module_utils/ibm_svc_ssh.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "8b0f1101e8c8562a6c68f8488e0710e202414874acbb88e494bb5b9ac2096b5d",
"format": 1
},
{
"name": "plugins/module_utils/ibm_svc_utils.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "9a51fea8298418764ae2bdf793f1b3d64b005a6100ad4c0269dff91b86d7f7a2",
"format": 1
},
{
"name": "plugins/modules",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": "plugins/modules/__init__.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
"format": 1
},
{
"name": "plugins/modules/ibm_svc_auth.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "5b81f6b4db66443e632239ff4af910b95c92e571a40994874ba423113c805a9f",
"format": 1
},
{
"name": "plugins/modules/ibm_svc_complete_initial_setup.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "1905de93c10ca651eedd79c81b442c0bf5fd33e9c484713e2cf6eb33d3d71785",
"format": 1
},
{
"name": "plugins/modules/ibm_svc_manage_callhome.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "2bfa05adc0609f15011fbe1c1b9c8c45852c783ee560a88049b55dd15299a186",
"format": 1
},
{
"name": "plugins/modules/ibm_svc_manage_cv.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "a9eb49551a8d77107fb11a3ea60daed8cb4630cd0fe0dd4f8f9a1a66d03cbd7d",
"format": 1
},
{
"name": "plugins/modules/ibm_svc_manage_flashcopy.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "e716ba71c8b06026f19d5085bf46c1fdff89bda8e33c44048ff86ceee2f624a6",
"format": 1
},
{
"name": "plugins/modules/ibm_svc_manage_ip.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "9a1564d37a960f0864d8fa977d6871aa95096f0c7423d691c8fd3d8b93315fbe",
"format": 1
},
{
"name": "plugins/modules/ibm_svc_manage_mirrored_volume.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "0d3dc16d5803c5227143c0aa96abcf5eb13bcd5309b51e25f312082e4c39dafe",
"format": 1
},
{
"name": "plugins/modules/ibm_svc_manage_replication.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "feebabbc03efd86c82f55de6e1c976083764752cd55a8cc8944c02d2d5e6a7e6",
"format": 1
},
{
"name": "plugins/modules/ibm_svc_manage_replicationgroup.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "264fbe5d6ad76ea098bdd694a291b3e842c3363cf1212030d694b99a54190720",
"format": 1
},
{
"name": "plugins/modules/ibm_svc_manage_user.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "8a748c40e02a044ae36034f6789c7f62d44e8cc298d926e06de7ec5e6b566ab3",
"format": 1
},
{
"name": "plugins/modules/ibm_svc_manage_usergroup.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "795c8b2d00d0306edc5593446201107356a3f69db938cd2862eb409c35c8605c",
"format": 1
},
{
"name": "plugins/modules/ibm_svc_mdisk.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "3c2903182074095bad71c2992576e5227661668c4ed4422be004cfc1e6b29abb",
"format": 1
},
{
"name": "plugins/modules/ibm_svc_start_stop_flashcopy.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "a8542e5ec1020a807e1aecd6ecb44f14e60bf4641dddb5224c025411f7e5f47e",
"format": 1
},
{
"name": "plugins/modules/ibm_svc_start_stop_replication.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "26a75e103b07d72e9b8f6a46cd25e10cee53cf4098f31413062d0da559e12cda",
"format": 1
},
{
"name": "plugins/modules/ibm_svc_vol_map.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "77a6cb11357cf1f244c961094ea9d2245d0a9f145b9dd944ade609f7b6a4832b",
"format": 1
},
{
"name": "plugins/modules/ibm_svctask_command.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "b24e5881e0e417e3723ce4593fb1d8cc60b1be546c12c358f129f742d0418c2d",
"format": 1
},
{
"name": "plugins/modules/ibm_sv_manage_ip_partnership.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "cccfb97b0336b1f7c259032a85d08e65c1b78ac73ae6e2dc4bb8fd6b19c08b1d",
"format": 1
},
{
"name": "plugins/modules/ibm_svc_info.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "4ae8ebebbe8aa12ec2160f023b0d0f3f606a3340225ca3c371d977d33d9ab56c",
"format": 1
},
{
"name": "plugins/modules/ibm_svc_vdisk.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "8f55f1ef8fe1ff8b6b178ddb8cb1688b940bbb7108951f2b313f98e593a9ad59",
"format": 1
},
{
"name": "plugins/modules/ibm_sv_manage_provisioning_policy.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "88b6c6027dc71e59e25c517a95bfcd8dc6514c9857e990f5b697c5a35525193c",
"format": 1
},
{
"name": "plugins/modules/ibm_sv_manage_replication_policy.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "50989a73d4b4d0cc9e3f1777cc3ae09911033f6785babedfaa6439b98151720f",
"format": 1
},
{
"name": "plugins/modules/ibm_sv_manage_snapshot.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "ec1dfb1390100778abf18a0c2488411ab29eee06f24b05b1f76d9fe5cf8a52dd",
"format": 1
},
{
"name": "plugins/modules/ibm_sv_manage_snapshotpolicy.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "e61d04c1ca744f205f97a1d101a8fc13b9cc12f66f4cb2277f53bb77fa9d5505",
"format": 1
},
{
"name": "plugins/modules/ibm_sv_switch_replication_direction.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "3ae68fff59de2452e73cacff827171a1a7550a9c71fc600f751e728df9c5cfbe",
"format": 1
},
{
"name": "plugins/modules/ibm_svc_hostcluster.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "94362873432e23d67da21c47507da5c1470596726776fde415506d8224f48664",
"format": 1
},
{
"name": "plugins/modules/ibm_svc_initial_setup.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "0326cc4bbcf4594c81ef5797878e5e7bcb500ee4084a423a1e23dcdce545fd39",
"format": 1
},
{
"name": "plugins/modules/ibm_svc_manage_consistgrp_flashcopy.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "5ca9a6fdc914d98e8a309848d0bdd5757bdb074be8d8d6b2482fdc36d18cc9b1",
"format": 1
},
{
"name": "plugins/modules/ibm_svc_manage_ownershipgroup.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "83514c27a38c767df8421f2a23274d4eaeeb39d61560255ca7945434bd430cee",
"format": 1
},
{
"name": "plugins/modules/ibm_svc_manage_portset.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "5162ccb6f4059554391b6ed3d115da150d7a045e1426b0770581dc4152baa8f6",
"format": 1
},
{
"name": "plugins/modules/ibm_svc_manage_safeguarded_policy.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "75ce0c3ad4aa96d953dcda3c54e5b8bf11a6128608616b2e8d1529d48136f4ca",
"format": 1
},
{
"name": "plugins/modules/ibm_svc_manage_sra.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "e298e0a4f5e4e8c47113ff8692f4dcdeabe880264c129c3cb07fdc96c7095ac6",
"format": 1
},
{
"name": "plugins/modules/ibm_svc_manage_volumegroup.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "6944f827a63ae67d6c856aadc8348d06b1bbd362e9c1ec7b95a97938eb8456bb",
"format": 1
},
{
"name": "plugins/modules/ibm_svc_mdiskgrp.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "fd3847487d84451a75e95fa410641b617de203aea7aaec5ec8fade9eec8d7fa3",
"format": 1
},
{
"name": "plugins/modules/ibm_sv_manage_ssl_certificate.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "8fcba087976d6c9cc98b12d77c3930d7306fb59b026776b77e8dae93ec3c2f81",
"format": 1
},
{
"name": "plugins/modules/ibm_sv_manage_truststore_for_replication.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "abad6c6da0a1eabac0bc03018681bbf0909cdeff67f93e73d327f07a46c8e6bc",
"format": 1
},
{
"name": "plugins/modules/ibm_svcinfo_command.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "5a85d5e474c57756f167672b77f26a4cde807c91cc7dbd343f0f7286d17fb410",
"format": 1
},
{
"name": "plugins/modules/ibm_sv_manage_awss3_cloudaccount.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "409017070ae73f47f68ed34cc46206e58bd3cdfb92f6c5eebd81c47e773a8348",
"format": 1
},
{
"name": "plugins/modules/ibm_sv_manage_cloud_backups.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "7276c9ad7acfab377588251f726369e021f31f30b62415db643362ae48713b8a",
"format": 1
},
{
"name": "plugins/modules/ibm_sv_restore_cloud_backup.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "5e7a2f56a1df9a3d7c6b677120267f7c4aecb6db73f041e06d2be2025ecd2f5e",
"format": 1
},
{
"name": "plugins/modules/ibm_svc_host.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "6d5d5e34ff6ba866e350609c20a0555127ab045a6f114198469eacd044ab57f7",
"format": 1
},
{
"name": "plugins/modules/ibm_svc_manage_migration.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "98e41799506a60feb3be0c1ff92cfc9a53390d889c406e2dc40a61885587a7f8",
"format": 1
},
{
"name": "plugins/modules/ibm_svc_manage_volume.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "7e08032dbce94d9519a92188bebcd67370ce61e62ff9c2af8f71c971e2531cc2",
"format": 1
},
{
"name": "roles",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": "roles/place_holder",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
"format": 1
},
{
"name": "tests",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": "tests/sanity",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": "tests/sanity/ignore-2.9.txt",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "daff51a57f16f15f70a99b4356bc389ca3543c680dddf58f2721b4f652698ee0",
"format": 1
},
{
"name": "tests/sanity/ignore-2.13.txt",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "daff51a57f16f15f70a99b4356bc389ca3543c680dddf58f2721b4f652698ee0",
"format": 1
},
{
"name": "tests/unit",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": "tests/unit/plugins",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": "tests/unit/plugins/module_utils",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": "tests/unit/plugins/module_utils/test_ibm_svc_ssh.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "b6f8c6893ede922696cb32e7327905799b8ced07620273efa30aa53361fd06bf",
"format": 1
},
{
"name": "tests/unit/plugins/module_utils/test_ibm_svc_utils.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "42d46cd2d1beef8c1399e65887c518f9e4909970b1d4000ce4b494608f0ed606",
"format": 1
},
{
"name": "tests/unit/plugins/modules",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ibm_svc_auth.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "dcbe779c8945b5dd2ae56441c830dffe0d306c644d60955d2d95d88fc89074e2",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ibm_svc_complete_initial_setup.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "187bae51452a2d69c08e229e9fe8d4742b7cdc79fcbedf0c9ded20a0e19849bb",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ibm_svc_hostcluster.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "2b285af061c2e81f3baff642f56566da3b69fb9057f91c739a1c48270a6f9aee",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ibm_svc_info.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "2cf742a3c1c3007de07dc9ecf94c6c72138d88aada8f08c8656dab9569d7456c",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ibm_svc_initial_setup.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "a6d836087b171b39982b4e0abeb28afeadc0c19e0befa119fdd7dcc4ac471490",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ibm_svc_manage_callhome.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "2c863e5cf376bb135a1c3b066e7972f81915f505651a8a6aaddd239fd10d62c1",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ibm_svc_manage_consistgrp_flashcopy.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "d69eab89afd086c37d637bed17d9cf181e8d8085510d0415497a1c7d4d45819e",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ibm_svc_manage_cv.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "1dbb60f628fffdb19cedd915fc19041f58ae4d56ce236c45e0de9ae09c8da511",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ibm_svc_manage_flashcopy.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "cab4c0cb49a09fd9b06c3e38e54ad6caa32efe1461c571ace2756281e59edee4",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ibm_svc_manage_ip.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "a3f7171af40820bf4b6f8aed69d3e09dd8ebac39e242bd5eaaac91489da51c91",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ibm_svc_manage_mirrored_volume.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "8937fb4013f7ecbf6b3773b2e852fd87d3b4d68cc20c5753f0e37e936a4e5c0b",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ibm_svc_manage_ownershipgroup.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "1616f8f6f0bcb35513e177f00c155554b30909f78464453370b215e5245b535c",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ibm_svc_manage_portset.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "05ea78b694fcb7b3284cfcb46cc1f062a9df55342771292403ebb2eba28d46a3",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ibm_svc_manage_replication.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "b0343cf2f92f4cca23471c46372bccac602db38eb41bec56890e0ccea4e940b7",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ibm_svc_manage_replicationgroup.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "972bbd8d1cfda500a1651d831a35d16d6fd6e8caddf4d80480371aa17528bf6b",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ibm_svc_manage_safeguarded_policy.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "6c84c0dc60dab3e2aed8c51e9140c4f45e5844608b06c563d9c93bbe3d3f0384",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ibm_svc_manage_sra.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "78774c98fbcf2a543fb6d49b82dea61c6d4a8105d6d2a8bc949f6980840e9037",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ibm_svc_manage_user.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "bc0a63fa0a7498b8320927737988824a187052032dbba16eedba25092c9eb6f5",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ibm_svc_manage_usergroup.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "afff9022506bf5c5a0f38b3a45cae4e326635e960808294532d4276b45e5042e",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ibm_svc_mdisk.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "51717d02bef44db970d6073ee870592a0282569893351d6946162e3f4e866a4e",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ibm_svc_start_stop_flashcopy.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "7c1d7d6b567ea9a7b59755464df59e951a2173b200cbd91a313986093780c2f5",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ibm_svc_start_stop_replication.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "5e76618cdc63825c97bd3d3ff0b7631ff7516c93540094aa3137007a5a5462ec",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ibm_svc_vdisk.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "69ca79500c2039bf8ff858aef6376d726183cd619fa304e90cf600b5c2b70255",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ibm_svc_vol_map.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "c27d92040a95ed23d9e5d860b880f7bcf343da767bf2932aafe83f33fb5b3287",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ibm_svcinfo_command.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "0f7d9be0c049635ad29ea2f7676dab82069fc8747dab75488f005887029d6634",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ibm_svctask_command.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "1f48e7a9815bb15cdfe937f946041055356318feff170014b3ad2f763484078f",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ibm_sv_manage_ip_partnership.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "8bc51c38ab2c6a049d00112c45de31e3c1a19d02b85f0d32a7c578d94a158499",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ibm_sv_manage_snapshot_policy.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "047cb1813da22efb4724c565ec07224a9e7c7768743ab324409e74004f1dd83f",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ibm_sv_manage_provisioning_policy.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "384f512fac49f366678509d111913aa98a7c16079e59ace7edd0f4b5cfcfd1ce",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ibm_sv_manage_replication_policy.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "02c91a06f4490861a9e58425802ab9a5c9c04a9f752541daf4ec47e8a47027ce",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ibm_sv_manage_snapshot.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "cb78fa2f32ca6802c7ecbd246d0b1ac681c28e7a88f76435d2e78fe3a36dd107",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ibm_sv_manage_truststore_for_replication.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "e2957f3fbaaf1049361f4a2027d565adf7e9a15f73d1af139a81aed30a67a4fb",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ibm_sv_switch_replication_direction.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "cc1dcca78d7884cd2dee677782b62023668d12124bad9fda1d7a5f1f93a0aa15",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ibm_svc_manage_volumegroup.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "530795c8b22273d3d95bf67b671182eb86b34a1fc2639b93261ba1ca30c7597c",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ibm_svc_mdiskgrp.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "bf8003aaad6a7887a1223d259c6748e3345d59808da5c9ab5e3b3a7eea43eea8",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ibm_sv_manage_ssl_certificate.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "176158c315c90c4eac0dca056a4058b0bb6b4142d43602503ec213d35244099b",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ibm_sv_manage_awss3_cloudaccount.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "fa0fbd1abb1da6a478d009c0281ec19bd91da0b8739a17890f7dfe10a84cb54e",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ibm_sv_manage_cloud_backups.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "48156ff487870ac46f4cb403957537ab300bafa121f5e61766075a649b2a2519",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ibm_sv_restore_cloud_backup.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "8360e33c724235c589c057ec45b8b415a2e8762f32638c3bc4ec40f2c243d093",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ibm_svc_host.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "511784ec4de142ad89f0ca1409b931f070c462666912676cdd201280722e4e80",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ibm_svc_manage_migration.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "6f89529af5da44531b020271e058521d7252287ee713f76252ba03f98e415d34",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ibm_svc_manage_volume.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "3a90c76f723c298d79cfa1c8b3623557febf672e30c70055c0a49dcc3ffd684f",
"format": 1
},
{
"name": "requirements.txt",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "ebc6fd889d9c6a169c830bd34317f9d55659570a1d074ac58f1a7dab07cfa1cc",
"format": 1
},
{
"name": "requirements.yml",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "134abe94ea511975eb2cc46a0150599afc81fb12e0a09ddbcf97d2ec39cb7357",
"format": 1
},
{
"name": "README.md",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "25270222ee5796bedce6f95ba1774bad1dfd41a22dd119bb5a49454f3dd63814",
"format": 1
}
],
"format": 1
}


@@ -0,0 +1,674 @@
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU General Public License is a free, copyleft license for
software and other kinds of works.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.
Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.
For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.
Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.
Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Use with the GNU Affero General Public License.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:
<program> Copyright (C) <year> <name of author>
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<https://www.gnu.org/licenses/>.
The GNU General Public License does not permit incorporating your program
into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
<https://www.gnu.org/licenses/why-not-lgpl.html>.

View File

@@ -0,0 +1,34 @@
{
"collection_info": {
"namespace": "ibm",
"name": "spectrum_virtualize",
"version": "1.11.0",
"authors": [
"Shilpi Jain <shilpi.jain1@ibm.com>",
"Sanjaikumaar M <sanjaikumaar.m@ibm.com>",
"Rohit Kumar <rohit.kumar6@ibm.com>"
],
"readme": "README.md",
"tags": [
"storage"
],
"description": "Ansible Collections for IBM Spectrum Virtualize",
"license": [
"GPL-3.0-or-later"
],
"license_file": null,
"dependencies": {},
"repository": "https://github.com/ansible-collections/ibm.spectrum_virtualize",
"documentation": "https://github.com/ansible-collections/ibm.spectrum_virtualize",
"homepage": null,
"issues": "https://github.com/ansible-collections/ibm.spectrum_virtualize/issues"
},
"file_manifest_file": {
"name": "FILES.json",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "2ffc9a2b4d72ca38b5f0da4d15a86a9c5253e392d75f41cfd4dc41040719f0ad",
"format": 1
},
"format": 1
}

View File

@@ -0,0 +1,150 @@
# Ansible Collection - ibm.spectrum_virtualize
[![Code of conduct](https://img.shields.io/badge/code%20of%20conduct-Ansible-silver.svg)](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html)
This collection provides a series of Ansible modules and plugins for interacting with the IBM Spectrum Virtualize family storage products. These products include the IBM SAN Volume Controller, IBM FlashSystem family members built with IBM Spectrum Virtualize (FlashSystem 5xxx, 7xxx, 9xxx), IBM Storwize family, and IBM Spectrum Virtualize for Public Cloud. For more information regarding these products, see [IBM Documentation](https://www.ibm.com/docs/).
## Requirements
- Ansible version 2.9 or higher
## Installation
To install the IBM Spectrum Virtualize collection hosted in Galaxy:
```bash
ansible-galaxy collection install ibm.spectrum_virtualize
```
To upgrade to the latest version of the IBM Spectrum Virtualize collection:
```bash
ansible-galaxy collection install ibm.spectrum_virtualize --force
```
## Usage
### Playbooks
To use a module from the IBM Spectrum Virtualize collection, please reference the full namespace, collection name, and module name that you want to use:
```yaml
---
- name: Using the IBM Spectrum Virtualize collection
hosts: localhost
tasks:
- name: Gather info from storage
ibm.spectrum_virtualize.ibm_svc_info:
clustername: x.x.x.x
domain:
username: username
password: password
log_path: /tmp/playbook.debug
gather_subset: all
```
Alternatively, you can add the full namespace and collection name to the `collections` element:
```yaml
---
- name: Using the IBM Spectrum Virtualize collection
collections:
- ibm.spectrum_virtualize
gather_facts: no
connection: local
hosts: localhost
tasks:
- name: Gather info from storage
ibm_svc_info:
clustername: x.x.x.x
domain:
username: username
password: password
log_path: /tmp/playbook.debug
gather_subset: all
```
## Supported Resources
### Modules
- ibm_svc_auth - Generates an authentication token for a user on Spectrum Virtualize storage systems
- ibm_svc_complete_initial_setup - Completes the initial setup configuration for LMC systems
- ibm_svc_host - Manages hosts on Spectrum Virtualize storage systems
- ibm_svc_hostcluster - Manages host cluster on Spectrum Virtualize storage systems
- ibm_svc_info - Collects information on Spectrum Virtualize storage systems
- ibm_svc_initial_setup - Manages initial setup configuration on Spectrum Virtualize storage systems
- ibm_svc_manage_callhome - Manages configuration of Call Home feature on Spectrum Virtualize storage systems
- ibm_svc_manage_consistgrp_flashcopy - Manages FlashCopy consistency groups on Spectrum Virtualize storage systems
- ibm_svc_manage_cv - Manages the change volume in remote copy replication on Spectrum Virtualize storage systems
- ibm_svc_manage_flashcopy - Manages FlashCopy mappings on Spectrum Virtualize storage systems
- ibm_svc_manage_ip - Manages IP provisioning on Spectrum Virtualize storage systems
- ibm_svc_manage_migration - Manages volume migration between clusters on Spectrum Virtualize storage systems
- ibm_svc_manage_mirrored_volume - Manages mirrored volumes on Spectrum Virtualize storage systems
- ibm_svc_manage_ownershipgroup - Manages ownership groups on Spectrum Virtualize storage systems
- ibm_svc_manage_portset - Manages IP portset on Spectrum Virtualize storage systems
- ibm_svc_manage_replication - Manages remote copy replication on Spectrum Virtualize storage systems
- ibm_svc_manage_replicationgroup - Manages remote copy consistency groups on Spectrum Virtualize storage systems
- ibm_svc_manage_safeguarded_policy - Manages safeguarded policy configuration on Spectrum Virtualize storage systems
- ibm_svc_manage_sra - Manages the remote support assistance configuration on Spectrum Virtualize storage systems
- ibm_svc_manage_user - Manages user on Spectrum Virtualize storage systems
- ibm_svc_manage_usergroup - Manages user groups on Spectrum Virtualize storage systems
- ibm_svc_manage_volume - Manages standard volumes on Spectrum Virtualize storage systems
- ibm_svc_manage_volumegroup - Manages volume groups on Spectrum Virtualize storage systems
- ibm_svc_mdisk - Manages MDisks for Spectrum Virtualize storage systems
- ibm_svc_mdiskgrp - Manages pools for Spectrum Virtualize storage systems
- ibm_svc_start_stop_flashcopy - Starts or stops FlashCopy mapping and consistency groups on Spectrum Virtualize storage systems
- ibm_svc_start_stop_replication - Starts or stops remote-copy independent relationships or consistency groups on Spectrum Virtualize storage systems
- ibm_svc_vol_map - Manages volume mapping for Spectrum Virtualize storage systems
- ibm_svcinfo_command - Runs svcinfo CLI command on Spectrum Virtualize storage systems over SSH session
- ibm_svctask_command - Runs svctask CLI command(s) on Spectrum Virtualize storage systems over SSH session
- ibm_sv_manage_awss3_cloudaccount - Manages Amazon S3 cloud account configuration on Spectrum Virtualize storage systems
- ibm_sv_manage_cloud_backups - Manages cloud backups on Spectrum Virtualize storage systems
- ibm_sv_manage_ip_partnership - Manages IP partnership configuration on Spectrum Virtualize storage systems
- ibm_sv_manage_provisioning_policy - Manages provisioning policy configuration on Spectrum Virtualize storage systems
- ibm_sv_manage_replication_policy - Manages policy-based replication configuration on Spectrum Virtualize storage systems
- ibm_sv_manage_snapshot - Manages snapshots (mutually consistent images of a volume) on Spectrum Virtualize storage systems
- ibm_sv_manage_snapshotpolicy - Manages snapshot policy configuration on Spectrum Virtualize storage systems
- ibm_sv_manage_ssl_certificate - Exports an existing system certificate on to Spectrum Virtualize storage systems
- ibm_sv_manage_truststore_for_replication - Manages certificate trust stores for replication on Spectrum Virtualize family storage systems
- ibm_sv_restore_cloud_backup - Restores cloud backups on Spectrum Virtualize storage systems
- ibm_sv_switch_replication_direction - Switches the replication direction on Spectrum Virtualize storage systems
### Other Feature Information
- SV Ansible Collection v1.8.0 provides the new 'ibm_svc_complete_initial_setup' module to complete the automation of Day 0 configuration on Licensed Machine Code (LMC) systems; a minimal usage sketch follows this list.
For non-LMC systems, logging in to the user interface is required to complete the automation of Day 0 configuration.
- SV Ansible Collection v1.7.0 provided `Setup and Configuration Automation` through different modules. This feature helps users automate Day 0 configuration.
This feature includes three modules:
- ibm_svc_initial_setup
- ibm_svc_manage_callhome
- ibm_svc_manage_sra
- By proceeding and using these modules, the user acknowledges that [IBM Privacy Statement](https://www.ibm.com/privacy) has been read and understood.
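A minimal, illustrative sketch of the LMC Day 0 completion step referenced above, using placeholder connection values (the playbooks shipped with this collection contain a complete end-to-end example):
```yaml
---
- name: Day 0 completion sketch (placeholder values)
  hosts: localhost
  connection: local
  gather_facts: no
  collections:
    - ibm.spectrum_virtualize
  tasks:
    - name: Complete the initial setup on an LMC system
      ibm_svc_complete_initial_setup:
        clustername: x.x.x.x
        username: username
        password: password
```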
### Prerequisite
- Paramiko must be installed to use the ibm_svctask_command and ibm_svcinfo_command modules.
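If Paramiko is not already available on the Ansible controller, it can typically be installed from PyPI, for example:
```bash
pip install paramiko
```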
## Limitation
The modules in the IBM Spectrum Virtualize Ansible collection leverage REST APIs to connect to the IBM Spectrum Virtualize storage system. This has the following limitations:
1. Using the REST APIs to list more than 2000 objects may create a loss of service from the API side, as it automatically restarts due to memory constraints.
2. It is not possible to access REST APIs using an IPv6 address on a cluster.
3. The Ansible collection can run on all IBM Spectrum Virtualize storage system versions above 8.1.3, except versions 8.3.1.3 and 8.3.1.4.
4. At the time of release of the SV Ansible v1.8.0 collection, no module is available for non-LMC systems to automate license agreement acceptance, including the EULA.
Users will be presented with a GUI setup wizard upon user-interface login, whether or not the Ansible modules have been used for initial configuration.
## Releasing, Versioning, and Deprecation
1. IBM Spectrum Virtualize Ansible Collection releases follow a quarterly release cycle.
2. IBM Spectrum Virtualize Ansible Collection releases follow [semantic versioning](https://semver.org/).
3. IBM Spectrum Virtualize Ansible modules deprecation cycle is aligned with [Ansible](https://docs.ansible.com/ansible/latest/dev_guide/module_lifecycle.html).
## Contributing
Currently, we are not accepting community contributions.
However, you may periodically review this content to learn when and how contributions can be made in the future.
IBM Spectrum Virtualize Ansible Collection maintainers can follow the [Maintainer guidelines](https://docs.ansible.com/ansible/devel/community/maintainers.html).
## License
GNU General Public License v3.0

View File

@@ -0,0 +1,122 @@
ancestor: null
releases:
1.0.2:
release_date: '2020-04-14'
changes:
release_summary: The `IBM Spectrum Virtualize Ansible Modules <https://github.com/ansible-collections/ibm.spectrum_virtualize>`_
are available on Ansible Galaxy as a collection.
1.6.0:
release_date: '2021-09-29'
changes:
bugfixes:
- Fixed bug `#43 <https://github.com/ansible-collections/ibm.spectrum_virtualize/issues/43>`_
deprecated_features:
- ibm_svc_vdisk - A new module ibm_svc_manage_volume has been introduced to manage standard volumes.
minor_changes:
- ibm_svc_hostcluster - Added support for 'chhostcluster' host command
release_summary: Introduced three new modules. Added support for volume groups and volume migration, along with bug fixes.
modules:
- description: Manages volume groups on Spectrum Virtualize system
name: ibm_svc_manage_volumegroup
namespace: ''
- description: Manages standard volumes on Spectrum Virtualize system using 'mkvolume'
name: ibm_svc_manage_volume
namespace: ''
- description: Manages volume migration between Spectrum Virtualize storage systems
name: ibm_svc_manage_migration
namespace: ''
1.7.0:
release_date: '2021-12-24'
changes:
release_summary: Added new modules for managing users, user groups and ownership groups. Added modules for configuring
Call Home, remote support assistance and initial setup configurations.
modules:
- description: Manages users on Spectrum Virtualize system
name: ibm_svc_manage_user
namespace: ''
- description: Manages user groups on Spectrum Virtualize system
name: ibm_svc_manage_usergroup
namespace: ''
- description: Manages ownership groups on Spectrum Virtualize storage systems
name: ibm_svc_manage_ownershipgroup
namespace: ''
- description: Manages Call Home configuration on Spectrum Virtualize storage systems
name: ibm_svc_manage_callhome
namespace: ''
- description: Manages remote support assistance on Spectrum Virtualize storage systems
name: ibm_svc_manage_sra
namespace: ''
- description: Allows users to perform initial setup configuration on Spectrum Virtualize storage systems
name: ibm_svc_initial_setup
namespace: ''
1.8.0:
release_date: '2022-03-31'
changes:
release_summary: Added new modules for managing portsets, IP configuration and safeguarded policies.
Also added support to complete initial setup configurations for LMC systems.
modules:
- description: Manages IP portset on Spectrum Virtualize system
name: ibm_svc_manage_portset
namespace: ''
- description: Manages IP provisioning on Spectrum Virtualize system
name: ibm_svc_manage_ip
namespace: ''
- description: Manages safeguarded policy configuration on Spectrum Virtualize storage systems
name: ibm_svc_manage_safeguarded_policy
namespace: ''
- description: Completes initial setup configuration for Licensed Machine Code (LMC) systems
name: ibm_svc_complete_initial_setup
namespace: ''
1.9.0:
release_date: '2022-06-30'
changes:
release_summary: Added new modules for managing IP partnership, snapshots, and snapshot policies.
Also added support to rename an existing host and existing volume.
modules:
- description: Manages IP configuration on Spectrum Virtualize systems
name: ibm_sv_manage_ip_partnership
namespace: ''
- description: Manages snapshots (mutually consistent images of a volume) on Spectrum Virtualize systems
name: ibm_sv_manage_snapshot
namespace: ''
- description: Manages snapshot policy configuration on Spectrum Virtualize storage systems
name: ibm_sv_manage_snapshotpolicy
namespace: ''
1.10.0:
release_date: '2022-09-30'
changes:
release_summary: Added new modules for managing provisioning policy and policy-based replication.
Also added support to use an existing SSL certificate to create an mTLS partnership between partner systems.
modules:
- description: Manages provisioning policy on Spectrum Virtualize systems
name: ibm_sv_manage_provisioning_policy
namespace: ''
- description: Manages policy-based replication on Spectrum Virtualize systems
name: ibm_sv_manage_replication_policy
namespace: ''
- description: Allows user to switch replication direction in case of DR on Spectrum Virtualize storage systems
name: ibm_sv_switch_replication_direction
namespace: ''
- description: Allows user to export an existing system certificate on Spectrum Virtualize storage systems
name: ibm_sv_manage_ssl_certificate
namespace: ''
- description: Manages the certificates trust store on Spectrum Virtualize storage systems
name: ibm_sv_manage_truststore_for_replication
namespace: ''
1.11.0:
release_date: '2022-12-16'
changes:
release_summary: Added new modules for configuring Transparent Cloud Tiering (TCT) through Ansible.
minor_changes:
- ibm_svc_host - Added support for modification of iSCSI host.
- ibm_svc_manage_migration - Added support for volume migration across pools.
modules:
- description: Manages AWS cloud account configuration on Spectrum Virtualize systems
name: ibm_sv_manage_awss3_cloudaccount
namespace: ''
- description: Manages cloud backup on Spectrum Virtualize systems
name: ibm_sv_manage_cloud_backups
namespace: ''
- description: Allows user to restore an existing cloud backup on Spectrum Virtualize systems
name: ibm_sv_restore_cloud_backup
namespace: ''

View File

@@ -0,0 +1,12 @@
---
version: 1
build_arg_defaults:
ANSIBLE_GALAXY_CLI_COLLECTION_OPTS: "-v"
# ansible_config: '/etc/ansible/ansible.cfg'
dependencies:
galaxy: requirements.yml
python: requirements.txt
# system: bindep.txt
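# Illustration only (the image tag below is a hypothetical example): with the referenced
# requirements.yml and requirements.txt present alongside this file, an execution
# environment image could typically be built with ansible-builder, e.g.:
#   ansible-builder build -f execution-environment.yml -t spectrum-virtualize-ee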

View File

@@ -0,0 +1,8 @@
---
requires_ansible: '>=2.9.0'
plugin_routing:
modules:
ibm_svc_vdisk:
deprecation:
removal_version: 2.0.0
warning_text: Use ibm_svc_manage_volume instead.
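# Illustration (placeholder values): a task that still uses the deprecated ibm_svc_vdisk
# module keeps working until removal in 2.0.0 (with a deprecation warning); the replacement
# module accepts a comparable set of options, with the pool passed as 'pool' instead of
# 'mdiskgrp', roughly as follows:
#   - name: Create a volume with the replacement module
#     ibm_svc_manage_volume:
#       clustername: x.x.x.x
#       username: username
#       password: password
#       name: vol0
#       pool: pool0
#       size: 1
#       unit: gb
#       state: present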

View File

@@ -0,0 +1,119 @@
---
- name: Using IBM Spectrum Virtualize collection to create rc consistency group
hosts: localhost
collections:
- ibm.spectrum_virtualize
gather_facts: no
vars:
- auxcluster: x.x.x.x
- ausername: ausername
- apassword: apassword
- clustername: clustername
- username: username
- password: password
- cgname: Group_cg11
- remotecluster: Cluster_x.x.x.x
- masterpool: site1pool1
- mastervol: master
- relname: scopy5
- auxvol: auxvol
# The following placeholders are referenced by later tasks in this playbook:
- auxpool: site2pool1
- mastervolcv: mastervol_cv
- auxvolcv: auxvol_cv
connection: local
tasks:
- name: Fetch authorization token for aux
register: auth
ibm_svc_auth:
clustername: "{{auxcluster}}"
username: "{{ausername}}"
password: "{{apassword}}"
- name: create target volume
ibm_svc_vdisk:
clustername: "{{ auxcluster }}"
token: "{{auth.token}}"
mdiskgrp: "{{auxpool}}"
name: "{{auxvol}}"
size: 10
unit: "gb"
state: present
- name: Fetch authorization token for master
register: results
ibm_svc_auth:
clustername: "{{clustername}}"
username: "{{username}}"
password: "{{password}}"
- name: create remote copy cg
ibm_svc_manage_replicationgroup:
name: "{{cgname}}"
clustername: "{{clustername}}"
token: "{{results.token}}"
state: present
remotecluster: "{{remotecluster}}"
- name: Create source volume
ibm_svc_vdisk:
clustername: "{{ clustername }}"
token: "{{results.token}}"
mdiskgrp: "{{masterpool}}"
name: "{{mastervol}}"
size: 1
unit: "gb"
state: present
- name: create MM remote copy
ibm_svc_manage_replication:
name: "{{relname}}"
clustername: "{{ clustername }}"
token: "{{results.token}}"
state: present
remotecluster: "{{remotecluster}}"
master: "{{mastervol}}"
aux: "{{auxvol}}"
copytype: metro
sync: true
consistgrp: "{{cgname}}"
- name: remove the remote copy from CG
ibm_svc_manage_replication:
name: "{{relname}}"
clustername: "{{ clustername }}"
token: "{{results.token}}"
state: present
remotecluster: "{{remotecluster}}"
master: "{{mastervol}}"
aux: "{{auxvol}}"
copytype: metro
noconsistgrp: true
- name: Convert MM to GM
ibm_svc_manage_replication:
name: "{{relname}}"
clustername: "{{ clustername }}"
token: "{{results.token}}"
state: present
remotecluster: "{{remotecluster}}"
master: "{{mastervol}}"
aux: "{{auxvol}}"
copytype: global
- name: Convert GM to GMCV
ibm_svc_manage_replication:
name: "{{relname}}"
clustername: "{{clustername}}"
token: "{{results.token}}"
state: present
remotecluster: "{{remotecluster}}"
master: "{{mastervol}}"
aux: "{{auxvol}}"
copytype: GMCV
consistgrp: "{{cgname}}"
- name: Create/attach master change volume
ibm_svc_manage_cv:
clustername: "{{ clustername }}"
token: "{{results.token}}"
state: present
rname: "{{relname}}"
cvname: "{{ mastervolcv }}"
basevolume: "{{ mastervol }}"
- name: Create/attach aux change volume
ibm_svc_manage_cv:
clustername: "{{ auxcluster }}"
token: "{{auth.token}}"
state: present
rname: "{{relname}}"
cvname: "{{ auxvolcv }}"
basevolume: "{{ auxvol }}"
ismaster: false

View File

@@ -0,0 +1,34 @@
---
- name: Using the IBM Spectrum Virtualize collection
collections:
- ibm.spectrum_virtualize
gather_facts: no
connection: local
hosts: localhost
vars:
- clustername: x.x.x.x
- username: username
- password: password
- volname: vol0
- pool: pool0
- easy_tier: "off"
- size: 1
- unit: gb
tasks:
- name: Send CLI command over ssh connection
ibm_svctask_command:
command: [
"svctask mkvdisk -name {{ volname }} -mdiskgrp '{{ pool }}' -easytier '{{ easy_tier }}' -size {{ size }} -unit {{ unit }}",
"svctask rmvdisk {{ volname }}"
]
clustername: "{{ clustername }}"
username: "{{ username }}"
password: "{{ password }}"
log_path: /tmp/playbook.debug
- name: Send CLI command over ssh connection
ibm_svcinfo_command:
command: "svcinfo lsvdisk"
clustername: "{{ clustername }}"
username: "{{ username }}"
password: "{{ password }}"
log_path: /tmp/playbook.debug

View File

@@ -0,0 +1,24 @@
---
- name: Using the IBM Spectrum Virtualize collection
collections:
- ibm.spectrum_virtualize
gather_facts: no
connection: local
hosts: localhost
vars:
- user: username
- clustername: x.x.x.x
- username: username
- password: password
tasks:
- name: Run CLI commands
register: results
ibm_svcinfo_command:
command: "svcinfo lssystem"
clustername: "{{ clustername }}"
username: "{{ username }}"
password: "{{ password }}"
log_path: /tmp/test.debug
- name: show time zone in lssystem
set_fact:
time_zone: "{{ (results['stdout'] | from_json).time_zone }}"

View File

@@ -0,0 +1,74 @@
- name: Using Spectrum Virtualize collection to automate initial setup configuration
hosts: localhost
collections:
- ibm.spectrum_virtualize
gather_facts: no
vars:
- clustername: clustername
- username: username
- password: password
- address: address
- city: city
- company_name: company_name
- contact_email: contact_email
- contact_name: contact_name
- country: country
- location: location
- primary_phonenumber: primary_phonenumber
- postal_code: postal_code
- province: province
- server_ip: x.x.x.x
- server_port: xxxx
- system_name: system_name
connection: local
tasks:
- name: Get auth token
register: results
ibm_svc_auth:
clustername: "{{clustername}}"
username: "{{username}}"
password: "{{password}}"
- name: 1. Initial setup config
ibm_svc_initial_setup:
clustername: "{{clustername}}"
token: "{{results.token}}"
system_name: "{{ system_name }}"
dnsname:
- dnsserver01
dnsip:
- 'x.x.x.x'
- name: 2. Configure callhome with "email"
ibm_svc_manage_callhome:
clustername: "{{clustername}}"
token: "{{results.token}}"
state: "enabled"
callhome_type: "email"
address: "{{ address}}"
city: "{{ city }}"
company_name: "{{ company_name }}"
contact_email: "{{ contact_email }}"
contact_name: "{{ contact_name }}"
country: "{{ country }}"
location: "{{ location }}"
phonenumber_primary: "{{ primary_phonenumber }}"
postalcode: "{{ postal_code }}"
province: "{{ province }}"
serverIP: "{{ server_ip }}"
serverPort: "{{ server_port }}"
inventory: "on"
invemailinterval: 1
enhancedcallhome: "on"
censorcallhome: "on"
- name: 3. Configure SRA
ibm_svc_manage_sra:
clustername: "{{clustername}}"
token: "{{results.token}}"
state: enabled
name: SRA
sra_ip: y.y.y.y
sra_port: 22
support: remote
- name: 4. Complete initial setup
ibm_svc_complete_initial_setup:
clustername: "{{clustername}}"
username: "{{username}}"
password: "{{password}}"

View File

@@ -0,0 +1,48 @@
---
- name: Testing the IBM Spectrum Virtualize collection ibm_svc_vol_map
hosts: localhost
vars:
- clustername: clustername
- username: username
- password: password
- domain: domain
- test_vdisk: vdisk_name
- pool: pool
- test_host: host_name
- fcwwpn1: fcwwpn
collections:
- ibm.spectrum_virtualize
gather_facts: no
connection: local
tasks:
- name: Create vdisk
ibm_svc_vdisk:
clustername: "{{clustername}}"
domain: "{{domain}}"
username: "{{username}}"
password: "{{password}}"
name: "{{test_vdisk}}"
state: present
mdiskgrp: "{{pool}}"
easytier: 'off'
size: "1024"
unit: "mb"
- name: Creating Host
ibm_svc_host:
clustername: "{{clustername}}"
domain: "{{domain}}"
username: "{{username}}"
password: "{{password}}"
name: "{{test_host}}"
state: present
fcwwpn: "{{ fcwwpn1 }}"
- name: map Host to Vdisk
ibm_svc_vol_map:
clustername: "{{clustername}}"
domain: "{{domain}}"
username: "{{username}}"
password: "{{password}}"
volname: "{{test_vdisk}}"
host: "{{test_host}}"
state: present
scsi: 0

View File

@@ -0,0 +1,79 @@
- name: Using Spectrum Virtualize collection to initiate migration
hosts: localhost
vars:
- auxcluster: x.x.x.x
- auxusername: auxusername
- auxpassword: auxpassword
- clustername: clustername
- username: username
- password: password
- cgname: Group_cg11
- remote_cluster: Cluster_x.x.x.x
- masterpool: site1pool1
- mastervol: master
- relname: scopy5
- auxvol: auxvol
- fcwwpn: fcwwpn
- size: 1
- unit: gb
- remote_pool: remote_pool
collections:
- ibm.spectrum_virtualize
gather_facts: no
connection: local
tasks:
- name: Fetch authorization token for aux
register: auth
ibm_svc_auth:
clustername: "{{ auxcluster }}"
username: "{{auxusername}}"
password: "{{auxpassword}}"
- name: Fetch authorization token for master
register: results
ibm_svc_auth:
clustername: "{{clustername}}"
username: "{{username}}"
password: "{{password}}"
- name: "create host"
ibm_svc_host:
clustername: "{{clustername}}"
username: "{{username}}"
password: "{{password}}"
name: host_xyz
state: present
fcwwpn: "{{fcwwpn}}"
protocol: scsi
- name: "Create source volume source_vol_1 "
ibm_svc_manage_volume:
clustername: "{{ clustername }}"
token: "{{results.token}}"
pool: "{{masterpool}}"
name: "source_vol_1"
size: "{{size}}"
unit: "{{ unit }}"
state: present
- name: Map Source volume to a host
ibm_svc_vol_map:
clustername: "{{clustername}}"
token: "{{results.token}}"
volname: "source_vol_1"
host: "host_xyz"
state: present
- name: Initiate a volume migration with replicate_hosts set to true when hosts that exist on the source system do not exist on the target system
ibm_svc_manage_migration:
source_volume: "source_vol_1"
target_volume: "target_vol_1"
clustername: "{{ clustername }}"
remote_cluster: "{{ remote_cluster }}"
token: "{{ results.token }}"
state: initiate
replicate_hosts: true
remote_token: "{{ auth.token }}"
relationship_name: "mmapping_1"
remote_pool: "{{ remote_pool}}"
- name: Switch replication direction of a migration relationship when all hosts are mapped
ibm_svc_manage_migration:
relationship_name: "mmapping_1"
clustername: "{{ clustername}}"
token: "{{ results.token }}"
state: switch

View File

@@ -0,0 +1,29 @@
- name: Using Spectrum Virtualize collection to create a volume group
hosts: localhost
vars:
- clustername: clustername
- username: username
- password: password
- domain: domain
collections:
- ibm.spectrum_virtualize
gather_facts: no
connection: local
tasks:
- name: Create a new volume group
ibm_svc_manage_volumegroup:
clustername: "{{ clustername }}"
domain: "{{ domain }}"
username: "{{ username }}"
password: "{{ password }}"
name: vg1
state: present
- name: Create volumegroup with existing snapshotpolicy
ibm_svc_manage_volumegroup:
clustername: "{{ clustername }}"
domain: "{{ domain }}"
username: "{{ username }}"
password: "{{ password }}"
name: vg2
state: present
snapshotpolicy: snapshotpolicy2

View File

@@ -0,0 +1,31 @@
# Collections Plugins Directory
This directory can be used to ship various plugins inside an Ansible collection. Each plugin is placed in a folder
named after its plugin type. The directory can also include the `module_utils` and `modules` directories, which
contain module utilities and modules respectively.
Here is an example directory of the majority of plugins currently supported by Ansible:
```
└── plugins
├── action
├── become
├── cache
├── callback
├── cliconf
├── connection
├── filter
├── httpapi
├── inventory
├── lookup
├── module_utils
├── modules
├── netconf
├── shell
├── strategy
├── terminal
├── test
└── vars
```
A full list of plugin types can be found at [Working With Plugins](https://docs.ansible.com/ansible/2.9/plugins/plugins.html).

View File

@@ -0,0 +1,100 @@
# Copyright (C) 2020 IBM CORPORATION
# Author(s): Shilpi Jain <shilpi.jain1@ibm.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
""" Support class for IBM SVC generic ansible module """
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible.module_utils.compat.paramiko import paramiko
from ansible_collections.ibm.spectrum_virtualize.plugins.module_utils.ibm_svc_utils import get_logger
class IBMSVCssh(object):
""" Communicate with SVC through SSH
The module uses paramiko to connect to SVC
"""
def __init__(self, module, clustername, username, password,
look_for_keys, key_filename, log_path):
""" Initialize module with what we need for initial connection
:param clustername: name of the SVC cluster
:type clustername: string
:param username: SVC username
:type username: string
:param password: Password for user
:type password: string
:param look_for_keys: whether to look for keys or not
:type look_for_keys: boolean
:param key_filename: SSH client private key file
:type key_filename: string
:param log_path: log file
:type log_path: string
"""
self.module = module
self.clustername = clustername
self.username = username
self.password = password
self.look_for_keys = look_for_keys
self.key_filename = key_filename
self.is_client_connected = False
# logging setup
log = get_logger(self.__class__.__name__, log_path)
self.log = log.info
self.client_type = 'paramiko'
if paramiko is None:
self.module.fail_json(msg='paramiko is not installed')
self.client = paramiko.SSHClient()
# connect through SSH
self.is_client_connected = self._svc_connect()
if not self.is_client_connected:
self.module.fail_json(msg='Failed to connect')
def _svc_connect(self):
"""
Initialize an SSH connection with the properties
that were set up in the constructor.
:return: True or False
"""
self.client.load_system_host_keys()
self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
self.client.connect(
hostname=self.clustername,
username=self.username,
password=self.password,
look_for_keys=self.look_for_keys,
key_filename=self.key_filename)
return True
except paramiko.BadHostKeyException as e:
self.log("BadHostKeyException %s", e)
except paramiko.AuthenticationException as e:
self.log("AuthenticationException %s", e)
except paramiko.SSHException as e:
self.log("SSHException %s", e)
except Exception as e:
self.log("SSH connection failed %s", e)
return False
def is_connected(self):
return self.is_client_connected
def _svc_disconnect(self):
"""
Disconnect from the SSH server.
"""
try:
self.client.close()
self.is_client_connected = False
self.log("SSH disconnected")
return True
except Exception as e:
self.log("SSH Disconnection failed %s", e)
return False

View File

@@ -0,0 +1,330 @@
# Copyright (C) 2020 IBM CORPORATION
# Author(s): Peng Wang <wangpww@cn.ibm.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
""" Support class for IBM SVC ansible modules """
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import json
import logging
from ansible.module_utils.urls import open_url
from ansible.module_utils.six.moves.urllib.parse import quote
from ansible.module_utils.six.moves.urllib.error import HTTPError
def svc_argument_spec():
"""
Returns argument_spec of options common to ibm_svc_*-modules
:returns: argument_spec
:rtype: dict
"""
return dict(
clustername=dict(type='str', required=True),
domain=dict(type='str', default=None),
validate_certs=dict(type='bool', default=False),
username=dict(type='str'),
password=dict(type='str', no_log=True),
log_path=dict(type='str'),
token=dict(type='str', no_log=True)
)
def svc_ssh_argument_spec():
"""
Returns argument_spec of options common to ibm_svcinfo_command
and ibm_svctask_command modules
:returns: argument_spec
:rtype: dict
"""
return dict(
clustername=dict(type='str', required=True),
username=dict(type='str', required=True),
password=dict(type='str', required=True, no_log=True),
log_path=dict(type='str')
)
def strtobool(val):
'''
Converts a string representation to boolean.
This is a built-in function that was available in Python up to version 3.9 under distutils.util,
but it has been deprecated in 3.10 and may not be available in future Python releases,
so the source code is added here.
'''
if val in {'y', 'yes', 't', 'true', 'on', '1'}:
return 1
elif val in {'n', 'no', 'f', 'false', 'off', '0'}:
return 0
else:
raise ValueError("invalid truth value %r" % (val,))
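# For example (per the implementation above): strtobool('yes') returns 1,
# strtobool('off') returns 0, and an unrecognised value such as 'maybe' raises ValueError.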
def get_logger(module_name, log_file_name, log_level=logging.INFO):
FORMAT = '%(asctime)s.%(msecs)03d %(levelname)5s %(thread)d %(filename)s:%(funcName)s():%(lineno)s %(message)s'
DATEFORMAT = '%Y-%m-%dT%H:%M:%S'
log_path = 'IBMSV_ansible_collections.log'
if log_file_name:
log_path = log_file_name
logging.basicConfig(filename=log_path, format=FORMAT, datefmt=DATEFORMAT)
log = logging.getLogger(module_name)
log.setLevel(log_level)
return log
class IBMSVCRestApi(object):
""" Communicate with SVC through RestApi
SVC commands usually have the format
$ command -opt1 value1 -opt2 value2 arg1 arg2 arg3
to use the RestApi we transform this into
https://host:7443/rest/command/arg1/arg2/arg3
data={'opt1':'value1', 'opt2':'value2'}
"""
def __init__(self, module, clustername, domain, username, password,
validate_certs, log_path, token):
""" Initialize module with what we need for initial connection
:param clustername: name of the SVC cluster
:type clustername: string
:param domain: domain name to make a fully qualified host name
:type domain: string
:param username: SVC username
:type username: string
:param password: Password for user
:type password: string
:param validate_certs: whether or not the connection is insecure
:type validate_certs: bool
"""
self.module = module
self.clustername = clustername
self.domain = domain
self.username = username
self.password = password
self.validate_certs = validate_certs
self.token = token
# logging setup
log = get_logger(self.__class__.__name__, log_path)
self.log = log.info
# Make sure we can connect through the RestApi
if self.token is None:
if not self.username or not self.password:
self.module.fail_json(msg="You must pass in either pre-acquired token"
" or username/password to generate new token")
self.token = self._svc_authorize()
else:
self.log("Token already passed: %s", self.token)
if not self.token:
self.module.exit_json(msg='Failed to obtain access token', unreachable=True)
@property
def port(self):
return getattr(self, '_port', None) or '7443'
@property
def protocol(self):
return getattr(self, '_protocol', None) or 'https'
@property
def resturl(self):
if self.domain:
hostname = '%s.%s' % (self.clustername, self.domain)
else:
hostname = self.clustername
return (getattr(self, '_resturl', None)
or "{protocol}://{host}:{port}/rest".format(
protocol=self.protocol, host=hostname, port=self.port))
@property
def token(self):
return getattr(self, '_token', None) or None
@token.setter
def token(self, value):
return setattr(self, '_token', value)
def _svc_rest(self, method, headers, cmd, cmdopts, cmdargs, timeout=10):
""" Run SVC command with token info added into header
:param method: http method, POST or GET
:type method: string
:param headers: http headers
:type headers: dict
:param cmd: svc command to run
:type cmd: string
:param cmdopts: svc command options, name parameter and value
:type cmdopts: dict
:param cmdargs: svc command arguments, non-named parameters
:type cmdargs: list
:param timeout: open_url argument to set timeout for http gateway
:type timeout: int
:return: dict of command results
:rtype: dict
"""
# Catch any output or errors and pass back to the caller to deal with.
r = {
'url': None,
'code': None,
'err': None,
'out': None,
'data': None
}
postfix = cmd
if cmdargs:
postfix = '/'.join([postfix] + [quote(str(a)) for a in cmdargs])
url = '/'.join([self.resturl] + [postfix])
r['url'] = url # Pass back in result for error handling
self.log("_svc_rest: url=%s", url)
payload = cmdopts if cmdopts else None
data = self.module.jsonify(payload).encode('utf8')
r['data'] = cmdopts # Original payload data has nicer formatting
self.log("_svc_rest: payload=%s", payload)
try:
o = open_url(url, method=method, headers=headers, timeout=timeout,
validate_certs=self.validate_certs, data=bytes(data))
except HTTPError as e:
self.log('_svc_rest: httperror %s', str(e))
r['code'] = e.getcode()
r['out'] = e.read()
r['err'] = "HTTPError %s", str(e)
return r
except Exception as e:
self.log('_svc_rest: exception : %s', str(e))
r['err'] = "Exception %s", str(e)
return r
try:
j = json.load(o)
except ValueError as e:
self.log("_svc_rest: value error pass: %s", str(e))
# pass, will mean both data and error are None.
return r
r['out'] = j
return r
def _svc_authorize(self):
""" Obtain a token if we are authoized to connect
:return: None or token string
"""
headers = {
'Content-Type': 'application/json',
'X-Auth-Username': self.username,
'X-Auth-Password': self.password
}
rest = self._svc_rest(method='POST', headers=headers, cmd='auth',
cmdopts=None, cmdargs=None)
if rest['err']:
return None
out = rest['out']
if out:
if 'token' in out:
return out['token']
return None
def _svc_token_wrap(self, cmd, cmdopts, cmdargs, timeout=10):
""" Run SVC command with token info added into header
:param cmd: svc command to run
:type cmd: string
:param cmdopts: svc command options, name parameter and value
:type cmdopts: dict
:param cmdargs: svc command arguments, non-named parameters
:type cmdargs: list
:param timeout: open_url argument to set timeout for http gateway
:type timeout: int
:returns: command results
"""
if self.token is None:
self.module.fail_json(msg="No authorize token")
# Abort
headers = {
'Content-Type': 'application/json',
'X-Auth-Token': self.token
}
return self._svc_rest(method='POST', headers=headers, cmd=cmd,
cmdopts=cmdopts, cmdargs=cmdargs, timeout=timeout)
def svc_run_command(self, cmd, cmdopts, cmdargs, timeout=10):
""" Generic execute a SVC command
:param cmd: svc command to run
:type cmd: string
:param cmdopts: svc command options, name parameter and value
:type cmdopts: dict
:param cmdargs: svc command arguments, non-named parameters
:type cmdargs: list
:param timeout: open_url argument to set timeout for http gateway
:type timeout: int
:returns: command output
"""
rest = self._svc_token_wrap(cmd, cmdopts, cmdargs, timeout)
self.log("svc_run_command rest=%s", rest)
if rest['err']:
msg = rest
self.module.fail_json(msg=msg)
# Aborts
# Might be None
return rest['out']
def svc_obj_info(self, cmd, cmdopts, cmdargs, timeout=10):
""" Obtain information about an SVC object through the ls command
:param cmd: svc command to run
:type cmd: string
:param cmdopts: svc command options, name parameter and value
:type cmdopts: dict
:param cmdargs: svc command arguments, non-named parameters
:type cmdargs: list
:param timeout: open_url argument to set timeout for http gateway
:type timeout: int
:returns: command output
:rtype: dict
"""
rest = self._svc_token_wrap(cmd, cmdopts, cmdargs, timeout)
self.log("svc_obj_info rest=%s", rest)
if rest['code']:
if rest['code'] == 500:
# Object did not exist, which is quite valid.
return None
# Fail for anything else
if rest['err']:
self.module.fail_json(msg=rest)
# Aborts
# Might be None
return rest['out']
def get_auth_token(self):
""" Obtain information about an SVC object through the ls command
:returns: authentication token
"""
# Make sure we can connect through the RestApi
self.token = self._svc_authorize()
self.log("_connect by using token")
if not self.token:
self.module.exit_json(msg='Failed to obtain access token', unreachable=True)
return self.token

View File

@@ -0,0 +1,496 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2022 IBM CORPORATION
# Author(s): Sanjaikumaar M <sanjaikumaar.m@ibm.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: ibm_sv_manage_awss3_cloudaccount
short_description: This module configures and manages Amazon Simple Storage Service (Amazon S3) cloud account on IBM Spectrum Virtualize family storage systems
version_added: '1.11.0'
description:
- Ansible interface to manage mkcloudaccountawss3, chcloudaccountawss3, and rmcloudaccount commands.
options:
clustername:
description:
- The hostname or management IP of the Spectrum Virtualize storage system.
required: true
type: str
domain:
description:
- Domain for the Spectrum Virtualize storage system.
- Valid when hostname is used for the parameter I(clustername).
type: str
username:
description:
- REST API username for the Spectrum Virtualize storage system.
- The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
type: str
password:
description:
- REST API password for the Spectrum Virtualize storage system.
- The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
type: str
token:
description:
- The authentication token to verify a user on the Spectrum Virtualize storage system.
- To generate a token, use the M(ibm.spectrum_virtualize.ibm_svc_auth) module.
type: str
log_path:
description:
- Path of debug log file.
type: str
state:
description:
- Creates, updates (C(present)), or deletes (C(absent)) an Amazon S3 account.
choices: [ present, absent ]
required: true
type: str
name:
description:
- Specifies the name of an Amazon S3 account.
type: str
required: true
old_name:
description:
- Specifies the old name of an Amazon S3 account.
- Valid when I(state=present), to rename the existing Amazon S3 account.
type: str
bucketprefix:
description:
- Specifies the prefix for the bucket object.
- Applies, when I(state=present), to create an Amazon S3 account.
type: str
accesskeyid:
description:
- Specifies the public part of the Amazon S3 access key credential
of the AWS user that the system uses to access the cloud storage.
type: str
secretaccesskey:
description:
- Specifies the secret access key of an Amazon S3 cloud account.
type: str
upbandwidthmbits:
description:
- Specifies the upload bandwidth limit in megabits per second (Mbps).
- The value must be a number 1-10240.
type: str
downbandwidthmbits:
description:
- Specifies the download bandwidth limit in megabits per second (Mbps).
- The value must be a number 1-10240.
type: str
region:
description:
- Specifies the AWS region to use to access the cloud account and store data.
type: str
encrypt:
description:
- Specifies whether to encrypt the data in the cloud account.
- By default, encryption is enabled if encryption is enabled on
the cluster unless I(encrypt=no) is specified.
- Valid when I(state=present) to create an Amazon S3 account.
type: str
choices: [ 'yes', 'no' ]
ignorefailures:
description:
- Specifies that the access key be changed whether or not the new access key works.
- Valid when I(state=present) to update an existing Amazon S3 account.
- Parameter is allowed only when I(accesskeyid) and I(secretaccesskey) are entered.
type: bool
mode:
description:
- Specifies the new or modified cloud account mode.
- Valid when I(state=present) to update an existing Amazon S3 account.
type: str
choices: [ import, normal ]
importsystem:
description:
- Specifies that the system's data be imported.
- Valid when I(state=present) to update an existing Amazon S3 account.
type: str
refresh:
description:
- Specifies a refresh of the system import candidates.
- If the account is in import mode, this parameter specifies a refresh of the data available for import.
type: bool
resetusagehistory:
description:
- Resets the usage history (to 0).
- Storage consumption that reflects the space that is consumed on the cloud account is cumulative,
which means that it remains in the current day row (the 0th row).
- Valid when I(state=present) to update an existing Amazon S3 account.
type: bool
validate_certs:
description:
- Validates certification.
default: false
type: bool
author:
- Sanjaikumaar M (@sanjaikumaar)
notes:
- This module supports C(check_mode).
'''
EXAMPLES = '''
- name: Configure Amazon S3 account
ibm.spectrum_virtualize.ibm_sv_manage_awss3_cloudaccount:
clustername: "{{cluster}}"
username: "{{username}}"
password: "{{password}}"
name: awss3
bucketprefix: "{{bucketprefix}}"
accesskeyid: "{{accesskeyid}}"
secretaccesskey: "{{secretaccesskey}}"
state: present
- name: Update Amazon S3 account configuration
ibm.spectrum_virtualize.ibm_sv_manage_awss3_cloudaccount:
clustername: "{{cluster}}"
username: "{{username}}"
password: "{{password}}"
name: awss3
upbandwidthmbits: "{{upbandwidthmbits}}"
downbandwidthmbits: "{{downbandwidthmbits}}"
state: present
- name: Update Amazon S3 account mode to import
ibm.spectrum_virtualize.ibm_sv_manage_awss3_cloudaccount:
clustername: "{{cluster}}"
username: "{{username}}"
password: "{{password}}"
name: awss3
mode: import
importsystem: 123456789
state: present
- name: Delete Amazon S3 account configuration
ibm.spectrum_virtualize.ibm_sv_manage_awss3_cloudaccount:
clustername: "{{cluster}}"
username: "{{username}}"
password: "{{password}}"
name: awss3
state: absent
'''
RETURN = '''#'''
from traceback import format_exc
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.ibm.spectrum_virtualize.plugins.module_utils.ibm_svc_utils import (
IBMSVCRestApi, svc_argument_spec,
get_logger, strtobool
)
from ansible.module_utils._text import to_native
class IBMSVAWSS3:
def __init__(self):
argument_spec = svc_argument_spec()
argument_spec.update(
dict(
state=dict(
type='str',
choices=['present', 'absent'],
required=True
),
name=dict(
type='str',
required=True
),
old_name=dict(
type='str'
),
bucketprefix=dict(
type='str',
),
accesskeyid=dict(
type='str',
no_log=False
),
secretaccesskey=dict(
type='str',
no_log=True
),
upbandwidthmbits=dict(
type='str'
),
downbandwidthmbits=dict(
type='str'
),
region=dict(
type='str'
),
encrypt=dict(
type='str',
choices=['yes', 'no']
),
ignorefailures=dict(
type='bool'
),
mode=dict(
type='str',
choices=['import', 'normal']
),
importsystem=dict(
type='str'
),
refresh=dict(
type='bool'
),
resetusagehistory=dict(
type='bool'
),
)
)
self.module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
# Required parameters
self.state = self.module.params.get('state')
self.name = self.module.params.get('name')
self.old_name = self.module.params.get('old_name', '')
self.bucketprefix = self.module.params.get('bucketprefix', '')
self.accesskeyid = self.module.params.get('accesskeyid', '')
self.secretaccesskey = self.module.params.get('secretaccesskey')
self.upbandwidthmbits = self.module.params.get('upbandwidthmbits', '')
self.downbandwidthmbits = self.module.params.get('downbandwidthmbits', '')
self.region = self.module.params.get('region', '')
self.encrypt = self.module.params.get('encrypt')
# ignorefailures will be allowed only when access and secretkey are entered
self.ignorefailures = self.module.params.get('ignorefailures')
self.mode = self.module.params.get('mode')
self.importsystem = self.module.params.get('importsystem')
self.refresh = self.module.params.get('refresh')
self.resetusagehistory = self.module.params.get('resetusagehistory')
self.basic_checks()
# logging setup
self.log_path = self.module.params['log_path']
log = get_logger(self.__class__.__name__, self.log_path)
self.log = log.info
# Dynamic variables
self.changed = False
self.msg = ''
self.aws_data = {}
self.restapi = IBMSVCRestApi(
module=self.module,
clustername=self.module.params['clustername'],
domain=self.module.params['domain'],
username=self.module.params['username'],
password=self.module.params['password'],
validate_certs=self.module.params['validate_certs'],
log_path=self.log_path,
token=self.module.params['token']
)
def basic_checks(self):
if not self.name:
self.module.fail_json(msg='Missing mandatory parameter: name')
if self.state == 'present':
if self.accesskeyid:
if not self.secretaccesskey:
self.module.fail_json(msg='Parameters required together: accesskeyid, secretaccesskey')
elif self.state == 'absent':
invalids = ('bucketprefix', 'accesskeyid', 'secretaccesskey', 'upbandwidthmbits',
'downbandwidthmbits', 'region', 'encrypt', 'ignorefailures', 'mode', 'importsystem',
'refresh', 'resetusagehistory', 'old_name')
invalid_exists = ', '.join((var for var in invalids if getattr(self, var) not in {'', None}))
if invalid_exists:
self.module.fail_json(
msg='state=absent but following parameters have been passed: {0}'.format(invalid_exists)
)
def create_validation(self):
if self.old_name:
self.rename_validation({})
required = ('bucketprefix', 'accesskeyid', 'secretaccesskey')
required_not_exists = ', '.join((var for var in required if not getattr(self, var)))
if required_not_exists:
self.module.fail_json(msg='Missing mandatory parameter: {0}'.format(required_not_exists))
invalids = ('ignorefailures', 'mode', 'importsystem',
'refresh', 'resetusagehistory')
invalid_exists = ', '.join((var for var in invalids if getattr(self, var) not in {'', None}))
if invalid_exists:
self.module.fail_json(
msg='Following parameters not supported during creation: {0}'.format(invalid_exists)
)
def rename_validation(self, updates):
if self.old_name and self.name:
if self.name == self.old_name:
self.module.fail_json(msg='New name and old name should be different.')
new = self.is_aws_account_exists()
existing = self.is_aws_account_exists(name=self.old_name)
if existing:
if new:
self.module.fail_json(
msg='Cloud account ({0}) already exists for the given new name.'.format(self.name)
)
else:
updates['name'] = self.name
else:
if not new:
self.module.fail_json(
msg='Cloud account ({0}) does not exist for the given old name.'.format(self.old_name)
)
else:
self.module.exit_json(
msg='Cloud account ({0}) already renamed. No modifications done.'.format(self.name)
)
def is_aws_account_exists(self, name=None):
result = {}
cmd = 'lscloudaccount'
name = name if name else self.name
data = self.restapi.svc_obj_info(cmd=cmd, cmdopts=None, cmdargs=[name])
if isinstance(data, list):
for d in data:
result.update(d)
else:
result = data
self.aws_data = result
return result
def aws_account_probe(self):
updates = {}
if self.encrypt and self.encrypt != self.aws_data.get('encrypt', ''):
self.module.fail_json(msg='Parameter not supported for update operation: encrypt')
if self.bucketprefix and self.bucketprefix != self.aws_data.get('awss3_bucket_prefix', ''):
self.module.fail_json(msg='Parameter not supported for update operation: bucketprefix')
if self.region and self.region != self.aws_data.get('awss3_region', ''):
self.module.fail_json(msg='Parameter not supported for update operation: region')
self.rename_validation(updates)
params = [
('upbandwidthmbits', self.aws_data.get('up_bandwidth_mbits')),
('downbandwidthmbits', self.aws_data.get('down_bandwidth_mbits')),
('mode', self.aws_data.get('mode')),
('importsystem', self.aws_data.get('import_system_name')),
]
for k, v in params:
if getattr(self, k) and getattr(self, k) != v:
updates[k] = getattr(self, k)
if self.accesskeyid and self.aws_data.get('awss3_access_key_id') != self.accesskeyid:
updates['accesskeyid'] = self.accesskeyid
updates['secretaccesskey'] = self.secretaccesskey
# ignorefailures can be provided only when accesskeyid and secretaccesskey are given
if self.ignorefailures:
updates['ignorefailures'] = self.ignorefailures
if self.refresh and self.aws_data.get('refreshing') == 'no':
updates['refresh'] = self.refresh
# Can't validate the below parameters.
if self.resetusagehistory:
updates['resetusagehistory'] = self.resetusagehistory
return updates
def create_aws_account(self):
self.create_validation()
if self.module.check_mode:
self.changed = True
return
cmd = 'mkcloudaccountawss3'
cmdopts = {
'name': self.name,
'bucketprefix': self.bucketprefix,
'accesskeyid': self.accesskeyid,
'secretaccesskey': self.secretaccesskey
}
params = {'upbandwidthmbits', 'downbandwidthmbits', 'region', 'encrypt'}
cmdopts.update(
dict((key, getattr(self, key)) for key in params if getattr(self, key))
)
self.restapi.svc_run_command(cmd, cmdopts, cmdargs=None, timeout=20)
self.log('Cloud account (%s) created', self.name)
self.changed = True
def update_aws_account(self, updates):
if self.module.check_mode:
self.changed = True
return
name = self.old_name if self.old_name else self.name
self.restapi.svc_run_command('chcloudaccountawss3', updates, cmdargs=[name], timeout=20)
self.changed = True
def delete_aws_account(self):
if self.module.check_mode:
self.changed = True
return
self.restapi.svc_run_command('rmcloudaccount', cmdopts=None, cmdargs=[self.name], timeout=20)
self.changed = True
def apply(self):
if self.is_aws_account_exists(name=self.old_name):
if self.state == 'present':
modifications = self.aws_account_probe()
if modifications:
self.update_aws_account(modifications)
self.msg = 'AWS account ({0}) updated'.format(self.name)
else:
self.msg = 'AWS account ({0}) already exists. No modifications done.'.format(self.name)
else:
self.delete_aws_account()
self.msg = 'AWS account ({0}) deleted.'.format(self.name)
else:
if self.state == 'absent':
self.msg = 'AWS account ({0}) does not exist'.format(self.name)
else:
self.create_aws_account()
self.msg = 'AWS account ({0}) created.'.format(self.name)
if self.module.check_mode:
self.msg = 'skipping changes due to check mode.'
self.module.exit_json(
changed=self.changed,
msg=self.msg
)
def main():
v = IBMSVAWSS3()
try:
v.apply()
except Exception as e:
v.log('Exception in apply(): \n%s', format_exc())
v.module.fail_json(msg='Module failed. Error [%s].' % to_native(e))
if __name__ == '__main__':
main()

View File

@@ -0,0 +1,391 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2022 IBM CORPORATION
# Author(s): Sanjaikumaar M <sanjaikumaar.m@ibm.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: ibm_sv_manage_cloud_backups
short_description: This module configures and manages cloud backups on IBM Spectrum Virtualize family storage systems
version_added: '1.11.0'
description:
- Ansible interface to manage backupvolume, backupvolumegroup, and rmvolumebackupgeneration commands.
options:
clustername:
description:
- The hostname or management IP of the Spectrum Virtualize storage system.
required: true
type: str
domain:
description:
- Domain for the Spectrum Virtualize storage system.
- Valid when hostname is used for the parameter I(clustername).
type: str
username:
description:
- REST API username for the Spectrum Virtualize storage system.
- The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
type: str
password:
description:
- REST API password for the Spectrum Virtualize storage system.
- The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
type: str
token:
description:
- The authentication token to verify a user on the Spectrum Virtualize storage system.
- To generate a token, use the M(ibm.spectrum_virtualize.ibm_svc_auth) module.
type: str
log_path:
description:
- Path of debug log file.
type: str
state:
description:
- Creates (C(present)) or deletes (C(absent)) a cloud backup.
choices: [ present, absent ]
required: true
type: str
volume_name:
description:
- Specifies the volume name for the volume being backed up.
- The parameters I(volume_name) and I(volumegroup_name) are mutually exclusive.
type: str
volumegroup_name:
description:
- Specifies the volumegroup name for the volume to back up.
- The parameters I(volume_name) and I(volumegroup_name) are mutually exclusive.
- Applies when I(state=present) to create cloud backups of all the volume group members.
- Cloud backup must be enabled on all the volume group members to execute this.
type: str
full:
description:
- Specifies that the snapshot generation for the volume should be a full snapshot.
- Applies when I(state=present).
type: bool
volume_UID:
description:
- Specifies the volume UID to delete a cloud backup of the volume.
- The value for a volume UID must be a value in the range 0-32.
- The parameters I(volume_UID) and I(volume_name) are mutually exclusive.
- Applies when I(state=absent) to delete cloud backups.
type: str
generation:
description:
- Specifies the snapshot generation ID that needs to be deleted for the volume.
- If the specified generation is for a snapshot operation that is in progress,
that snapshot operation is canceled.
- Applies when I(state=absent) to delete a generation of a volume backup.
- The parameters I(all) and I(generation) are mutually exclusive.
- Either I(generation) or I(all) is required to delete cloud backup.
type: int
all:
description:
- Specifies to delete all cloud backup generations.
- Applies when I(state=absent) to delete a backup.
- The parameters I(all) and I(generation) are mutually exclusive.
- Either I(generation) or I(all) is required to delete cloud backup.
type: bool
validate_certs:
description:
- Validates certification.
default: false
type: bool
author:
- Sanjaikumaar M (@sanjaikumaar)
notes:
- This module supports C(check_mode).
'''
EXAMPLES = '''
- name: Create cloud backup of volume
ibm.spectrum_virtualize.ibm_sv_manage_cloud_backups:
clustername: "{{cluster}}"
username: "{{username}}"
password: "{{password}}"
volume_name: vol1
full: true
state: present
- name: Create cloud backup of volumegroup
ibm.spectrum_virtualize.ibm_sv_manage_cloud_backups:
clustername: "{{cluster}}"
username: "{{username}}"
password: "{{password}}"
volumegroup_name: VG1
full: true
state: present
- name: Delete cloud backup
ibm.spectrum_virtualize.ibm_sv_manage_cloud_backups:
clustername: "{{cluster}}"
username: "{{username}}"
password: "{{password}}"
volume_UID: 6005076400B70038E00000000000001C
all: true
state: absent
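# The task below is an added illustrative sketch, not part of the original
# examples: it deletes a single backup generation by volume name using the
# documented generation parameter; vol1 and the generation ID are placeholders.
- name: Delete a specific cloud backup generation
  ibm.spectrum_virtualize.ibm_sv_manage_cloud_backups:
    clustername: "{{cluster}}"
    username: "{{username}}"
    password: "{{password}}"
    volume_name: vol1
    generation: 1
    state: absent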
'''
RETURN = '''#'''
from traceback import format_exc
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.ibm.spectrum_virtualize.plugins.module_utils.ibm_svc_utils import (
IBMSVCRestApi, svc_argument_spec,
get_logger, strtobool
)
from ansible.module_utils._text import to_native
class IBMSVCloudBackup:
def __init__(self):
argument_spec = svc_argument_spec()
argument_spec.update(
dict(
state=dict(
type='str',
choices=['present', 'absent'],
required=True
),
volume_name=dict(
type='str'
),
volumegroup_name=dict(
type='str'
),
generation=dict(
type='int',
),
volume_UID=dict(
type='str',
),
full=dict(
type='bool',
),
all=dict(
type='bool'
)
)
)
self.module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
# Required parameters
self.state = self.module.params['state']
# Optional parameters
self.volume_name = self.module.params.get('volume_name')
self.volumegroup_name = self.module.params.get('volumegroup_name')
self.full = self.module.params.get('full')
# Parameters for deletion
self.volume_UID = self.module.params.get('volume_UID', '')
self.generation = self.module.params.get('generation', '')
self.all = self.module.params.get('all')
self.basic_checks()
# logging setup
self.log_path = self.module.params['log_path']
log = get_logger(self.__class__.__name__, self.log_path)
self.log = log.info
# Dynamic variables
self.changed = False
self.msg = ''
self.restapi = IBMSVCRestApi(
module=self.module,
clustername=self.module.params['clustername'],
domain=self.module.params['domain'],
username=self.module.params['username'],
password=self.module.params['password'],
validate_certs=self.module.params['validate_certs'],
log_path=self.log_path,
token=self.module.params['token']
)
def basic_checks(self):
if self.state == 'present':
if self.volume_UID:
self.module.fail_json(msg='Parameter not supported during creation: volume_UID')
if self.volume_name and self.volumegroup_name:
self.module.fail_json(msg='Mutually exclusive parameters: volume_name, volumegroup_name')
if not self.volumegroup_name and not self.volume_name:
self.module.fail_json(
msg='One of these parameters is required to create a backup: volume_name, volumegroup_name')
invalids = ('generation', 'all')
invalid_exists = ', '.join((var for var in invalids if getattr(self, var) not in {'', None}))
if invalid_exists:
self.module.fail_json(
msg='Following parameters not supported during creation: {0}'.format(invalid_exists)
)
else:
if self.volume_name and self.volume_UID:
self.module.fail_json(msg='Mutually exclusive parameters: volume_name, volume_UID')
if not self.volume_name and not self.volume_UID:
self.module.fail_json(msg='One of these parameters is required to delete a backup: volume_name, volume_UID')
if self.generation and self.all:
self.module.fail_json(msg='Mutually exclusive parameters: generation, all')
if self.generation in {'', None} and self.all in {'', None}:
self.module.fail_json(msg='One of the following parameters is required: generation, all')
if self.volumegroup_name:
self.module.fail_json(msg='Parameter not supported during deletion: volumegroup_name')
if self.full not in {'', None}:
self.module.fail_json(msg='Parameter not supported during deletion: full')
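# Added note: check_source() verifies the backup source or existing backups.
# For state=present it queries lsvolumegroup/lsvdisk and returns a truthy
# value when the source volume or volumegroup is NOT found; for state=absent
# it returns the backup generations reported by lsvolumebackupgeneration for
# the given volume name or UID (an empty result when no backup exists).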
def check_source(self):
result = {}
if self.volumegroup_name:
cmd = 'lsvolumegroup'
cmdargs = [self.volumegroup_name]
cmdopts = None
elif self.volume_name and self.state == 'present':
cmd = 'lsvdisk'
cmdargs = [self.volume_name]
cmdopts = None
else:
cmd = 'lsvolumebackupgeneration'
cmdargs = None
cmdopts = {}
if self.volume_UID:
self.var = self.volume_UID
cmdopts['uid'] = self.volume_UID
else:
self.var = self.volume_name
cmdopts['volume'] = self.volume_name
data = self.restapi.svc_obj_info(cmd=cmd, cmdopts=cmdopts, cmdargs=cmdargs)
if isinstance(data, list):
for d in data:
result.update(d)
else:
result = data
if self.state == 'present':
return not result
else:
return result
def create_cloud_backup(self):
if self.module.check_mode:
self.changed = True
return
cmdopts = {}
if self.volume_name:
cmd = 'backupvolume'
cmdargs = [self.volume_name]
self.msg = 'Cloud backup ({0}) created'.format(self.volume_name)
else:
cmd = 'backupvolumegroup'
cmdargs = [self.volumegroup_name]
self.msg = 'Cloud backup ({0}) created'.format(self.volumegroup_name)
if self.full:
cmdopts['full'] = True
response = self.restapi._svc_token_wrap(cmd, cmdopts, cmdargs=cmdargs)
self.log("create_cloud_backup response=%s", response)
self.changed = True
if response['out']:
if b'CMMVC9083E' in response['out']:
self.msg = 'CMMVC9083E: Volume is not ready to perform any operation right now.'
self.changed = False
elif b'CMMVC8753E' in response['out']:
self.msg = 'Backup already in progress.'
self.changed = False
else:
self.msg = response
self.module.fail_json(msg=self.msg)
self.log(self.msg)
def delete_cloud_backup(self):
if self.module.check_mode:
self.changed = True
return
cmd = 'rmvolumebackupgeneration'
cmdopts = {}
if self.volume_name:
cmdopts['volume'] = self.volume_name
var = self.volume_name
self.msg = 'Cloud backup ({0}) deleted'.format(self.volume_name)
else:
cmdopts['uid'] = self.volume_UID
var = self.volume_UID
self.msg = 'Cloud backup ({0}) deleted'.format(self.volume_UID)
if self.generation:
cmdopts['generation'] = self.generation
if self.all not in {'', None}:
cmdopts['all'] = self.all
response = self.restapi._svc_token_wrap(cmd, cmdopts=cmdopts, cmdargs=None)
self.log('response=%s', response)
self.changed = True
if response['out']:
if b'CMMVC9104E' in response['out']:
self.changed = False
self.msg = 'CMMVC9104E: Volume ({0}) is not ready to perform any operation right now.'.format(var)
elif b'CMMVC9090E' in response['out']:
self.changed = False
self.msg = 'Cloud backup generation already deleted.'
else:
self.module.fail_json(msg=response)
self.log(self.msg)
def apply(self):
if self.check_source():
if self.state == 'present':
self.module.fail_json(msg='Volume or volumegroup does not exist.')
else:
self.delete_cloud_backup()
else:
if self.state == 'absent':
self.msg = 'Backup ({0}) does not exist for the given name/UID.'.format(self.var)
self.log(self.msg)
else:
self.create_cloud_backup()
if self.module.check_mode:
self.msg = 'skipping changes due to check mode.'
self.log(self.msg)
self.module.exit_json(
changed=self.changed,
msg=self.msg
)
def main():
v = IBMSVCloudBackup()
try:
v.apply()
except Exception as e:
v.log('Exception in apply(): \n%s', format_exc())
v.module.fail_json(msg='Module failed. Error [%s].' % to_native(e))
if __name__ == '__main__':
main()

View File

@@ -0,0 +1,637 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2022 IBM CORPORATION
# Author(s): Sreshtant Bohidar <sreshtant.bohidar@ibm.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: ibm_sv_manage_ip_partnership
short_description: This module manages IP partnerships on IBM Spectrum Virtualize family storage systems
description:
- Ansible interface to manage 'mkippartnership', 'rmpartnership', and 'chpartnership' commands
on local and remote systems.
version_added: "1.9.0"
options:
state:
description:
- Creates or updates (C(present)) or removes (C(absent)) an IP partnership.
choices: [ 'present', 'absent' ]
required: true
type: str
clustername:
description:
- The hostname or management IP of the Spectrum Virtualize storage system.
type: str
required: true
remote_clustername:
description:
- The hostname or management IP of the remote Spectrum Virtualize storage system.
type: str
required: true
domain:
description:
- Domain for the Spectrum Virtualize storage system.
- Valid when hostname is used for the parameter I(clustername).
type: str
remote_domain:
description:
- Domain for the remote Spectrum Virtualize storage system.
- Valid when hostname is used for the parameter I(remote_clustername).
type: str
username:
description:
- REST API username for the Spectrum Virtualize storage system.
- The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
type: str
remote_username:
description:
- REST API username for the remote Spectrum Virtualize storage system.
- The parameters I(remote_username) and I(remote_password) are required if not using I(remote_token) to authenticate a user.
type: str
password:
description:
- REST API password for the Spectrum Virtualize storage system.
- The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
type: str
remote_password:
description:
- REST API password for the remote Spectrum Virtualize storage system.
- The parameters I(remote_username) and I(remote_password) are required if not using I(remote_token) to authenticate a user.
type: str
token:
description:
- The authentication token to verify a user on the Spectrum Virtualize storage system.
- To generate a token, use the M(ibm.spectrum_virtualize.ibm_svc_auth) module.
type: str
remote_token:
description:
- The authentication token to verify a user on the remote Spectrum Virtualize storage system.
- To generate a token, use the M(ibm.spectrum_virtualize.ibm_svc_auth) module.
type: str
remote_clusterip:
description:
- Specifies the partner system IP address, either IPv4 or IPv6.
- Required when I(state=present), to create an IP partnership.
type: str
remote_cluster_id:
description:
- Specifies the partnership ID of the partner system.
- Required when I(state=present), to modify an existing IP partnership.
- Required when I(state=absent), to remove an existing IP partnership.
type: str
type:
description:
- Specifies the Internet Protocol (IP) address format for the partnership.
- Valid when I(state=present).
choices: [ 'ipv4', 'ipv6' ]
type: str
compressed:
description:
- Specifies whether compression is enabled for this partnership.
- Valid when I(state=present).
choices: [ 'yes', 'no' ]
type: str
linkbandwidthmbits:
description:
- Specifies the aggregate bandwidth of the RC link between two clustered systems (systems)
in megabits per second (Mbps). This is a numeric value from 1 through 100000.
- Valid when I(state=present).
type: int
backgroundcopyrate:
description:
- Specifies the maximum percentage of aggregate link bandwidth that can be used for background
copy operations. This is a numeric value from 0 through 100. The default value is 50.
- Valid when I(state=present).
type: int
link1:
description:
- Specifies the portset name to be used for WAN link 1 of the Spectrum Virtualize storage system.
- Valid when I(state=present), to create an IP partnership.
type: str
remote_link1:
description:
- Specifies the portset name to be used for WAN link 1 of the remote Spectrum Virtualize storage system.
- Valid when I(state=present), to create an IP partnership.
type: str
link2:
description:
- Specifies the portset name to be used for WAN link 2 of the Spectrum Virtualize storage system.
- Valid when I(state=present), to create an IP partnership.
type: str
remote_link2:
description:
- Specifies the portset name to be used for WAN link 2 of the remote Spectrum Virtualize storage system.
- Valid when I(state=present), to create an IP partnership.
type: str
validate_certs:
description:
- Validates the SSL certificate for the local Spectrum Virtualize storage system.
default: false
type: bool
remote_validate_certs:
description:
- Validates the SSL certificate for the remote Spectrum Virtualize storage system.
default: false
type: bool
log_path:
description:
- Path of debug log file.
type: str
author:
- Sreshtant Bohidar (@Sreshtant-Bohidar)
notes:
- This module supports C(check_mode).
'''
EXAMPLES = '''
- name: Create an IP partnership
ibm.spectrum_virtualize.ibm_sv_manage_ip_partnership:
clustername: "{{ clustername }}"
domain: "{{ domain }}"
username: "{{ username }}"
password: "{{ password }}"
remote_clustername: "{{ remote_clustername }}"
remote_domain: "{{ remote_domain }}"
remote_username: "{{ remote_username }}"
remote_password: "{{ remote_password }}"
log_path: "/tmp/debug.log"
remote_clusterip: "{{ partner_ip }}"
type: "ipv4"
linkbandwidthmbits: 100
backgroundcopyrate: 50
compressed: yes
link1: "{{ portsetname }}"
remote_link1: "{{ remote_portsetname }}"
state: "present"
- name: Update an IP partnership
ibm.spectrum_virtualize.ibm_sv_manage_ip_partnership:
clustername: "{{ clustername }}"
domain: "{{ domain }}"
username: "{{ username }}"
password: "{{ password }}"
remote_clustername: "{{ remote_clustername }}"
remote_domain: "{{ remote_domain }}"
remote_username: "{{ remote_username }}"
remote_password: "{{ remote_password }}"
log_path: "/tmp/debug.log"
remote_cluster_id: "{{ cluster_id }}"
linkbandwidthmbits: 110
backgroundcopyrate: 60
compressed: no
state: "present"
- name: Remove an IP partnership
ibm.spectrum_virtualize.ibm_sv_manage_ip_partnership:
clustername: "{{ clustername }}"
username: "{{ username }}"
password: "{{ password }}"
remote_clustername: "{{ remote_clustername }}"
remote_username: "{{ remote_username }}"
remote_password: "{{ remote_password }}"
log_path: "/tmp/debug.log"
remote_cluster_id: "{{ cluster_id }}"
state: "absent"
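# The task below is an added illustrative sketch, not part of the original
# examples: it removes the same partnership using pre-generated REST API
# tokens (see the ibm_svc_auth module) instead of passwords; the token
# variable names are placeholders.
- name: Remove an IP partnership using authentication tokens
  ibm.spectrum_virtualize.ibm_sv_manage_ip_partnership:
    clustername: "{{ clustername }}"
    token: "{{ local_token }}"
    remote_clustername: "{{ remote_clustername }}"
    remote_token: "{{ remote_token }}"
    remote_cluster_id: "{{ cluster_id }}"
    state: "absent"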
'''
RETURN = '''#'''
from traceback import format_exc
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.ibm.spectrum_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi, svc_argument_spec, get_logger
from ansible.module_utils._text import to_native
class IBMSVCIPPartnership(object):
def __init__(self):
argument_spec = svc_argument_spec()
argument_spec.update(
dict(
state=dict(type='str', required=True, choices=['present', 'absent']),
type=dict(type='str', required=False, choices=['ipv4', 'ipv6']),
remote_clusterip=dict(type='str', required=False),
remote_cluster_id=dict(type='str', required=False),
compressed=dict(type='str', required=False, choices=['yes', 'no']),
linkbandwidthmbits=dict(type='int', required=False),
backgroundcopyrate=dict(type='int', required=False),
link1=dict(type='str', required=False),
link2=dict(type='str', required=False),
remote_clustername=dict(type='str', required=True),
remote_domain=dict(type='str', default=None),
remote_username=dict(type='str'),
remote_password=dict(type='str', no_log=True),
remote_token=dict(type='str', no_log=True),
remote_validate_certs=dict(type='bool', default=False),
remote_link1=dict(type='str', required=False),
remote_link2=dict(type='str', required=False)
)
)
self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
# logging setup
log_path = self.module.params['log_path']
log = get_logger(self.__class__.__name__, log_path)
self.log = log.info
# Required
self.state = self.module.params['state']
self.remote_clustername = self.module.params['remote_clustername']
# Optional
self.remote_username = self.module.params.get('remote_username', '')
self.remote_password = self.module.params.get('remote_password', '')
self.remote_clusterip = self.module.params.get('remote_clusterip', '')
self.remote_cluster_id = self.module.params.get('remote_cluster_id', '')
self.type = self.module.params.get('type', '')
self.compressed = self.module.params.get('compressed', '')
self.linkbandwidthmbits = self.module.params.get('linkbandwidthmbits', '')
self.backgroundcopyrate = self.module.params.get('backgroundcopyrate', '')
self.link1 = self.module.params.get('link1', '')
self.link2 = self.module.params.get('link2', '')
self.remote_domain = self.module.params.get('remote_domain', '')
self.remote_token = self.module.params.get('remote_token', '')
self.remote_validate_certs = self.module.params.get('remote_validate_certs', '')
self.remote_link1 = self.module.params.get('remote_link1', '')
self.remote_link2 = self.module.params.get('remote_link2', '')
# Internal variable
self.changed = False
# creating an instance of IBMSVCRestApi for local system
self.restapi_local = IBMSVCRestApi(
module=self.module,
clustername=self.module.params['clustername'],
domain=self.module.params['domain'],
username=self.module.params['username'],
password=self.module.params['password'],
validate_certs=self.module.params['validate_certs'],
log_path=log_path,
token=self.module.params['token']
)
# creating an instance of IBMSVCRestApi for remote system
self.restapi_remote = IBMSVCRestApi(
module=self.module,
clustername=self.module.params['remote_clustername'],
domain=self.module.params['remote_domain'],
username=self.module.params['remote_username'],
password=self.module.params['remote_password'],
validate_certs=self.module.params['remote_validate_certs'],
log_path=log_path,
token=self.module.params['remote_token']
)
# perform some basic checks
def basic_checks(self):
# Handling for mandatory parameter 'state'
if not self.state:
self.module.fail_json(msg="Missing mandatory parameter: state")
# Parameter validation for creating IP partnership
def create_parameter_validation(self):
if self.state == 'present':
if not self.remote_clusterip:
self.module.fail_json(msg="Missing required parameter during creation: remote_clusterip")
if not (self.link1 or self.link2):
self.module.fail_json(msg="At least one is required during creation: link1 or link2")
if not (self.remote_link1 or self.remote_link2):
self.module.fail_json(msg="At least one is required during creation: remote_link1 or remote_link2")
# Parameter validation for deleting IP partnership
def delete_parameter_validation(self):
if self.state == 'absent':
if not self.remote_cluster_id:
self.module.fail_json(msg="Missing required parameter during deletion: remote_cluster_id")
unsupported = []
check_list = {
'remote_clusterip': self.remote_clusterip,
'type': self.type,
'linkbandwidthmbits': self.linkbandwidthmbits,
'backgroundcopyrate': self.backgroundcopyrate,
'compressed': self.compressed,
'link1': self.link1,
'link2': self.link2,
'remote_link1': self.remote_link1,
'remote_link2': self.remote_link2
}
self.log('%s', check_list)
for key, value in check_list.items():
if value:
unsupported.append(key)
if unsupported:
self.module.fail_json(msg="Unsupported parameter during deletion: {0}".format(unsupported))
# Parameter validation for updating IP partnership
def update_parameter_validation(self):
if self.state == 'present' and not self.remote_cluster_id:
self.module.fail_json(msg="Missing required parameter during updation: remote_cluster_id")
# fetch system IP address
def get_ip(self, rest_obj):
system_data = rest_obj.svc_obj_info('lssystem', {}, None)
if system_data and 'console_IP' in system_data and ':' in system_data['console_IP']:
return system_data['console_IP'].split(':')[0]
else:
self.module.fail_json(msg="Failed to fetch the IP address of local system")
# get all partnership
def get_all_partnership(self, rest_obj):
return rest_obj.svc_obj_info(cmd='lspartnership', cmdopts=None, cmdargs=[])
# filter partnership data
def filter_partnership(self, data, ip):
return list(
filter(
lambda item: item['cluster_ip'] == ip, data
)
)
# get local partnership
def get_local_partnership(self, data):
return list(
filter(
lambda item: item['location'] == 'local', data
)
)
# get all the attributes of a partnership
def get_partnership_detail(self, rest_obj, id):
return rest_obj.svc_obj_info(cmd='lspartnership', cmdopts=None, cmdargs=[id])
# fetch partnership data
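# Added note: returns a 4-tuple (local_ip, local_id, local_data, remote_data),
# where local_ip is the local system's console IP, local_id is the ID of the
# 'local' entry in the local lspartnership output (used to look up the
# partnership on the remote system), and local_data/remote_data hold the
# partnership details found on each side ({} when none exists).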
def gather_all_validation_data(self, rest_local, rest_remote):
local_data = {}
remote_data = {}
local_ip = self.get_ip(rest_local)
local_id = None
# while updating and removing existing partnership
if self.remote_cluster_id:
local_data = self.get_partnership_detail(rest_local, self.remote_cluster_id)
all_local_partnership = self.get_all_partnership(rest_local)
if all_local_partnership:
local_partnership_data = self.get_local_partnership(all_local_partnership)
if local_partnership_data:
local_id = local_partnership_data[0]['id']
remote_data = self.get_partnership_detail(rest_remote, local_id)
# while creating partnership
else:
all_local_partnership = self.get_all_partnership(rest_local)
if all_local_partnership:
if self.remote_clusterip:
local_filter = self.filter_partnership(
all_local_partnership,
self.remote_clusterip
)
if local_filter:
local_data = self.get_partnership_detail(rest_local, local_filter[0]['id'])
all_remote_partnership = self.get_all_partnership(rest_remote)
if all_remote_partnership:
remote_filter = self.filter_partnership(
all_remote_partnership,
local_ip
)
if remote_filter:
remote_data = self.get_partnership_detail(rest_remote, remote_filter[0]['id'])
return local_ip, local_id, local_data, remote_data
# create a new IP partnership
def create_partnership(self, location, cluster_ip):
# when executed with check mode
if self.module.check_mode:
self.changed = True
return
rest_api = None
cmd = 'mkippartnership'
cmd_opts = {
'clusterip': cluster_ip
}
if self.type:
cmd_opts['type'] = self.type
if self.compressed:
cmd_opts['compressed'] = self.compressed
if self.linkbandwidthmbits:
cmd_opts['linkbandwidthmbits'] = self.linkbandwidthmbits
if self.backgroundcopyrate:
cmd_opts['backgroundcopyrate'] = self.backgroundcopyrate
if location == 'local':
rest_api = self.restapi_local
if self.link1:
cmd_opts['link1'] = self.link1
if self.link2:
cmd_opts['link2'] = self.link2
if location == 'remote':
rest_api = self.restapi_remote
if self.remote_link1:
cmd_opts['link1'] = self.remote_link1
if self.remote_link2:
cmd_opts['link2'] = self.remote_link2
result = rest_api.svc_run_command(cmd, cmd_opts, cmdargs=None)
self.log("Create result '%s'.", result)
if result == '':
self.changed = True
self.log("Created IP partnership for %s system.", location)
else:
self.module.fail_json(msg="Failed to create IP partnership for cluster ip {0}".format(cluster_ip))
# delete an existing partnership
def remove_partnership(self, location, id):
# when executed with check mode
if self.module.check_mode:
self.changed = True
return
rest_api = None
cmd = 'rmpartnership'
if location == 'local':
rest_api = self.restapi_local
if location == 'remote':
rest_api = self.restapi_remote
rest_api.svc_run_command(cmd, {}, [id])
self.log('Deleted partnership with name %s.', id)
self.changed = True
# probe a partnership
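# Added note: returns two dicts of options (compressed, linkbandwidthmbits,
# backgroundcopyrate, clusterip) whose requested values differ from the
# current local/remote partnership data; requested changes to type or the
# link parameters are rejected because they cannot be updated in place.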
def probe_partnership(self, local_data, remote_data):
modify_local, modify_remote = {}, {}
# unsupported parameters while updating
unsupported = []
if self.link1:
if local_data and local_data['link1'] != self.link1:
unsupported.append('link1')
if self.link2:
if local_data and local_data['link2'] != self.link2:
unsupported.append('link2')
if self.remote_link1:
if remote_data and remote_data['link1'] != self.remote_link1:
unsupported.append('remote_link1')
if self.remote_link2:
if remote_data and remote_data['link2'] != self.remote_link2:
unsupported.append('remote_link2')
if self.type:
if (local_data and local_data['type'] != self.type) or (remote_data and remote_data['type'] != self.type):
unsupported.append('type')
if unsupported:
self.module.fail_json(msg="parameters {0} cannot be updated".format(unsupported))
# supported parameters while updating
if self.compressed:
if local_data and local_data['compressed'] != self.compressed:
modify_local['compressed'] = self.compressed
if remote_data and remote_data['compressed'] != self.compressed:
modify_remote['compressed'] = self.compressed
if self.linkbandwidthmbits:
if local_data and int(local_data['link_bandwidth_mbits']) != self.linkbandwidthmbits:
modify_local['linkbandwidthmbits'] = self.linkbandwidthmbits
if remote_data and int(remote_data['link_bandwidth_mbits']) != self.linkbandwidthmbits:
modify_remote['linkbandwidthmbits'] = self.linkbandwidthmbits
if self.backgroundcopyrate:
if local_data and int(local_data['background_copy_rate']) != self.backgroundcopyrate:
modify_local['backgroundcopyrate'] = self.backgroundcopyrate
if remote_data and int(remote_data['background_copy_rate']) != self.backgroundcopyrate:
modify_remote['backgroundcopyrate'] = self.backgroundcopyrate
if self.remote_clusterip:
if local_data and self.remote_clusterip != local_data['cluster_ip']:
modify_local['clusterip'] = self.remote_clusterip
return modify_local, modify_remote
# start a partnership
def start_partnership(self, rest_object, id):
cmd = 'chpartnership'
cmd_opts = {
'start': True
}
cmd_args = [id]
rest_object.svc_run_command(cmd, cmd_opts, cmd_args)
self.log('Started the partnership %s.', id)
# stop a partnership
def stop_partnership(self, rest_object, id):
cmd = 'chpartnership'
cmd_opts = {
'stop': True
}
cmd_args = [id]
rest_object.svc_run_command(cmd, cmd_opts, cmd_args)
self.log('Stopped partnership %s.', id)
# update a partnership
def update_partnership(self, location, id, modify_data):
# when executed with check mode
if self.module.check_mode:
self.changed = True
return
cmd = 'chpartnership'
cmd_args = [id]
rest_object = None
if location == 'local':
rest_object = self.restapi_local
if location == 'remote':
rest_object = self.restapi_remote
if 'compressed' in modify_data or 'clusterip' in modify_data:
cmd_opts = {}
if 'compressed' in modify_data:
cmd_opts['compressed'] = modify_data['compressed']
if 'clusterip' in modify_data and location == 'local':
cmd_opts['clusterip'] = modify_data['clusterip']
if cmd_opts:
# stop the partnership
self.stop_partnership(rest_object, id)
# perform update operation
rest_object.svc_run_command(cmd, cmd_opts, cmd_args)
# start the partnership
self.start_partnership(rest_object, id)
self.changed = True
if 'linkbandwidthmbits' in modify_data or 'backgroundcopyrate' in modify_data:
cmd_opts = {}
if 'linkbandwidthmbits' in modify_data:
cmd_opts['linkbandwidthmbits'] = modify_data['linkbandwidthmbits']
if 'backgroundcopyrate' in modify_data:
cmd_opts['backgroundcopyrate'] = modify_data['backgroundcopyrate']
if cmd_opts:
# perform the update operation
rest_object.svc_run_command(cmd, cmd_opts, cmd_args)
self.changed = True
def apply(self):
msg = ''
self.basic_checks()
local_ip, local_id, local_data, remote_data = self.gather_all_validation_data(self.restapi_local, self.restapi_remote)
if self.state == 'present':
if local_data and remote_data:
modify_local, modify_remote = self.probe_partnership(local_data, remote_data)
if modify_local or modify_remote:
self.update_parameter_validation()
if modify_local:
self.update_partnership('local', self.remote_cluster_id, modify_local)
msg += 'IP partnership updated on local system.'
else:
msg += 'IP partnership already exists on local system.'
if modify_remote:
self.update_partnership('remote', local_id, modify_remote)
msg += ' IP partnership updated on remote system.'
else:
msg += ' IP partnership already exists on remote system.'
else:
msg += 'IP partnership already exists on both local and remote system.'
elif local_data and not remote_data:
response = self.probe_partnership(local_data, remote_data)
modify_local = response[0]
self.create_parameter_validation()
self.create_partnership('remote', local_ip)
msg += 'IP partnership created on remote system.'
if modify_local:
self.update_parameter_validation()
self.update_partnership('local', self.remote_cluster_id, modify_local)
msg += ' IP partnership updated on local system.'
else:
msg += ' IP partnership already exists on local system.'
elif not local_data and remote_data:
response = self.probe_partnership(local_data, remote_data)
modify_remote = response[1]
self.create_parameter_validation()
self.create_partnership('local', self.remote_clusterip)
msg += 'IP partnership created on local system.'
if modify_remote:
self.update_partnership('remote', local_id, modify_remote)
msg += ' IP partnership updated on remote system.'
else:
msg += ' IP partnership already exists on remote system.'
elif not local_data and not remote_data:
self.create_parameter_validation()
self.create_partnership('local', self.remote_clusterip)
self.create_partnership('remote', local_ip)
msg = 'IP partnership created on both local and remote system.'
elif self.state == 'absent':
# parameter validation while removing partnership
self.delete_parameter_validation()
# removal of partnership on both local and remote system
if local_data and remote_data:
self.remove_partnership('local', self.remote_cluster_id)
self.remove_partnership('remote', local_id)
msg += 'IP partnership deleted from both local and remote system.'
elif local_data and not remote_data:
self.remove_partnership('local', self.remote_cluster_id)
msg += 'IP partnership deleted from local system.'
msg += ' IP partnership does not exist on remote system.'
elif not local_data and remote_data:
self.remove_partnership('remote', local_id)
msg += 'IP partnership deleted from remote system.'
msg += ' IP partnership does not exist on local system.'
elif not local_data and not remote_data:
msg += 'IP partnership does not exist on either the local or the remote system. No modifications done.'
if self.module.check_mode:
msg = 'Skipping changes due to check mode.'
self.module.exit_json(msg=msg, changed=self.changed)
def main():
v = IBMSVCIPPartnership()
try:
v.apply()
except Exception as e:
v.log("Exception in apply(): \n%s", format_exc())
v.module.fail_json(msg="Module failed. Error [%s]." % to_native(e))
if __name__ == '__main__':
main()

View File

@@ -0,0 +1,343 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2022 IBM CORPORATION
# Author(s): Sanjaikumaar M <sanjaikumaar.m@ibm.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: ibm_sv_manage_provisioning_policy
short_description: This module configures and manages provisioning policies on IBM Spectrum Virtualize family storage systems
version_added: '1.10.0'
description:
- Ansible interface to manage mkprovisioningpolicy, chprovisioningpolicy, and rmprovisioningpolicy commands.
options:
clustername:
description:
- The hostname or management IP of the Spectrum Virtualize storage system.
required: true
type: str
domain:
description:
- Domain for the Spectrum Virtualize storage system.
- Valid when hostname is used for the parameter I(clustername).
type: str
username:
description:
- REST API username for the Spectrum Virtualize storage system.
- The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
type: str
password:
description:
- REST API password for the Spectrum Virtualize storage system.
- The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
type: str
token:
description:
- The authentication token to verify a user on the Spectrum Virtualize storage system.
- To generate a token, use the M(ibm.spectrum_virtualize.ibm_svc_auth) module.
type: str
log_path:
description:
- Path of debug log file.
type: str
state:
description:
- Creates, updates (C(present)), or deletes (C(absent)) a provisioning policy.
choices: [ present, absent ]
required: true
type: str
name:
description:
- Specifies the name of the provisioning policy.
- Specifies the new name during rename.
type: str
required: true
capacitysaving:
description:
- Specifies the policy capacity savings.
- Applies, when I(state=present), to create a provisioning policy.
choices: [ drivebased, thin, compressed ]
type: str
deduplicated:
description:
- Specifies when volumes should be deduplicated.
- Applicable when I(capacitysaving=thin) or I(capacitysaving=compressed).
default: false
type: bool
old_name:
description:
- Specifies the old name of the provisioning policy during renaming.
- Valid when I(state=present) to rename an existing policy.
type: str
validate_certs:
description:
- Validates the SSL certificate.
default: false
type: bool
author:
- Sanjaikumaar M (@sanjaikumaar)
notes:
- This module supports C(check_mode).
'''
EXAMPLES = '''
- name: Create provisioning policy
ibm.spectrum_virtualize.ibm_sv_manage_provisioning_policy:
clustername: "{{cluster}}"
username: "{{username}}"
password: "{{password}}"
name: provisioning_policy0
capacitysaving: "compressed"
deduplicated: true
state: present
- name: Rename provisioning policy
ibm.spectrum_virtualize.ibm_sv_manage_provisioning_policy:
clustername: "{{cluster}}"
username: "{{username}}"
password: "{{password}}"
name: pp0
old_name: provisioning_policy0
state: present
- name: Delete provisioning policy
ibm.spectrum_virtualize.ibm_sv_manage_provisioning_policy:
clustername: "{{cluster}}"
username: "{{username}}"
password: "{{password}}"
name: pp0
state: absent
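# The task below is an added illustrative sketch, not part of the original
# examples: it creates a thin-provisioned policy without deduplication;
# the policy name is a placeholder.
- name: Create thin provisioning policy
  ibm.spectrum_virtualize.ibm_sv_manage_provisioning_policy:
    clustername: "{{cluster}}"
    username: "{{username}}"
    password: "{{password}}"
    name: provisioning_policy_thin
    capacitysaving: "thin"
    state: present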
'''
RETURN = '''#'''
from traceback import format_exc
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.ibm.spectrum_virtualize.plugins.module_utils.ibm_svc_utils import (
IBMSVCRestApi, svc_argument_spec,
get_logger, strtobool
)
from ansible.module_utils._text import to_native
class IBMSVProvisioningPolicy:
def __init__(self):
argument_spec = svc_argument_spec()
argument_spec.update(
dict(
name=dict(
type='str',
required=True
),
state=dict(
type='str',
choices=['present', 'absent'],
required=True
),
capacitysaving=dict(
type='str',
choices=['drivebased', 'thin', 'compressed']
),
deduplicated=dict(
type='bool',
default=False
),
old_name=dict(
type='str',
),
)
)
self.module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
# Required parameters
self.name = self.module.params['name']
self.state = self.module.params['state']
# Optional parameters
self.capacitysaving = self.module.params.get('capacitysaving')
self.deduplicated = self.module.params.get('deduplicated', False)
self.old_name = self.module.params.get('old_name', '')
self.basic_checks()
# logging setup
self.log_path = self.module.params['log_path']
log = get_logger(self.__class__.__name__, self.log_path)
self.log = log.info
# Dynamic variables
self.changed = False
self.msg = ''
self.pp_data = {}
self.restapi = IBMSVCRestApi(
module=self.module,
clustername=self.module.params['clustername'],
domain=self.module.params['domain'],
username=self.module.params['username'],
password=self.module.params['password'],
validate_certs=self.module.params['validate_certs'],
log_path=self.log_path,
token=self.module.params['token']
)
def basic_checks(self):
if self.state == 'present':
if not self.name:
self.module.fail_json(
msg='Mandatory parameter missing: name'
)
else:
unsupported = ('capacitysaving', 'deduplicated', 'old_name')
unsupported_exists = ','.join(field for field in unsupported if getattr(self, field))
if unsupported_exists:
self.module.fail_json(
msg='state=absent but the following parameters have been passed: {0}'.format(unsupported_exists)
)
def create_validation(self):
if self.old_name:
self.rename_validation([])
if not self.capacitysaving:
self.module.fail_json(
msg='Mandatory parameter missing: capacitysaving'
)
def rename_validation(self, updates):
if self.old_name and self.name:
if self.name == self.old_name:
self.module.fail_json(msg='New name and old name should be different.')
new = self.is_pp_exists()
existing = self.is_pp_exists(name=self.old_name)
if existing:
if new:
self.module.fail_json(
msg='Provisioning policy ({0}) already exists for the given new name'.format(self.name)
)
else:
updates.append('name')
else:
if not new:
self.module.fail_json(
msg='Provisioning policy ({0}) does not exist for the given old name.'.format(self.old_name)
)
else:
self.module.exit_json(
msg='Provisioning policy ({0}) already renamed. No modifications done.'.format(self.name)
)
def is_pp_exists(self, name=None):
result = {}
name = name if name else self.name
cmd = 'lsprovisioningpolicy'
data = self.restapi.svc_obj_info(cmd=cmd, cmdopts=None, cmdargs=[name])
if isinstance(data, list):
for d in data:
result.update(d)
else:
result = data
self.pp_data = result
return result
def create_provisioning_policy(self):
self.create_validation()
if self.module.check_mode:
self.changed = True
return
cmd = 'mkprovisioningpolicy'
cmdopts = {
'name': self.name,
'capacitysaving': self.capacitysaving,
'deduplicated': self.deduplicated
}
self.restapi.svc_run_command(cmd, cmdopts, cmdargs=None)
self.log('Provisioning policy (%s) created', self.name)
self.changed = True
def provisioning_policy_probe(self):
updates = []
self.rename_validation(updates)
if self.capacitysaving:
capsav = 'none' if self.capacitysaving == 'drivebased' else self.capacitysaving
if capsav and capsav != self.pp_data.get('capacity_saving', ''):
self.module.fail_json(msg='The following parameter is not applicable for the update operation: capacitysaving')
if self.deduplicated and not strtobool(self.pp_data.get('deduplicated', 0)):
self.module.fail_json(msg='The following parameter is not applicable for the update operation: deduplicated')
return updates
def update_provisioning_policy(self, updates):
if self.module.check_mode:
self.changed = True
return
cmd = 'chprovisioningpolicy'
cmdopts = {
'name': self.name
}
self.restapi.svc_run_command(cmd, cmdopts, cmdargs=[self.old_name])
self.log('Provisioning policy (%s) renamed', self.name)
self.changed = True
def delete_provisioning_policy(self):
if self.module.check_mode:
self.changed = True
return
cmd = 'rmprovisioningpolicy'
self.restapi.svc_run_command(cmd, cmdopts=None, cmdargs=[self.name])
self.changed = True
def apply(self):
if self.is_pp_exists(name=self.old_name):
if self.state == 'present':
modifications = self.provisioning_policy_probe()
if any(modifications):
self.update_provisioning_policy(modifications)
self.msg = 'Provisioning policy ({0}) updated'.format(self.name)
else:
self.msg = 'Provisioning policy ({0}) already exists. No modifications done.'.format(self.name)
else:
self.delete_provisioning_policy()
self.msg = 'Provisioning policy ({0}) deleted'.format(self.name)
else:
if self.state == 'absent':
self.msg = 'Provisioning policy ({0}) does not exist.'.format(self.name)
else:
self.create_provisioning_policy()
self.msg = 'Provisioning policy ({0}) created.'.format(self.name)
if self.module.check_mode:
self.msg = 'skipping changes due to check mode.'
self.module.exit_json(
changed=self.changed,
msg=self.msg
)
def main():
v = IBMSVProvisioningPolicy()
try:
v.apply()
except Exception as e:
v.log('Exception in apply(): \n%s', format_exc())
v.module.fail_json(msg='Module failed. Error [%s].' % to_native(e))
if __name__ == '__main__':
main()

View File

@@ -0,0 +1,336 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2022 IBM CORPORATION
# Author(s): Sanjaikumaar M <sanjaikumaar.m@ibm.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: ibm_sv_manage_replication_policy
short_description: This module configures and manages replication policies on IBM Spectrum Virtualize family storage systems
version_added: '1.10.0'
description:
- Ansible interface to manage mkreplicationpolicy, chreplicationpolicy, and rmreplicationpolicy commands.
- This module manages policy based replication.
- This module can be run on all IBM Spectrum Virtualize storage systems with version 8.5.2.1 or later.
options:
clustername:
description:
- The hostname or management IP of the Spectrum Virtualize storage system.
required: true
type: str
domain:
description:
- Domain for the Spectrum Virtualize storage system.
- Valid when hostname is used for the parameter I(clustername).
type: str
username:
description:
- REST API username for the Spectrum Virtualize storage system.
- The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
type: str
password:
description:
- REST API password for the Spectrum Virtualize storage system.
- The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
type: str
token:
description:
- The authentication token to verify a user on the Spectrum Virtualize storage system.
- To generate a token, use the M(ibm.spectrum_virtualize.ibm_svc_auth) module.
type: str
log_path:
description:
- Path of debug log file.
type: str
state:
description:
- Creates, updates (C(present)), or deletes (C(absent)) a replication policy.
choices: [ present, absent ]
required: true
type: str
name:
description:
- Specifies the name of the replication policy.
type: str
required: true
topology:
description:
- Specifies the policy topology.
choices: [ 2-site-async-dr ]
type: str
location1system:
description:
- Specifies the name or ID of the system in location 1 of the topology.
type: str
location1iogrp:
description:
- Specifies the ID of the I/O group of the system in location 1 of the topology.
type: int
location2system:
description:
- Specifies the name or ID of the system in location 2 of the topology.
type: str
location2iogrp:
description:
- Specifies the ID of the I/O group of the system in location 2 of the topology.
type: int
rpoalert:
description:
- Specifies the RPO alert threshold in seconds.
The minimum value is 60 (1 minute) and the maximum value is 86400 (1 day).
- The value must be a multiple of 60 seconds.
type: int
validate_certs:
description:
- Validates the SSL certificate.
default: false
type: bool
author:
- Sanjaikumaar M (@sanjaikumaar)
notes:
- This module supports C(check_mode).
'''
EXAMPLES = '''
- name: Create replication policy
ibm.spectrum_virtualize.ibm_sv_manage_replication_policy:
clustername: "{{cluster}}"
username: "{{username}}"
password: "{{password}}"
name: replication_policy0
topology: 2-site-async-dr
location1system: x.x.x.x
location1iogrp: 0
location2system: x.x.x.x
location2iogrp: 0
rpoalert: 60
state: present
- name: Delete replication policy
ibm.spectrum_virtualize.ibm_sv_manage_replication_policy:
clustername: "{{cluster}}"
username: "{{username}}"
password: "{{password}}"
name: replication_policy0
state: absent
'''
RETURN = '''#'''
from traceback import format_exc
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.ibm.spectrum_virtualize.plugins.module_utils.ibm_svc_utils import (
IBMSVCRestApi, svc_argument_spec,
get_logger
)
from ansible.module_utils._text import to_native
class IBMSVReplicationPolicy:
def __init__(self):
argument_spec = svc_argument_spec()
argument_spec.update(
dict(
name=dict(
type='str',
required=True
),
state=dict(
type='str',
choices=['present', 'absent'],
required=True
),
topology=dict(
type='str',
choices=['2-site-async-dr']
),
location1system=dict(
type='str',
),
location1iogrp=dict(
type='int',
),
location2system=dict(
type='str',
),
location2iogrp=dict(
type='int',
),
rpoalert=dict(
type='int',
)
)
)
self.module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
# Required parameters
self.name = self.module.params['name']
self.state = self.module.params['state']
# Optional parameters
self.topology = self.module.params.get('topology', '')
self.location1system = self.module.params.get('location1system', '')
self.location1iogrp = self.module.params.get('location1iogrp', '')
self.location2system = self.module.params.get('location2system', '')
self.location2iogrp = self.module.params.get('location2iogrp', '')
self.rpoalert = self.module.params.get('rpoalert', '')
self.basic_checks()
# logging setup
self.log_path = self.module.params['log_path']
log = get_logger(self.__class__.__name__, self.log_path)
self.log = log.info
# Dynamic variables
self.changed = False
self.msg = ''
self.rp_data = {}
self.restapi = IBMSVCRestApi(
module=self.module,
clustername=self.module.params['clustername'],
domain=self.module.params['domain'],
username=self.module.params['username'],
password=self.module.params['password'],
validate_certs=self.module.params['validate_certs'],
log_path=self.log_path,
token=self.module.params['token']
)
def basic_checks(self):
if not self.name:
self.module.fail_json(
msg='Missing mandatory parameter: name'
)
if self.state == 'absent':
invalids = ('topology', 'location1system', 'location1iogrp', 'location2system', 'location2iogrp', 'rpoalert')
invalid_exists = ', '.join((var for var in invalids if not getattr(self, var) in {'', None}))
if invalid_exists:
self.module.fail_json(
msg='state=absent but the following parameters have been passed: {0}'.format(invalid_exists)
)
def is_rp_exists(self):
result = {}
cmd = 'lsreplicationpolicy'
data = self.restapi.svc_obj_info(cmd=cmd, cmdopts=None, cmdargs=[self.name])
if isinstance(data, list):
for d in data:
result.update(d)
else:
result = data
self.rp_data = result
return result
def create_replication_policy(self):
if self.module.check_mode:
self.changed = True
return
cmd = 'mkreplicationpolicy'
cmdopts = {
'name': self.name,
'topology': self.topology,
'location1system': self.location1system,
'location1iogrp': self.location1iogrp,
'location2system': self.location2system,
'location2iogrp': self.location2iogrp,
'rpoalert': self.rpoalert,
}
self.restapi.svc_run_command(cmd, cmdopts, cmdargs=None)
self.log('Replication policy (%s) created', self.name)
self.changed = True
def replication_policy_probe(self):
field_mappings = (
('topology', self.rp_data.get('topology', '')),
('location1system', (
('location1_system_name', self.rp_data.get('location1_system_name', '')),
('location1_system_id', self.rp_data.get('location1_system_id', ''))
)),
('location1iogrp', self.rp_data.get('location1_iogrp_id', '')),
('location2system', (
('location2_system_name', self.rp_data.get('location2_system_name', '')),
('location2_system_id', self.rp_data.get('location2_system_id', ''))
)),
('location2iogrp', self.rp_data.get('location2_iogrp_id', '')),
('rpoalert', self.rp_data.get('rpo_alert', ''))
)
self.log('replication policy probe data: %s', field_mappings)
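# Added note: any supplied parameter that differs from the existing policy is
# treated as an attempted modification, which this module does not support;
# the probe fails and asks the user to delete and recreate the policy.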
for f, v in field_mappings:
current_value = str(getattr(self, f))
if current_value and f in {'location1system', 'location2system'}:
try:
next(iter(filter(lambda val: val[1] == current_value, v)))
except StopIteration:
self.module.fail_json(
msg='Policy modification is not supported. '
'Please delete and recreate new policy.'
)
elif current_value and current_value != v:
self.module.fail_json(
msg='Policy modification is not supported. '
'Please delete and recreate new policy.'
)
def delete_replication_policy(self):
if self.module.check_mode:
self.changed = True
return
cmd = 'rmreplicationpolicy'
self.restapi.svc_run_command(cmd, cmdopts=None, cmdargs=[self.name])
self.log('Replication policy (%s) deleted', self.name)
self.changed = True
def apply(self):
if self.is_rp_exists():
if self.state == 'present':
self.replication_policy_probe()
self.msg = 'Replication policy ({0}) already exists. No modifications done.'.format(self.name)
else:
self.delete_replication_policy()
self.msg = 'Replication policy ({0}) deleted'.format(self.name)
else:
if self.state == 'absent':
self.msg = 'Replication policy ({0}) does not exist.'.format(self.name)
else:
self.create_replication_policy()
self.msg = 'Replication policy ({0}) created.'.format(self.name)
if self.module.check_mode:
self.msg = 'skipping changes due to check mode.'
self.module.exit_json(
changed=self.changed,
msg=self.msg
)
def main():
v = IBMSVReplicationPolicy()
try:
v.apply()
except Exception as e:
v.log('Exception in apply(): \n%s', format_exc())
v.module.fail_json(msg='Module failed. Error [%s].' % to_native(e))
if __name__ == '__main__':
main()

View File

@@ -0,0 +1,527 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2022 IBM CORPORATION
# Author(s): Sanjaikumaar M <sanjaikumaar.m@ibm.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: ibm_sv_manage_snapshot
short_description: This module manages snapshots (PiT image of a volume) on IBM Spectrum Virtualize family storage systems
version_added: '1.9.0'
description:
- In this implementation, a snapshot is a mutually consistent image of the volumes
in a volume group or a list of independent volume(s).
- This Ansible module provides the interface to manage snapshots through 'addsnapshot',
'chsnapshot' and 'rmsnapshot' Spectrum Virtualize commands.
options:
clustername:
description:
- The hostname or management IP of the Spectrum Virtualize storage system.
required: true
type: str
domain:
description:
- Domain for the Spectrum Virtualize storage system.
- Valid when hostname is used for the parameter I(clustername).
type: str
username:
description:
- REST API username for the Spectrum Virtualize storage system.
- The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
type: str
password:
description:
- REST API password for the Spectrum Virtualize storage system.
- The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
type: str
token:
description:
- The authentication token to verify a user on the Spectrum Virtualize storage system.
- To generate a token, use the M(ibm.spectrum_virtualize.ibm_svc_auth) module.
type: str
log_path:
description:
- Path of debug log file.
type: str
state:
description:
- Creates, updates (C(present)) or deletes (C(absent)) a snapshot.
choices: [ present, absent ]
required: true
type: str
name:
description:
- Specifies the name of a snapshot.
type: str
old_name:
description:
- Specifies the old name of a snapshot.
- Valid when I(state=present), to rename the existing snapshot.
type: str
src_volumegroup_name:
description:
- Specifies the name of the source volume group for which the snapshot is being created.
- I(src_volumegroup_name) and I(src_volume_names) are mutually exclusive.
- One of I(src_volumegroup_name) or I(src_volume_names) is required to create a snapshot.
type: str
src_volume_names:
description:
- Specifies the name of the volumes for which the snapshots are to be created.
- A list of volume names can be specified, separated by a colon delimiter.
- Valid when I(state=present), to create a snapshot.
type: str
snapshot_pool:
description:
- Specifies the name of child pool within which the snapshot is being created.
type: str
ignorelegacy:
description:
- Specifies that volume snapshots should be added even if legacy FlashCopy mappings already use the volume as a source.
default: false
type: bool
ownershipgroup:
description:
- Specifies the name of the ownershipgroup.
- Valid when I(state=present), to update an existing snapshot.
type: str
safeguarded:
description:
- Flag to create a safeguarded snapshot.
- I(safeguarded) and I(retentiondays) are required together.
- Supported in SV build 8.5.2.0 or later.
type: bool
version_added: 1.10.0
retentiondays:
description:
- Specifies the retention period in days.
- I(safeguarded) and I(retentiondays) are required together.
- Applies, when I(state=present) to create a safeguarded snapshot.
type: int
version_added: 1.10.0
validate_certs:
description:
- Validates the SSL certificate.
default: false
type: bool
author:
- Sanjaikumaar M (@sanjaikumaar)
notes:
- This module supports C(check_mode).
- This module automates the new Snapshot function, implemented by Spectrum Virtualize, which is using a
simplified management model. Any user requiring the flexibility available with legacy
FlashCopy can continue to use the existing module M(ibm.spectrum_virtualize.ibm_svc_manage_flashcopy).
- Snapshots created by this Ansible module are not directly accessible from the hosts.
To create a new group of host accessible volumes from a snapshot,
use M(ibm.spectrum_virtualize.ibm_svc_manage_volumegroup) module.
'''
EXAMPLES = '''
- name: Create volumegroup snapshot
ibm.spectrum_virtualize.ibm_sv_manage_snapshot:
clustername: '{{clustername}}'
username: '{{username}}'
password: '{{password}}'
name: ansible_1
src_volumegroup_name: volumegroup1
snapshot_pool: Pool0Childpool0
state: present
- name: Create volumes snapshot
ibm.spectrum_virtualize.ibm_sv_manage_snapshot:
clustername: '{{clustername}}'
username: '{{username}}'
password: '{{password}}'
name: ansible_2
src_volume_names: vdisk0:vdisk1
snapshot_pool: Pool0Childpool0
state: present
- name: Create safeguarded snapshot
ibm.spectrum_virtualize.ibm_sv_manage_snapshot:
clustername: '{{clustername}}'
username: '{{username}}'
password: '{{password}}'
name: ansible_2
src_volume_names: vdisk0:vdisk1
safeguarded: true
retentiondays: 1
snapshot_pool: Pool0Childpool0
state: present
- name: Update snapshot ansible_2
ibm.spectrum_virtualize.ibm_sv_manage_snapshot:
clustername: '{{clustername}}'
username: '{{username}}'
password: '{{password}}'
name: ansible_new
old_name: ansible_2
ownershipgroup: ownershipgroup0
state: present
- name: Delete volumegroup snapshot
ibm.spectrum_virtualize.ibm_sv_manage_snapshot:
clustername: '{{clustername}}'
username: '{{username}}'
password: '{{password}}'
name: ansible_1
src_volumegroup_name: volumegroup1
state: absent
- name: Delete volume snapshot
ibm.spectrum_virtualize.ibm_sv_manage_snapshot:
clustername: '{{clustername}}'
username: '{{username}}'
password: '{{password}}'
name: ansible_new
state: absent
'''
RETURN = '''#'''
from traceback import format_exc
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.ibm.spectrum_virtualize.plugins.module_utils.ibm_svc_utils import (
IBMSVCRestApi,
svc_argument_spec,
strtobool,
get_logger
)
from ansible.module_utils._text import to_native
class IBMSVSnapshot:
def __init__(self):
argument_spec = svc_argument_spec()
argument_spec.update(
dict(
state=dict(
type='str',
required=True,
choices=['present', 'absent']
),
name=dict(
type='str',
),
old_name=dict(
type='str'
),
snapshot_pool=dict(
type='str',
),
src_volumegroup_name=dict(
type='str',
),
src_volume_names=dict(
type='str',
),
ignorelegacy=dict(
type='bool',
default=False
),
ownershipgroup=dict(
type='str',
),
safeguarded=dict(
type='bool'
),
retentiondays=dict(
type='int',
)
)
)
self.module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
# Required parameters
self.name = self.module.params['name']
self.state = self.module.params['state']
# Default parameters
self.ignorelegacy = self.module.params['ignorelegacy']
# Optional parameters
self.old_name = self.module.params.get('old_name', '')
self.ownershipgroup = self.module.params.get('ownershipgroup', '')
self.snapshot_pool = self.module.params.get('snapshot_pool', '')
self.volumegroup = self.module.params.get('src_volumegroup_name', '')
self.volumes = self.module.params.get('src_volume_names', '')
self.safeguarded = self.module.params.get('safeguarded', False)
self.retentiondays = self.module.params.get('retentiondays')
self.basic_checks()
# logging setup
self.log_path = self.module.params['log_path']
log = get_logger(self.__class__.__name__, self.log_path)
self.log = log.info
# Dynamic variables
self.changed = False
self.msg = ''
self.parentuid = None
self.lsvg_data = {}
self.lsv_data = {}
self.restapi = IBMSVCRestApi(
module=self.module,
clustername=self.module.params['clustername'],
domain=self.module.params['domain'],
username=self.module.params['username'],
password=self.module.params['password'],
validate_certs=self.module.params['validate_certs'],
log_path=self.log_path,
token=self.module.params['token']
)
def basic_checks(self):
if not self.name:
self.module.fail_json(msg='Missing mandatory parameter: name')
if self.state == 'present':
if self.volumegroup and self.volumes:
self.module.fail_json(
msg='Mutually exclusive parameters: src_volumegroup_name, src_volume_names'
)
elif self.state == 'absent':
invalids = ('snapshot_pool', 'ignorelegacy', 'ownershipgroup', 'old_name', 'safeguarded', 'retentiondays')
invalid_exists = ', '.join((var for var in invalids if getattr(self, var)))
if self.volumes:
invalid_exists = 'src_volume_names, {0}'.format(invalid_exists)
if invalid_exists:
self.module.fail_json(
msg='state=absent but the following parameters have been passed: {0}'.format(invalid_exists)
)
def create_validation(self):
if self.old_name:
self.rename_validation([])
if not self.volumegroup and not self.volumes:
self.module.fail_json(
msg='Either src_volumegroup_name or src_volume_names should be passed during snapshot creation.'
)
if self.ownershipgroup:
self.module.fail_json(
msg='`ownershipgroup` parameter is not supported during snapshot creation'
)
def rename_validation(self, updates):
if self.old_name and self.name:
if self.name == self.old_name:
self.module.fail_json(msg='New name and old name should be different.')
new = self.is_snapshot_exists()
existing = self.is_snapshot_exists(old_name=self.old_name)
if existing:
if new:
self.module.fail_json(
msg='Snapshot ({0}) already exists for the given new name.'.format(self.name)
)
else:
updates.append('name')
else:
if not new:
self.module.fail_json(
msg='Snapshot ({0}) does not exist for the given old name.'.format(self.old_name)
)
else:
self.module.exit_json(
msg='Snapshot ({0}) already renamed. No modifications done.'.format(self.name)
)
def is_snapshot_exists(self, old_name=None, force=False):
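# Returns details of an existing snapshot (volume-group scoped or standalone volume scoped)
# and caches the result along with parent_uid; force=True bypasses the cached data.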
old_name = old_name if old_name else self.name
if self.volumegroup:
data = self.lsvolumegroupsnapshot(old_name=old_name, force=force)
self.parentuid = data.get('parent_uid')
else:
if self.lsv_data.get('snapshot_name') == old_name and not force:
return self.lsv_data
cmdopts = {
"filtervalue": "snapshot_name={0}".format(old_name)
}
result = self.restapi.svc_obj_info(
cmd='lsvolumesnapshot',
cmdopts=cmdopts,
cmdargs=None
)
try:
data = next(
filter(
lambda x: x['volume_group_name'] == '',
result
)
)
except StopIteration:
return {}
else:
self.lsv_data = data
self.parentuid = data.get('parent_uid')
return data
def lsvolumegroupsnapshot(self, force=False, old_name=None, parentuid=None):
old_name = old_name if old_name else self.name
if self.lsvg_data.get('name') == old_name and not force:
return self.lsvg_data
cmdopts = {
'snapshot': old_name
}
if parentuid:
cmdopts['parentuid'] = self.parentuid
else:
cmdopts['volumegroup'] = self.volumegroup
data = {}
result = self.restapi.svc_obj_info(
cmd='lsvolumegroupsnapshot',
cmdopts=cmdopts,
cmdargs=None
)
if isinstance(result, list):
for res in result:
data = res
else:
data = result
self.lsvg_data = data
return data
def create_snapshot(self):
self.create_validation()
if self.module.check_mode:
self.changed = True
return
cmd = 'addsnapshot'
cmdopts = {
'name': self.name
}
if self.snapshot_pool:
cmdopts['pool'] = self.snapshot_pool
if self.ignorelegacy:
cmdopts['ignorelegacy'] = self.ignorelegacy
if self.retentiondays:
cmdopts['retentiondays'] = self.retentiondays
if self.safeguarded:
cmdopts['safeguarded'] = self.safeguarded
if self.volumegroup:
cmdopts['volumegroup'] = self.volumegroup
else:
cmdopts['volumes'] = self.volumes
self.restapi.svc_run_command(cmd, cmdopts, cmdargs=None)
self.log('Snapshot (%s) created', self.name)
self.changed = True
def snapshot_probe(self):
updates = []
self.rename_validation(updates)
kwargs = dict((k, getattr(self, k)) for k in ['old_name', 'parentuid'] if getattr(self, k))
ls_data = self.lsvolumegroupsnapshot(**kwargs)
if self.ownershipgroup and ls_data['owner_name'] != self.ownershipgroup:
updates.append('ownershipgroup')
if self.safeguarded in {True, False} and self.safeguarded != strtobool(ls_data.get('safeguarded', 0)):
self.module.fail_json(
msg='Following parameter not applicable for update operation: safeguarded'
)
self.log('Snapshot probe result: %s', updates)
return updates
def update_snapshot(self, updates):
if self.module.check_mode:
self.changed = True
return
old_name = self.old_name if self.old_name else self.name
cmd = 'chsnapshot'
cmdopts = dict((k, getattr(self, k)) for k in updates)
cmdopts['snapshot'] = old_name
if self.volumegroup:
cmdopts['volumegroup'] = self.volumegroup
else:
cmdopts['parentuid'] = self.parentuid
self.restapi.svc_run_command(cmd, cmdopts=cmdopts, cmdargs=None)
self.changed = True
def delete_snapshot(self):
if self.module.check_mode:
self.changed = True
return
cmd = 'rmsnapshot'
cmdopts = {
'snapshot': self.name
}
if self.volumegroup:
cmdopts['volumegroup'] = self.volumegroup
else:
cmdopts['parentuid'] = self.parentuid
self.restapi.svc_run_command(cmd, cmdopts=cmdopts, cmdargs=None)
self.changed = True
still_exists = self.is_snapshot_exists(force=True)
if still_exists:
self.msg = 'Snapshot ({0}) will be in the dependent_delete '\
'state until those dependencies are removed'.format(self.name)
else:
self.msg = 'Snapshot ({0}) deleted.'.format(self.name)
def apply(self):
if self.is_snapshot_exists(old_name=self.old_name):
if self.state == 'present':
modifications = self.snapshot_probe()
if any(modifications):
self.update_snapshot(modifications)
self.msg = 'Snapshot ({0}) updated.'.format(self.name)
else:
self.msg = 'Snapshot ({0}) already exists. No modifications done.'.format(self.name)
else:
self.delete_snapshot()
else:
if self.state == 'absent':
self.msg = 'Snapshot ({0}) does not exist.'.format(self.name)
else:
self.create_snapshot()
self.msg = 'Snapshot ({0}) created.'.format(self.name)
if self.module.check_mode:
self.msg = 'skipping changes due to check mode.'
self.module.exit_json(
changed=self.changed,
msg=self.msg
)
def main():
v = IBMSVSnapshot()
try:
v.apply()
except Exception as e:
v.log('Exception in apply(): \n%s', format_exc())
v.module.fail_json(msg='Module failed. Error [%s].' % to_native(e))
if __name__ == '__main__':
main()

View File

@@ -0,0 +1,365 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2022 IBM CORPORATION
# Author(s): Shilpi Jain <shilpi.jain1@ibm.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: ibm_sv_manage_snapshotpolicy
short_description: This module manages snapshot policy configuration on IBM Spectrum Virtualize family storage systems
version_added: "1.9.0"
description:
- Ansible interface to manage 'mksnapshotpolicy' and 'rmsnapshotpolicy' snapshot policy commands.
- Snapshot policy is introduced in IBM Spectrum Virtualize 8.5.1.0.
options:
clustername:
description:
- The hostname or management IP of the Spectrum Virtualize storage system.
required: true
type: str
domain:
description:
- Domain for the Spectrum Virtualize storage system.
- Valid when hostname is used for the parameter I(clustername).
type: str
username:
description:
- REST API username for the Spectrum Virtualize storage system.
- The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
type: str
password:
description:
- REST API password for the Spectrum Virtualize storage system.
- The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
type: str
token:
description:
- The authentication token to verify a user on the Spectrum Virtualize storage system.
- To generate a token, use the M(ibm.spectrum_virtualize.ibm_svc_auth) module.
type: str
log_path:
description:
- Path of debug log file.
type: str
state:
description:
- Creates (C(present)) or deletes (C(absent)) a snapshot policy.
- Resumes (C(resume)) or suspends (C(suspend)) the snapshot policy, system-wide.
choices: [ present, absent, suspend, resume ]
required: true
type: str
name:
description:
- Specifies a unique name of the snapshot policy.
- Not applicable when I(state=suspend) or I(state=resume).
type: str
backupunit:
description:
- Specifies the unit of measurement for the backup interval.
- Applies when I(state=present).
choices: [ minute, hour, day, week, month ]
type: str
backupinterval:
description:
- Specifies the backup interval.
- Applies when I(state=present).
type: str
backupstarttime:
description:
- Specifies the start time of backup in the format YYMMDDHHMM.
- Applies when I(state=present).
type: str
retentiondays:
description:
- Specifies the retention days for the backup.
- Applies when I(state=present).
type: str
removefromvolumegroups:
description:
- Specifies whether to remove the volume group association from the snapshot policy.
- Applies when I(state=absent).
- This option is allowed only for SecurityAdmin users.
type: bool
validate_certs:
description:
- Validates certification.
default: false
type: bool
author:
- Shilpi Jain(@Shilpi-J)
notes:
- This module supports C(check_mode).
'''
EXAMPLES = '''
- name: Create snapshot policy
ibm.spectrum_virtualize.ibm_sv_manage_snapshotpolicy:
clustername: "{{cluster}}"
username: "{{username}}"
password: "{{password}}"
name: policy0
backupunit: day
backupinterval: 1
backupstarttime: 2102281800
retentiondays: 15
state: present
- name: Suspend snapshot policy functionality
ibm.spectrum_virtualize.ibm_sv_manage_snapshotpolicy:
clustername: "{{cluster}}"
username: "{{username}}"
password: "{{password}}"
state: suspend
- name: Resume snapshot policy functionality
ibm.spectrum_virtualize.ibm_sv_manage_snapshotpolicy:
clustername: "{{cluster}}"
username: "{{username}}"
password: "{{password}}"
state: resume
- name: Delete snapshot policy
ibm.spectrum_virtualize.ibm_sv_manage_snapshotpolicy:
clustername: "{{cluster}}"
username: "{{username}}"
password: "{{password}}"
name: policy0
state: absent
'''
RETURN = '''#'''
from traceback import format_exc
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.ibm.spectrum_virtualize.plugins.module_utils.ibm_svc_utils import (
IBMSVCRestApi, svc_argument_spec,
get_logger
)
from ansible.module_utils._text import to_native
class IBMSVCSnapshotPolicy:
def __init__(self):
argument_spec = svc_argument_spec()
argument_spec.update(
dict(
state=dict(
type='str',
required=True,
choices=['present', 'absent', 'suspend', 'resume']
),
name=dict(
type='str',
),
backupunit=dict(
type='str',
choices=['minute', 'hour', 'day', 'week', 'month'],
),
backupinterval=dict(
type='str',
),
backupstarttime=dict(
type='str',
),
retentiondays=dict(
type='str',
),
removefromvolumegroups=dict(
type='bool'
),
)
)
self.module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
# Required parameters
self.name = self.module.params['name']
self.state = self.module.params['state']
self.backupunit = self.module.params.get('backupunit', '')
self.backupinterval = self.module.params.get('backupinterval', '')
self.backupstarttime = self.module.params.get('backupstarttime', '')
self.retentiondays = self.module.params.get('retentiondays', '')
self.removefromvolumegroups = self.module.params.get('removefromvolumegroups', False)
self.basic_checks()
# Variable to cache data
self.snapshot_policy_details = None
# logging setup
self.log_path = self.module.params['log_path']
log = get_logger(self.__class__.__name__, self.log_path)
self.log = log.info
self.changed = False
self.msg = ''
self.restapi = IBMSVCRestApi(
module=self.module,
clustername=self.module.params['clustername'],
domain=self.module.params['domain'],
username=self.module.params['username'],
password=self.module.params['password'],
validate_certs=self.module.params['validate_certs'],
log_path=self.log_path,
token=self.module.params['token']
)
def basic_checks(self):
if self.state == 'present':
fields = ['name', 'backupinterval', 'backupstarttime', 'retentiondays', 'backupunit']
exists = list(filter(lambda x: not getattr(self, x), fields))
if any(exists):
self.module.fail_json(
msg="State is present but following parameters are missing: {0}".format(', '.join(exists))
)
if self.removefromvolumegroups:
self.module.fail_json(
msg="`removefromvolumegroups` parameter is not supported when state=present"
)
elif self.state == 'absent':
if not self.name:
self.module.fail_json(msg="Missing mandatory parameter: name")
fields = ['backupinterval', 'backupstarttime', 'retentiondays', 'backupunit']
exists = list(filter(lambda x: getattr(self, x) or getattr(self, x) == '', fields))
if any(exists):
self.module.fail_json(msg='{0} should not be passed when state=absent'.format(', '.join(exists)))
elif self.state in ['suspend', 'resume']:
fields = ['name', 'backupinterval', 'backupstarttime', 'retentiondays', 'backupunit']
exists = list(filter(lambda x: getattr(self, x) or getattr(self, x) == '', fields))
if any(exists):
self.module.fail_json(msg='{0} should not be passed when state={1}'.format(', '.join(exists), self.state))
def policy_exists(self):
merged_result = {}
data = self.restapi.svc_obj_info(
cmd='lssnapshotschedule',
cmdopts=None,
cmdargs=[self.name]
)
if isinstance(data, list):
for d in data:
merged_result.update(d)
else:
merged_result = data
self.snapshot_policy_details = merged_result
return merged_result
def create_snapshot_policy(self):
if self.module.check_mode:
self.changed = True
return
cmd = 'mksnapshotpolicy'
cmdopts = {
'name': self.name,
'backupstarttime': self.backupstarttime,
'backupinterval': self.backupinterval,
'backupunit': self.backupunit,
'retentiondays': self.retentiondays
}
self.restapi.svc_run_command(cmd, cmdopts, cmdargs=None)
self.log('Snapshot policy (%s) created', self.name)
self.changed = True
def snapshot_policy_probe(self):
field_mappings = (
('backupinterval', self.snapshot_policy_details['backup_interval']),
('backupstarttime', self.snapshot_policy_details['backup_start_time']),
('retentiondays', self.snapshot_policy_details['retention_days']),
('backupunit', self.snapshot_policy_details['backup_unit'])
)
updates = []
for field, existing_value in field_mappings:
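# backup_start_time is reported with a trailing seconds field, so the user-supplied
# YYMMDDHHMM value is compared with '00' appended.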
if field == 'backupstarttime':
updates.append(existing_value != '{0}00'.format(getattr(self, field)))
else:
updates.append(existing_value != getattr(self, field))
return updates
def delete_snapshot_policy(self):
if self.module.check_mode:
self.changed = True
return
cmd = 'rmsnapshotpolicy'
cmdargs = [self.name]
cmdopts = None
if self.removefromvolumegroups:
cmdopts = {
'removefromvolumegroups': True
}
self.restapi.svc_run_command(cmd, cmdopts=cmdopts, cmdargs=cmdargs)
self.log('Snapshot policy (%s) deleted', self.name)
self.changed = True
def update_snapshot_scheduler(self):
if self.module.check_mode:
self.changed = True
return
cmd = 'chsystem'
cmdopts = {'snapshotpolicysuspended': 'yes' if self.state == 'suspend' else 'no'}
self.restapi.svc_run_command(cmd, cmdopts=cmdopts, cmdargs=None)
self.log('Snapshot scheduler status changed: %s', self.state)
self.changed = True
def apply(self):
if self.state in ['resume', 'suspend']:
self.update_snapshot_scheduler()
self.msg = 'Snapshot scheduler {0}ed'.format(self.state.rstrip('e'))
else:
if self.policy_exists():
if self.state == 'present':
modifications = self.snapshot_policy_probe()
if any(modifications):
self.msg = 'Policy modification is not supported in Ansible. Please delete and recreate the policy.'
else:
self.msg = 'Snapshot policy ({0}) already exists. No modifications done.'.format(self.name)
else:
self.delete_snapshot_policy()
self.msg = 'Snapshot policy ({0}) deleted.'.format(self.name)
else:
if self.state == 'absent':
self.msg = 'Snapshot policy ({0}) does not exist. No modifications done.'.format(self.name)
else:
self.create_snapshot_policy()
self.msg = 'Snapshot policy ({0}) created.'.format(self.name)
if self.module.check_mode:
self.msg = 'skipping changes due to check mode.'
self.module.exit_json(
changed=self.changed,
msg=self.msg
)
def main():
v = IBMSVCSnapshotPolicy()
try:
v.apply()
except Exception as e:
v.log("Exception in apply(): \n%s", format_exc())
v.module.fail_json(msg="Module failed. Error [%s]." % to_native(e))
if __name__ == '__main__':
main()

View File

@@ -0,0 +1,158 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2022 IBM CORPORATION
# Author(s): Sanjaikumaar M <sanjaikumaar.m@ibm.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: ibm_sv_manage_ssl_certificate
short_description: This module exports existing system-signed certificate on to IBM Spectrum Virtualize family storage systems
version_added: '1.10.0'
description:
- Only existing system-signed certificates can be exported. External authority certificate generation is not supported.
options:
clustername:
description:
- The hostname or management IP of the Spectrum Virtualize storage system.
required: true
type: str
domain:
description:
- Domain for the Spectrum Virtualize storage system.
- Valid when the hostname is used for the parameter I(clustername).
type: str
username:
description:
- REST API username for the Spectrum Virtualize storage system.
- The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
type: str
password:
description:
- REST API password for the Spectrum Virtualize storage system.
- The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
type: str
token:
description:
- The authentication token to verify a user on the Spectrum Virtualize storage system.
- To generate a token, use the M(ibm.spectrum_virtualize.ibm_svc_auth) module.
type: str
log_path:
description:
- Path of debug log file.
type: str
certificate_type:
description:
- Specify the certificate type to be exported.
choices: [ 'system' ]
default: 'system'
type: str
validate_certs:
description:
- Validates certification.
default: false
type: bool
author:
- Sanjaikumaar M(@sanjaikumaar)
notes:
- This module supports C(check_mode).
'''
EXAMPLES = '''
- name: Export SSL certificate internally
ibm.spectrum_virtualize.ibm_sv_manage_ssl_certificate:
clustername: "x.x.x.x"
username: "username"
password: "password"
certificate_type: "system"
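# A minimal sketch of token-based authentication, assuming a token was registered
# beforehand with the ibm.spectrum_virtualize.ibm_svc_auth module
# (auth_result is an illustrative variable name).
- name: Export SSL certificate using an authentication token
  ibm.spectrum_virtualize.ibm_sv_manage_ssl_certificate:
    clustername: "x.x.x.x"
    token: "{{ auth_result.token }}"
    certificate_type: "system"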
'''
RETURN = '''#'''
from traceback import format_exc
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.ibm.spectrum_virtualize.plugins.module_utils.ibm_svc_utils import (
IBMSVCRestApi, svc_argument_spec,
get_logger
)
from ansible.module_utils._text import to_native
class IBMSVSSLCertificate:
def __init__(self):
argument_spec = svc_argument_spec()
argument_spec.update(
dict(
certificate_type=dict(
type='str',
choices=['system'],
default='system'
)
)
)
self.module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
# Default parameters
self.certificate_type = self.module.params['certificate_type']
# logging setup
self.log_path = self.module.params['log_path']
log = get_logger(self.__class__.__name__, self.log_path)
self.log = log.info
# Dynamic variables
self.changed = False
self.msg = ''
self.restapi = IBMSVCRestApi(
module=self.module,
clustername=self.module.params['clustername'],
domain=self.module.params['domain'],
username=self.module.params['username'],
password=self.module.params['password'],
validate_certs=self.module.params['validate_certs'],
log_path=self.log_path,
token=self.module.params['token']
)
def export_cert(self):
if self.module.check_mode:
self.changed = True
return
self.restapi.svc_run_command('chsystemcert', cmdopts=None, cmdargs=['-export'])
self.log('Certificate exported')
self.changed = True
def apply(self):
if self.certificate_type == 'system':
self.export_cert()
self.msg = 'Certificate exported.'
if self.module.check_mode:
self.msg = 'skipping changes due to check mode.'
self.module.exit_json(
changed=self.changed,
msg=self.msg
)
def main():
v = IBMSVSSLCertificate()
try:
v.apply()
except Exception as e:
v.log('Exception in apply(): \n%s', format_exc())
v.module.fail_json(msg='Module failed. Error [%s].' % to_native(e))
if __name__ == '__main__':
main()

View File

@@ -0,0 +1,399 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2022 IBM CORPORATION
# Author(s): Sanjaikumaar M <sanjaikumaar.m@ibm.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: ibm_sv_manage_truststore_for_replication
short_description: This module manages certificate trust stores for replication on
IBM Spectrum Virtualize family storage systems
version_added: '1.10.0'
description:
- Ansible interface to manage mktruststore and rmtruststore commands.
- This module transfers the certificate from a remote system to the local system.
- This module works on SSH and uses paramiko to establish an SSH connection.
- Once transfer is done successfully, it also adds the certificate to the trust store of the local system.
- This module can be used to set up mutual TLS (mTLS) for policy-based replication inter-system communication
using cluster endpoint certificates (usually system-signed which are exported by the
M(ibm.spectrum_virtualize.ibm_sv_manage_ssl_certificate) module).
options:
clustername:
description:
- The hostname or management IP of the Spectrum Virtualize storage system.
required: true
type: str
username:
description:
- Username for the Spectrum Virtualize storage system.
type: str
required: true
password:
description:
- Password for the Spectrum Virtualize storage system.
- Mandatory, when I(usesshkey=no).
type: str
usesshkey:
description:
- For key-pair based SSH connection, set this field as "yes".
Provide full path of key in key_filename field.
If not provided, default path of SSH key is used.
type: str
choices: [ 'yes', 'no']
default: 'no'
key_filename:
description:
- SSH client private key filename. By default, ~/.ssh/id_rsa is used.
type: str
log_path:
description:
- Path of debug log file.
type: str
state:
description:
- Creates (C(present)) or deletes (C(absent)) a trust store.
choices: [ present, absent ]
required: true
type: str
name:
description:
- Specifies the name of the trust store.
- If not specified, the module generates a name automatically with format store_I(remote_clustername).
type: str
remote_clustername:
description:
- Specifies the name of the partner remote cluster with which the mTLS partnership needs to be set up.
type: str
required: true
remote_username:
description:
- Username for remote cluster.
- Applies when I(state=present) to create a trust store.
type: str
remote_password:
description:
- Password for remote cluster.
- Applies when I(state=present) to create a trust store.
type: str
author:
- Sanjaikumaar M(@sanjaikumaar)
notes:
- This module supports C(check_mode).
'''
EXAMPLES = '''
- name: Create truststore
ibm.spectrum_virtualize.ibm_sv_manage_truststore_for_replication:
clustername: "{{clustername}}"
username: "{{username}}"
password: "{{password}}"
name: "{{name}}"
remote_clustername: "{{remote_clustername}}"
remote_username: "{{remote_username}}"
remote_password: "{{remote_password}}"
log_path: "{{log_path}}"
state: "present"
- name: Delete truststore
ibm.spectrum_virtualize.ibm_sv_manage_truststore_for_replication:
clustername: "{{clustername}}"
username: "{{username}}"
password: "{{password}}"
name: "{{name}}"
remote_clustername: "{{remote_clustername}}"
log_path: "{{log_path}}"
state: "absent"
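# A minimal sketch of key-based SSH authentication using the documented
# usesshkey and key_filename options; the key path shown is illustrative.
- name: Create truststore using SSH key authentication
  ibm.spectrum_virtualize.ibm_sv_manage_truststore_for_replication:
    clustername: "{{clustername}}"
    username: "{{username}}"
    usesshkey: 'yes'
    key_filename: "/root/.ssh/id_rsa"
    name: "{{name}}"
    remote_clustername: "{{remote_clustername}}"
    remote_username: "{{remote_username}}"
    remote_password: "{{remote_password}}"
    log_path: "{{log_path}}"
    state: "present"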
'''
RETURN = '''#'''
from traceback import format_exc
import json
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.ibm.spectrum_virtualize.plugins.module_utils.ibm_svc_utils import (
svc_ssh_argument_spec,
get_logger
)
from ansible_collections.ibm.spectrum_virtualize.plugins.module_utils.ibm_svc_ssh import IBMSVCssh
from ansible.module_utils._text import to_native
class IBMSVTrustStore:
def __init__(self):
argument_spec = svc_ssh_argument_spec()
argument_spec.update(
dict(
password=dict(
type='str',
required=False,
no_log=True
),
name=dict(
type='str'
),
usesshkey=dict(
type='str',
default='no',
choices=['yes', 'no']
),
key_filename=dict(
type='str',
),
state=dict(
type='str',
choices=['present', 'absent'],
required=True
),
remote_clustername=dict(
type='str',
required=True
),
remote_username=dict(
type='str',
),
remote_password=dict(
type='str',
no_log=True
),
)
)
self.module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
# logging setup
self.log_path = self.module.params['log_path']
log = get_logger(self.__class__.__name__, self.log_path)
self.log = log.info
# Required parameters
self.state = self.module.params['state']
self.remote_clustername = self.module.params['remote_clustername']
# local SSH keys will be used in case of password less SSH connection
self.usesshkey = self.module.params['usesshkey']
self.key_filename = self.module.params['key_filename']
# Optional parameters
self.password = self.module.params.get('password', '')
self.name = self.module.params.get('name', '')
self.remote_username = self.module.params.get('remote_username', '')
self.remote_password = self.module.params.get('remote_password', '')
if not self.name:
self.name = 'store_{0}'.format(self.remote_clustername)
if not self.password:
if self.usesshkey == 'yes':
self.log("password is none and use ssh private key. Check for its path")
if self.key_filename:
self.log("key file_name is provided, use it")
self.look_for_keys = True
else:
self.log("key file_name is not provided, use default one, ~/.ssh/id_rsa.pub")
self.look_for_keys = True
else:
self.log("password is none and SSH key is not provided")
self.module.fail_json(msg="You must pass either password or usesshkey parameter.")
else:
self.log("password is given")
self.look_for_keys = False
self.basic_checks()
# Dynamic variables
self.changed = False
self.msg = ''
self.ssh_client = IBMSVCssh(
module=self.module,
clustername=self.module.params['clustername'],
username=self.module.params['username'],
password=self.password,
look_for_keys=self.look_for_keys,
key_filename=self.key_filename,
log_path=self.log_path
)
def basic_checks(self):
if self.state == 'present':
if not self.remote_clustername:
self.module.fail_json(
msg='Missing mandatory parameter: remote_clustername'
)
if not self.remote_username:
self.module.fail_json(
msg='Missing mandatory parameter: remote_username'
)
if not self.remote_password:
self.module.fail_json(
msg='Missing mandatory parameter: remote_password'
)
elif self.state == 'absent':
if not self.remote_clustername:
self.module.fail_json(
msg='Missing mandatory parameter: remote_clustername'
)
unsupported = ('remote_username', 'remote_password')
unsupported_exists = ', '.join((field for field in unsupported if getattr(self, field)))
if unsupported_exists:
self.module.fail_json(
msg='state=absent but the following parameters have been passed: {0}'.format(unsupported_exists)
)
def raise_error(self, stderr):
message = stderr.read().decode('utf-8')
if len(message) > 0:
self.log("%s", message)
self.module.fail_json(msg=message)
else:
message = 'Unknown error received.'
self.module.fail_json(msg=message)
def is_truststore_exists(self):
merged_result = {}
cmd = 'lstruststore -json {0}'.format(self.name)
stdin, stdout, stderr = self.ssh_client.client.exec_command(cmd)
result = stdout.read().decode('utf-8')
if result:
result = json.loads(result)
else:
return merged_result
rc = stdout.channel.recv_exit_status()
if rc > 0:
message = stderr.read().decode('utf-8')
# Fail only when the error message contains neither of the expected CMMVC codes
if (message.count('CMMVC5804E') != 1) and (message.count('CMMVC6035E') != 1):
self.log("Error in executing CLI command: %s", cmd)
self.log("%s", message)
self.module.fail_json(msg=message)
else:
self.log("Expected error: %s", message)
if isinstance(result, list):
for d in result:
merged_result.update(d)
else:
merged_result = result
return merged_result
def download_file(self):
if self.module.check_mode:
return
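# Copy the exported certificate (/dumps/certificate.pem) from the remote system into the
# local /upgrade directory over scp, answering the interactive password prompt with remote_password.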
cmd = 'scp -o stricthostkeychecking=no {0}@{1}:/dumps/certificate.pem /upgrade/'.format(
self.remote_username,
self.remote_clustername
)
self.log('Command to be executed: %s', cmd)
stdin, stdout, stderr = self.ssh_client.client.exec_command(cmd, get_pty=True, timeout=60 * 1.5)
result = ''
while not stdout.channel.recv_ready():
data = stdout.channel.recv(1024)
self.log(str(data, 'utf-8'))
if data:
if b'Password:' in data or b'password' in data:
stdin.write("{0}\n".format(self.remote_password))
stdin.flush()
else:
result += data.decode('utf-8')
break
result += stdout.read().decode('utf-8')
rc = stdout.channel.recv_exit_status()
if rc > 0:
message = stderr.read().decode('utf-8')
self.log("Error in executing command: %s", cmd)
if not len(message) > 1:
if len(result) > 1:
err = result.replace('\rPassword:\r\n', '')
self.log("Error: %s", err)
if err:
self.module.fail_json(msg=err)
self.module.fail_json(msg='Unknown error received')
else:
self.module.fail_json(msg=message)
else:
self.log(result)
def create_truststore(self):
if self.module.check_mode:
self.changed = True
return
cmd = 'mktruststore -name {0} -file {1}'.format(self.name, '/upgrade/certificate.pem')
self.log('Command to be executed: %s', cmd)
stdin, stdout, stderr = self.ssh_client.client.exec_command(cmd)
result = stdout.read().decode('utf-8')
rc = stdout.channel.recv_exit_status()
if rc > 0:
self.log("Error in executing command: %s", cmd)
self.raise_error(stderr)
else:
self.log('Truststore (%s) created', self.name)
self.changed = True
def delete_truststore(self):
if self.module.check_mode:
self.changed = True
return
cmd = 'rmtruststore {0}'.format(self.name)
self.log('Command to be executed: %s', cmd)
stdin, stdout, stderr = self.ssh_client.client.exec_command(cmd)
result = stdout.read().decode('utf-8')
rc = stdout.channel.recv_exit_status()
if rc > 0:
self.log("Error in executing command: %s", cmd)
self.raise_error(stderr)
else:
self.log('Truststore (%s) deleted', self.name)
self.changed = True
def apply(self):
if self.is_truststore_exists():
self.log("Truststore (%s) exists", self.name)
if self.state == 'present':
self.msg = 'Truststore ({0}) already exists. No modifications done.'.format(self.name)
else:
self.delete_truststore()
self.msg = 'Truststore ({0}) deleted.'.format(self.name)
else:
if self.state == 'absent':
self.msg = 'Truststore ({0}) does not exist. No modifications done.'.format(self.name)
else:
self.download_file()
self.create_truststore()
self.msg = 'Truststore ({0}) created.'.format(self.name)
if self.module.check_mode:
self.msg = 'skipping changes due to check mode.'
self.module.exit_json(
changed=self.changed,
msg=self.msg
)
def main():
v = IBMSVTrustStore()
try:
v.apply()
except Exception as e:
v.log('Exception in apply(): \n%s', format_exc())
v.module.fail_json(msg='Module failed. Error [%s].' % to_native(e))
if __name__ == '__main__':
main()

View File

@@ -0,0 +1,304 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2022 IBM CORPORATION
# Author(s): Sanjaikumaar M <sanjaikumaar.m@ibm.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: ibm_sv_restore_cloud_backup
short_description: This module restores the cloud backup on IBM Spectrum Virtualize family storage systems
version_added: '1.11.0'
description:
- Ansible interface to manage restorevolume command.
options:
clustername:
description:
- The hostname or management IP of the Spectrum Virtualize storage system.
required: true
type: str
domain:
description:
- Domain for the Spectrum Virtualize storage system.
- Valid when hostname is used for the parameter I(clustername).
type: str
username:
description:
- REST API username for the Spectrum Virtualize storage system.
- The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
type: str
password:
description:
- REST API password for the Spectrum Virtualize storage system.
- The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
type: str
token:
description:
- The authentication token to verify a user on the Spectrum Virtualize storage system.
- To generate a token, use the M(ibm.spectrum_virtualize.ibm_svc_auth) module.
type: str
log_path:
description:
- Path of debug log file.
type: str
target_volume_name:
description:
- Specifies the volume name to restore onto.
type: str
required: true
source_volume_uid:
description:
- Specifies the volume snapshot to restore (specified by volume UID).
- This parameter is required to restore a backup from a different volume.
- Specified UID must be different from the UID of the volume being restored.
type: str
generation:
description:
- Specifies the snapshot generation to restore. The value must be a number.
type: int
restoreuid:
description:
- Specifies that the UID of the restored volume should be set to the UID
of the volume snapshot that is being restored.
- This parameter can be used only with I(source_volume_uid).
- The I(restoreuid) parameter is not supported if cloud account is in import mode.
type: bool
deletelatergenerations:
description:
- Specifies that all backup generations should be deleted after the generation is restored.
type: bool
cancel:
description:
- Specifies to cancel the restore operation.
type: bool
validate_certs:
description:
- Validates certification.
default: false
type: bool
author:
- Sanjaikumaar M (@sanjaikumaar)
notes:
- This module supports C(check_mode).
'''
EXAMPLES = '''
- name: Restore cloud backup
ibm.spectrum_virtualize.ibm_sv_restore_cloud_backup:
clustername: "{{cluster_A}}"
username: "{{username_A}}"
password: "{{password_A}}"
target_volume_name: vol1
source_volume_uid: 6005076400B70038E00000000000001C
generation: 1
- name: Restore cloud backup to different cluster
ibm.spectrum_virtualize.ibm_sv_restore_cloud_backup:
clustername: "{{cluster_B}}"
username: "{{username_B}}"
password: "{{password_B}}"
target_volume_name: vol2
source_volume_uid: 6005076400B70038E00000000000001C
generation: 1
- name: Cancel restore operation
ibm.spectrum_virtualize.ibm_sv_restore_cloud_backup:
clustername: "{{cluster_A}}"
username: "{{username_A}}"
password: "{{password_A}}"
target_volume_name: vol1
cancel: true
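# A sketch combining the documented restoreuid and deletelatergenerations options;
# it assumes a backup taken from the volume with the UID shown above and that the
# cloud account is not in import mode.
- name: Restore cloud backup keeping the snapshot UID and removing later generations
  ibm.spectrum_virtualize.ibm_sv_restore_cloud_backup:
    clustername: "{{cluster_B}}"
    username: "{{username_B}}"
    password: "{{password_B}}"
    target_volume_name: vol2
    source_volume_uid: 6005076400B70038E00000000000001C
    generation: 1
    restoreuid: true
    deletelatergenerations: true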
'''
RETURN = '''#'''
from traceback import format_exc
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.ibm.spectrum_virtualize.plugins.module_utils.ibm_svc_utils import (
IBMSVCRestApi, svc_argument_spec,
get_logger, strtobool
)
from ansible.module_utils._text import to_native
class IBMSVRestoreCloudBackup:
def __init__(self):
argument_spec = svc_argument_spec()
argument_spec.update(
dict(
target_volume_name=dict(
type='str',
required=True
),
source_volume_uid=dict(
type='str'
),
generation=dict(
type='int',
),
restoreuid=dict(
type='bool'
),
deletelatergenerations=dict(
type='bool'
),
cancel=dict(
type='bool'
),
)
)
self.module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
# Required parameters
self.target_volume_name = self.module.params.get('target_volume_name', '')
self.source_volume_uid = self.module.params.get('source_volume_uid', '')
self.generation = self.module.params.get('generation', '')
self.restoreuid = self.module.params.get('restoreuid', '')
self.deletelatergenerations = self.module.params.get('deletelatergenerations', False)
self.cancel = self.module.params.get('cancel', False)
self.basic_checks()
# logging setup
self.log_path = self.module.params['log_path']
log = get_logger(self.__class__.__name__, self.log_path)
self.log = log.info
# Dynamic variables
self.changed = False
self.msg = ''
self.restapi = IBMSVCRestApi(
module=self.module,
clustername=self.module.params['clustername'],
domain=self.module.params['domain'],
username=self.module.params['username'],
password=self.module.params['password'],
validate_certs=self.module.params['validate_certs'],
log_path=self.log_path,
token=self.module.params['token']
)
def basic_checks(self):
if not self.target_volume_name:
self.module.fail_json(msg='Missing mandatory parameter: target_volume_name')
if self.cancel:
invalids = ('source_volume_uid', 'generation', 'restoreuid', 'deletelatergenerations')
invalid_exists = ', '.join((var for var in invalids if getattr(self, var) not in {'', None}))
if invalid_exists:
self.module.fail_json(
msg='Parameters not supported during restore cancellation: {0}'.format(invalid_exists)
)
def validate(self):
if not self.cancel:
cmd = 'lsvolumebackupgeneration'
cmdargs = None
cmdopts = {}
if self.source_volume_uid:
cmdopts['uid'] = self.source_volume_uid
else:
cmdopts['volume'] = self.target_volume_name
result = self.restapi.svc_obj_info(cmd=cmd, cmdopts=cmdopts, cmdargs=cmdargs)
else:
result = True
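# Look up the target volume; if a cancellation was requested but the volume reports
# no restore in progress (restore_status of none/available), exit without making changes.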
cmd = 'lsvdisk'
vdata = {}
data = self.restapi.svc_obj_info(cmd=cmd, cmdopts=None, cmdargs=[self.target_volume_name])
if isinstance(data, list):
for d in data:
vdata.update(d)
else:
vdata = data
if vdata and self.cancel and vdata['restore_status'] in {'none', 'available'}:
self.module.exit_json(
msg='No restore operation is in progress for the volume ({0}).'.format(self.target_volume_name)
)
return result
def restore_volume(self):
if self.module.check_mode:
self.changed = True
return
cmd = 'restorevolume'
cmdargs = [self.target_volume_name]
cmdopts = {}
if self.cancel:
cmdopts['cancel'] = self.cancel
self.msg = 'Restore operation on volume ({0}) cancelled.'.format(self.target_volume_name)
else:
if self.source_volume_uid:
cmdopts['fromuid'] = self.source_volume_uid
if self.generation:
cmdopts['generation'] = self.generation
if self.restoreuid:
cmdopts['restoreuid'] = self.restoreuid
if self.deletelatergenerations:
cmdopts['deletelatergenerations'] = self.deletelatergenerations
self.msg = 'Restore operation on volume ({0}) started.'.format(self.target_volume_name)
response = self.restapi._svc_token_wrap(cmd, cmdopts, cmdargs=cmdargs)
self.log('response=%s', response)
self.changed = True
if response['out']:
if b'CMMVC9103E' in response['out']:
self.msg = 'CMMVC9103E: Volume ({0}) is not ready to perform any operation right now.'.format(
self.target_volume_name
)
self.changed = False
elif b'CMMVC9099E' in response['out']:
self.msg = 'No restore operation is in progress for the volume ({0}).'.format(self.target_volume_name)
self.changed = False
else:
self.module.fail_json(msg=response)
def apply(self):
if self.validate():
self.restore_volume()
self.log(self.msg)
else:
self.msg = 'No backup exists for the given source UID/volume.'
self.log(self.msg)
self.module.fail_json(msg=self.msg)
if self.module.check_mode:
self.msg = 'skipping changes due to check mode.'
self.log(self.msg)
self.module.exit_json(
changed=self.changed,
msg=self.msg
)
def main():
v = IBMSVRestoreCloudBackup()
try:
v.apply()
except Exception as e:
v.log('Exception in apply(): \n%s', format_exc())
v.module.fail_json(msg='Module failed. Error [%s].' % to_native(e))
if __name__ == '__main__':
main()

View File

@@ -0,0 +1,187 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2022 IBM CORPORATION
# Author(s): Shilpi Jain <shilpi.jain1@ibm.com>
#
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: ibm_sv_switch_replication_direction
short_description: This module switches the replication direction on IBM Spectrum Virtualize family storage systems
version_added: '1.10.0'
description:
- Ansible interface to manage the chvolumegroupreplication command.
- This module can be used to switch replication direction.
options:
clustername:
description:
- The hostname or management IP of the Spectrum Virtualize storage system.
required: true
type: str
domain:
description:
- Domain for the Spectrum Virtualize storage system.
- Valid when the hostname is used for the parameter I(clustername).
type: str
username:
description:
- REST API username for the Spectrum Virtualize storage system.
- The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
type: str
password:
description:
- REST API password for the Spectrum Virtualize storage system.
- The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
type: str
token:
description:
- The authentication token to verify a user on the Spectrum Virtualize storage system.
- To generate a token, use the M(ibm.spectrum_virtualize.ibm_svc_auth) module.
type: str
log_path:
description:
- Path of debug log file.
type: str
name:
description:
- Specifies the name of the volume group.
type: str
required: true
mode:
description:
- Specifies the replication mode of the volume group.
choices: [ independent, production ]
required: true
type: str
validate_certs:
description:
- Validates certification.
default: false
type: bool
author:
- Shilpi Jain(@Shilpi-J)
notes:
- This module supports C(check_mode).
'''
EXAMPLES = '''
- name: Switch to independent mode
ibm.spectrum_virtualize.ibm_sv_switch_replication_direction:
clustername: "{{ clustername }}"
username: "{{ username }}"
password: "{{ password }}"
mode: independent
name: vg0
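# A complementary sketch that switches the same volume group back to production
# mode; vg0 matches the name used above.
- name: Switch to production mode
  ibm.spectrum_virtualize.ibm_sv_switch_replication_direction:
    clustername: "{{ clustername }}"
    username: "{{ username }}"
    password: "{{ password }}"
    mode: production
    name: vg0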
'''
RETURN = '''#'''
from traceback import format_exc
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.ibm.spectrum_virtualize.plugins.module_utils.ibm_svc_utils import (
IBMSVCRestApi, svc_argument_spec,
get_logger
)
from ansible.module_utils._text import to_native
class IBMSVSwitchReplication:
def __init__(self):
argument_spec = svc_argument_spec()
argument_spec.update(
dict(
name=dict(
type='str',
required=True
),
mode=dict(
type='str',
choices=['independent', 'production'],
required=True
)
)
)
self.module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
# Required parameters
self.name = self.module.params['name']
self.mode = self.module.params['mode']
self.basic_checks()
# logging setup
self.log_path = self.module.params['log_path']
log = get_logger(self.__class__.__name__, self.log_path)
self.log = log.info
# Dynamic variables
self.changed = False
self.msg = ''
self.restapi = IBMSVCRestApi(
module=self.module,
clustername=self.module.params['clustername'],
domain=self.module.params['domain'],
username=self.module.params['username'],
password=self.module.params['password'],
validate_certs=self.module.params['validate_certs'],
log_path=self.log_path,
token=self.module.params['token']
)
def basic_checks(self):
if not self.name:
self.module.fail_json(
msg='Missing mandatory parameter: name'
)
# function to check whether volume group exists or not
def get_volumegroup_info(self):
return self.restapi.svc_obj_info(
'lsvolumegroup', None, [self.name]
)
def change_vg_mode(self):
cmd = 'chvolumegroupreplication'
cmdopts = {}
cmdopts["mode"] = self.mode
self.log("Changing replicaiton direction.. Command %s opts %s", cmd, cmdopts)
self.restapi.svc_run_command(cmd, cmdopts, cmdargs=[self.name])
def apply(self):
if self.module.check_mode:
self.msg = 'skipping changes due to check mode.'
else:
if self.get_volumegroup_info():
self.change_vg_mode()
self.changed = True
self.msg = "Replication direction on volume group [%s] has been modified." % self.name
else:
self.module.fail_json(msg="Volume group does not exist: [%s]" % self.name)
self.module.exit_json(
changed=self.changed,
msg=self.msg
)
def main():
v = IBMSVSwitchReplication()
try:
v.apply()
except Exception as e:
v.log('Exception in apply(): \n%s', format_exc())
v.module.fail_json(msg='Module failed. Error [%s].' % to_native(e))
if __name__ == '__main__':
main()

View File

@@ -0,0 +1,134 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2021 IBM CORPORATION
# Author(s): Shilpi Jain <shilpi.jain1@ibm.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: ibm_svc_auth
short_description: This module generates an authentication token for a user on IBM Spectrum Virtualize family storage system
description:
- Ansible interface to generate the authentication token.
The token is used to make REST API calls to the storage system.
version_added: "1.5.0"
options:
clustername:
description:
- The hostname or management IP of the Spectrum Virtualize storage system.
type: str
required: true
domain:
description:
- Domain for the Spectrum Virtualize storage system.
- Valid when hostname is used for the parameter I(clustername).
type: str
username:
description:
- REST API username for the Spectrum Virtualize storage system.
- This parameter is required in this module to generate the token.
type: str
password:
description:
- REST API password for the Spectrum Virtualize storage system.
- This parameter is required in this module to generate the token.
type: str
token:
description:
- The authentication token to verify a user on the Spectrum Virtualize storage system.
- This field is not required for ibm_svc_auth module.
type: str
validate_certs:
description:
- Validates certification.
default: false
type: bool
log_path:
description:
- Path of debug log file.
type: str
author:
- Shilpi Jain(@Shilpi-J)
notes:
- This module supports C(check_mode).
'''
EXAMPLES = '''
- name: Obtain an authentication token
register: result
ibm.spectrum_virtualize.ibm_svc_auth:
clustername: "{{clustername}}"
domain: "{{domain}}"
username: "{{username}}"
password: "{{password}}"
- name: Create a volume
ibm.spectrum_virtualize.ibm_svc_vdisk:
clustername: "{{clustername}}"
domain: "{{domain}}"
username: "{{username}}"
password: "{{password}}"
token: "{{result.token}}"
name: volume0
state: present
mdiskgrp: Pool0
easytier: 'off'
size: "4294967296"
unit: b
'''
RETURN = '''
token:
description: Authentication token for a user.
returned: success
type: str
version_added: 1.5.0
'''
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.ibm.spectrum_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi, svc_argument_spec, get_logger
from ansible.module_utils._text import to_native
class IBMSVCauth(object):
def __init__(self):
argument_spec = svc_argument_spec()
self.module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
# logging setup
log_path = self.module.params['log_path']
log = get_logger(self.__class__.__name__, log_path)
self.log = log.info
self.restapi = IBMSVCRestApi(
module=self.module,
clustername=self.module.params['clustername'],
domain=self.module.params['domain'],
username=self.module.params['username'],
password=self.module.params['password'],
validate_certs=self.module.params['validate_certs'],
log_path=log_path,
token=None
)
def main():
v = IBMSVCauth()
try:
if v.restapi.token is not None:
msg = "Authentication token generated"
v.module.exit_json(msg=msg, token=v.restapi.token)
else:
msg = "Authentication token is not generated"
v.module.fail_json(msg=msg, token=v.restapi.token)
except Exception as e:
v.module.fail_json(msg="Module failed. Error [%s]." % to_native(e))
if __name__ == '__main__':
main()

View File

@@ -0,0 +1,142 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2022 IBM CORPORATION
# Author(s): Shilpi Jain <shilpi.jain1@ibm.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: ibm_svc_complete_initial_setup
short_description: This module completes the initial setup configuration for LMC systems
description:
- It disables the GUI setup wizard for LMC systems.
- It is recommended to run this module after using the ibm_svc_initial_setup module for initial setup configuration.
- This module works on SSH. Paramiko must be installed to use this module.
version_added: "1.8.0"
options:
clustername:
description:
- The hostname or management IP of the Spectrum Virtualize storage system.
type: str
required: true
username:
description:
- Username for the Spectrum Virtualize storage system.
type: str
required: true
password:
description:
- Password for the Spectrum Virtualize storage system.
type: str
required: true
log_path:
description:
- Path of debug log file.
type: str
author:
- Shilpi Jain(@Shilpi-J)
notes:
- This module supports C(check_mode).
'''
EXAMPLES = '''
- name: Complete initial setup
ibm.spectrum_virtualize.ibm_svc_complete_initial_setup:
clustername: "{{clustername}}"
username: "{{username}}"
password: "{{password}}"
'''
RETURN = '''# '''
from traceback import format_exc
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible_collections.ibm.spectrum_virtualize.plugins.module_utils.ibm_svc_utils import svc_ssh_argument_spec, get_logger
from ansible_collections.ibm.spectrum_virtualize.plugins.module_utils.ibm_svc_ssh import IBMSVCssh
class IBMSVCCompleteSetup(object):
def __init__(self):
argument_spec = svc_ssh_argument_spec()
self.module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
# logging setup
log_path = self.module.params['log_path']
log = get_logger(self.__class__.__name__, log_path)
self.log = log.info
self.ssh_client = IBMSVCssh(
module=self.module,
clustername=self.module.params['clustername'],
username=self.module.params['username'],
password=self.module.params['password'],
look_for_keys=None,
key_filename=None,
log_path=log_path
)
def is_lmc(self):
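# Non-LMC systems report 'login_eula yes' in lsguicapabilities output; its absence indicates an LMC system.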
info_output = ""
cmd = 'svcinfo lsguicapabilities'
stdin, stdout, stderr = self.ssh_client.client.exec_command(cmd)
for line in stdout.readlines():
info_output += line
if 'login_eula yes' in info_output:
self.log("The system is non LMC")
return False
else:
self.log("The system is LMC")
return True
def disable_setup_wizard(self):
self.log("Disable setup wizard")
cmd = 'chsystem -easysetup no'
stdin, stdout, stderr = self.ssh_client.client.exec_command(cmd)
def apply(self):
changed = False
is_lmc = False
msg = ""
if self.module.check_mode:
msg = "skipping changes due to check mode"
else:
if not self.ssh_client.is_client_connected:
self.module.fail_json(msg="SSH client not connected")
is_lmc = self.is_lmc()
if is_lmc:
self.disable_setup_wizard()
changed = True
msg += "Initial Setup configuration completed. Setup wizard is disabled."
self.ssh_client._svc_disconnect()
self.module.exit_json(msg=msg, changed=changed)
else:
msg += "This is a non LMC system. Please log in GUI to accept EULA. "
msg += "More details are available in README (https://github.com/ansible-collections/ibm.spectrum_virtualize)."
self.ssh_client._svc_disconnect()
self.module.fail_json(msg=msg, changed=changed)
def main():
v = IBMSVCCompleteSetup()
try:
v.apply()
except Exception as e:
v.log("Exception in apply(): \n%s", format_exc())
v.module.fail_json(msg="Module failed. Error [%s]." % to_native(e))
if __name__ == '__main__':
main()

View File

@@ -0,0 +1,631 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2020 IBM CORPORATION
# Author(s): Peng Wang <wangpww@cn.ibm.com>
# Sreshtant Bohidar <sreshtant.bohidar@ibm.com>
# Rohit Kumar <rohit.kumar6@ibm.com>
# Sudheesh Reddy Satti<Sudheesh.Reddy.Satti@ibm.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: ibm_svc_host
short_description: This module manages hosts on IBM Spectrum Virtualize family storage systems
version_added: "1.0.0"
description:
- Ansible interface to manage 'mkhost', 'chhost', and 'rmhost' host commands.
options:
name:
description:
- Specifies a name or label for the new host object.
required: true
type: str
state:
description:
- Creates or updates (C(present)) or removes (C(absent)) a host.
choices: [ absent, present ]
required: true
type: str
clustername:
description:
- The hostname or management IP of the Spectrum Virtualize storage system.
required: true
type: str
domain:
description:
- Domain for the Spectrum Virtualize storage system.
- Valid when hostname is used for the parameter I(clustername).
type: str
username:
description:
- REST API username for the Spectrum Virtualize storage system.
- The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
type: str
password:
description:
- REST API password for the Spectrum Virtualize storage system.
- The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
type: str
token:
description:
- The authentication token to verify a user on the Spectrum Virtualize storage system.
- To generate a token, use the M(ibm.spectrum_virtualize.ibm_svc_auth) module.
type: str
version_added: '1.5.0'
fcwwpn:
description:
- List of Initiator WWPNs to be added to the host. The complete list of WWPNs must be provided.
- The parameters I(fcwwpn) and I(iscsiname) are mutually exclusive.
- Required when I(state=present), to create or modify a Fibre Channel (FC) host.
type: str
iscsiname:
description:
- List of Initiator IQNs to be added to the host. IQNs are separated by commas. The complete list of IQNs must be provided.
- The parameters I(fcwwpn) and I(iscsiname) are mutually exclusive.
- Valid when I(state=present), to create host.
type: str
iogrp:
description:
- Specifies a set of one or more input/output (I/O) groups from which the host can access the volumes.
Once specified, this parameter cannot be modified.
- Valid when I(state=present), to create a host.
type: str
protocol:
description:
- Specifies the protocol used by the host to communicate with the storage system. Only 'scsi' protocol is supported.
- Valid when I(state=present), to create a host.
type: str
type:
description:
- Specifies the type of host.
- Valid when I(state=present), to create or modify a host.
type: str
site:
description:
- Specifies the site name of the host.
- Valid when I(state=present), to create or modify a host.
type: str
hostcluster:
description:
- Specifies the name of the host cluster to which the host object is to be added.
A host cluster must exist before a host object can be added to it.
- Parameters I(hostcluster) and I(nohostcluster) are mutually exclusive.
- Valid when I(state=present), to create or modify a host.
type: str
version_added: '1.5.0'
nohostcluster:
description:
- If specified as C(True), host object is removed from the host cluster.
- Parameters I(hostcluster) and I(nohostcluster) are mutually exclusive.
- Valid when I(state=present), to modify an existing host.
type: bool
version_added: '1.5.0'
old_name:
description:
- Specifies the old name of the host while renaming.
- Valid when I(state=present), to rename an existing host.
type: str
version_added: '1.9.0'
log_path:
description:
- Path of debug log file.
type: str
validate_certs:
description:
- Validates certification.
default: false
type: bool
author:
- Sreshtant Bohidar (@Sreshtant-Bohidar)
- Rohit Kumar (@rohitk-github)
notes:
- This module supports C(check_mode).
'''
EXAMPLES = '''
- name: Define a new iSCSI host
ibm.spectrum_virtualize.ibm_svc_host:
clustername: "{{clustername}}"
domain: "{{domain}}"
username: "{{username}}"
password: "{{password}}"
log_path: /tmp/playbook.debug
name: host4test
state: present
iscsiname: iqn.1994-05.com.redhat:2e358e438b8a
iogrp: 0:1:2:3
protocol: scsi
type: generic
site: site-name
- name: Add a host to an existing host cluster
ibm.spectrum_virtualize.ibm_svc_host:
clustername: "{{clustername}}"
domain: "{{domain}}"
username: "{{username}}"
password: "{{password}}"
log_path: /tmp/playbook.debug
name: host4test
state: present
hostcluster: hostcluster0
- name: Define a new FC host
ibm.spectrum_virtualize.ibm_svc_host:
clustername: "{{clustername}}"
domain: "{{domain}}"
username: "{{username}}"
password: "{{password}}"
log_path: /tmp/playbook.debug
name: host4test
state: present
fcwwpn: 100000109B570216:1000001AA0570266
iogrp: 0:1:2:3
protocol: scsi
type: generic
site: site-name
- name: Rename an existing host
ibm.spectrum_virtualize.ibm_svc_host:
clustername: "{{ clustername }}"
domain: "{{ domain }}"
username: "{{ username }}"
password: "{{ password }}"
old_name: "host4test"
name: "new_host_name"
state: "present"
- name: Create an iSCSI host
ibm.spectrum_virtualize.ibm_svc_host:
clustername: "{{clustername}}"
domain: "{{domain}}"
username: "{{username}}"
password: "{{password}}"
log_path: /tmp/playbook.debug
name: host_name
iscsiname: iqn.1994-05.com.redhat:2e358e438b8a,iqn.localhost.hostid.7f000001
state: present
- name: Delete a host
ibm.spectrum_virtualize.ibm_svc_host:
clustername: "{{clustername}}"
domain: "{{domain}}"
username: "{{username}}"
password: "{{password}}"
log_path: /tmp/playbook.debug
name: new_host_name
state: absent
'''
RETURN = '''#'''
from traceback import format_exc
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.ibm.spectrum_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi, svc_argument_spec, get_logger
from ansible.module_utils._text import to_native
class IBMSVChost(object):
def __init__(self):
argument_spec = svc_argument_spec()
argument_spec.update(
dict(
name=dict(type='str', required=True),
state=dict(type='str', required=True, choices=['absent',
'present']),
fcwwpn=dict(type='str', required=False),
iscsiname=dict(type='str', required=False),
iogrp=dict(type='str', required=False),
protocol=dict(type='str', required=False),
type=dict(type='str'),
site=dict(type='str'),
hostcluster=dict(type='str'),
nohostcluster=dict(type='bool'),
old_name=dict(type='str', required=False)
)
)
self.module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
# logging setup
log_path = self.module.params['log_path']
log = get_logger(self.__class__.__name__, log_path)
self.log = log.info
# Required
self.name = self.module.params['name']
self.state = self.module.params['state']
# Optional
self.fcwwpn = self.module.params.get('fcwwpn', '')
self.iscsiname = self.module.params.get('iscsiname', '')
self.iogrp = self.module.params.get('iogrp', '')
self.protocol = self.module.params.get('protocol', '')
self.type = self.module.params.get('type', '')
self.site = self.module.params.get('site', '')
self.hostcluster = self.module.params.get('hostcluster', '')
self.nohostcluster = self.module.params.get('nohostcluster', '')
self.old_name = self.module.params.get('old_name', '')
# internal variable
self.changed = False
# Handling duplicate fcwwpn
if self.fcwwpn:
dup_fcwwpn = self.duplicate_checker(self.fcwwpn.split(':'))
if dup_fcwwpn:
self.module.fail_json(msg='The parameter {0} has been entered multiple times. Enter the parameter only one time.'.format(dup_fcwwpn))
# Handling duplicate iscsiname
if self.iscsiname:
dup_iscsiname = self.duplicate_checker(self.iscsiname.split(','))
if dup_iscsiname:
self.module.fail_json(msg='The parameter {0} has been entered multiple times. Enter the parameter only one time.'.format(dup_iscsiname))
# Handling for missing mandatory parameter name
if not self.name:
self.module.fail_json(msg='Missing mandatory parameter: name')
# Handling for parameter protocol
if self.protocol:
if self.protocol != 'scsi':
self.module.fail_json(msg="[{0}] is not supported. Only 'scsi' protocol is supported.".format(self.protocol))
self.restapi = IBMSVCRestApi(
module=self.module,
clustername=self.module.params['clustername'],
domain=self.module.params['domain'],
username=self.module.params['username'],
password=self.module.params['password'],
validate_certs=self.module.params['validate_certs'],
log_path=log_path,
token=self.module.params['token']
)
# for validating parameters while renaming a host
def parameter_handling_while_renaming(self):
parameters = {
"fcwwpn": self.fcwwpn,
"iscsiname": self.iscsiname,
"iogrp": self.iogrp,
"protocol": self.protocol,
"type": self.type,
"site": self.site,
"hostcluster": self.hostcluster,
"nohostcluster": self.nohostcluster
}
parameters_exists = [parameter for parameter, value in parameters.items() if value]
if parameters_exists:
self.module.fail_json(msg="Parameters {0} not supported while renaming a host.".format(parameters_exists))
def duplicate_checker(self, items):
unique_items = set(items)
if len(items) != len(unique_items):
return [element for element in unique_items if items.count(element) > 1]
else:
return []
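# Example (illustrative): duplicate_checker(['A', 'B', 'A']) returns ['A'],
# while duplicate_checker(['A', 'B']) returns []. __init__ uses this to reject
# fcwwpn or iscsiname values that list the same port more than once.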
def get_existing_host(self, host_name):
merged_result = {}
data = self.restapi.svc_obj_info(cmd='lshost', cmdopts=None,
cmdargs=[host_name])
if isinstance(data, list):
for d in data:
merged_result.update(d)
else:
merged_result = data
return merged_result
# TBD: Implement a more generic way to check for properties to modify.
def host_probe(self, data):
props = []
if self.hostcluster and self.nohostcluster:
self.module.fail_json(msg="You must not pass in both hostcluster and "
"nohostcluster to the module.")
if self.hostcluster and (self.hostcluster != data['host_cluster_name']):
if data['host_cluster_name'] != '':
self.module.fail_json(msg="Host already belongs to hostcluster [%s]" % data['host_cluster_name'])
else:
props += ['hostcluster']
# TBD: The parameter is fcwwpn but the view has fcwwpn label.
if self.type:
if self.type != data['type']:
props += ['type']
if self.fcwwpn:
self.existing_fcwwpn = [node["WWPN"] for node in data['nodes'] if "WWPN" in node]
self.input_fcwwpn = self.fcwwpn.upper().split(":")
if set(self.existing_fcwwpn).symmetric_difference(set(self.input_fcwwpn)):
props += ['fcwwpn']
if self.iscsiname:
self.existing_iscsiname = [node["iscsi_name"] for node in data['nodes'] if "iscsi_name" in node]
self.input_iscsiname = self.iscsiname.split(",")
if set(self.existing_iscsiname).symmetric_difference(set(self.input_iscsiname)):
props += ['iscsiname']
if self.site:
if self.site != data['site_name']:
props += ['site']
if self.nohostcluster:
if data['host_cluster_name'] != '':
props += ['nohostcluster']
self.log("host_probe props='%s'", props)
return props
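# Example (illustrative): for an existing host that needs a new site and an extra
# WWPN, host_probe() could return ['fcwwpn', 'site']; host_update() then applies
# only those properties.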
def host_create(self):
if (not self.fcwwpn) and (not self.iscsiname):
self.module.fail_json(msg="You must pass in fcwwpn or iscsiname "
"to the module.")
if self.fcwwpn and self.iscsiname:
self.module.fail_json(msg="You must not pass in both fcwwpn and "
"iscsiname to the module.")
if self.hostcluster and self.nohostcluster:
self.module.fail_json(msg="You must not pass in both hostcluster and "
"nohostcluster to the module.")
if self.module.check_mode:
self.changed = True
return
self.log("creating host '%s'", self.name)
# Make command
cmd = 'mkhost'
cmdopts = {'name': self.name, 'force': True}
if self.fcwwpn:
cmdopts['fcwwpn'] = self.fcwwpn
elif self.iscsiname:
cmdopts['iscsiname'] = self.iscsiname
if self.protocol:
cmdopts['protocol'] = self.protocol
if self.iogrp:
cmdopts['iogrp'] = self.iogrp
if self.type:
cmdopts['type'] = self.type
if self.site:
cmdopts['site'] = self.site
self.log("creating host command '%s' opts '%s'",
self.fcwwpn, self.type)
# Run command
result = self.restapi.svc_run_command(cmd, cmdopts, cmdargs=None)
self.log("create host result '%s'", result)
if result and 'message' in result:
self.changed = True
self.log("create host result message '%s'", (result['message']))
else:
self.module.fail_json(
msg="Failed to create host [%s]" % self.name)
def host_fcwwpn_update(self):
to_be_removed = ':'.join(list(set(self.existing_fcwwpn) - set(self.input_fcwwpn)))
if to_be_removed:
self.restapi.svc_run_command(
'rmhostport',
{'fcwwpn': to_be_removed, 'force': True},
[self.name]
)
self.log('%s removed from %s', to_be_removed, self.name)
to_be_added = ':'.join(list(set(self.input_fcwwpn) - set(self.existing_fcwwpn)))
if to_be_added:
self.restapi.svc_run_command(
'addhostport',
{'fcwwpn': to_be_added, 'force': True},
[self.name]
)
self.log('%s added to %s', to_be_added, self.name)
def host_iscsiname_update(self):
to_be_removed = ','.join(list(set(self.existing_iscsiname) - set(self.input_iscsiname)))
if to_be_removed:
self.restapi.svc_run_command(
'rmhostport',
{'iscsiname': to_be_removed, 'force': True},
[self.name]
)
self.log('%s removed from %s', to_be_removed, self.name)
to_be_added = ','.join(list(set(self.input_iscsiname) - set(self.existing_iscsiname)))
if to_be_added:
self.restapi.svc_run_command(
'addhostport',
{'iscsiname': to_be_added, 'force': True},
[self.name]
)
self.log('%s added to %s', to_be_added, self.name)
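# Illustrative note: both update helpers above take the set difference between the
# configured and requested ports, so only ports that actually changed are passed
# to rmhostport/addhostport.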
def host_update(self, modify, host_data):
# update the host
self.log("updating host '%s'", self.name)
if 'hostcluster' in modify:
self.addhostcluster()
elif 'nohostcluster' in modify:
self.removehostcluster(host_data)
cmd = 'chhost'
cmdopts = {}
if 'fcwwpn' in modify:
self.host_fcwwpn_update()
self.changed = True
self.log("fcwwpn of %s updated", self.name)
if 'iscsiname' in modify:
self.host_iscsiname_update()
self.changed = True
self.log("iscsiname of %s updated", self.name)
if 'type' in modify:
cmdopts['type'] = self.type
if 'site' in modify:
cmdopts['site'] = self.site
if cmdopts:
cmdargs = [self.name]
self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
# Any error will have been raised in svc_run_command
# chhost does not output anything when successful.
self.changed = True
self.log("type of %s updated", self.name)
def host_delete(self):
if self.module.check_mode:
self.changed = True
return
self.log("deleting host '%s'", self.name)
cmd = 'rmhost'
cmdopts = {}
cmdargs = [self.name]
self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
# Any error will have been raised in svc_run_command
# rmhost does not output anything when successful.
self.changed = True
def get_existing_hostcluster(self):
self.log("get_existing_hostcluster %s", self.hostcluster)
data = self.restapi.svc_obj_info(cmd='lshostcluster', cmdopts=None,
cmdargs=[self.hostcluster])
return data
def addhostcluster(self):
if self.module.check_mode:
self.changed = True
return
self.log("Adding host '%s' in hostcluster %s", self.name, self.hostcluster)
cmd = 'addhostclustermember'
cmdopts = {}
cmdargs = [self.hostcluster]
cmdopts['host'] = self.name
self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
# Any error will have been raised in svc_run_command
# addhostclustermember does not output anything when successful.
self.changed = True
def removehostcluster(self, data):
if self.module.check_mode:
self.changed = True
return
self.log("removing host '%s' from hostcluster %s", self.name, data['host_cluster_name'])
hostcluster_name = data['host_cluster_name']
cmd = 'rmhostclustermember'
cmdopts = {}
cmdargs = [hostcluster_name]
cmdopts['host'] = self.name
cmdopts['keepmappings'] = True
self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
# Any error will have been raised in svc_run_command
# rmhostclustermember does not output anything when successful.
self.changed = True
# function for renaming an existing host with a new name
def host_rename(self, host_data):
msg = ''
self.parameter_handling_while_renaming()
old_host_data = self.get_existing_host(self.old_name)
if not old_host_data and not host_data:
self.module.fail_json(msg="Host [{0}] does not exists.".format(self.old_name))
elif old_host_data and host_data:
self.module.fail_json(msg="Host [{0}] already exists.".format(self.name))
elif not old_host_data and host_data:
msg = "Host with name [{0}] already exists.".format(self.name)
elif old_host_data and not host_data:
# when check_mode is enabled
if self.module.check_mode:
self.changed = True
return
self.restapi.svc_run_command('chhost', {'name': self.name}, [self.old_name])
self.changed = True
msg = "Host [{0}] has been successfully rename to [{1}].".format(self.old_name, self.name)
return msg
def apply(self):
changed = False
msg = None
modify = []
host_data = self.get_existing_host(self.name)
if self.state == 'present' and self.old_name:
msg = self.host_rename(host_data)
elif self.state == 'absent' and self.old_name:
self.module.fail_json(msg="Rename functionality is not supported when 'state' is absent.")
else:
if host_data:
if self.state == 'absent':
self.log("CHANGED: host exists, but requested state is 'absent'")
changed = True
elif self.state == 'present':
# This is where we detect if chhost should be called
modify = self.host_probe(host_data)
if modify:
changed = True
else:
if self.state == 'present':
self.log("CHANGED: host does not exist, but requested state is 'present'")
changed = True
if changed:
if self.state == 'present':
if self.hostcluster:
hc_data = self.get_existing_hostcluster()
if hc_data is None:
self.module.fail_json(msg="Host cluster must already exist before its usage in this module")
elif not host_data and hc_data:
self.host_create()
self.addhostcluster()
msg = "host %s has been created and added to hostcluster." % self.name
elif not host_data:
self.host_create()
msg = "host %s has been created." % self.name
if host_data and modify:
# This is where we would modify
self.host_update(modify, host_data)
msg = "host [%s] has been modified." % self.name
elif self.state == 'absent':
self.host_delete()
msg = "host [%s] has been deleted." % self.name
else:
self.log("exiting with no changes")
if self.state == 'absent':
msg = "host [%s] did not exist." % self.name
else:
msg = "host [%s] already exists." % self.name
if self.module.check_mode:
msg = 'skipping changes due to check mode'
self.module.exit_json(msg=msg, changed=self.changed)
def main():
v = IBMSVChost()
try:
v.apply()
except Exception as e:
v.log("Exception in apply(): \n%s", format_exc())
v.module.fail_json(msg="Module failed. Error [%s]." % to_native(e))
if __name__ == '__main__':
main()

View File

@@ -0,0 +1,344 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2021 IBM CORPORATION
# Author(s): Shilpi Jain <shilpi.jain1@ibm.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: ibm_svc_hostcluster
short_description: This module manages host clusters on IBM Spectrum Virtualize family storage systems
version_added: "1.5.0"
description:
- Ansible interface to manage 'mkhostcluster', 'chhostcluster' and 'rmhostcluster' host cluster commands.
options:
name:
description:
- Specifies a name or label for the new host cluster object.
required: true
type: str
state:
description:
- Creates (C(present)) or removes (C(absent)) a host cluster.
choices: [ absent, present ]
required: true
type: str
clustername:
description:
- The hostname or management IP of the Spectrum Virtualize storage system.
required: true
type: str
domain:
description:
- Domain for the Spectrum Virtualize storage system.
- Valid when hostname is used for the parameter I(clustername).
type: str
username:
description:
- REST API username for the Spectrum Virtualize storage system.
- The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
type: str
password:
description:
- REST API password for the Spectrum Virtualize storage system.
- The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
type: str
token:
description:
- The authentication token to verify a user on the Spectrum Virtualize storage system.
- To generate a token, use the M(ibm.spectrum_virtualize.ibm_svc_auth) module.
type: str
ownershipgroup:
description:
- The name of the ownership group to which the host cluster object is being added.
- Parameters I(ownershipgroup) and I(noownershipgroup) are mutually exclusive.
- Applies when I(state=present).
type: str
version_added: '1.6.0'
noownershipgroup:
description:
- If specified as C(true), the host cluster object is removed from the ownership group to which it belongs.
- Parameters I(ownershipgroup) and I(noownershipgroup) are mutually exclusive.
- Applies when I(state=present) to modify an existing hostcluster.
type: bool
version_added: '1.6.0'
removeallhosts:
description:
- Specifies that all hosts in the host cluster and the associated host cluster object be deleted.
- Applies when I(state=absent).
type: bool
log_path:
description:
- Path of debug log file.
type: str
validate_certs:
description:
- Validates certification.
default: false
type: bool
author:
- Shilpi Jain (@Shilpi-J)
notes:
- This module supports C(check_mode).
'''
EXAMPLES = '''
- name: Define a new host cluster
ibm.spectrum_virtualize.ibm_svc_hostcluster:
clustername: "{{clustername}}"
domain: "{{domain}}"
username: "{{username}}"
password: "{{password}}"
log_path: /tmp/playbook.debug
name: hostcluster0
state: present
ownershipgroup: group1
- name: Update the ownershipgroup of a host cluster
ibm.spectrum_virtualize.ibm_svc_hostcluster:
clustername: "{{clustername}}"
domain: "{{domain}}"
username: "{{username}}"
password: "{{password}}"
log_path: /tmp/playbook.debug
name: hostcluster0
state: present
noownershipgroup: True
- name: Delete a host cluster
ibm.spectrum_virtualize.ibm_svc_hostcluster:
clustername: "{{clustername}}"
domain: "{{domain}}"
username: "{{username}}"
password: "{{password}}"
log_path: /tmp/playbook.debug
name: hostcluster0
state: absent
removeallhosts: True
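# Illustrative sketch: the same operations can authenticate with a token generated by
# the ibm.spectrum_virtualize.ibm_svc_auth module; the registered variable 'svc_auth'
# and its 'token' field are assumed here for illustration.
- name: Update a host cluster using token-based authentication
  ibm.spectrum_virtualize.ibm_svc_hostcluster:
    clustername: "{{clustername}}"
    domain: "{{domain}}"
    token: "{{ svc_auth.token }}"
    log_path: /tmp/playbook.debug
    name: hostcluster0
    state: present
    ownershipgroup: group1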
'''
RETURN = '''#'''
from traceback import format_exc
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.ibm.spectrum_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi, svc_argument_spec, get_logger
from ansible.module_utils._text import to_native
class IBMSVChostcluster(object):
def __init__(self):
argument_spec = svc_argument_spec()
argument_spec.update(
dict(
name=dict(type='str', required=True),
state=dict(type='str', required=True, choices=['absent',
'present']),
ownershipgroup=dict(type='str'),
noownershipgroup=dict(type='bool'),
removeallhosts=dict(type='bool')
)
)
self.changed = ""
self.module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
# logging setup
log_path = self.module.params['log_path']
log = get_logger(self.__class__.__name__, log_path)
self.log = log.info
# Required
self.name = self.module.params['name']
self.state = self.module.params['state']
# Optional
self.ownershipgroup = self.module.params.get('ownershipgroup', '')
self.noownershipgroup = self.module.params.get('noownershipgroup', '')
self.removeallhosts = self.module.params.get('removeallhosts', '')
# Handling missing mandatory parameter name
if not self.name:
self.module.fail_json(msg='Missing mandatory parameter: name')
self.restapi = IBMSVCRestApi(
module=self.module,
clustername=self.module.params['clustername'],
domain=self.module.params['domain'],
username=self.module.params['username'],
password=self.module.params['password'],
validate_certs=self.module.params['validate_certs'],
log_path=log_path,
token=self.module.params['token']
)
def get_existing_hostcluster(self):
merged_result = {}
data = self.restapi.svc_obj_info(cmd='lshostcluster', cmdopts=None,
cmdargs=[self.name])
if isinstance(data, list):
for d in data:
merged_result.update(d)
else:
merged_result = data
return merged_result
def hostcluster_probe(self, data):
props = []
if self.removeallhosts:
self.module.fail_json(msg="Parameter 'removeallhosts' can be used only while deleting hostcluster")
if self.ownershipgroup and self.noownershipgroup:
self.module.fail_json(msg="You must not pass in both 'ownershipgroup' and "
"'noownershipgroup' to the module.")
if data['owner_name'] and self.noownershipgroup:
props += ['noownershipgroup']
if self.ownershipgroup and (not data['owner_name'] or self.ownershipgroup != data['owner_name']):
props += ['ownershipgroup']
if not props:
props = None
self.log("hostcluster_probe props='%s'", props)
return props
def hostcluster_create(self):
if self.removeallhosts:
self.module.fail_json(msg="Parameter 'removeallhosts' cannot be passed while creating hostcluster")
if self.module.check_mode:
self.changed = True
return
# Make command
cmd = 'mkhostcluster'
cmdopts = {'name': self.name}
if self.ownershipgroup:
cmdopts['ownershipgroup'] = self.ownershipgroup
self.log("creating host cluster command opts '%s'",
self.ownershipgroup)
# Run command
result = self.restapi.svc_run_command(cmd, cmdopts, cmdargs=None)
self.log("create host cluster result '%s'", result)
if 'message' in result:
self.changed = True
self.log("create host cluster result message '%s'", (result['message']))
else:
self.module.fail_json(
msg="Failed to create host cluster [%s]" % self.name)
def hostcluster_update(self, modify):
if self.module.check_mode:
self.changed = True
return
self.log("updating host cluster '%s'", self.name)
cmd = 'chhostcluster'
cmdopts = {}
if 'ownershipgroup' in modify:
cmdopts['ownershipgroup'] = self.ownershipgroup
elif 'noownershipgroup' in modify:
cmdopts['noownershipgroup'] = self.noownershipgroup
if cmdopts:
cmdargs = [self.name]
self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
# Any error will have been raised in svc_run_command
# chhostcluster does not output anything when successful.
self.changed = True
self.log("Properties of %s updated", self.name)
def hostcluster_delete(self):
if self.module.check_mode:
self.changed = True
return
self.log("deleting host cluster '%s'", self.name)
cmd = 'rmhostcluster'
cmdopts = {}
cmdargs = [self.name]
if self.removeallhosts:
cmdopts = {'force': True}
cmdopts['removeallhosts'] = self.removeallhosts
self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
# Any error will have been raised in svc_run_command
# rmhostcluster does not output anything when successful.
self.changed = True
def apply(self):
changed = False
msg = None
modify = []
hc_data = self.get_existing_hostcluster()
if hc_data:
if self.state == 'absent':
self.log("CHANGED: host cluster exists, but requested "
"state is 'absent'")
changed = True
elif self.state == 'present':
# This is where we detect if chhostcluster should be called
modify = self.hostcluster_probe(hc_data)
if modify:
changed = True
else:
if self.state == 'present':
self.log("CHANGED: host cluster does not exist, "
"but requested state is 'present'")
changed = True
if changed:
if self.state == 'present':
if not hc_data:
self.hostcluster_create()
msg = "host cluster %s has been created." % self.name
else:
# This is where we would modify
self.hostcluster_update(modify)
msg = "host cluster [%s] has been modified." % self.name
elif self.state == 'absent':
self.hostcluster_delete()
msg = "host cluster [%s] has been deleted." % self.name
if self.module.check_mode:
msg = "skipping changes due to check mode"
else:
self.log("exiting with no changes")
if self.state == 'absent':
msg = "host cluster [%s] did not exist." % self.name
else:
msg = "host cluster [%s] already exists. No modifications done." % self.name
self.module.exit_json(msg=msg, changed=changed)
def main():
v = IBMSVChostcluster()
try:
v.apply()
except Exception as e:
v.log("Exception in apply(): \n%s", format_exc())
v.module.fail_json(msg="Module failed. Error [%s]." % to_native(e))
if __name__ == '__main__':
main()

View File

@@ -0,0 +1,578 @@
#!/usr/bin/python
# Copyright (C) 2020 IBM CORPORATION
# Author(s): Peng Wang <wangpww@cn.ibm.com>
# Sreshtant Bohidar <sreshtant.bohidar@ibm.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: ibm_svc_info
short_description: This module gathers information from IBM Spectrum Virtualize family storage systems
version_added: "1.0.0"
description:
- Gathers the list of specified IBM Spectrum Virtualize family storage system
entities. These include the list of nodes, pools, volumes, hosts,
host clusters, FC ports, iSCSI ports, target port FC, FC consistgrp,
vdiskcopy, I/O groups, FC map, FC connectivity, NVMe fabric,
array, and system.
author:
- Peng Wang (@wangpww)
options:
clustername:
description:
- The hostname or management IP of the
Spectrum Virtualize storage system.
type: str
required: true
domain:
description:
- Domain for the Spectrum Virtualize storage system.
- Valid when hostname is used for the parameter I(clustername).
type: str
username:
description:
- REST API username for the Spectrum Virtualize storage system.
- The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
type: str
password:
description:
- REST API password for the Spectrum Virtualize storage system.
- The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
type: str
token:
description:
- The authentication token to verify a user on the Spectrum Virtualize storage system.
- To generate a token, use the ibm_svc_auth module.
type: str
version_added: '1.5.0'
log_path:
description:
- Path of debug log file.
type: str
validate_certs:
description:
- Validates certification.
default: false
type: bool
objectname:
description:
- If specified, only the instance with the I(objectname) is returned. If not specified, all the instances are returned.
type: str
gather_subset:
type: list
elements: str
description:
- List of string variables to specify the Spectrum Virtualize entities
for which information is required.
- all - list of all Spectrum Virtualize entities
supported by the module.
- vol - lists information for VDisks.
- pool - lists information for mdiskgrps.
- node - lists information for nodes.
- iog - lists information for I/O groups.
- host - lists information for hosts.
- hostvdiskmap - lists all vdisks mapped to the host 'objectname'.
- vdiskhostmap - lists all hosts that the vdisk 'objectname' is mapped to.
- hc - lists information for host clusters.
- fc - lists information for FC connectivity.
- fcport - lists information for FC ports.
- targetportfc - lists information for WWPN which is required to set up
FC zoning and to display the current failover status
of host I/O ports.
- fcmap - lists information for FC maps.
- rcrelationship - lists information for remote copy relationships.
- fcconsistgrp - displays a concise list or a detailed
view of flash copy consistency groups.
- rcconsistgrp - displays a concise list or a detailed
view of remote copy consistency groups.
- iscsiport - lists information for iSCSI ports.
- vdiskcopy - lists information for volume copy.
- array - lists information for array MDisks.
- system - displays the storage system information.
choices: [vol, pool, node, iog, host, hostvdiskmap, vdiskhostmap, hc, fcport
, iscsiport, fc, fcmap, fcconsistgrp, rcrelationship, rcconsistgrp
, vdiskcopy, targetportfc, array, system, all]
default: "all"
notes:
- This module supports C(check_mode).
'''
EXAMPLES = '''
- name: Get volume info
ibm.spectrum_virtualize.ibm_svc_info:
clustername: "{{clustername}}"
domain: "{{domain}}"
username: "{{username}}"
password: "{{password}}"
log_path: /tmp/ansible.log
gather_subset: vol
- name: Get volume info
ibm.spectrum_virtualize.ibm_svc_info:
clustername: "{{clustername}}"
domain: "{{domain}}"
username: "{{username}}"
password: "{{password}}"
log_path: /tmp/ansible.log
objectname: volumename
gather_subset: vol
- name: Get pool info
ibm.spectrum_virtualize.ibm_svc_info:
clustername: "{{clustername}}"
domain: "{{domain}}"
username: "{{username}}"
password: "{{password}}"
log_path: /tmp/ansible.log
gather_subset: pool
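# Illustrative sketch: subsets such as vdiskhostmap require an objectname, as described
# in the gather_subset option above; the volume name 'volume0' is assumed here for
# illustration only.
- name: Get hosts that a given volume is mapped to
  ibm.spectrum_virtualize.ibm_svc_info:
    clustername: "{{clustername}}"
    domain: "{{domain}}"
    username: "{{username}}"
    password: "{{password}}"
    log_path: /tmp/ansible.log
    objectname: volume0
    gather_subset: vdiskhostmap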
'''
RETURN = '''#'''
from traceback import format_exc
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.ibm.spectrum_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi, svc_argument_spec, get_logger
from ansible.module_utils._text import to_native
class IBMSVCGatherInfo(object):
def __init__(self):
argument_spec = svc_argument_spec()
argument_spec.update(
dict(
objectname=dict(type='str'),
gather_subset=dict(type='list', elements='str', required=False,
default=['all'],
choices=['vol',
'pool',
'node',
'iog',
'host',
'hostvdiskmap',
'vdiskhostmap',
'hc',
'fc',
'fcport',
'targetportfc',
'iscsiport',
'fcmap',
'rcrelationship',
'fcconsistgrp',
'rcconsistgrp',
'vdiskcopy',
'array',
'system',
'all'
]),
)
)
self.module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
# logging setup
log_path = self.module.params['log_path']
self.log = get_logger(self.__class__.__name__, log_path)
self.objectname = self.module.params['objectname']
self.restapi = IBMSVCRestApi(
module=self.module,
clustername=self.module.params['clustername'],
domain=self.module.params['domain'],
username=self.module.params['username'],
password=self.module.params['password'],
validate_certs=self.module.params['validate_certs'],
log_path=log_path,
token=self.module.params['token']
)
def get_volumes_list(self):
try:
cmdargs = [self.objectname] if self.objectname else None
vols = self.restapi.svc_obj_info(cmd='lsvdisk', cmdopts=None,
cmdargs=cmdargs)
self.log.info("Successfully listed %d volumes from array %s",
len(vols), self.module.params['clustername'])
return vols
except Exception as e:
msg = ('Get Volumes from array %s failed with error %s ',
self.module.params['clustername'], str(e))
self.log.error(msg)
self.module.fail_json(msg=msg)
def get_pools_list(self):
try:
cmdargs = [self.objectname] if self.objectname else None
pools = self.restapi.svc_obj_info(cmd='lsmdiskgrp', cmdopts=None,
cmdargs=cmdargs)
self.log.info('Successfully listed %d pools from array '
'%s', len(pools), self.module.params['clustername'])
return pools
except Exception as e:
msg = ('Get Pools from array %s failed with error %s ',
self.module.params['clustername'], str(e))
self.log.error(msg)
self.module.fail_json(msg=msg)
def get_nodes_list(self):
try:
cmdargs = [self.objectname] if self.objectname else None
nodes = self.restapi.svc_obj_info(cmd='lsnode', cmdopts=None,
cmdargs=cmdargs)
self.log.info('Successfully listed %d nodes from array %s',
len(nodes), self.module.params['clustername'])
return nodes
except Exception as e:
msg = ('Get Nodes from array %s failed with error %s ',
self.module.params['clustername'], str(e))
self.log.error(msg)
self.module.fail_json(msg=msg)
def get_hosts_list(self):
try:
cmdargs = [self.objectname] if self.objectname else None
hosts = self.restapi.svc_obj_info(cmd='lshost', cmdopts=None,
cmdargs=cmdargs)
self.log.info('Successfully listed %d hosts from array '
'%s', len(hosts), self.module.params['clustername'])
return hosts
except Exception as e:
msg = ('Get Hosts from array %s failed with error %s ',
self.module.params['clustername'], str(e))
self.log.error(msg)
self.module.fail_json(msg=msg)
def get_vdisk_host_map(self):
try:
cmdargs = [self.objectname] if self.objectname else None
vhmaps = self.restapi.svc_obj_info(cmd='lsvdiskhostmap', cmdopts=None,
cmdargs=cmdargs)
self.log.info('Successfully listed %d vdisk host maps from array '
'%s', len(vhmaps), self.module.params['clustername'])
return vhmaps
except Exception as e:
msg = ('Get Vdisk Host Maps from array %s failed with error %s ',
self.module.params['clustername'], str(e))
self.log.error(msg)
self.module.fail_json(msg=msg)
def get_host_vdisk_map(self):
try:
cmdargs = [self.objectname] if self.objectname else None
hvmaps = self.restapi.svc_obj_info(cmd='lshostvdiskmap', cmdopts=None,
cmdargs=cmdargs)
self.log.info('Successfully listed %d host vdisk maps from array '
'%s', len(hvmaps), self.module.params['clustername'])
return hvmaps
except Exception as e:
msg = ('Get Host Vdisk Maps from array %s failed with error %s ',
self.module.params['clustername'], str(e))
self.log.error(msg)
self.module.fail_json(msg=msg)
def get_iogroups_list(self):
try:
cmdargs = [self.objectname] if self.objectname else None
iogrps = self.restapi.svc_obj_info(cmd='lsiogrp', cmdopts=None,
cmdargs=cmdargs)
self.log.info('Successfully listed %d I/O groups from array '
'%s', len(iogrps), self.module.params['clustername'])
return iogrps
except Exception as e:
msg = ('Get IO Groups from array %s failed with error %s ',
self.module.params['clustername'], str(e))
self.log.error(msg)
self.module.fail_json(msg=msg)
def get_host_clusters_list(self):
try:
cmdargs = [self.objectname] if self.objectname else None
hcs = self.restapi.svc_obj_info(cmd='lshostcluster', cmdopts=None,
cmdargs=cmdargs)
self.log.info('Successfully listed %d host clusters from array '
'%s', len(hcs), self.module.params['clustername'])
return hcs
except Exception as e:
msg = ('Get Host Cluster from array %s failed with error %s ',
self.module.params['clustername'], str(e))
self.log.error(msg)
self.module.fail_json(msg=msg)
def get_fc_connectivity_list(self):
try:
cmdargs = [self.objectname] if self.objectname else None
fc = self.restapi.svc_obj_info(cmd='lsfabric', cmdopts=None,
cmdargs=cmdargs)
self.log.info('Successfully listed %d fc connectivity from array '
'%s', len(fc), self.module.params['clustername'])
return fc
except Exception as e:
msg = ('Get FC Connectivity from array %s failed with error %s ',
self.module.params['clustername'], str(e))
self.log.error(msg)
self.module.fail_json(msg=msg)
def get_fc_ports_list(self):
try:
cmdargs = [self.objectname] if self.objectname else None
fcports = self.restapi.svc_obj_info(cmd='lsportfc', cmdopts=None,
cmdargs=cmdargs)
self.log.info('Successfully listed %d fc ports from array %s',
len(fcports), self.module.params['clustername'])
return fcports
except Exception as e:
msg = ('Get fc ports from array %s failed with error %s ',
self.module.params['clustername'], str(e))
self.log.error(msg)
self.module.fail_json(msg=msg)
def get_target_port_fc_list(self):
try:
cmdargs = [self.objectname] if self.objectname else None
targetportfc = self.restapi.svc_obj_info(cmd='lstargetportfc',
cmdopts=None,
cmdargs=cmdargs)
self.log.info('Successfully listed %d target port fc '
'from array %s', len(targetportfc),
self.module.params['clustername'])
return targetportfc
except Exception as e:
msg = ('Get target port fc from array %s failed with error %s ',
self.module.params['clustername'], str(e))
self.log.error(msg)
self.module.fail_json(msg=msg)
def get_iscsi_ports_list(self):
try:
cmdargs = [self.objectname] if self.objectname else None
ipports = self.restapi.svc_obj_info(cmd='lsportip', cmdopts=None,
cmdargs=cmdargs)
self.log.info('Successfully listed %d iscsi ports from array %s',
len(ipports), self.module.params['clustername'])
return ipports
except Exception as e:
msg = ('Get iscsi ports from array %s failed with error %s ',
self.module.params['clustername'], str(e))
self.log.error(msg)
self.module.fail_json(msg=msg)
def get_fc_map_list(self):
try:
cmdargs = [self.objectname] if self.objectname else None
fcmaps = self.restapi.svc_obj_info(cmd='lsfcmap', cmdopts=None,
cmdargs=cmdargs)
self.log.info('Successfully listed %d fc maps from array %s',
len(fcmaps), self.module.params['clustername'])
return fcmaps
except Exception as e:
msg = ('Get fc maps from array %s failed with error %s ',
self.module.params['clustername'], str(e))
self.log.error(msg)
self.module.fail_json(msg=msg)
def get_rcrel_list(self):
try:
cmdargs = [self.objectname] if self.objectname else None
rcrel = self.restapi.svc_obj_info(cmd='lsrcrelationship',
cmdopts=None,
cmdargs=cmdargs)
self.log.info('Successfully listed %d remotecopy from array %s',
len(rcrel), self.module.params['clustername'])
return rcrel
except Exception as e:
msg = ('Get remotecopies from array %s failed with error %s ',
self.module.params['clustername'], str(e))
self.log.error(msg)
self.module.fail_json(msg=msg)
def get_array_list(self):
try:
cmdargs = [self.objectname] if self.objectname else None
array = self.restapi.svc_obj_info(cmd='lsarray', cmdopts=None,
cmdargs=cmdargs)
self.log.info('Successfully listed %d array info from array %s',
len(array), self.module.params['clustername'])
return array
except Exception as e:
msg = ('Get Array info from array %s failed with error %s ',
self.module.params['clustername'], str(e))
self.log.error(msg)
self.module.fail_json(msg=msg)
def get_system_list(self):
try:
if self.objectname:
self.log.warning('The objectname %s is ignored when retrieving '
'the system information', self.objectname)
system = self.restapi.svc_obj_info(cmd='lssystem', cmdopts=None,
cmdargs=None)
self.log.info('Successfully listed %d system info from array %s',
len(system), self.module.params['clustername'])
return system
except Exception as e:
msg = ('Get System info from array %s failed with error %s ',
self.module.params['clustername'], str(e))
self.log.error(msg)
self.module.fail_json(msg=msg)
def get_fcconsistgrp_list(self):
try:
cmdargs = [self.objectname] if self.objectname else None
fcconsistgrp = self.restapi.svc_obj_info(cmd='lsfcconsistgrp',
cmdopts=None,
cmdargs=cmdargs)
self.log.info('Successfully listed %d fcconsistgrp info '
'from array %s', len(fcconsistgrp),
self.module.params['clustername'])
return fcconsistgrp
except Exception as e:
msg = ('Get fcconsistgrp info from array %s failed with error %s ',
self.module.params['clustername'], str(e))
self.log.error(msg)
self.module.fail_json(msg=msg)
def get_rcconsistgrp_list(self):
try:
cmdargs = [self.objectname] if self.objectname else None
rcconsistgrp = self.restapi.svc_obj_info(cmd='lsrcconsistgrp',
cmdopts=None,
cmdargs=cmdargs)
self.log.info('Successfully listed %d rcconsistgrp info '
'from array %s', len(rcconsistgrp),
self.module.params['clustername'])
return rcconsistgrp
except Exception as e:
msg = ('Get rcconsistgrp info from array %s failed with error %s ',
self.module.params['clustername'], str(e))
self.log.error(msg)
self.module.fail_json(msg=msg)
def get_vdiskcopy_list(self):
try:
cmdargs = [self.objectname] if self.objectname else None
vdiskcopy = self.restapi.svc_obj_info(cmd='lsvdiskcopy',
cmdopts=None,
cmdargs=cmdargs)
self.log.info('Successfully listed %d vdiskcopy info '
'from array %s', len(vdiskcopy),
self.module.params['clustername'])
return vdiskcopy
except Exception as e:
msg = ('Get vdiskcopy info from array %s failed with error %s ',
self.module.params['clustername'], str(e))
self.log.error(msg)
self.module.fail_json(msg=msg)
def apply(self):
all = ['vol', 'pool', 'node', 'iog', 'host', 'hc', 'fc',
'fcport', 'iscsiport', 'fcmap', 'rcrelationship',
'fcconsistgrp', 'rcconsistgrp', 'vdiskcopy',
'targetportfc', 'array', 'system']
# host/vdiskmap not added to all as they require an objectname
# in order to run, so only use these as gather_subset
subset = self.module.params['gather_subset']
if self.objectname and len(subset) != 1:
msg = ("objectname(%s) is specified while gather_subset(%s) is not "
"one of %s" % (self.objectname, self.subset, all))
self.module.fail_json(msg=msg)
if len(subset) == 0 or 'all' in subset:
self.log.info("The default value for gather_subset is all")
subset = all
vol = []
pool = []
node = []
iog = []
host = []
hostvdiskmap = []
vdiskhostmap = []
hc = []
fc = []
fcport = []
targetportfc = []
iscsiport = []
fcmap = []
fcconsistgrp = []
rcrelationship = []
rcconsistgrp = []
vdiskcopy = []
array = []
system = []
if 'vol' in subset:
vol = self.get_volumes_list()
if 'pool' in subset:
pool = self.get_pools_list()
if 'node' in subset:
node = self.get_nodes_list()
if 'iog' in subset:
iog = self.get_iogroups_list()
if 'host' in subset:
host = self.get_hosts_list()
if 'hostvdiskmap' in subset:
hostvdiskmap = self.get_host_vdisk_map()
if 'vdiskhostmap' in subset:
vdiskhostmap = self.get_vdisk_host_map()
if 'hc' in subset:
hc = self.get_host_clusters_list()
if 'fc' in subset:
fc = self.get_fc_connectivity_list()
if 'targetportfc' in subset:
targetportfc = self.get_target_port_fc_list()
if 'fcport' in subset:
fcport = self.get_fc_ports_list()
if 'iscsiport' in subset:
iscsiport = self.get_iscsi_ports_list()
if 'fcmap' in subset:
fcmap = self.get_fc_map_list()
if 'fcconsistgrp' in subset:
fcconsistgrp = self.get_fcconsistgrp_list()
if 'rcrelationship' in subset:
rcrelationship = self.get_rcrel_list()
if 'rcconsistgrp' in subset:
rcconsistgrp = self.get_rcconsistgrp_list()
if 'vdiskcopy' in subset:
vdiskcopy = self.get_vdiskcopy_list()
if 'array' in subset:
array = self.get_array_list()
if 'system' in subset:
system = self.get_system_list()
self.module.exit_json(
Volume=vol,
Pool=pool,
Node=node,
IOGroup=iog,
Host=host,
HostVdiskMap=hostvdiskmap,
VdiskHostMap=vdiskhostmap,
HostCluster=hc,
FCConnectivitie=fc,
FCConsistgrp=fcconsistgrp,
RCConsistgrp=rcconsistgrp,
VdiskCopy=vdiskcopy,
FCPort=fcport,
TargetPortFC=targetportfc,
iSCSIPort=iscsiport,
FCMap=fcmap,
RemoteCopy=rcrelationship,
Array=array,
System=system)
def main():
v = IBMSVCGatherInfo()
try:
v.apply()
except Exception as e:
v.log.debug("Exception in apply(): \n%s", format_exc())
v.module.fail_json(msg="Module failed. Error [%s]." % to_native(e))
if __name__ == '__main__':
main()

View File

@@ -0,0 +1,599 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2021 IBM CORPORATION
# Author(s): Shilpi Jain <shilpi.jain1@ibm.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: ibm_svc_initial_setup
short_description: This module allows users to manage the initial setup configuration on IBM Spectrum Virtualize family storage systems
version_added: "1.7.0"
description:
- Ansible interface to perform various initial system configuration tasks.
options:
clustername:
description:
- The hostname or management IP of the Spectrum Virtualize storage system.
required: true
type: str
domain:
description:
- Domain for the Spectrum Virtualize storage system.
- Valid when hostname is used for the parameter I(clustername).
type: str
username:
description:
- REST API username for the Spectrum Virtualize storage system.
- The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
type: str
password:
description:
- REST API password for the Spectrum Virtualize storage system.
- The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
type: str
token:
description:
- The authentication token to verify a user on the Spectrum Virtualize storage system.
- To generate a token, use the M(ibm.spectrum_virtualize.ibm_svc_auth) module.
type: str
log_path:
description:
- Path of debug log file.
type: str
validate_certs:
description:
- Validates certification.
default: false
type: bool
system_name:
description:
- Specifies system name.
type: str
dnsname:
description:
- Specifies a unique name for the system DNS server being created.
- A maximum of two DNS servers can be configured. The user must provide the complete list of DNS servers that need to be configured.
type: list
elements: str
dnsip:
description:
- Specifies the DNS server Internet Protocol (IP) address.
type: list
elements: str
ntpip:
description:
- Specifies the IPv4 address or fully qualified domain name (FQDN) for the Network Time Protocol (NTP) server.
- To remove an already configured NTP IP, user must specify 0.0.0.0.
type: str
time:
description:
- Specifies the time to which the system must be set.
- This value must be in the following format MMDDHHmmYYYY (where M is month, D is day, H is hour, m is minute, and Y is year).
type: str
timezone:
description:
- Specifies the time zone to set for the system.
type: str
license_key:
description:
- Provides the license key to activate a feature that contains 16 hexadecimal characters organized in four groups
of four numbers with each group separated by a hyphen (such as 0123-4567-89AB-CDEF).
type: list
elements: str
remote:
description:
- Changes system licensing for remote-copy functions such as Metro Mirror, Global Mirror, and HyperSwap.
- Depending on the type of system, specify a capacity value in terabytes (TB) or specify the total number of
internal and external enclosures that user has licensed on the system.
There must be an enclosure license for all enclosures.
type: int
virtualization:
description:
- Changes system licensing for the Virtualization function.
- Depending on the type of system, specify a capacity value in terabytes (TB) or specify the total number of
storage capacity units (SCUs) that user is licensed to virtualize across tiers of storage on the system or
specify the number of enclosures of external storage that user is authorized to use.
type: int
compression:
description:
- Changes system licensing for the compression function.
- Depending on the type of system, specify a capacity value in terabytes (TB) or specify the total number of
storage capacity units (SCUs) that user is licensed to virtualize across tiers of storage on the system or
specify the total number of internal and external enclosures that user has licensed on the system.
type: int
flash:
description:
- Changes system licensing for the FlashCopy function.
- Depending on the type of system, specify a capacity value in terabytes (TB) or specify the total number of
internal and external enclosures for the FlashCopy function.
type: int
cloud:
description:
- Specifies the number of enclosures for the transparent cloud tiering function.
type: int
easytier:
description:
- Specifies the number of enclosures on which user can run Easy Tier.
type: int
physical_flash:
description:
- For physical disk licensing, this parameter enables or disables the FlashCopy function.
type: str
choices: [ 'on', 'off' ]
default: 'off'
encryption:
description:
- Specifies whether the encryption license function is enabled or disabled.
type: str
choices: [ 'on', 'off' ]
author:
- Shilpi Jain (@Shilpi-J)
notes:
- This module supports C(check_mode).
'''
EXAMPLES = '''
- name: Initial configuration on FlashSystem 9200
ibm.spectrum_virtualize.ibm_svc_initial_setup:
clustername: "{{clustername}}"
domain: "{{domain}}"
username: "{{username}}"
password: "{{password}}"
log_path: /tmp/playbook.debug
system_name: cluster_test_0
time: 101009142021
timezone: 200
remote: 50
virtualization: 50
flash: 50
license_key:
- 0123-4567-89AB-CDEF
- 8921-4567-89AB-GHIJ
- name: Add DNS servers
ibm.spectrum_virtualize.ibm_svc_initial_setup:
clustername: "{{clustername}}"
domain: "{{domain}}"
username: "{{username}}"
password: "{{password}}"
log_path: /tmp/playbook.debug
system_name: cluster_test_
dnsname:
- dns_01
- dns_02
dnsip:
- '1.1.1.1'
- '2.2.2.2'
- name: Delete dns_02 server
ibm.spectrum_virtualize.ibm_svc_initial_setup:
clustername: "{{clustername}}"
domain: "{{domain}}"
username: "{{username}}"
password: "{{password}}"
log_path: /tmp/playbook.debug
system_name: cluster_test_
dnsname:
- dns_01
dnsip:
- '1.1.1.1'
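# Illustrative sketch: configuring an NTP server instead of setting the time manually;
# the NTP server address shown is assumed here for illustration only.
- name: Configure NTP and time zone
  ibm.spectrum_virtualize.ibm_svc_initial_setup:
    clustername: "{{clustername}}"
    domain: "{{domain}}"
    username: "{{username}}"
    password: "{{password}}"
    log_path: /tmp/playbook.debug
    ntpip: '9.9.9.9'
    timezone: 200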
'''
RETURN = '''#'''
from traceback import format_exc
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.ibm.spectrum_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi, svc_argument_spec, get_logger
from ansible.module_utils._text import to_native
class IBMSVCInitialSetup(object):
def __init__(self):
argument_spec = svc_argument_spec()
argument_spec.update(
dict(
system_name=dict(type='str'),
dnsname=dict(type='list', elements='str'),
dnsip=dict(type='list', elements='str'),
ntpip=dict(type='str'),
time=dict(type='str'),
timezone=dict(type='str'),
license_key=dict(type='list', elements='str', no_log=True),
remote=dict(type='int'),
virtualization=dict(type='int'),
flash=dict(type='int'),
compression=dict(type='int'),
cloud=dict(type='int'),
easytier=dict(type='int'),
physical_flash=dict(type='str', default='off', choices=['on', 'off']),
encryption=dict(type='str', choices=['on', 'off'])
)
)
self.module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
# logging setup
log_path = self.module.params['log_path']
log = get_logger(self.__class__.__name__, log_path)
self.log = log.info
self.system_data = ""
self.changed = False
self.message = ""
# Optional
self.systemname = self.module.params.get('system_name', '')
self.dnsname = self.module.params.get('dnsname', '')
self.dnsip = self.module.params.get('dnsip', '')
self.ntpip = self.module.params.get('ntpip', '')
self.time = self.module.params.get('time', '')
self.timezone = self.module.params.get('timezone', '')
# license related parameters
self.license_key = self.module.params.get('license_key', '')
self.remote = self.module.params.get('remote', '')
self.virtualization = self.module.params.get('virtualization', '')
self.compression = self.module.params.get('compression', '')
self.flash = self.module.params.get('flash', '')
self.cloud = self.module.params.get('cloud', '')
self.easytier = self.module.params.get('easytier', '')
self.physical_flash = self.module.params.get('physical_flash', '')
self.encryption = self.module.params.get('encryption', '')
self.restapi = IBMSVCRestApi(
module=self.module,
clustername=self.module.params['clustername'],
domain=self.module.params['domain'],
username=self.module.params['username'],
password=self.module.params['password'],
validate_certs=self.module.params['validate_certs'],
log_path=log_path,
token=self.module.params['token']
)
def basic_checks(self):
if self.time and self.ntpip:
self.module.fail_json(msg='Only one of NTP IP or time can be given, not both.')
if self.dnsname and self.dnsip:
if len(self.dnsname) != len(self.dnsip):
self.module.fail_json(msg='To configure DNS, the number of DNS server names must match the number of DNS IP addresses.')
def get_system_info(self):
self.log("Entering function get_system_info")
self.system_data = self.restapi.svc_obj_info(cmd='lssystem', cmdopts=None, cmdargs=None)
return self.system_data
def systemname_update(self):
cmd = 'chsystem'
cmdopts = {}
cmdopts['name'] = self.systemname
self.restapi.svc_run_command(cmd, cmdopts, cmdargs=None)
# Any error will have been raised in svc_run_command
self.changed = True
self.log("System Name: %s updated", cmdopts)
self.message += " System name [%s] updated." % self.systemname
def ntp_update(self, ip):
cmd = 'chsystem'
cmdopts = {}
cmdopts['ntpip'] = ip
self.restapi.svc_run_command(cmd, cmdopts, cmdargs=None)
# Any error will have been raised in svc_run_command
self.changed = True
self.log("NTP IP: %s updated", cmdopts)
if self.ntpip:
self.message += " NTP IP [%s] updated." % self.ntpip
def systemtime_update(self):
cmd = 'setsystemtime'
cmdopts = {}
cmdopts['time'] = self.time
self.restapi.svc_run_command(cmd, cmdopts, cmdargs=None)
# Any error will have been raised in svc_run_command
self.changed = True
self.log("Time: %s updated", self.time)
self.message += " Time [%s] updated." % self.time
def timezone_update(self):
cmd = 'settimezone'
cmdopts = {}
cmdopts['timezone'] = self.timezone
self.restapi.svc_run_command(cmd, cmdopts, cmdargs=None)
# Any error will have been raised in svc_run_command
# settimezone does not output anything when successful.
self.changed = True
self.log("Properties: Time zone %s updated", self.timezone)
self.message += " Timezone [%s] updated." % self.timezone
def system_update(self, data):
name_change_required = False
ntp_change_required = False
time_change_required = False
timezone_change_required = False
tz = (None, None)
if self.module.check_mode:
self.changed = True
return
if self.systemname and self.systemname != data['name']:
self.log("Name change detected")
name_change_required = True
if self.ntpip and self.ntpip != data['cluster_ntp_IP_address']:
self.log("NTP change detected")
ntp_change_required = True
if self.time and data['cluster_ntp_IP_address'] is not None:
self.log("TIME change detected, clearing NTP IP")
ntp_change_required = True
if self.time:
self.log("TIME change detected")
time_change_required = True
if data['time_zone']:
tz = data['time_zone'].split(" ", 1)
if self.timezone and (tz[0] != self.timezone):
timezone_change_required = True
if name_change_required:
self.systemname_update()
if ntp_change_required:
self.log("updating system properties '%s, %s'", self.systemname, self.ntpip)
if self.ntpip:
ip = self.ntpip
if self.time and ntp_change_required:
ip = '0.0.0.0'
self.ntp_update(ip)
if time_change_required:
self.systemtime_update()
if timezone_change_required:
self.timezone_update()
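# Illustrative note: when 'time' is requested while an NTP IP is already configured,
# the logic above first clears the NTP IP by setting it to 0.0.0.0 (via ntp_update)
# and then applies the manual time through setsystemtime.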
def get_existing_dnsservers(self):
merged_result = []
data = self.restapi.svc_obj_info(cmd='lsdnsserver', cmdopts=None, cmdargs=None)
if isinstance(data, list):
for d in data:
merged_result.append(d)
else:
merged_result = data
return merged_result
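# Illustrative note: each lsdnsserver entry is a dict that includes at least 'name'
# and 'IP_address' (the two fields consumed by dns_configure below), for example
# {'name': 'dns_01', 'IP_address': '1.1.1.1'}.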
def dns_configure(self):
dns_add_remove = False
modify = {}
existing_dns = {}
existing_dns_server = []
existing_dns_ip = []
if self.module.check_mode:
self.changed = True
return
dns_data = self.get_existing_dnsservers()
self.log("dns_data=%s", dns_data)
if (self.dnsip and self.dnsname) or (self.dnsip == "" and self.dnsname == ""):
for server in dns_data:
existing_dns_server.append(server['name'])
existing_dns_ip.append(server['IP_address'])
existing_dns[server['name']] = server['IP_address']
for name, ip in zip(self.dnsname, self.dnsip):
if name == 'None':
self.log(" Empty DNS configuration is provided.")
return
if name in existing_dns:
if existing_dns[name] != ip:
self.log("update, diff IP.")
modify[name] = ip
else:
self.log("no update, same IP.")
if (set(existing_dns_server)).symmetric_difference(set(self.dnsname)):
dns_add_remove = True
if modify:
for item in modify:
self.restapi.svc_run_command(
'chdnsserver',
{'ip': modify[item]}, [item]
)
self.changed = True
self.message += " DNS %s modified." % modify
if dns_add_remove:
to_be_added, to_be_removed = False, False
to_be_removed = list(set(existing_dns_server) - set(self.dnsname))
if to_be_removed:
for item in to_be_removed:
self.restapi.svc_run_command(
'rmdnsserver', None,
[item]
)
self.changed = True
self.message += " DNS server %s removed." % to_be_removed
to_be_added = list(set(self.dnsname) - set(existing_dns_server))
to_be_added_ip = list(set(self.dnsip) - set(existing_dns_ip))
if any(to_be_added):
for dns_name, dns_ip in zip(to_be_added, to_be_added_ip):
if dns_name:
self.log('%s %s', dns_name, dns_ip)
self.restapi.svc_run_command(
'mkdnsserver',
{'name': dns_name, 'ip': dns_ip}, cmdargs=None
)
self.changed = True
self.message += " DNS server %s added." % to_be_added
elif not modify:
self.log("No DNS Changes")
def license_probe(self):
props = []
cmd = 'lslicense'
cmdopts = {}
data = self.restapi.svc_run_command(cmd, cmdopts, cmdargs=None)
if self.remote and int(data['license_remote']) != self.remote:
props += ['remote']
if self.virtualization and int(data['license_virtualization']) != self.virtualization:
props += ['virtualization']
if self.compression:
if (self.system_data['product_name'] == "IBM Storwize V7000") or (self.system_data['product_name'] == "IBM FlashSystem 7200"):
if (int(data['license_compression_enclosures']) != self.compression):
self.log("license_compression_enclosure=%d", int(data['license_compression_enclosures']))
props += ['compression']
else:
if (int(data['license_compression_capacity']) != self.compression):
self.log("license_compression_capacity=%d", int(data['license_compression_capacity']))
props += ['compression']
if self.flash and int(data['license_flash']) != self.flash:
props += ['flash']
if self.cloud and int(data['license_cloud_enclosures']) != self.cloud:
props += ['cloud']
if self.easytier and int(data['license_easy_tier']) != self.easytier:
props += ['easytier']
if self.physical_flash and data['license_physical_flash'] != self.physical_flash:
props += ['physical_flash']
self.log("props: %s", props)
return props
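# Illustrative note: license_probe() returns only the honour-based settings that
# differ from the current 'lslicense' output, e.g. ['remote', 'flash'];
# license_update() then issues one 'chlicense' call per returned entry.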
def license_update(self, modify):
if self.module.check_mode:
self.changed = True
return
cmd = 'chlicense'
for license in modify:
cmdopts = {}
cmdopts[license] = getattr(self, license)
self.restapi.svc_run_command(cmd, cmdopts, cmdargs=None)
self.changed = True if modify else False
if self.encryption:
cmdopts = {}
cmdopts['encryption'] = self.encryption
self.changed = True
self.restapi.svc_run_command(cmd, cmdopts, cmdargs=None)
self.log("Licensed functions %s updated", modify)
self.message += " Licensed functions %s updated." % modify
def license_key_update(self):
existing_license_keys = []
license_id_pairs = {}
license_add_remove = False
if self.module.check_mode:
self.changed = True
return
for key in self.license_key:
if key == 'None':
self.log(" Empty License key list provided")
return
cmd = 'lsfeature'
cmdopts = {}
feature_list = self.restapi.svc_run_command(cmd, cmdopts, cmdargs=None)
for feature in feature_list:
existing_license_keys.append(feature['license_key'])
license_id_pairs[feature['license_key']] = feature['id']
self.log("existing licenses=%s, license_id_pairs=%s", existing_license_keys, license_id_pairs)
if (set(existing_license_keys)).symmetric_difference(set(self.license_key)):
license_add_remove = True
if license_add_remove:
deactivate_license_keys, activate_license_keys = False, False
deactivate_license_keys = list(set(existing_license_keys) - set(self.license_key))
self.log('deactivate_license_keys %s ', deactivate_license_keys)
if deactivate_license_keys:
for item in deactivate_license_keys:
if not item:
self.log('%s item', [license_id_pairs[item]])
self.restapi.svc_run_command(
'deactivatefeature',
None, [license_id_pairs[item]]
)
self.changed = True
self.log('%s deactivated', deactivate_license_keys)
self.message += " License %s deactivated." % deactivate_license_keys
activate_license_keys = list(set(self.license_key) - set(existing_license_keys))
self.log('activate_license_keys %s ', activate_license_keys)
if activate_license_keys:
for item in activate_license_keys:
if item:
self.restapi.svc_run_command(
'activatefeature',
{'licensekey': item}, None
)
self.changed = True
self.log('%s activated', activate_license_keys)
self.message += " License %s activated." % activate_license_keys
else:
self.message += " No license Changes."
def apply(self):
msg = None
modify = []
self.basic_checks()
self.system_data = self.get_system_info()
if self.systemname or self.ntpip or self.timezone or self.time:
self.system_update(self.system_data)
# DNS configuration
self.dns_configure()
# For honour based licenses
modify = self.license_probe()
if modify:
self.license_update(modify)
# For key based licenses
if self.license_key:
self.license_key_update()
if self.changed:
if self.module.check_mode:
msg = "skipping changes due to check mode."
else:
msg = self.message
else:
msg = "No modifications required. Exiting with no changes."
self.module.exit_json(msg=msg, changed=self.changed)
def main():
v = IBMSVCInitialSetup()
try:
v.apply()
except Exception as e:
v.log("Exception in apply(): \n%s", format_exc())
v.module.fail_json(msg="Module failed. Error [%s]." % to_native(e))
if __name__ == '__main__':
main()

View File

@@ -0,0 +1,890 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2021 IBM CORPORATION
# Author(s): Sreshtant Bohidar <sreshtant.bohidar@ibm.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: ibm_svc_manage_callhome
short_description: This module manages Call Home feature configuration on IBM Spectrum Virtualize
family storage systems
description:
- Ansible interface to manage cloud and email Call Home feature.
version_added: "1.7.0"
options:
state:
description:
- Enables or updates (C(enabled)) or disables (C(disabled)) Call Home feature.
choices: [ enabled, disabled ]
required: true
type: str
clustername:
description:
- The hostname or management IP of the Spectrum Virtualize storage system.
type: str
required: true
domain:
description:
- Domain for the Spectrum Virtualize storage system.
- Valid when hostname is used for the parameter I(clustername).
type: str
username:
description:
- REST API username for the Spectrum Virtualize storage system.
- The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
type: str
password:
description:
- REST API password for the Spectrum Virtualize storage system.
- The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
type: str
token:
description:
- The authentication token to verify a user on the Spectrum Virtualize storage system.
- To generate a token, use the ibm_svc_auth module.
type: str
callhome_type:
description:
- Specifies the transmission type.
choices: [ 'cloud services', 'email', 'both' ]
required: True
type: str
proxy_type:
description:
- Specifies the proxy type.
- Required when I(state=enabled), to create or modify Call Home feature.
- Proxy gets deleted for I(proxy_type=no_proxy).
            - The parameter is mandatory when I(callhome_type='cloud services') or I(callhome_type='both').
choices: [ open_proxy, basic_authentication, certificate, no_proxy ]
type: str
proxy_url:
description:
- Specifies the proxy server URL with a protocol prefix in fully qualified domain name format.
- Applies when I(state=enabled) and I(proxy_type=open_proxy) or I(proxy_type=basic_authentication).
type: str
proxy_port:
description:
- Specifies the proxy server port number.
The value must be in the range 1 - 65535.
- Applies when I(state=enabled) and I(proxy_type=open_proxy) or I(proxy_type=basic_authentication).
type: int
proxy_username:
description:
- Specifies the proxy's username.
- Applies when I(state=enabled) and I(proxy_type=basic_authentication).
type: str
proxy_password:
description:
- Specifies the proxy's password.
- Applies when I(state=enabled) and I(proxy_type=basic_authentication).
type: str
sslcert:
description:
- Specifies the file path of proxy's certificate.
- Applies when I(state=enabled) and I(proxy_type=certificate).
type: str
company_name:
description:
- Specifies the user's organization as it should appear in Call Home email.
- Required when I(state=enabled).
type: str
address:
description:
- Specifies the first line of the user's address as it should appear in Call Home email.
- Required when I(state=enabled).
type: str
city:
description:
- Specifies the user's city as it should appear in Call Home email.
- Required when I(state=enabled).
type: str
province:
description:
- Specifies the user's state or province as it should appear in Call Home email.
- Required when I(state=enabled).
type: str
postalcode:
description:
- Specifies the user's zip code or postal code as it should appear in Call Home email.
- Required when I(state=enabled).
type: str
country:
description:
- Specifies the country in which the machine resides as it should appear in Call Home email.
- Required when I(state=enabled).
type: str
location:
description:
- Specifies the physical location of the system that has reported the error.
- Required when I(state=enabled).
type: str
contact_name:
description:
- Specifies the name of the person receiving the email.
- Required when I(state=enabled).
type: str
contact_email:
description:
- Specifies the email of the person.
- Required when I(state=enabled).
type: str
phonenumber_primary:
description:
- Specifies the primary contact telephone number.
- Required when I(state=enabled).
type: str
phonenumber_secondary:
description:
- Specifies the secondary contact telephone number.
- Required when I(state=enabled).
type: str
serverIP:
description:
- Specifies the IP address of the email server.
- Required when I(state=enabled) and I(callhome_type=email) or I(callhome_type=both).
type: str
serverPort:
description:
- Specifies the port number of the email server.
- The value must be in the range 1 - 65535.
- Required when I(state=enabled) and I(callhome_type=email) or I(callhome_type=both).
type: int
inventory:
description:
- Specifies whether the recipient mentioned in parameter I(contact_email) receives inventory email notifications.
- Applies when I(state=enabled).
If unspecified, default value 'off' will be used.
choices: ['on', 'off']
type: str
invemailinterval:
description:
- Specifies the interval at which inventory emails are sent to the configured email recipients.
- The interval is measured in days. The value must be in the range 0 - 15.
- Setting the value to '0' turns off the inventory email notification function.
Valid if I(inventory) is set to 'on'.
type: int
enhancedcallhome:
description:
- Specifies that the Call Home function is to send enhanced reports to the support center.
- Applies when I(state=enabled).
- If unspecified, default value 'off' will be used.
choices: ['on', 'off']
type: str
censorcallhome:
description:
- Specifies that sensitive data is deleted from the enhanced Call Home data.
- Applies when I(state=enabled).
- If unspecified, default value 'off' will be used.
choices: ['on', 'off']
type: str
validate_certs:
description:
- Validates certification.
default: false
type: bool
log_path:
description:
- Path of debug log file.
type: str
author:
- Sreshtant Bohidar(@Sreshtant-Bohidar)
notes:
- This module supports C(check_mode).
'''
EXAMPLES = '''
- name: Configure callhome with both email and cloud
ibm.spectrum_virtualize.ibm_svc_manage_callhome:
clustername: "{{ clustername }}"
username: "{{ username }}"
password: "{{ password }}"
log_path: "/tmp/playbook.debug"
state: "enabled"
callhome_type: "both"
address: "{{ address }}"
city: "{{ city }}"
company_name: "{{ company_name }}"
contact_email: "{{ contact_email }}"
contact_name: "{{ contact_name }}"
country: "{{ country }}"
location: "{{ location }}"
phonenumber_primary: "{{ primary_phonenumber }}"
postalcode: "{{ postal_code }}"
province: "{{ province }}"
proxy_type: "{{ proxy_type }}"
proxy_url: "{{ proxy_url }}"
proxy_port: "{{ proxy_port }}"
serverIP: "{{ server_ip }}"
serverPort: "{{ server_port }}"
inventory: "on"
invemailinterval: 1
enhancedcallhome: "on"
censorcallhome: "on"
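# An additional, hedged example (not part of the original module documentation):
# with I(state=disabled), only the connection details, state and callhome_type are needed.
- name: Disable callhome for both email and cloud
  ibm.spectrum_virtualize.ibm_svc_manage_callhome:
    clustername: "{{ clustername }}"
    username: "{{ username }}"
    password: "{{ password }}"
    log_path: "/tmp/playbook.debug"
    state: "disabled"
    callhome_type: "both"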
'''
RETURN = '''#'''
from traceback import format_exc
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.ibm.spectrum_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi, svc_argument_spec, get_logger
from ansible.module_utils._text import to_native
import time
class IBMSVCCallhome(object):
def __init__(self):
argument_spec = svc_argument_spec()
argument_spec.update(
dict(
state=dict(type='str', required=True, choices=['enabled', 'disabled']),
callhome_type=dict(type='str', required=True, choices=['cloud services', 'email', 'both']),
proxy_type=dict(type='str', choices=['open_proxy', 'basic_authentication', 'certificate', 'no_proxy']),
proxy_url=dict(type='str'),
proxy_port=dict(type='int'),
proxy_username=dict(type='str'),
proxy_password=dict(type='str', no_log=True),
sslcert=dict(type='str'),
company_name=dict(type='str'),
address=dict(type='str'),
city=dict(type='str'),
province=dict(type='str'),
postalcode=dict(type='str'),
country=dict(type='str'),
location=dict(type='str'),
contact_name=dict(type='str'),
contact_email=dict(type='str'),
phonenumber_primary=dict(type='str'),
phonenumber_secondary=dict(type='str'),
serverIP=dict(type='str'),
serverPort=dict(type='int'),
inventory=dict(type='str', choices=['on', 'off']),
invemailinterval=dict(type='int'),
enhancedcallhome=dict(type='str', choices=['on', 'off']),
censorcallhome=dict(type='str', choices=['on', 'off'])
)
)
self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
# logging setup
log_path = self.module.params['log_path']
log = get_logger(self.__class__.__name__, log_path)
self.log = log.info
# Required
self.state = self.module.params['state']
self.callhome_type = self.module.params['callhome_type']
self.company_name = self.module.params['company_name']
self.address = self.module.params['address']
self.city = self.module.params['city']
self.province = self.module.params['province']
self.postalcode = self.module.params['postalcode']
self.country = self.module.params['country']
self.location = self.module.params['location']
self.contact_name = self.module.params['contact_name']
self.contact_email = self.module.params['contact_email']
self.phonenumber_primary = self.module.params['phonenumber_primary']
# Optional
self.proxy_type = self.module.params.get('proxy_type', False)
self.proxy_url = self.module.params.get('proxy_url', False)
self.proxy_port = self.module.params.get('proxy_port', False)
self.proxy_username = self.module.params.get('proxy_username', False)
self.proxy_password = self.module.params.get('proxy_password', False)
self.sslcert = self.module.params.get('sslcert', False)
self.phonenumber_secondary = self.module.params.get('phonenumber_secondary', False)
self.serverIP = self.module.params.get('serverIP', False)
self.serverPort = self.module.params.get('serverPort', False)
self.inventory = self.module.params.get('inventory', False)
self.invemailinterval = self.module.params.get('invemailinterval', False)
self.enhancedcallhome = self.module.params.get('enhancedcallhome', False)
self.censorcallhome = self.module.params.get('censorcallhome', False)
# creating an instance of IBMSVCRestApi
self.restapi = IBMSVCRestApi(
module=self.module,
clustername=self.module.params['clustername'],
domain=self.module.params['domain'],
username=self.module.params['username'],
password=self.module.params['password'],
validate_certs=self.module.params['validate_certs'],
log_path=log_path,
token=self.module.params['token']
)
def basic_checks(self):
# setting the default value if unspecified
if not self.inventory:
self.inventory = 'off'
if not self.enhancedcallhome:
self.enhancedcallhome = 'off'
if not self.censorcallhome:
self.censorcallhome = 'off'
        # perform some basic handling for a few parameters
if self.inventory == 'on':
if not self.invemailinterval:
self.module.fail_json(msg="Parameter [invemailinterval] should be configured to use [inventory]")
if self.invemailinterval:
if self.inventory == 'off':
self.module.fail_json(msg="The parameter [inventory] should be configured with 'on' while setting [invemailinterval]")
if self.invemailinterval not in range(1, 16):
self.module.fail_json(msg="Parameter [invemailinterval] supported range is 0 to 15")
if isinstance(self.serverPort, int):
if self.serverPort not in range(1, 65536):
self.module.fail_json(msg="Parameter [serverPort] must be in range[1-65535]")
if isinstance(self.proxy_port, int):
if self.proxy_port not in range(1, 65536):
self.module.fail_json(msg="Parameter [proxy_port] must be in range[1-65535]")
if not self.state:
self.module.fail_json(msg="Missing mandatory parameter: state")
if not self.callhome_type:
self.module.fail_json(msg="Missing mandatory parameter: callhome_type")
if (self.callhome_type in ['email', 'both']) and (not self.serverIP or not self.serverPort) and (self.state == 'enabled'):
self.module.fail_json(msg="Parameters: serverIP, serverPort are required when callhome_type is email/both")
if self.state == "enabled" and self.proxy_type in ["cloud services", "both"] and self.proxy_type:
if self.proxy_type == 'open_proxy' and (not self.proxy_url or not self.proxy_port):
self.module.fail_json(msg="Parameters [proxy_url, proxy_port] required when proxy_type=open_proxy")
if self.proxy_type == 'basic_authentication' and (not self.proxy_url or not self.proxy_port or not self.proxy_username or not self.proxy_password):
self.module.fail_json(msg="Parameters [proxy_url, proxy_port, proxy_username, proxy_password] required when proxy_type=basic_authentication")
if self.proxy_type == 'certificate' and (not self.proxy_url or not self.proxy_port or not self.sslcert):
self.module.fail_json(msg="Parameters [proxy_url, proxy_port, sslcert] required when proxy_type=certificate")
if self.state == 'enabled':
parameters = {
'callhome_type': self.callhome_type,
'company_name': self.company_name,
'address': self.address,
'city': self.city,
'province': self.province,
'country': self.country,
'location': self.location,
'contact_name': self.contact_name,
'contact_email': self.contact_email,
'phonenumber_primary': self.phonenumber_primary,
}
parameter_not_provided = []
for parameter in parameters:
if not parameters[parameter]:
parameter_not_provided.append(parameter)
if parameter_not_provided:
self.module.fail_json(msg="Parameters {0} are required when state is 'enabled'".format(parameter_not_provided))
# function to fetch lssystem data
def get_system_data(self):
return self.restapi.svc_obj_info('lssystem', cmdopts=None, cmdargs=None)
# function to probe lssystem data
def probe_system(self, data):
modify = {}
if self.invemailinterval:
if self.invemailinterval != data['inventory_mail_interval']:
modify['invemailinterval'] = self.invemailinterval
if self.enhancedcallhome:
if self.enhancedcallhome != data['enhanced_callhome']:
modify['enhancedcallhome'] = self.enhancedcallhome
if self.censorcallhome:
            if self.censorcallhome != data['censor_callhome']:
modify['censorcallhome'] = self.censorcallhome
return modify
# function to execute chsystem commands
def update_system(self, modify):
command = 'chsystem'
command_options = modify
cmdargs = None
self.restapi.svc_run_command(command, command_options, cmdargs)
self.log("Chsystem commands executed.")
# function to fetch existing email user
def get_existing_email_user_data(self):
data = {}
email_data = self.restapi.svc_obj_info(cmd='lsemailuser', cmdopts=None, cmdargs=None)
for item in email_data:
if item['address'] == self.contact_email:
data = item
return data
# function to check if email server exists or not
def check_email_server_exists(self):
status = False
data = self.restapi.svc_obj_info(cmd='lsemailserver', cmdopts=None, cmdargs=None)
for item in data:
if item['IP_address'] == self.serverIP and int(item['port']) == self.serverPort:
status = True
break
return status
# function to check if email user exists or not
def check_email_user_exists(self):
temp = {}
data = self.restapi.svc_obj_info(cmd='lsemailuser', cmdopts=None, cmdargs=None)
for item in data:
if item['address'] == self.contact_email:
temp = item
break
return temp
# function to create an email server
def create_email_server(self):
if self.module.check_mode:
self.changed = True
return
self.log("Creating email server '%s:%s'.", self.serverIP, self.serverPort)
command = 'mkemailserver'
command_options = {
'ip': self.serverIP,
'port': self.serverPort,
}
cmdargs = None
result = self.restapi.svc_run_command(command, command_options, cmdargs)
if 'message' in result:
self.changed = True
self.log("create email server result message '%s'", (result['message']))
else:
self.module.fail_json(
msg="Failed to create email server [%s:%s]" % (self.serverIP, self.serverPort)
)
# function to update email user
def update_email_user(self, data, id):
command = "chemailuser"
command_options = data
cmdargs = [id]
self.restapi.svc_run_command(command, command_options, cmdargs)
self.log('Email user updated successfully.')
# function to manage support email user
def manage_support_email_user(self):
if self.module.check_mode:
self.changed = True
return
support_email = {}
selected_email_id = ''
t = -1 * ((time.timezone / 60) / 60)
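        # time.timezone is the local (non-DST) offset west of UTC in seconds, so t is the
        # UTC offset in hours; the check below selects US time zones (UTC-4 through UTC-8)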
if t >= -8 and t <= -4:
# for US timezone, callhome0@de.ibm.com is used
selected_email_id = 'callhome0@de.ibm.com'
else:
# for ROW, callhome1@de.ibm.com is used
selected_email_id = 'callhome1@de.ibm.com'
existing_user = self.restapi.svc_obj_info('lsemailuser', cmdopts=None, cmdargs=None)
if existing_user:
for user in existing_user:
if user['user_type'] == 'support':
support_email = user
if not support_email:
self.log("Creating support email user '%s'.", selected_email_id)
command = 'mkemailuser'
command_options = {
'address': selected_email_id,
'usertype': 'support',
'info': 'off',
'warning': 'off',
}
if self.inventory:
command_options['inventory'] = self.inventory
cmdargs = None
result = self.restapi.svc_run_command(command, command_options, cmdargs)
if 'message' in result:
self.changed = True
self.log("create support email user result message '%s'", (result['message']))
else:
self.module.fail_json(
msg="Failed to support create email user [%s]" % (self.contact_email)
)
else:
modify = {}
if support_email['address'] != selected_email_id:
modify['address'] = selected_email_id
if self.inventory:
if support_email['inventory'] != self.inventory:
modify['inventory'] = self.inventory
if modify:
self.restapi.svc_run_command(
'chemailuser',
modify,
[support_email['id']]
)
self.log("Updated support user successfully.")
# function to create an email user
def create_email_user(self):
if self.module.check_mode:
self.changed = True
return
self.log("Creating email user '%s'.", self.contact_email)
command = 'mkemailuser'
command_options = {
'address': self.contact_email,
'usertype': 'local',
}
if self.inventory:
command_options['inventory'] = self.inventory
cmdargs = None
result = self.restapi.svc_run_command(command, command_options, cmdargs)
if 'message' in result:
self.changed = True
self.log("Create email user result message '%s'.", (result['message']))
else:
self.module.fail_json(
msg="Failed to create email user [%s]" % (self.contact_email)
)
# function to enable email callhome
def enable_email_callhome(self):
if self.module.check_mode:
self.changed = True
return
command = "startemail"
command_options = {}
cmdargs = None
self.restapi.svc_run_command(command, command_options, cmdargs)
self.log("Email callhome enabled.")
# function to disable email callhome
def disable_email_callhome(self):
if self.module.check_mode:
self.changed = True
return
command = "stopemail"
command_options = {}
cmdargs = None
self.restapi.svc_run_command(command, command_options, cmdargs)
self.log("Email callhome disabled.")
# function to update email data
def update_email_data(self):
if self.module.check_mode:
self.changed = True
return
command = "chemail"
command_options = {}
if self.contact_email:
command_options['reply'] = self.contact_email
if self.contact_name:
command_options['contact'] = self.contact_name
if self.phonenumber_primary:
command_options['primary'] = self.phonenumber_primary
if self.phonenumber_secondary:
command_options['alternate'] = self.phonenumber_secondary
if self.location:
command_options['location'] = self.location
if self.company_name:
command_options['organization'] = self.company_name
if self.address:
command_options['address'] = self.address
if self.city:
command_options['city'] = self.city
if self.province:
command_options['state'] = self.province
if self.postalcode:
command_options['zip'] = self.postalcode
if self.country:
command_options['country'] = self.country
cmdargs = None
if command_options:
self.restapi.svc_run_command(command, command_options, cmdargs)
self.log("Email data successfully updated.")
# function for checking if proxy server exists
def get_existing_proxy(self):
data = {}
data = self.restapi.svc_obj_info(cmd='lsproxy', cmdopts=None, cmdargs=None)
return data
# function for removing a proxy
def remove_proxy(self):
if self.module.check_mode:
self.changed = True
return
command = 'rmproxy'
command_options = None
cmdargs = None
self.restapi.svc_run_command(command, command_options, cmdargs)
self.log('Proxy removed successfully.')
# function for creating a proxy
def create_proxy(self):
if self.module.check_mode:
self.changed = True
return
command = 'mkproxy'
command_options = {}
if self.proxy_type == 'open_proxy':
if self.proxy_url:
command_options['url'] = self.proxy_url
if self.proxy_port:
command_options['port'] = self.proxy_port
elif self.proxy_type == 'basic_authentication':
if self.proxy_url:
command_options['url'] = self.proxy_url
if self.proxy_port:
command_options['port'] = self.proxy_port
if self.proxy_username:
command_options['username'] = self.proxy_username
if self.proxy_password:
command_options['password'] = self.proxy_password
elif self.proxy_type == 'certificate':
if self.proxy_url:
command_options['url'] = self.proxy_url
if self.proxy_port:
command_options['port'] = self.proxy_port
if self.sslcert:
command_options['sslcert'] = self.sslcert
cmdargs = None
self.restapi.svc_run_command(command, command_options, cmdargs)
self.log("Proxy created successfully.")
# function for probing existing proxy data
def probe_proxy(self, data):
modify = {}
if self.proxy_type == 'open_proxy':
if self.proxy_url:
if self.proxy_url != data['url']:
modify['url'] = self.proxy_url
if self.proxy_port:
if int(self.proxy_port) != int(data['port']):
modify['port'] = self.proxy_port
elif self.proxy_type == 'basic_authentication':
if self.proxy_url:
if self.proxy_url != data['url']:
modify['url'] = self.proxy_url
if self.proxy_port:
if self.proxy_port != int(data['port']):
modify['port'] = self.proxy_port
if self.proxy_username:
if self.proxy_username != data['username']:
modify['username'] = self.proxy_username
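            # the proxy password (and the sslcert below for certificate proxies) is not
            # compared against the lsproxy output, so it is re-applied whenever provided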
if self.proxy_password:
modify['password'] = self.proxy_password
elif self.proxy_type == 'certificate':
if self.proxy_url:
if self.proxy_url != data['url']:
modify['url'] = self.proxy_url
if self.proxy_port:
if self.proxy_port != int(data['port']):
modify['port'] = self.proxy_port
if self.sslcert:
modify['sslcert'] = self.sslcert
return modify
# function for updating a proxy
def update_proxy(self, data):
if self.module.check_mode:
self.changed = True
return
command = 'chproxy'
command_options = data
cmdargs = None
self.restapi.svc_run_command(command, command_options, cmdargs)
self.log('Proxy updated successfully.')
# function for fetching existing cloud callhome data
def get_existing_cloud_callhome_data(self):
data = {}
command = 'lscloudcallhome'
command_options = None
cmdargs = None
data = self.restapi.svc_obj_info(command, command_options, cmdargs)
return data
# function for enabling cloud callhome
def enable_cloud_callhome(self):
if self.module.check_mode:
self.changed = True
return
command = 'chcloudcallhome'
command_options = {
'enable': True
}
cmdargs = None
self.restapi.svc_run_command(command, command_options, cmdargs)
self.changed = True
self.log('Cloud callhome enabled.')
# function for doing connection test for cloud callhome
def test_connection_cloud_callhome(self):
if self.module.check_mode:
self.changed = True
return
command = 'sendcloudcallhome'
command_options = {
'connectiontest': True
}
self.restapi.svc_run_command(command, command_options, None)
self.changed = True
self.log('Cloud callhome connection tested.')
# the connection testing can take some time to complete.
time.sleep(3)
# function for managing proxy server
def manage_proxy_server(self):
proxy_data = self.get_existing_proxy()
if proxy_data['enabled'] == 'no':
if self.proxy_type == 'no_proxy':
self.log('Proxy already disabled.')
else:
self.create_proxy()
self.changed = True
elif proxy_data['enabled'] == 'yes':
if self.proxy_type == 'no_proxy':
self.remove_proxy()
self.changed = True
else:
modify = self.probe_proxy(proxy_data)
if modify:
self.update_proxy(modify)
self.changed = True
# function for disabling cloud callhome
def disable_cloud_callhome(self):
if self.module.check_mode:
self.changed = True
return
command = 'chcloudcallhome'
command_options = {
'disable': True
}
cmdargs = None
self.restapi.svc_run_command(command, command_options, cmdargs)
self.log('Cloud callhome disabled.')
# function to initiate callhome with cloud
def initiate_cloud_callhome(self):
msg = ''
attempts = 0
limit_reached = False
active_status = False
# manage proxy server
self.manage_proxy_server()
# update email data
self.update_email_data()
# manage cloud callhome
lsdata = self.get_existing_cloud_callhome_data()
if lsdata['status'] == 'enabled':
# perform connection test
self.test_connection_cloud_callhome()
else:
self.enable_cloud_callhome()
# cloud callhome takes some time to get enabled.
while not active_status:
attempts += 1
if attempts > 10:
limit_reached = True
break
lsdata = self.get_existing_cloud_callhome_data()
if lsdata['status'] == 'enabled':
active_status = True
time.sleep(2)
if limit_reached:
# the module will exit without performing connection test.
msg = "Callhome with Cloud is enabled. Please check connection to proxy."
self.changed = True
return msg
if active_status:
# perform connection test
self.test_connection_cloud_callhome()
msg = "Callhome with Cloud enabled successfully."
self.changed = True
return msg
# function to initiate callhome with email notifications
def initiate_email_callhome(self):
msg = ''
# manage email server
email_server_exists = self.check_email_server_exists()
if email_server_exists:
self.log("Email server already exists.")
else:
self.create_email_server()
self.changed = True
# manage support email user
self.manage_support_email_user()
# manage local email user
email_user_exists = self.check_email_user_exists()
if email_user_exists:
email_user_modify = {}
if email_user_exists['inventory'] != self.inventory:
email_user_modify['inventory'] = self.inventory
if email_user_modify:
self.update_email_user(email_user_modify, email_user_exists['id'])
else:
self.create_email_user()
# manage email data
self.update_email_data()
# enable email callhome
self.enable_email_callhome()
msg = "Callhome with email enabled successfully."
self.changed = True
return msg
def apply(self):
self.changed = False
msg = None
self.basic_checks()
if self.state == 'enabled':
# enable cloud callhome
if self.callhome_type == 'cloud services':
msg = self.initiate_cloud_callhome()
# enable email callhome
elif self.callhome_type == 'email':
msg = self.initiate_email_callhome()
# enable both cloud and email callhome
elif self.callhome_type == 'both':
temp_msg = ''
temp_msg += self.initiate_cloud_callhome()
temp_msg += ' ' + self.initiate_email_callhome()
if temp_msg:
msg = temp_msg
# manage chsystem parameters
system_data = self.get_system_data()
system_modify = self.probe_system(system_data)
if system_modify:
self.update_system(system_modify)
elif self.state == 'disabled':
if self.callhome_type == 'cloud services':
cloud_callhome_data = self.get_existing_cloud_callhome_data()
if cloud_callhome_data['status'] == 'disabled':
msg = "Callhome with cloud already disabled."
elif cloud_callhome_data['status'] == 'enabled':
self.disable_cloud_callhome()
msg = "Callhome with cloud disabled successfully."
self.changed = True
elif self.callhome_type == 'email':
self.disable_email_callhome()
msg = "Callhome with email disabled successfully."
self.changed = True
elif self.callhome_type == 'both':
# disable email callhome
self.disable_email_callhome()
msg = "Callhome with email disabled successfully."
self.changed = True
# disable cloud callhome
cloud_callhome_data = self.get_existing_cloud_callhome_data()
if cloud_callhome_data['status'] == 'disabled':
msg += " Callhome with cloud already disabled."
elif cloud_callhome_data['status'] == 'enabled':
self.disable_cloud_callhome()
msg += " Callhome with cloud disabled successfully."
self.changed = True
self.module.exit_json(msg=msg, changed=self.changed)
def main():
v = IBMSVCCallhome()
try:
v.apply()
except Exception as e:
v.log("Exception in apply(): \n%s", format_exc())
v.module.fail_json(msg="Module failed. Error [%s]." % to_native(e))
if __name__ == '__main__':
main()

View File

@@ -0,0 +1,280 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2021 IBM CORPORATION
# Author(s): Sreshtant Bohidar <sreshtant.bohidar@ibm.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: ibm_svc_manage_consistgrp_flashcopy
short_description: This module manages FlashCopy consistency groups on IBM Spectrum Virtualize
family storage systems
description:
- Ansible interface to manage 'mkfcconsistgrp' and 'rmfcconsistgrp' volume commands.
version_added: "1.4.0"
options:
name:
description:
- Specifies the name of the FlashCopy consistency group.
required: true
type: str
state:
description:
- Creates (C(present)) or removes (C(absent)) a FlashCopy consistency group.
choices: [ present, absent ]
required: true
type: str
clustername:
description:
- The hostname or management IP of the Spectrum Virtualize storage system.
type: str
required: true
domain:
description:
- Domain for the Spectrum Virtualize storage system.
- Valid when hostname is used for the parameter I(clustername).
type: str
username:
description:
- REST API username for the Spectrum Virtualize storage system.
- The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
type: str
password:
description:
- REST API password for the Spectrum Virtualize storage system.
- The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
type: str
token:
description:
- The authentication token to verify a user on the Spectrum Virtualize storage system.
- To generate a token, use the M(ibm.spectrum_virtualize.ibm_svc_auth) module.
type: str
version_added: '1.5.0'
ownershipgroup:
description:
- Specifies the name of the ownership group.
- Parameters I(ownershipgroup) and I(noownershipgroup) are mutually exclusive.
- Valid when I(state=present), to create or modify a FlashCopy consistency group.
required: false
type: str
noownershipgroup:
description:
- If specified True, the consistency group is removed from all associated ownership groups.
- Parameters I(noownershipgroup) and I(ownershipgroup) are mutually exclusive.
- Valid when I(state=present), to modify a FlashCopy consistency group.
required: false
type: bool
force:
description:
- If specified True, removes all the associated FlashCopy mappings while deleting the FlashCopy consistency group.
- Valid when I(state=absent), to delete a FlashCopy consistency group.
required: false
type: bool
log_path:
description:
- Path of debug log file.
type: str
validate_certs:
description:
- Validates certification.
default: false
type: bool
author:
- Sreshtant Bohidar(@Sreshtant-Bohidar)
notes:
- This module supports C(check_mode).
'''
EXAMPLES = '''
- name: Create a FlashCopy consistency group
ibm.spectrum_virtualize.ibm_svc_manage_consistgrp_flashcopy:
clustername: "{{clustername}}"
domain: "{{domain}}"
username: "{{username}}"
password: "{{password}}"
log_path: /tmp/playbook.debug
name: consistgroup-name
state: present
ownershipgroup: ownershipgroup-name
- name: Delete a FlashCopy consistency group
ibm.spectrum_virtualize.ibm_svc_manage_consistgrp_flashcopy:
clustername: "{{clustername}}"
domain: "{{domain}}"
username: "{{username}}"
password: "{{password}}"
log_path: /tmp/playbook.debug
name: consistgroup-name
state: absent
force: true
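# An additional, hedged example (not part of the original module documentation):
# detaching an existing consistency group from all associated ownership groups.
- name: Remove a FlashCopy consistency group from its ownership group
  ibm.spectrum_virtualize.ibm_svc_manage_consistgrp_flashcopy:
    clustername: "{{clustername}}"
    domain: "{{domain}}"
    username: "{{username}}"
    password: "{{password}}"
    log_path: /tmp/playbook.debug
    name: consistgroup-name
    state: present
    noownershipgroup: true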
'''
RETURN = '''#'''
from traceback import format_exc
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.ibm.spectrum_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi, svc_argument_spec, get_logger
from ansible.module_utils._text import to_native
class IBMSVCFlashcopyConsistgrp(object):
def __init__(self):
argument_spec = svc_argument_spec()
argument_spec.update(
dict(
name=dict(type='str', required=True),
state=dict(type='str', required=True, choices=['present', 'absent']),
ownershipgroup=dict(type='str', required=False),
noownershipgroup=dict(type='bool', required=False),
force=dict(type='bool', required=False),
)
)
self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
# logging setup
log_path = self.module.params['log_path']
log = get_logger(self.__class__.__name__, log_path)
self.log = log.info
# Required
self.name = self.module.params['name']
self.state = self.module.params['state']
# Optional
self.ownershipgroup = self.module.params.get('ownershipgroup', False)
self.noownershipgroup = self.module.params.get('noownershipgroup', False)
self.force = self.module.params.get('force', False)
        # Handling missing mandatory parameter name
if not self.name:
self.module.fail_json(msg='Missing mandatory parameter: name')
self.restapi = IBMSVCRestApi(
module=self.module,
clustername=self.module.params['clustername'],
domain=self.module.params['domain'],
username=self.module.params['username'],
password=self.module.params['password'],
validate_certs=self.module.params['validate_certs'],
log_path=log_path,
token=self.module.params['token']
)
def get_existing_fcconsistgrp(self):
data = {}
data = self.restapi.svc_obj_info(cmd='lsfcconsistgrp', cmdopts=None,
cmdargs=[self.name])
return data
def fcconsistgrp_create(self):
if self.module.check_mode:
self.changed = True
return
cmd = 'mkfcconsistgrp'
cmdopts = {}
cmdopts['name'] = self.name
if self.ownershipgroup:
cmdopts['ownershipgroup'] = self.ownershipgroup
self.log("Creating fc consistgrp.. Command: %s opts %s", cmd, cmdopts)
result = self.restapi.svc_run_command(cmd, cmdopts, cmdargs=None)
if 'message' in result:
self.changed = True
self.log("Create fc consistgrp message %s", result['message'])
else:
self.module.fail_json(msg="Failed to create fc consistgrp [%s]" % self.name)
def fcconsistgrp_delete(self):
if self.module.check_mode:
self.changed = True
return
cmd = 'rmfcconsistgrp'
cmdopts = {}
if self.force:
cmdopts['force'] = self.force
self.log("Deleting fc consistgrp.. Command %s opts %s", cmd, cmdopts)
self.restapi.svc_run_command(cmd, cmdopts, cmdargs=[self.name])
def fcconsistgrp_probe(self, data):
props = {}
self.log("Probe which properties need to be updated...")
if not self.noownershipgroup:
if self.ownershipgroup and self.ownershipgroup != data["owner_name"]:
props["ownershipgroup"] = self.ownershipgroup
if self.noownershipgroup and data["owner_name"]:
props['noownershipgroup'] = self.noownershipgroup
return props
def fcconsistgrp_update(self, modify):
if self.module.check_mode:
self.changed = True
return
if modify:
self.log("updating fcmap with properties %s", modify)
cmd = 'chfcconsistgrp'
cmdopts = {}
for prop in modify:
cmdopts[prop] = modify[prop]
cmdargs = [self.name]
self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
def apply(self):
changed = False
msg = None
modify = []
gdata = self.get_existing_fcconsistgrp()
if gdata:
if self.state == "absent":
self.log("fc consistgrp [%s] exist, but requested state is 'absent'", self.name)
changed = True
elif self.state == "present":
modify = self.fcconsistgrp_probe(gdata)
if modify:
changed = True
else:
if self.state == "present":
self.log("fc consistgrp [%s] doesn't exist, but requested state is 'present'", self.name)
changed = True
if changed:
if self.state == "absent":
self.fcconsistgrp_delete()
msg = "fc consistgrp [%s] has been deleted" % self.name
elif self.state == "present" and modify:
self.fcconsistgrp_update(modify)
msg = "fc consistgrp [%s] has been modified" % self.name
elif self.state == "present" and not modify:
self.fcconsistgrp_create()
msg = "fc consistgrp [%s] has been created" % self.name
if self.module.check_mode:
msg = 'skipping changes due to check mode.'
else:
if self.state == "absent":
msg = "fc consistgrp [%s] does not exist" % self.name
elif self.state == "present":
msg = "fc consistgrp [%s] already exists" % self.name
self.module.exit_json(msg=msg, changed=changed)
def main():
v = IBMSVCFlashcopyConsistgrp()
try:
v.apply()
except Exception as e:
v.log("Exception in apply(): \n%s", format_exc())
v.module.fail_json(msg="Module failed. Error [%s]." % to_native(e))
if __name__ == '__main__':
main()

View File

@@ -0,0 +1,401 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2020 IBM CORPORATION
# Author(s): Shilpi Jain <shilpi.jain1@ibm.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: ibm_svc_manage_cv
short_description: This module manages the change volume for a given volume on IBM
Spectrum Virtualize family storage systems
description:
- Ansible interface to manage the change volume in remote copy replication on IBM Spectrum Virtualize family storage systems.
version_added: "1.3.0"
options:
state:
description:
            - Creates or updates (C(present)) or removes (C(absent)) a change volume.
choices: [absent, present]
required: true
type: str
rname:
description:
- Specifies the name of the remote copy relationship.
required: true
type: str
cvname:
description:
- Specifies the name to assign to the master or auxiliary change volume.
required: true
type: str
basevolume:
description:
- Specifies the base volume name (master or auxiliary).
- Required when I(state=present), to create the change volume.
type: str
ismaster:
description:
- Specifies whether the change volume is being (dis)associated with master cluster.
- Required when the change volume is being associated or disassociated from the master cluster.
type: bool
default: true
clustername:
description:
- The hostname or management IP of the Spectrum Virtualize storage system.
type: str
required: true
domain:
description:
- Domain for the Spectrum Virtualize storage system.
- Valid when hostname is used for the parameter I(clustername).
type: str
username:
description:
- REST API username for the Spectrum Virtualize storage system.
- The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
type: str
password:
description:
- REST API password for the Spectrum Virtualize storage system.
- The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
type: str
token:
description:
- The authentication token to verify a user on the Spectrum Virtualize storage system.
- To generate a token, use the ibm_svc_auth module.
type: str
version_added: '1.5.0'
validate_certs:
description:
- Validates certification.
default: false
type: bool
log_path:
description:
- Path of debug log file.
type: str
author:
- Shilpi Jain(@Shilpi-Jain1)
notes:
- This module supports C(check_mode).
'''
EXAMPLES = '''
- name: Create master change volume and associate with rcopy
ibm.spectrum_virtualize.ibm_svc_manage_cv:
clustername: "{{clustername}}"
domain: "{{domain}}"
username: "{{username}}"
password: "{{password}}"
log_path: /tmp/playbook.debug
state: present
rname: sample_rcopy
cvname: vol1_cv
basevolume: vol1
- name: Create auxiliary change volume and associate with rcopy
ibm.spectrum_virtualize.ibm_svc_manage_cv:
clustername: "{{clustername}}"
domain: "{{domain}}"
username: "{{username}}"
password: "{{password}}"
log_path: /tmp/playbook.debug
state: present
rname: sample_rcopy
cvname: vol2_aux_cv
basevolume: vol2
ismaster: false
- name: Delete master change volume and disassociate from rcopy
ibm.spectrum_virtualize.ibm_svc_manage_cv:
clustername: "{{clustername}}"
domain: "{{domain}}"
username: "{{username}}"
password: "{{password}}"
log_path: /tmp/playbook.debug
state: absent
rname: sample_rcopy
cvname: vol1_cv
- name: Delete auxiliary change volume and disassociate from rcopy
ibm.spectrum_virtualize.ibm_svc_manage_cv:
clustername: "{{clustername}}"
domain: "{{domain}}"
username: "{{username}}"
password: "{{password}}"
log_path: /tmp/playbook.debug
state: absent
rname: sample_rcopy
cvname: vol2_aux_cv
ismaster: false
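# An additional, hedged example (not part of the original module documentation):
# authenticating with a token generated by the ibm_svc_auth module; the
# result.token field name is an assumption based on that module's usage.
- name: Obtain an authentication token
  register: result
  ibm.spectrum_virtualize.ibm_svc_auth:
    clustername: "{{clustername}}"
    domain: "{{domain}}"
    username: "{{username}}"
    password: "{{password}}"
- name: Create master change volume and associate with rcopy using the token
  ibm.spectrum_virtualize.ibm_svc_manage_cv:
    clustername: "{{clustername}}"
    domain: "{{domain}}"
    token: "{{result.token}}"
    log_path: /tmp/playbook.debug
    state: present
    rname: sample_rcopy
    cvname: vol1_cv
    basevolume: vol1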
'''
RETURN = '''#'''
from traceback import format_exc
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.ibm.spectrum_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi, svc_argument_spec, get_logger
from ansible.module_utils._text import to_native
class IBMSVCchangevolume(object):
def __init__(self):
argument_spec = svc_argument_spec()
argument_spec.update(
dict(
state=dict(type='str',
required=True,
choices=['present', 'absent']),
rname=dict(type='str', required=True),
cvname=dict(type='str', required=True),
basevolume=dict(type='str'),
ismaster=dict(type='bool', default=True)
)
)
self.module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
# logging setup
log_path = self.module.params['log_path']
log = get_logger(self.__class__.__name__, log_path)
self.log = log.info
# Required
self.state = self.module.params['state']
self.rname = self.module.params['rname']
self.cvname = self.module.params['cvname']
# Optional
self.basevolume = self.module.params['basevolume']
self.ismaster = self.module.params['ismaster']
# Handling missing mandatory parameter rname
if not self.rname:
self.module.fail_json(msg='Missing mandatory parameter: rname')
# Handling missing mandatory parameter cvname
if not self.cvname:
self.module.fail_json(msg='Missing mandatory parameter: cvname')
self.restapi = IBMSVCRestApi(
module=self.module,
clustername=self.module.params['clustername'],
domain=self.module.params['domain'],
username=self.module.params['username'],
password=self.module.params['password'],
validate_certs=self.module.params['validate_certs'],
log_path=log_path,
token=self.module.params['token']
)
def get_existing_rc(self):
"""
find the remote copy relationships such as Metro Mirror, Global Mirror
relationships visible to the system.
Returns:
None if no matching instances or a list including all the matching
instances
"""
self.log('Trying to get the remote copy relationship %s', self.rname)
data = self.restapi.svc_obj_info(cmd='lsrcrelationship',
cmdopts=None, cmdargs=[self.rname])
return data
def get_existing_vdisk(self, volname):
merged_result = {}
data = self.restapi.svc_obj_info(cmd='lsvdisk', cmdopts={'bytes': True},
cmdargs=[volname])
if not data:
self.log("source volume %s does not exist", volname)
return
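        # the detailed lsvdisk view may be returned as a list of dicts
        # (for example one per volume copy); merge them into a single dict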
if isinstance(data, list):
for d in data:
merged_result.update(d)
else:
merged_result = data
return merged_result
def change_volume_attach(self, rcrelationship_data):
cmdopts = {}
if rcrelationship_data['copy_type'] != 'global':
self.module.fail_json(msg="Relationship '%s' type must be global" % self.rname)
if self.ismaster:
cmdopts['masterchange'] = self.cvname
else:
cmdopts['auxchange'] = self.cvname
# command
cmd = 'chrcrelationship'
cmdargs = [self.rname]
self.log("updating chrcrelationship %s with properties %s", cmd, cmdopts)
# Run command
self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
self.changed = True
self.log("Updated remote copy relationship ")
def change_volume_detach(self, rcrelationship_data):
cmdopts = {}
if self.ismaster:
cmdopts = {'nomasterchange': True}
else:
cmdopts = {'noauxchange': True}
# command
cmd = 'chrcrelationship'
cmdargs = [self.rname]
self.log("updating chrcrelationship %s with properties %s", cmd, cmdopts)
# Run command
self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
self.changed = True
self.log("Updated remote copy relationship ")
def change_volume_probe(self):
is_update_required = False
rcrelationship_data = self.get_existing_rc()
if not rcrelationship_data:
self.module.fail_json(msg="Relationship '%s' does not exists, relationship must exists before calling this module" % self.rname)
if self.ismaster:
if self.cvname == rcrelationship_data['master_change_vdisk_name']:
self.log("Master change volume %s is already attached to the relationship", self.cvname)
elif rcrelationship_data['master_change_vdisk_name'] != '':
self.module.fail_json(msg="Master change volume %s is already attached to the relationship" % rcrelationship_data['master_change_vdisk_name'])
else:
is_update_required = True
else:
if self.cvname == rcrelationship_data['aux_change_vdisk_name']:
self.log("Aux change volume %s is already attached to the relationship", self.cvname)
elif rcrelationship_data['aux_change_vdisk_name'] != '':
self.module.fail_json(msg="Aux change volume %s is already attached to the relationship" % rcrelationship_data['aux_change_vdisk_name'])
else:
is_update_required = True
return is_update_required
def change_volume_delete(self):
# command
cmd = 'rmvolume'
cmdopts = None
cmdargs = [self.cvname]
# Run command
self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
self.changed = True
self.log("Delete vdisk %s", self.cvname)
def change_volume_create(self):
if self.module.check_mode:
self.changed = True
return
if not self.basevolume:
self.module.fail_json(msg="You must pass in name of the master or auxiliary volume.")
# lsvdisk <basevolume>
vdisk_data = self.get_existing_vdisk(self.basevolume)
if not vdisk_data:
self.module.fail_json(msg="%s volume does not exist, change volume not created" % self.basevolume)
# Make command
cmd = 'mkvdisk'
cmdopts = {}
cmdopts['name'] = self.cvname
cmdopts['mdiskgrp'] = vdisk_data['mdisk_grp_name']
cmdopts['size'] = vdisk_data['capacity']
cmdopts['unit'] = 'b'
cmdopts['rsize'] = '0%'
cmdopts['autoexpand'] = True
cmdopts['iogrp'] = vdisk_data['IO_group_name']
self.log("creating vdisk command %s opts %s", cmd, cmdopts)
# Run command
result = self.restapi.svc_run_command(cmd, cmdopts, cmdargs=None)
if 'message' in result:
self.changed = True
self.log("Create vdisk result message %s", result['message'])
else:
self.module.fail_json(
msg="Failed to create vdisk [%s]" % self.cvname)
def apply(self):
changed = False
msg = None
modify = []
vdisk_data = self.get_existing_vdisk(self.cvname)
if vdisk_data:
if self.state == 'absent':
self.log(
"CHANGED: Change volume exists, requested state is 'absent'")
changed = True
elif self.state == 'present':
modify = self.change_volume_probe()
if modify:
changed = True
else:
self.log("No change detected")
else:
if self.state == 'present':
changed = True
self.log("CHANGED: Change volume does not exist, but requested state is '%s'", self.state)
if changed:
if self.module.check_mode:
msg = 'skipping changes due to check mode.'
else:
rcrelationship_data = self.get_existing_rc()
if not rcrelationship_data:
self.module.fail_json(msg="Relationship '%s' does not exists, relationship must exists before calling this module" % self.rname)
else:
if self.state == 'present' and modify:
self.change_volume_attach(rcrelationship_data)
msg = "Change volume %s configured to the remote copy relationship." % self.cvname
elif self.state == 'present':
self.change_volume_create()
self.change_volume_attach(rcrelationship_data)
msg = "vdisk %s has been created and configured to remote copy relationship." % self.cvname
elif self.state == 'absent':
self.change_volume_detach(rcrelationship_data)
self.change_volume_delete()
msg = "vdisk %s has been deleted and detached from remote copy relationship." % self.cvname
else:
self.log("Exiting with no changes")
if self.state in ['absent']:
msg = "Change volume [%s] does not exist." % self.cvname
else:
msg = "No Modifications detected, Change volume [%s] already configured." % self.cvname
self.module.exit_json(msg=msg, changed=changed)
def main():
v = IBMSVCchangevolume()
try:
v.apply()
except Exception as e:
v.log("Exception in apply(): \n%s", format_exc())
v.module.fail_json(msg="Module failed. Error [%s]." % to_native(e))
if __name__ == '__main__':
main()

View File

@@ -0,0 +1,484 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2021 IBM CORPORATION
# Author(s): Sreshtant Bohidar <sreshtant.bohidar@ibm.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: ibm_svc_manage_flashcopy
short_description: This module manages FlashCopy mappings on IBM Spectrum Virtualize
family storage systems
description:
- Ansible interface to manage 'mkfcmap', 'rmfcmap', and 'chfcmap' volume commands.
version_added: "1.4.0"
options:
name:
description:
- Specifies the name of the FlashCopy mapping.
required: true
type: str
state:
description:
- Creates or updates (C(present)) or removes (C(absent)) a FlashCopy mapping.
choices: [ present, absent ]
required: true
type: str
clustername:
description:
- The hostname or management IP of the Spectrum Virtualize storage system.
type: str
required: true
domain:
description:
- Domain for the Spectrum Virtualize storage system.
- Valid when hostname is used for the parameter I(clustername).
type: str
username:
description:
- REST API username for the Spectrum Virtualize storage system.
- The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
type: str
password:
description:
- REST API password for the Spectrum Virtualize storage system.
- The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
type: str
token:
description:
- The authentication token to verify a user on the Spectrum Virtualize storage system.
- To generate a token, use the ibm_svc_auth module.
type: str
version_added: '1.5.0'
copytype:
description:
- Specifies the copy type when creating the FlashCopy mapping.
- Required when I(state=present), to create a FlashCopy mapping.
choices: [ snapshot, clone]
type: str
source:
description:
- Specifies the name of the source volume.
- Required when I(state=present), to create a FlashCopy mapping.
type: str
target:
description:
- Specifies the name of the target volume.
- Required when I(state=present), to create a FlashCopy mapping.
type: str
mdiskgrp:
description:
- Specifies the name of the storage pool to use when creating the target volume.
- If unspecified, the pool associated with the source volume is used.
- Valid when I(state=present), to create a FlashCopy mapping.
type: str
consistgrp:
description:
- Specifies the name of the consistency group to which the FlashCopy mapping is to be added.
- Parameters I(consistgrp) and I(noconsistgrp) are mutually exclusive.
- Valid when I(state=present), to create or modify a FlashCopy mapping.
type: str
noconsistgrp:
description:
- If specified True, FlashCopy mapping is removed from the consistency group.
- Parameters I(noconsistgrp) and I(consistgrp) are mutually exclusive.
- Valid when I(state=present), to modify a FlashCopy mapping.
type: bool
copyrate:
description:
- Specifies the copy rate. The rate varies between 0-150.
- If unspecified, the default copy rate of 50 for clone and 0 for snapshot is used.
- Valid when I(state=present), to create or modify a FlashCopy mapping.
type: str
grainsize:
description:
- Specifies the grain size for the FlashCopy mapping.
- The grainsize can be set to 64 or 256. The default value is 256.
- Valid when I(state=present), to create a FlashCopy mapping.
type: str
force:
description:
- Brings the target volume online. This parameter is required if the FlashCopy mapping is in the stopped state.
- Valid when I(state=absent), to delete a FlashCopy mapping.
type: bool
validate_certs:
description:
- Validates certification.
default: false
type: bool
log_path:
description:
- Path of debug log file.
type: str
author:
- Sreshtant Bohidar(@Sreshtant-Bohidar)
notes:
- This module supports C(check_mode).
'''
EXAMPLES = '''
- name: Create FlashCopy mapping for snapshot
ibm.spectrum_virtualize.ibm_svc_manage_flashcopy:
clustername: "{{clustername}}"
domain: "{{domain}}"
username: "{{username}}"
password: "{{password}}"
log_path: /tmp/playbook.debug
state: present
name: snapshot-name
copytype: snapshot
source: source-volume-name
target: target-volume-name
mdiskgrp: Pool0
consistgrp: consistencygroup-name
copyrate: 50
grainsize: 64
- name: Create FlashCopy mapping for clone
ibm.spectrum_virtualize.ibm_svc_manage_flashcopy:
clustername: "{{clustername}}"
domain: "{{domain}}"
username: "{{username}}"
password: "{{password}}"
log_path: /tmp/playbook.debug
state: present
name: snapshot-name
copytype: clone
source: source-volume-name
target: target-volume-name
mdiskgrp: Pool0
consistgrp: consistencygroup-name
copyrate: 50
grainsize: 64
- name: Delete FlashCopy mapping for snapshot
ibm.spectrum_virtualize.ibm_svc_manage_flashcopy:
clustername: "{{clustername}}"
domain: "{{domain}}"
username: "{{username}}"
password: "{{password}}"
log_path: /tmp/playbook.debug
name: snapshot-name
state: absent
force: true
- name: Delete FlashCopy mapping for clone
ibm.spectrum_virtualize.ibm_svc_manage_flashcopy:
clustername: "{{clustername}}"
domain: "{{domain}}"
username: "{{username}}"
password: "{{password}}"
log_path: /tmp/playbook.debug
name: clone-name
state: absent
force: true
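# An additional, hedged example (not part of the original module documentation):
# an existing mapping can be updated in place, for example to change its copy rate
# or move it into a different consistency group.
- name: Modify the copy rate of an existing FlashCopy mapping
  ibm.spectrum_virtualize.ibm_svc_manage_flashcopy:
    clustername: "{{clustername}}"
    domain: "{{domain}}"
    username: "{{username}}"
    password: "{{password}}"
    log_path: /tmp/playbook.debug
    state: present
    name: snapshot-name
    copyrate: 100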
'''
RETURN = '''#'''
from traceback import format_exc
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.ibm.spectrum_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi, svc_argument_spec, get_logger
from ansible.module_utils._text import to_native
import time
class IBMSVCFlashcopy(object):
def __init__(self):
argument_spec = svc_argument_spec()
argument_spec.update(
dict(
name=dict(type='str', required=True),
copytype=dict(type='str', required=False, choices=['snapshot', 'clone']),
source=dict(type='str', required=False),
target=dict(type='str', required=False),
mdiskgrp=dict(type='str', required=False),
state=dict(type='str', required=True, choices=['present', 'absent']),
consistgrp=dict(type='str', required=False),
noconsistgrp=dict(type='bool', required=False),
copyrate=dict(type='str', required=False),
grainsize=dict(type='str', required=False),
force=dict(type='bool', required=False)
)
)
self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
# logging setup
log_path = self.module.params['log_path']
log = get_logger(self.__class__.__name__, log_path)
self.log = log.info
# Required
self.name = self.module.params['name']
self.state = self.module.params['state']
# Optional
self.copytype = self.module.params.get('copytype', False)
self.source = self.module.params.get('source', False)
self.target = self.module.params.get('target', False)
self.mdiskgrp = self.module.params.get('mdiskgrp', False)
self.consistgrp = self.module.params.get('consistgrp', False)
self.noconsistgrp = self.module.params.get('noconsistgrp', False)
self.grainsize = self.module.params.get('grainsize', False)
self.copyrate = self.module.params.get('copyrate', False)
self.force = self.module.params.get('force', False)
        # Handling for mandatory parameter name
if not self.name:
self.module.fail_json(msg="Missing mandatory parameter: name")
        # Handling for mandatory parameter state
if not self.state:
self.module.fail_json(msg="Missing mandatory parameter: state")
self.restapi = IBMSVCRestApi(
module=self.module,
clustername=self.module.params['clustername'],
domain=self.module.params['domain'],
username=self.module.params['username'],
password=self.module.params['password'],
validate_certs=self.module.params['validate_certs'],
log_path=log_path,
token=self.module.params['token']
)
def run_command(self, cmd):
return self.restapi.svc_obj_info(cmd=cmd[0], cmdopts=cmd[1], cmdargs=cmd[2])
def gather_data(self):
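        # result holds, in order: fcmap data, source vdisk data, target vdisk data
        # and a list of <target>_temp_* vdisks; entries that were not queried stay
        # None (or an empty list for the temp volumes)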
result = [None, None, None, []]
commands = [["lsfcmap", None, [self.name]]]
if self.state == "present" and self.source:
commands.append(["lsvdisk", {'bytes': True, 'filtervalue': 'name=%s' % self.source}, None])
if self.state == "present" and self.target:
commands.append(["lsvdisk", {'bytes': True, 'filtervalue': 'name=%s' % self.target}, None])
commands.append(["lsvdisk", {'bytes': True, 'filtervalue': 'name=%s' % self.target + "_temp_*"}, None])
res = list(map(self.run_command, commands))
if len(res) == 1:
result[0] = res[0]
elif len(res) == 2:
result[0] = res[0]
result[1] = res[1]
elif len(res) == 4:
result = res
return result
def target_create(self, temp_target_name, sdata):
cmd = 'mkvdisk'
cmdopts = {}
cmdopts['name'] = temp_target_name
if self.mdiskgrp:
cmdopts['mdiskgrp'] = self.mdiskgrp
else:
cmdopts['mdiskgrp'] = sdata['mdisk_grp_name']
cmdopts['size'] = sdata['capacity']
cmdopts['unit'] = 'b'
cmdopts['iogrp'] = sdata['IO_group_name']
if self.copytype == 'snapshot':
cmdopts['rsize'] = '0%'
cmdopts['autoexpand'] = True
if self.module.check_mode:
self.changed = True
return
self.log("Creating vdisk.. Command %s opts %s", cmd, cmdopts)
# Run command
result = self.restapi.svc_run_command(cmd, cmdopts, cmdargs=None)
self.log("Create target volume result %s", result)
if 'message' in result:
self.changed = True
self.log("Create target volume result message %s",
result['message'])
else:
self.module.fail_json(
msg="Failed to create target volume [%s]" % self.target)
def fcmap_create(self, temp_target_name):
if self.copyrate:
if self.copytype == 'clone':
if int(self.copyrate) not in range(1, 151):
self.module.fail_json(msg="Copyrate for clone must be in range 1-150")
if self.copytype == 'snapshot':
if int(self.copyrate) not in range(0, 151):
self.module.fail_json(msg="Copyrate for snapshot must be in range 0-150")
else:
if self.copytype == 'clone':
self.copyrate = 50
elif self.copytype == 'snapshot':
self.copyrate = 0
if self.module.check_mode:
self.changed = True
return
cmd = 'mkfcmap'
cmdopts = {}
cmdopts['name'] = self.name
cmdopts['source'] = self.source
cmdopts['target'] = temp_target_name
cmdopts['copyrate'] = self.copyrate
if self.grainsize:
cmdopts['grainsize'] = self.grainsize
if self.consistgrp:
cmdopts['consistgrp'] = self.consistgrp
if self.copytype == 'clone':
cmdopts['autodelete'] = True
self.log("Creating fc mapping.. Command %s opts %s",
cmd, cmdopts)
# Run command
result = self.restapi.svc_run_command(cmd, cmdopts, cmdargs=None)
self.log("Create flash copy mapping relationship result %s", result)
if 'message' in result:
self.changed = True
self.log("Create flash copy mapping relationship result "
"message %s", result['message'])
else:
self.module.fail_json(msg="Failed to create FlashCopy mapping "
"relationship [%s]" % self.name)
def fcmap_delete(self):
self.log("Deleting flash copy mapping relationship'%s'", self.name)
if self.module.check_mode:
self.changed = True
return
cmd = 'rmfcmap'
cmdopts = {}
if self.force:
cmdopts['force'] = self.force
self.restapi.svc_run_command(cmd, cmdopts, cmdargs=[self.name])
def rename_temp_to_target(self, temp_name):
if self.module.check_mode:
self.changed = True
return
cmd = 'chvdisk'
cmdopts = {}
cmdopts['name'] = self.target
self.log("Rename %s to %s", cmd, cmdopts)
self.restapi.svc_run_command(cmd, cmdopts, cmdargs=[temp_name])
def fcmap_probe(self, data):
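# Compare the requested parameters against the existing mapping returned by 'lsfcmap'.
# source, target, copytype and grainsize cannot be changed on an existing mapping;
# only consistgrp and copyrate differences are returned as updatable properties.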
props = {}
props_not_supported = []
if self.source:
if data["source_vdisk_name"] != self.source:
props_not_supported.append("source")
if self.target:
if data["target_vdisk_name"] != self.target:
props_not_supported.append("target")
if self.copytype:
if (self.copytype == "snapshot" and data['autodelete'] == "on") or (self.copytype == "clone" and data["autodelete"] != "on"):
props_not_supported.append("copytype")
if self.grainsize:
if data['grain_size'] != self.grainsize:
props_not_supported.append("grainsize")
if props_not_supported:
self.module.fail_json(msg="Update not supported for parameter: " + ", ".join(props_not_supported))
self.log("Probe which properties need to be updated...")
if data['group_name'] and self.noconsistgrp:
props['consistgrp'] = 0
if not self.noconsistgrp:
if self.consistgrp:
if self.consistgrp != data['group_name']:
props['consistgrp'] = self.consistgrp
if self.copyrate:
if self.copyrate != data['copy_rate']:
props['copyrate'] = self.copyrate
return props
def fcmap_update(self, modify):
if self.module.check_mode:
self.changed = True
return
if modify:
self.log("updating fcmap with properties %s", modify)
cmd = 'chfcmap'
cmdopts = {}
for prop in modify:
cmdopts[prop] = modify[prop]
cmdargs = [self.name]
self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
def apply(self):
changed = False
msg = None
modify = []
mdata, sdata, tdata, temp = self.gather_data()
if mdata:
if self.state == "present":
modify = self.fcmap_probe(mdata)
if modify:
changed = True
else:
msg = "mapping [%s] already exists" % self.name
elif self.state == "absent":
changed = True
else:
if self.state == "present":
if not sdata:
self.module.fail_json(msg="The source volume [%s] doesn't exist." % self.source)
if tdata:
if sdata[0]["capacity"] == tdata[0]["capacity"]:
if self.copytype == 'clone':
msg = "target [%s] already exists." % self.target
elif self.copytype == 'snapshot':
msg = "target [%s] already exists, fcmap would not be created." % self.target
elif sdata[0]["capacity"] != tdata[0]["capacity"]:
self.module.fail_json(msg="source and target must be of same size")
if sdata and not tdata:
changed = True
elif self.state == "absent":
msg = "mapping [%s] does not exist" % self.name
if changed:
if self.state == "present" and not modify:
if None in [self.source, self.target, self.copytype]:
self.module.fail_json(msg="Required while creating FlashCopy mapping: 'source', 'target' and 'copytype'")
temp_target = "%s_temp_%s" % (self.target, time.time())
if len(temp) == 0:
self.target_create(temp_target, sdata[0])
self.fcmap_create(temp_target)
self.rename_temp_to_target(temp_target)
msg = "mapping [%s] has been created" % self.name
elif len(temp) == 1:
self.fcmap_create(temp[0]["name"])
self.rename_temp_to_target(temp[0]["name"])
msg = "mapping [%s] has been created" % self.name
elif len(temp) > 1:
self.module.fail_json(msg="Multiple %s_temp_* volumes exists" % self.target)
elif self.state == "present" and modify:
self.fcmap_update(modify)
msg = "mapping [%s] has been modified" % self.name
elif self.state == "absent":
self.fcmap_delete()
msg = "mapping [%s] has been deleted" % self.name
if self.module.check_mode:
msg = 'skipping changes due to check mode.'
else:
if self.state == "absent":
msg = "mapping [%s] does not exist" % self.name
self.module.exit_json(msg=msg, changed=changed)
def main():
v = IBMSVCFlashcopy()
try:
v.apply()
except Exception as e:
v.log("Exception in apply(): \n%s", format_exc())
v.module.fail_json(msg="Module failed. Error [%s]." % to_native(e))
if __name__ == '__main__':
main()


@@ -0,0 +1,317 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2022 IBM CORPORATION
# Author(s): Sreshtant Bohidar <sreshtant.bohidar@ibm.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: ibm_svc_manage_ip
short_description: This module manages IP provisioning on IBM Spectrum Virtualize family storage systems
description:
- Ansible interface to manage 'mkip' and 'rmip' commands.
- This module can run on all IBM Spectrum Virtualize storage running on 8.4.2.0 or later.
version_added: "1.8.0"
options:
state:
description:
- Creates (C(present)) or removes (C(absent)) an IP address.
choices: [ present, absent ]
required: true
type: str
clustername:
description:
- The hostname or management IP of the Spectrum Virtualize storage system.
type: str
required: true
domain:
description:
- Domain for the Spectrum Virtualize storage system.
- Valid when hostname is used for the parameter I(clustername).
type: str
username:
description:
- REST API username for the Spectrum Virtualize storage system.
- The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
type: str
password:
description:
- REST API password for the Spectrum Virtualize storage system.
- The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
type: str
token:
description:
- The authentication token to verify a user on the Spectrum Virtualize storage system.
- To generate a token, use the ibm_svc_auth module.
type: str
node:
description:
- Specifies the name of the node.
type: str
required: true
port:
description:
- Specifies a port in the range 1 - 16 to which the IP is assigned.
type: int
required: true
portset:
description:
- Specifies the name of the portset object.
type: str
ip_address:
description:
- Specifies a valid ipv4/ipv6 address.
type: str
required: true
subnet_prefix:
description:
- Specifies the prefix of subnet mask.
- Applies when I(state=present).
type: int
gateway:
description:
- Specifies the gateway address.
- Applies when I(state=present).
type: str
vlan:
description:
- Specifies a VLAN ID in the range 1 - 4096.
- Applies when I(state=present).
type: int
shareip:
description:
- Specifies whether the IP is shared between multiple portsets.
- Applies when I(state=present).
type: bool
validate_certs:
description:
- Validates certification.
default: false
type: bool
log_path:
description:
- Path of debug log file.
type: str
author:
- Sreshtant Bohidar(@Sreshtant-Bohidar)
notes:
- This module supports C(check_mode).
'''
EXAMPLES = '''
- name: Create IP provisioning
ibm.spectrum_virtualize.ibm_svc_manage_ip:
clustername: "{{cluster}}"
username: "{{username}}"
password: "{{password}}"
log_path: /tmp/playbook.debug
node: node1
port: 1
portset: portset0
ip_address: x.x.x.x
subnet_prefix: 20
gateway: x.x.x.x
vlan: 1
shareip: true
state: present
- name: Remove IP provisioning
ibm.spectrum_virtualize.ibm_svc_manage_ip:
clustername: "{{cluster}}"
username: "{{username}}"
password: "{{password}}"
log_path: /tmp/playbook.debug
node: node1
port: 1
portset: portset0
ip_address: x.x.x.x
state: absent
'''
RETURN = '''#'''
from traceback import format_exc
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.ibm.spectrum_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi, svc_argument_spec, get_logger
from ansible.module_utils._text import to_native
class IBMSVCIp(object):
def __init__(self):
argument_spec = svc_argument_spec()
argument_spec.update(
dict(
node=dict(type='str', required=True),
state=dict(type='str', required=True, choices=['present', 'absent']),
port=dict(type='int', required=True),
portset=dict(type='str'),
ip_address=dict(type='str', required=True),
subnet_prefix=dict(type='int'),
gateway=dict(type='str'),
vlan=dict(type='int'),
shareip=dict(type='bool')
)
)
self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
# logging setup
log_path = self.module.params['log_path']
log = get_logger(self.__class__.__name__, log_path)
self.log = log.info
# Required
self.node = self.module.params['node']
self.state = self.module.params['state']
self.port = self.module.params['port']
self.ip_address = self.module.params.get('ip_address', False)
# Optional
self.portset = self.module.params.get('portset', False)
self.subnet_prefix = self.module.params.get('subnet_prefix', False)
self.gateway = self.module.params.get('gateway', False)
self.vlan = self.module.params.get('vlan', False)
self.shareip = self.module.params.get('shareip', False)
# Initialize changed variable
self.changed = False
# creating an instance of IBMSVCRestApi
self.restapi = IBMSVCRestApi(
module=self.module,
clustername=self.module.params['clustername'],
domain=self.module.params['domain'],
username=self.module.params['username'],
password=self.module.params['password'],
validate_certs=self.module.params['validate_certs'],
log_path=log_path,
token=self.module.params['token']
)
def basic_checks(self):
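# Validate the parameter combination for the requested state:
# 'present' requires node, port, ip_address and subnet_prefix, while
# 'absent' requires node, port and ip_address and rejects subnet_prefix,
# gateway, vlan and shareip.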
if not self.state:
self.module.fail_json(msg="The parameter [state] is required.")
if self.state == 'present':
required_when_present = {
'node': self.node,
'port': self.port,
'ip_address': self.ip_address,
'subnet_prefix': self.subnet_prefix
}
missing_present = [item for item, value in required_when_present.items() if not value]
if missing_present:
self.module.fail_json(msg="The parameter {0} is required when state is present.".format(missing_present))
if self.state == 'absent':
required_when_absent = {
'node': self.node,
'port': self.port,
'ip_address': self.ip_address
}
not_required_when_absent = {
'subnet_prefix': self.subnet_prefix,
'gateway': self.gateway,
'vlan': self.vlan,
'shareip': self.shareip
}
missing_absent = [item for item, value in required_when_absent.items() if not value]
if missing_absent:
self.module.fail_json(msg="The parameter {0} is required when state is absent.".format(missing_absent))
not_applicable_absent = [item for item, value in not_required_when_absent.items() if value]
if not_applicable_absent:
self.module.fail_json(msg="The parameter {0} are not applicable when state is absent.".format(not_applicable_absent))
def get_ip_info(self):
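# List all IP addresses with 'lsip' and filter on node, port and IP address
# (and on portset, when provided) to locate the entry to act on.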
all_data = self.restapi.svc_obj_info(cmd='lsip', cmdopts=None, cmdargs=None)
if self.portset:
data = list(
filter(
lambda item: item['node_name'] == self.node and
item['port_id'] == str(self.port) and
item['portset_name'] == self.portset and
item['IP_address'] == self.ip_address, all_data
)
)
else:
data = list(
filter(
lambda item: item['node_name'] == self.node and
item['port_id'] == str(self.port) and
item['IP_address'] == self.ip_address, all_data
)
)
if len(data) > 1:
self.module.fail_json(msg="Module could not find the exact IP with [node, port, ip_address]. Please also use [portset].")
self.log('GET: IP data: %s', data)
return data
def create_ip(self):
if self.module.check_mode:
self.changed = True
return
command = 'mkip'
command_options = {
'node': self.node,
'port': self.port,
'ip': self.ip_address,
'prefix': self.subnet_prefix
}
if self.portset:
command_options['portset'] = self.portset
if self.gateway:
command_options['gw'] = self.gateway
if self.vlan:
command_options['vlan'] = self.vlan
if self.shareip:
command_options['shareip'] = self.shareip
result = self.restapi.svc_run_command(command, command_options, cmdargs=None)
self.log("create IP result %s", result)
if 'message' in result:
self.changed = True
self.log("create IP result message %s", result['message'])
else:
self.module.fail_json(
msg="Failed to create IP [%s]" % self.ip_address)
def remove_ip(self, ip_address_id):
if self.module.check_mode:
self.changed = True
return
command = 'rmip'
command_options = None
cmdargs = [ip_address_id]
self.restapi.svc_run_command(command, command_options, cmdargs)
self.changed = True
self.log("removed IP '%s'", self.ip_address)
def apply(self):
msg = None
self.basic_checks()
if self.state == 'present':
self.create_ip()
msg = "IP address {0} has been created.".format(self.ip_address)
elif self.state == 'absent':
ip_data = self.get_ip_info()
if ip_data:
self.remove_ip(ip_data[0]['id'])
msg = "IP address {0} has been removed.".format(self.ip_address)
else:
msg = "IP address {0} does not exist.".format(self.ip_address)
self.module.exit_json(msg=msg, changed=self.changed)
def main():
v = IBMSVCIp()
try:
v.apply()
except Exception as e:
v.log("Exception in apply(): \n%s", format_exc())
v.module.fail_json(msg="Module failed. Error [%s]." % to_native(e))
if __name__ == '__main__':
main()


@@ -0,0 +1,779 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2021 IBM CORPORATION
# Author(s): Rohit kumar <rohit.kumar6@ibm.com>
# Shilpi Jain <shilpi.jain1@ibm.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: ibm_svc_manage_migration
short_description: This module manages volume migration between clusters on IBM Spectrum Virtualize family storage systems
description:
- Ansible interface to manage the migration commands.
version_added: "1.6.0"
options:
type_of_migration:
description:
- Specifies whether the migration is across pools or across clusters.
choices: [across_pools, across_clusters]
default: across_clusters
type: str
version_added: '1.11.0'
new_pool:
description:
- Specifies the pool to which the volume is to be migrated.
- Valid only when I(type_of_migration=across_pools).
type: str
version_added: '1.11.0'
source_volume:
description:
- Specifies the name of the existing source volume to be used in migration.
- Required when I(state=initiate) or I(state=cleanup) or I(type_of_migration=across_pools).
type: str
target_volume:
description:
- Specifies the name of the volume to be created on the target system.
- Required when I(state=initiate).
type: str
clustername:
description:
- The hostname or management IP of the Spectrum Virtualize storage system.
type: str
required: true
remote_cluster:
description:
- Specifies the name of the remote cluster.
- Required when I(state=initiate).
type: str
domain:
description:
- Domain for the Spectrum Virtualize storage system.
- Valid when hostname is used for the parameter I(clustername).
type: str
username:
description:
- REST API username for the Spectrum Virtualize storage system.
- The parameters I(username) and I(password) are required if not using I(token) to authenticate a user on the local system.
type: str
remote_username:
description:
- REST API username for the partner Spectrum Virtualize storage system.
- The parameters I(remote_username) and I(remote_password) are required if not using I(remote_token) to authenticate a user on the partner system.
- Valid when C(state=initiate).
type: str
password:
description:
- REST API password for the Spectrum Virtualize storage system.
- The parameters I(username) and I(password) are required if not using I(token) to authenticate a user on the local system.
type: str
remote_password:
description:
- REST API password for the partner Spectrum Virtualize storage system.
- The parameters I(remote_username) and I(remote_password) are required if not using I(remote_token) to authenticate a user on the partner system.
- Valid when I(state=initiate).
type: str
relationship_name:
description:
- Name of the migration relationship. Required when I(state=initiate) or I(state=switch).
type: str
state:
description:
- Specifies the different states of the migration process when I(type_of_migration=across_clusters).
- C(initiate), creates a volume on remote cluster; optionally used to replicate hosts, and to create and start a migration relationship.
- C(switch), switches the migration relationship direction allowing write access on the target volume.
- C(cleanup), deletes the source volume and migration relationship after a 'switch'.
choices: [initiate, switch, cleanup]
type: str
token:
description:
- The authentication token to verify a user on the Spectrum Virtualize storage system.
- To generate a token, use the ibm_svc_auth module.
type: str
remote_token:
description:
- The authentication token to verify a user on the partner Spectrum Virtualize storage system.
- To generate a token, use the ibm_svc_auth module.
- Valid when I(state=initiate).
type: str
remote_pool:
description:
- Specifies the pool in which the volume is created on the partner Spectrum Virtualize storage system.
- Required when I(state=initiate).
type: str
validate_certs:
description:
- Validates certification.
default: false
type: bool
remote_validate_certs:
description:
- Validates certification for partner Spectrum Virtualize storage system.
- Valid when I(state=initiate).
default: false
type: bool
replicate_hosts:
description:
- Replicates the hosts mapped to a source volume on the source system, to the target system, and maps the hosts to the target volume. The
user can use ibm_svc_host and ibm_svc_vol_map modules to create and map hosts to the target volume for an
existing migration relationship.
- Valid when I(state=initiate).
default: false
type: bool
log_path:
description:
- Path of debug log file.
type: str
author:
- Rohit Kumar(@rohitk-github)
- Shilpi Jain(@Shilpi-J)
notes:
- This module supports C(check_mode).
- This module supports both volume migration across pools and volume migration across clusters.
- If the user does not specify type_of_migration, the module proceeds with migration across clusters by default.
- When I(type_of_migration=across_pools), the only parameters allowed are I(new_pool) and I(source_volume), along with the cluster credentials.
'''
EXAMPLES = '''
- name: Create a target volume, create a relationship, replicate hosts from the source volume to the target volume, and start the relationship
ibm.spectrum_virtualize.ibm_svc_manage_migration:
source_volume: "src_vol"
target_volume: "target_vol"
clustername: "{{ source_cluster }}"
remote_cluster: "{{ remote_cluster }}"
token: "{{ source_cluster_token }}"
state: initiate
replicate_hosts: true
remote_token: "{{ partner_cluster_token }}"
relationship_name: "migrate_vol"
log_path: /tmp/ansible.log
remote_pool: "{{ remote_pool }}"
- name: Switch replication direction
ibm.spectrum_virtualize.ibm_svc_manage_migration:
relationship_name: "migrate_vol"
clustername: "{{ source_cluster }}"
token: "{{ source_cluster_token }}"
state: switch
log_path: /tmp/ansible.log
- name: Delete source volume and migration relationship
ibm.spectrum_virtualize.ibm_svc_manage_migration:
clustername: "{{ source_cluster }}"
state: cleanup
source_volume: "src_vol"
token: "{{ source_cluster_token }}"
log_path : /tmp/ansible.log
- name: Migrate an existing volume from pool0 to pool1
ibm.spectrum_virtualize.ibm_svc_manage_migration:
clustername: "{{ source_cluster }}"
token: "{{ source_cluster_token }}"
log_path : /tmp/ansible.log
type_of_migration : across_pools
source_volume : vol1
new_pool : pool1
'''
RETURN = '''#'''
from traceback import format_exc
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.ibm.spectrum_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi, svc_argument_spec, get_logger
from ansible.module_utils._text import to_native
class IBMSVCMigrate(object):
def __init__(self):
argument_spec = svc_argument_spec()
argument_spec.update(
dict(
type_of_migration=dict(type='str', required=False, default='across_clusters',
choices=['across_clusters', 'across_pools']),
new_pool=dict(type='str', required=False),
source_volume=dict(type='str', required=False),
target_volume=dict(type='str', required=False),
state=dict(type='str',
choices=['initiate', 'switch', 'cleanup']),
remote_pool=dict(type='str', required=False),
replicate_hosts=dict(type='bool', default=False),
relationship_name=dict(type='str', required=False),
remote_cluster=dict(type='str', required=False),
remote_token=dict(type='str', required=False, no_log=True),
remote_validate_certs=dict(type='bool', default=False),
remote_username=dict(type='str', required=False),
remote_password=dict(type='str', required=False, no_log=True)
)
)
self.module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
self.existing_rel_data = ""
self.source_vdisk_data = ""
self.hosts_iscsi_flag = False
# logging setup
log_path = self.module.params['log_path']
log = get_logger(self.__class__.__name__, log_path)
self.log = log.info
# Required when migration across clusters
self.state = self.module.params['state']
# Required when migration across pools
self.new_pool = self.module.params['new_pool']
# Optional
self.type_of_migration = self.module.params['type_of_migration']
self.source_volume = self.module.params['source_volume']
self.remote_pool = self.module.params['remote_pool']
self.target_volume = self.module.params['target_volume']
self.relationship_name = self.module.params['relationship_name']
self.remote_username = self.module.params['remote_username']
self.replicate_hosts = self.module.params['replicate_hosts']
self.remote_password = self.module.params['remote_password']
self.remote_token = self.module.params['remote_token']
self.remote_cluster = self.module.params['remote_cluster']
self.remote_validate_certs = self.module.params['remote_validate_certs']
self.restapi = IBMSVCRestApi(
module=self.module,
clustername=self.module.params['clustername'],
domain=self.module.params['domain'],
username=self.module.params['username'],
password=self.module.params['password'],
validate_certs=self.module.params['validate_certs'],
log_path=log_path,
token=self.module.params['token']
)
def get_existing_vdisk(self):
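# Fetch the source volume details from the local system and, when a target
# volume is named, the target volume details from the partner system.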
self.log("Entering function get_existing_vdisk")
cmd = 'lsvdisk'
cmdargs = {}
cmdopts = {'bytes': True}
cmdargs = [self.source_volume]
remote_vdisk_data = ""
existing_vdisk_data = self.restapi.svc_obj_info(cmd, cmdopts, cmdargs)
if self.target_volume:
cmdargs = [self.target_volume]
remote_restapi = self.construct_remote_rest()
remote_vdisk_data = remote_restapi.svc_obj_info(cmd, cmdopts, cmdargs)
return existing_vdisk_data, remote_vdisk_data
def basic_checks(self):
self.log("Entering function basic_checks()")
valid_params = {}
valid_params['initiate'] = ['source_volume', 'remote_cluster', 'target_volume', 'replicate_hosts',
'remote_username', 'remote_password', 'relationship_name',
'remote_token', 'remote_pool', 'remote_validate_certs']
valid_params['switch'] = ['relationship_name']
valid_params['cleanup'] = ['source_volume']
param_list = set(valid_params['initiate'] + valid_params['switch'] + valid_params['cleanup'])
# Check for missing mandatory parameter
for param in valid_params[self.state]:
param_value = getattr(self, param)
if not param_value:
if self.state == "initiate":
if param == 'remote_validate_certs' or param == 'replicate_hosts':
continue
if (param == 'remote_username' or param == 'remote_password'):
if not self.remote_username or not self.remote_password:
if self.remote_token:
continue
else:
self.module.fail_json(msg="You must pass in either pre-acquired remote_token or "
"remote_username/remote_password to generate new token.")
elif param == 'remote_token':
if (self.remote_username and self.remote_password):
if not self.remote_token:
continue
self.module.fail_json(msg="Missing mandatory parameter [%s]." % param)
# Check for invalid parameters
for param in param_list:
if self.state == 'initiate':
if getattr(self, param):
if param not in valid_params['initiate']:
self.module.fail_json(msg="Invalid parameter [%s] for state 'initiate'" % param)
if self.state == 'switch':
if getattr(self, param):
if param not in valid_params['switch']:
self.module.fail_json(msg="Invalid parameter [%s] for state 'switch'" % param)
elif self.state == 'cleanup':
if getattr(self, param):
if param not in valid_params['cleanup']:
self.module.fail_json(msg="Invalid parameter [%s] for state 'cleanup'" % param)
def get_source_hosts(self):
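# Return the host mappings of the source volume ('lsvdiskhostmap') on the local system.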
self.log("Entering function get_source_hosts")
cmd = 'lsvdiskhostmap'
cmdargs = {}
cmdopts = {}
cmdargs = [self.source_volume]
sourcevolume_hosts = self.restapi.svc_obj_info(cmd, cmdopts, cmdargs)
return sourcevolume_hosts
def replicate_source_hosts(self, hosts_data):
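# For each host mapped to the source volume, read its FC WWPNs and iSCSI names
# from 'lshost' and recreate the host definitions on the partner system before
# mapping them to the target volume.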
self.log("Entering function replicate_source_hosts()")
merged_result = []
hosts_wwpn = {}
hosts_iscsi = {}
host_list = []
if self.module.check_mode:
self.changed = True
return
self.log("creating vdiskhostmaps on target system")
if isinstance(hosts_data, list):
for d in hosts_data:
merged_result.append(d)
elif hosts_data:
merged_result = [hosts_data]
for host in merged_result:
host_list.append(host['host_name'])
for host in host_list:
host_wwpn_list = []
host_iscsi_list = []
self.log("for host %s", host)
data = self.restapi.svc_obj_info(cmd='lshost', cmdopts=None, cmdargs=[host])
nodes_data = data['nodes']
for node in nodes_data:
if 'WWPN' in node.keys():
host_wwpn_list.append(node['WWPN'])
hosts_wwpn[host] = host_wwpn_list
elif 'iscsi_name' in node.keys():
host_iscsi_list.append(node['iscsi_name'])
hosts_iscsi[host] = host_iscsi_list
if hosts_wwpn or hosts_iscsi:
self.create_remote_hosts(hosts_wwpn, hosts_iscsi)
def create_remote_hosts(self, hosts_wwpn, hosts_iscsi):
self.log("Entering function create_remote_hosts()")
if self.module.check_mode:
self.changed = True
return
# Make command
remote_hosts_list = []
source_host_list = []
remote_hosts_list = self.return_remote_hosts()
if hosts_iscsi:
for host, iscsi_vals in hosts_iscsi.items():
source_host_list.append(host)
if hosts_wwpn:
for host, wwpn_vals in hosts_wwpn.items():
source_host_list.append(host)
cmd = 'mkhost'
for host, wwpn in hosts_wwpn.items():
if host not in remote_hosts_list:
cmdopts = {'name': host, 'force': True}
wwpn = ':'.join([str(elem) for elem in wwpn])
cmdopts['fcwwpn'] = wwpn
remote_restapi = self.construct_remote_rest()
remote_restapi.svc_run_command(cmd, cmdopts, cmdargs=None)
for host, iscsi in hosts_iscsi.items():
if host not in remote_hosts_list:
cmdopts = {'name': host, 'force': True}
iscsi = ','.join([str(elem) for elem in iscsi])
cmdopts['iscsiname'] = iscsi
remote_restapi = self.construct_remote_rest()
remote_restapi.svc_run_command(cmd, cmdopts, cmdargs=None)
if source_host_list:
self.map_host_vol_remote(source_host_list)
def map_host_vol_remote(self, host_list):
remote_restapi = self.construct_remote_rest()
if self.module.check_mode:
self.changed = True
return
for host in host_list:
# Run command
cmd = 'mkvdiskhostmap'
cmdopts = {'force': True}
cmdopts['host'] = host
cmdargs = [self.target_volume]
result = remote_restapi.svc_run_command(cmd, cmdopts, cmdargs)
self.log("create vdiskhostmap result %s", result)
if 'message' in result:
self.changed = True
self.log("create vdiskhostmap result message %s", result['message'])
else:
self.module.fail_json(msg="Failed to create vdiskhostmap.")
def vdisk_create(self, data):
if not self.remote_pool:
self.module.fail_json(msg="You must pass in "
"remote_pool to the module.")
if self.module.check_mode:
self.changed = True
return
self.log("creating vdisk '%s'", self.source_volume)
size = int(data[0]['capacity'])
# Make command
cmd = 'mkvolume'
cmdopts = {}
if self.remote_pool:
cmdopts['pool'] = self.remote_pool
cmdopts['name'] = self.target_volume
cmdopts['size'] = size
cmdopts['unit'] = "b"
self.log("creating vdisk command %s opts %s", cmd, cmdopts)
# Run command
remote_restapi = self.construct_remote_rest()
result = remote_restapi.svc_run_command(cmd, cmdopts, cmdargs=None)
self.log("create vdisk result %s", result)
if 'message' in result:
self.changed = True
self.log("create vdisk result message %s", result['message'])
else:
self.module.fail_json(msg="Failed to create volume [%s]" % self.source_volume)
def verify_remote_volume_mapping(self):
self.log("Entering function verify_remote_volume_mapping")
cmd = 'lsvdiskhostmap'
cmdargs = {}
cmdopts = {}
cmdargs = [self.target_volume]
remote_hostmap_data = ""
remote_restapi = self.construct_remote_rest()
remote_hostmap_data = remote_restapi.svc_obj_info(cmd, cmdopts, cmdargs)
if remote_hostmap_data:
self.module.fail_json(msg="The target volume has hostmappings, Migration relationship cannot be created.")
def return_remote_hosts(self):
self.log("Entering function return_remote_hosts")
cmd = 'lshost'
remote_hosts = []
cmdopts = {}
cmdargs = None
remote_hosts_data = []
remote_restapi = self.construct_remote_rest()
remote_hosts_data = remote_restapi.svc_obj_info(cmd, cmdopts, cmdargs)
self.log(len(remote_hosts_data))
for host in remote_hosts_data:
remote_hosts.append(host['name'])
return remote_hosts
def verify_target(self):
self.log("Entering function verify_target()")
source_data, target_data = self.get_existing_vdisk()
if source_data:
if source_data[0]['RC_name']:
self.module.fail_json(msg="Source Volume [%s] is already in a relationship." % self.source_volume)
if target_data:
if target_data[0]['RC_name']:
self.module.fail_json(msg="Target Volume [%s] is already in a relationship." % self.target_volume)
if target_data[0]['mdisk_grp_name'] != self.remote_pool:
self.module.fail_json(msg="Target Volume [%s] exists on a different pool." % self.target_volume)
if not source_data:
self.module.fail_json(msg="Source Volume [%s] does not exist." % self.source_volume)
elif source_data and target_data:
source_size = int(source_data[0]['capacity'])
remote_size = int(target_data[0]['capacity'])
if source_size != remote_size:
self.module.fail_json(msg="Remote Volume size is different than that of source volume.")
else:
self.log("Target volume already exists, verifying volume mappings now..")
self.verify_remote_volume_mapping()
elif source_data and not target_data:
self.vdisk_create(source_data)
self.log("Target volume successfully created")
self.changed = True
def discover_partner_system(self):
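# Look up the partnership with the remote cluster via 'lspartnership'; fail if it
# does not exist or points at the local system, otherwise return the partner
# console IP without the port suffix.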
cmd = 'lspartnership'
cmdopts = {}
cmdargs = [self.remote_cluster]
partnership_data = self.restapi.svc_obj_info(cmd, cmdopts, cmdargs)
if partnership_data:
system_location = partnership_data['location']
if system_location == 'local':
self.module.fail_json(msg="The relationship could not be created as migration relationships are only allowed to be created to a remote system.")
self.partnership_exists = True
remote_socket = partnership_data['console_IP']
return remote_socket.split(':')[0]
else:
msg = "The partnership with remote cluster [%s] does not exist." % self.remote_cluster
self.module.fail_json(msg=msg)
def construct_remote_rest(self):
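# Build a second REST API client against the partner system, authenticated with
# the remote_* credentials or token supplied to the module.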
remote_ip = self.discover_partner_system()
self.remote_restapi = IBMSVCRestApi(
module=self.module,
domain='',
clustername=remote_ip,
username=self.module.params['remote_username'],
password=self.module.params['remote_password'],
validate_certs=self.module.params['remote_validate_certs'],
log_path=self.module.params['log_path'],
token=self.module.params['remote_token']
)
return self.remote_restapi
def create_relationship(self):
if self.module.check_mode:
self.changed = True
return
self.log("Creating remote copy '%s'", self.relationship_name)
# Make command
cmd = 'mkrcrelationship'
cmdopts = {}
if self.remote_cluster:
cmdopts['cluster'] = self.remote_cluster
if self.source_volume:
cmdopts['master'] = self.source_volume
cmdopts['aux'] = self.target_volume
cmdopts['name'] = self.relationship_name
cmdopts['migration'] = True
# Run command
self.log("Command %s opts %s", cmd, cmdopts)
if not self.existing_rel_data:
result = self.restapi.svc_run_command(cmd, cmdopts, cmdargs=None)
self.log("create remote copy result %s", result)
if 'message' in result:
self.changed = True
self.log("Succeeded to create remote copy result message %s", result['message'])
else:
msg = "Failed to create migration relationship [%s]" % self.relationship_name
self.module.fail_json(msg=msg)
def source_vol_relationship(self, volume):
"""
Check if the source volume is associated to any migration relationship.
Returns:
None if no matching instances
"""
source_vdisk_data, target_vdisk_data = self.get_existing_vdisk()
if not source_vdisk_data:
msg = "Source volume [%s] does not exist" % self.source_volume
self.module.exit_json(msg=msg)
self.log('Trying to get the remote copy relationship')
relationship_name = source_vdisk_data[0]['RC_name']
if not relationship_name:
self.module.fail_json(msg="Volume [%s] cannot be deleted. No Migration relationship is configured with the volume." % self.source_volume)
existing_rel_data = self.restapi.svc_obj_info(cmd='lsrcrelationship', cmdopts=None, cmdargs=[relationship_name])
if existing_rel_data['copy_type'] != 'migration':
self.module.fail_json(msg="Volume [%s] cannot be deleted. No Migration relationship is configured with the volume." % self.source_volume)
def existing_rc(self):
"""
Find the relationships such as Metro Mirror, Global Mirror relationships visible to the system.
Returns:
None if no matching instances or a list including all the matching
instances
"""
self.log('Trying to get the remote copy relationship %s', self.relationship_name)
self.existing_rel_data = self.restapi.svc_obj_info(cmd='lsrcrelationship', cmdopts=None, cmdargs=[self.relationship_name])
return self.existing_rel_data
def verify_existing_rel(self, rel_data):
if self.existing_rel_data:
master_volume, aux_volume = rel_data['master_vdisk_name'], rel_data['aux_vdisk_name']
primary, remotecluster, rel_type = rel_data['primary'], rel_data['aux_cluster_name'], rel_data['copy_type']
if rel_type != 'migration':
self.module.fail_json(msg="Remote Copy relationship [%s] already exists and is not a migration relationship" % self.relationship_name)
if self.source_volume != master_volume:
self.module.fail_json(msg="Migration relationship [%s] already exists with a different source volume" % self.relationship_name)
if self.target_volume != aux_volume:
self.module.fail_json(msg="Migration relationship [%s] already exists with a different target volume" % self.relationship_name)
if primary != 'master':
self.module.fail_json(msg="Migration relationship [%s] replication direction is incorrect" % self.relationship_name)
if remotecluster != self.remote_cluster:
self.module.fail_json(msg="Migration relationship [%s] is configured with a different partner system" % self.relationship_name)
def start_relationship(self):
"""Start the migration relationship copy process."""
cmdopts = {}
if self.module.check_mode:
self.changed = True
return
result = self.restapi.svc_run_command(cmd='startrcrelationship', cmdopts=cmdopts, cmdargs=[self.relationship_name])
if result == '':
self.changed = True
self.log("succeeded to start the remote copy %s", self.relationship_name)
elif 'message' in result:
self.changed = True
self.log("start the rcrelationship %s with result message %s", self.relationship_name, result['message'])
else:
msg = "Failed to start the rcrelationship [%s]" % self.relationship_name
self.module.fail_json(msg=msg)
def switch(self):
"""Switch the replication direction."""
cmdopts = {}
cmdopts['primary'] = 'aux'
if self.existing_rel_data:
rel_type = self.existing_rel_data['copy_type']
if rel_type != 'migration':
self.module.fail_json(msg="Remote Copy relationship [%s] is not a migration relationship." % self.relationship_name)
if self.module.check_mode:
self.changed = True
return
result = self.restapi.svc_run_command(cmd='switchrcrelationship', cmdopts=cmdopts, cmdargs=[self.relationship_name])
self.log("switch the rcrelationship %s with result %s", self.relationship_name, result)
if result == '':
self.changed = True
self.log("succeeded to switch the remote copy %s", self.relationship_name)
elif 'message' in result:
self.changed = True
self.log("switch the rcrelationship %s with result message %s", self.relationship_name, result['message'])
else:
msg = "Failed to switch the rcrelationship [%s]" % self.relationship_name
self.module.fail_json(msg=msg)
def delete(self):
"""Use the rmvolume command to delete the source volume and the existing migration relationship."""
if self.module.check_mode:
self.changed = True
return
cmd = 'rmvolume'
cmdopts = {}
cmdopts['removehostmappings'] = True
cmdargs = [self.source_volume]
result = self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
# Any error will have been raised in svc_run_command
# Command does not output anything when successful.
if result == '':
self.changed = True
self.log("succeeded to delete the source volume %s and associated host mappings and migration relationship", self.source_volume)
self.changed = True
elif 'message' in result:
self.changed = True
self.log("delete the source volume %s with result message %s",
self.source_volume, result['message'])
else:
self.module.fail_json(
msg="Failed to delete the volume [%s]" % self.source_volume)
def basic_checks_migrate_vdisk(self):
self.log("Entering function basic_checks_migrate_vdisk()")
invalid_params = {}
# Check for missing parameters
missing = [item[0] for item in [('new_pool', self.new_pool), ('source_volume', self.source_volume)] if not item[1]]
if missing:
self.module.fail_json(
msg='Missing mandatory parameter: [{0}] for migration across pools'.format(', '.join(missing))
)
invalid_params['across_pools'] = ['state', 'relationship_name', 'remote_cluster', 'remote_username',
'remote_password', 'remote_token', 'remote_pool', 'remote_validate_certs',
'replicate_hosts']
param_list = set(invalid_params['across_pools'])
# Check for invalid parameters
for param in param_list:
if self.type_of_migration == 'across_pools':
if getattr(self, param):
if param in invalid_params['across_pools']:
self.module.fail_json(msg="Invalid parameter [%s] for volume migration 'across_pools'" % param)
def migrate_pools(self):
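# Migrate the source volume to new_pool with 'migratevdisk'; exit without change
# when the volume already resides in the requested pool.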
self.basic_checks_migrate_vdisk()
if self.module.check_mode:
self.changed = True
return
source_data, target_data = self.get_existing_vdisk()
if not source_data:
msg = "Source volume [%s] does not exist" % self.source_volume
self.module.fail_json(msg=msg)
elif source_data[0]['mdisk_grp_name'] != self.new_pool:
cmd = 'migratevdisk'
cmdopts = {}
cmdopts['mdiskgrp'] = self.new_pool
cmdopts['vdisk'] = self.source_volume
self.log("Command %s opts %s", cmd, cmdopts)
result = self.restapi.svc_run_command(cmd, cmdopts, cmdargs=None)
if result == '':
self.changed = True
else:
self.module.fail_json(msg="Failed to migrate volume in different pool.")
else:
msg = "No modifications done. New pool [%s] is same" % self.new_pool
self.module.exit_json(msg=msg, changed=False)
def apply(self):
changed = False
msg = None
if self.type_of_migration == 'across_pools':
self.migrate_pools()
msg = "Source Volume migrated successfully to new pool [%s]." % self.new_pool
changed = True
else:
self.basic_checks()
if self.state == 'initiate' or self.state == 'switch':
existing_rc_data = self.existing_rc()
if not existing_rc_data:
if self.state == 'initiate':
self.verify_target()
self.create_relationship()
if self.replicate_hosts:
hosts_data = self.get_source_hosts()
self.replicate_source_hosts(hosts_data)
self.start_relationship()
changed = True
msg = "Migration Relationship [%s] has been started." % self.relationship_name
elif self.state == 'switch':
msg = "Relationship [%s] does not exist." % self.relationship_name
changed = False
self.module.fail_json(msg=msg)
elif self.state == 'initiate':
self.verify_existing_rel(existing_rc_data)
self.start_relationship()
msg = "Migration Relationship [%s] has been started." % self.relationship_name
changed = True
elif self.state == 'switch':
self.switch()
msg = "Migration Relationship [%s] successfully switched." % self.relationship_name
changed = True
elif self.state == 'cleanup':
self.source_vol_relationship(self.source_volume)
self.delete()
msg = "Source Volume [%s] deleted successfully." % self.source_volume
changed = True
if self.module.check_mode:
msg = "skipping changes due to check mode."
self.module.exit_json(msg=msg, changed=changed)
def main():
v = IBMSVCMigrate()
try:
v.apply()
except Exception as e:
v.log("Exception in apply(): \n%s", format_exc())
v.module.fail_json(msg="Module failed. Error [%s]." % to_native(e))
if __name__ == '__main__':
main()


@@ -0,0 +1,757 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2021 IBM CORPORATION
# Author(s): Rohit Kumar <rohit.kumar6@ibm.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: ibm_svc_manage_mirrored_volume
short_description: This module manages mirrored volumes on IBM Spectrum Virtualize
family storage systems
description:
- Ansible interface to manage 'mkvolume', 'addvolumecopy', 'rmvolumecopy', and 'rmvolume' volume commands.
version_added: "1.4.0"
options:
name:
description:
- Specifies the name to assign to the new volume.
required: true
type: str
state:
description:
- Creates (C(present)) or removes (C(absent)) a mirrored volume.
choices: [ absent, present ]
required: true
type: str
clustername:
description:
- The hostname or management IP of the Spectrum Virtualize storage system.
type: str
required: true
domain:
description:
- Domain for the Spectrum Virtualize storage system.
- Valid when hostname is used for the parameter I(clustername).
type: str
username:
description:
- REST API username for the Spectrum Virtualize storage system.
- The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
type: str
password:
description:
- REST API password for the Spectrum Virtualize storage system.
- The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
type: str
token:
description:
- The authentication token to verify a user on the Spectrum Virtualize storage system.
- To generate a token, use the ibm_svc_auth module.
type: str
version_added: '1.5.0'
poolA:
description:
- Specifies the name of first storage pool to be used when creating a mirrored volume.
type: str
poolB:
description:
- Specifies the name of second storage pool to be used when creating a mirrored volume.
type: str
type:
description:
- Specifies the desired volume type.
- When the type is C(local hyperswap), a HyperSwap volume gets created.
- When the type is C(standard) and values for I(poolA) and I(poolB) arguments are also specified,
a "standard mirror" volume gets created.
- If a "standard" mirrored volume exists and either I(poolA) or I(poolB)
is specified, the mirrored volume gets converted to a standard volume.
choices: [ local hyperswap, standard ]
type: str
thin:
description:
- Specifies if the volume to be created is thin-provisioned.
type: bool
compressed:
description:
- Specifies if the volume to be created is compressed.
type: bool
deduplicated:
description:
- Specifies if the volume to be created is deduplicated.
type: bool
grainsize:
description:
- Specifies the grain size (in KB) to use when
creating the HyperSwap volume.
type: str
rsize:
description:
- Specifies the rsize (buffersize) in %. Defines how much physical space
is initially allocated to the thin-provisioned or compressed volume.
type: str
size:
description:
- Specifies the size of mirrored volume in MB. This can also be used
to resize a mirrored volume. When resizing, only mandatory parameters can
be passed.
type: str
validate_certs:
description:
- Validates certification.
default: false
type: bool
log_path:
description:
- Path of debug log file.
type: str
author:
- Rohit Kumar(@rohitk-github)
notes:
- This module supports C(check_mode).
'''
EXAMPLES = '''
- name: Create a HyperSwap volume
ibm.spectrum_virtualize.ibm_svc_manage_mirrored_volume:
clustername: "{{clustername}}"
username: "{{username}}"
password: "{{password}}"
log_path: /tmp/playbook.debug
type: "local hyperswap"
name: "vol1"
state: present
poolA: "pool1"
poolB: "pool2"
size: "1024"
- name: Create a thin-provisioned HyperSwap volume
ibm.spectrum_virtualize.ibm_svc_manage_mirrored_volume:
clustername: "{{clustername}}"
username: "{{username}}"
password: "{{password}}"
log_path: /tmp/playbook.debug
type: "local hyperswap"
name: "vol2"
state: present
poolA: "pool1"
poolB: "pool2"
size: "1024"
thin: true
- name: Delete a mirrored volume
ibm.spectrum_virtualize.ibm_svc_manage_mirrored_volume:
clustername: "{{clustername}}"
username: "{{username}}"
password: "{{password}}"
log_path: /tmp/playbook.debug
name: "vol2"
state: absent
- name: Create a standard mirror volume
block:
- name: Create Volume
ibm.spectrum_virtualize.ibm_svc_manage_mirrored_volume:
clustername: "{{clustername}}"
username: "{{username}}"
password: "{{password}}"
log_path: /tmp/playbook.debug
name: "vol4"
state: present
type: "standard"
poolA: "pool1"
poolB: "pool3"
- name: Resize an existing mirrored volume
block:
- name: Resize an existing mirrored volume
ibm.spectrum_virtualize.ibm_svc_manage_mirrored_volume:
clustername: "{{clustername}}"
username: "{{username}}"
password: "{{password}}"
log_path: /tmp/playbook.debug
name: "vol1"
state: present
size: "{{new_size}}"
'''
RETURN = '''#'''
from traceback import format_exc
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.ibm.spectrum_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi, svc_argument_spec, get_logger
from ansible.module_utils._text import to_native
class IBMSVCvolume(object):
def __init__(self):
argument_spec = svc_argument_spec()
argument_spec.update(
dict(
name=dict(type='str', required=True),
state=dict(type='str', required=True, choices=['absent',
'present']),
poolA=dict(type='str', required=False),
poolB=dict(type='str', required=False),
size=dict(type='str', required=False),
thin=dict(type='bool', required=False),
type=dict(type='str', required=False, choices=['local hyperswap', 'standard']),
grainsize=dict(type='str', required=False),
rsize=dict(type='str', required=False),
compressed=dict(type='bool', required=False),
deduplicated=dict(type='bool', required=False)
)
)
self.module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
self.vdisk_type = ""
self.discovered_poolA = ""
self.discovered_poolB = ""
self.discovered_standard_vol_pool = ""
self.poolA_data = ""
self.poolB_data = ""
self.isdrp = False
self.expand_flag = False
self.shrink_flag = False
# logging setup
log_path = self.module.params.get('log_path')
log = get_logger(self.__class__.__name__, log_path)
self.log = log.info
# Required
self.name = self.module.params.get('name')
self.state = self.module.params.get('state')
if not self.name:
self.module.fail_json(msg="Missing mandatory parameter: name")
if not self.state:
self.module.fail_json(msg="Missing mandatory parameter: state")
# Optional
self.poolA = self.module.params.get('poolA')
self.poolB = self.module.params.get('poolB')
self.size = self.module.params.get('size')
self.type = self.module.params.get('type')
self.compressed = self.module.params.get('compressed')
self.thin = self.module.params.get('thin')
self.deduplicated = self.module.params.get('deduplicated')
self.rsize = self.module.params.get('rsize')
self.grainsize = self.module.params.get('grainsize')
self.restapi = IBMSVCRestApi(
module=self.module,
clustername=self.module.params.get('clustername'),
domain=self.module.params.get('domain'),
username=self.module.params.get('username'),
password=self.module.params.get('password'),
validate_certs=self.module.params.get('validate_certs'),
log_path=log_path,
token=self.module.params['token']
)
def get_existing_vdisk(self):
self.log("Entering function get_existing_vdisk")
cmd = 'lsvdisk'
cmdargs = {}
cmdopts = {'bytes': True}
cmdargs = [self.name]
existing_vdisk_data = self.restapi.svc_obj_info(cmd, cmdopts, cmdargs)
return existing_vdisk_data
def basic_checks(self, data):
self.log("Entering function basic_checks")
if self.poolA:
self.poolA_data = self.restapi.svc_obj_info(cmd='lsmdiskgrp', cmdopts=None, cmdargs=[self.poolA])
if not self.poolA_data:
self.module.fail_json(msg="PoolA does not exist")
if self.poolB:
self.poolB_data = self.restapi.svc_obj_info(cmd='lsmdiskgrp', cmdopts=None, cmdargs=[self.poolB])
if not self.poolB_data:
self.module.fail_json(msg="PoolB does not exist")
if self.state == "present" and not self.type and not self.size:
self.module.fail_json(msg="missing required argument: type")
if self.poolA and self.poolB:
if self.poolA == self.poolB:
self.module.fail_json(msg="poolA and poolB cannot be same")
siteA, siteB = self.discover_site_from_pools()
if siteA != siteB and self.type == "standard":
self.module.fail_json(msg="To create Standard Mirrored volume, provide pools belonging to same site.")
if not self.poolA and not self.poolB and self.state == "present" and not self.size:
self.module.fail_json(msg="Both poolA and poolB cannot be empty")
if self.type == "local hyperswap" and self.state != 'absent':
if not self.poolA or not self.poolB:
self.module.fail_json(msg="Both poolA and poolB need to be passed when type is 'local hyperswap'")
def discover_vdisk_type(self, data):
# Discover the vdisk type. this function is called if the volume already exists.
self.log("Entering function discover_vdisk_type")
is_std_mirrored_vol = False
is_hs_vol = False
if data[0]['type'] == "many":
is_std_mirrored_vol = True
self.discovered_poolA = data[1]['mdisk_grp_name']
self.discovered_poolB = data[2]['mdisk_grp_name']
self.log("The discovered standard mirrored volume \"%s\" belongs to \
pools \"%s\" and \"%s\"", self.name, self.discovered_poolA, self.discovered_poolB)
relationship_name = data[0]['RC_name']
if relationship_name:
rel_data = self.restapi.svc_obj_info(cmd='lsrcrelationship', cmdopts=None, cmdargs=[relationship_name])
if rel_data['copy_type'] == "activeactive":
is_hs_vol = True
if is_hs_vol:
master_vdisk = rel_data['master_vdisk_name']
aux_vdisk = rel_data['aux_vdisk_name']
master_vdisk_data = self.restapi.svc_obj_info(cmd='lsvdisk', cmdopts=None, cmdargs=[master_vdisk])
aux_vdisk_data = self.restapi.svc_obj_info(cmd='lsvdisk', cmdopts=None, cmdargs=[aux_vdisk])
if is_std_mirrored_vol:
self.discovered_poolA = master_vdisk_data[1]['mdisk_grp_name']
self.discovered_poolB = aux_vdisk_data[1]['mdisk_grp_name']
self.log("The discovered mixed volume \"%s\" belongs to pools \"%s\" and \"%s\"", self.name, self.discovered_poolA, self.discovered_poolB)
else:
self.discovered_poolA = master_vdisk_data[0]['mdisk_grp_name']
self.discovered_poolB = aux_vdisk_data[0]['mdisk_grp_name']
self.log("The discovered HyperSwap volume \"%s\" belongs to pools\
\"%s\" and \"%s\"", self.name, self.discovered_poolA, self.discovered_poolB)
if is_std_mirrored_vol and is_hs_vol:
self.module.fail_json(msg="Unsupported Configuration: Both HyperSwap and Standard Mirror \
are configured on this volume")
elif is_hs_vol:
vdisk_type = "local hyperswap"
elif is_std_mirrored_vol:
vdisk_type = "standard mirror"
if not is_std_mirrored_vol and not is_hs_vol:
mdisk_grp_name = data[0]['mdisk_grp_name']
self.discovered_standard_vol_pool = mdisk_grp_name
vdisk_type = "standard"
self.log("The standard volume %s belongs to pool \"%s\"", self.name, self.discovered_standard_vol_pool)
return vdisk_type
def discover_site_from_pools(self):
self.log("Entering function discover_site_from_pools")
poolA_site = self.poolA_data['site_name']
poolB_site = self.poolB_data['site_name']
return poolA_site, poolB_site
def vdisk_probe(self, data):
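# Determine which operations reconcile the existing volume with the requested
# parameters: 'resizevolume' for a capacity change, 'addvolumecopy'/'addvdiskcopy'
# to add a copy to a standard volume, or 'rmvolumecopy' when only one pool is
# supplied for an existing mirrored volume.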
self.log("Entering function vdisk_probe")
props = []
resizevolume_flag = False
if self.type == "local hyperswap" and self.vdisk_type == "standard mirror":
self.module.fail_json(msg="You cannot \
update the topology from standard mirror to HyperSwap")
if (self.vdisk_type == "local hyperswap" or self.vdisk_type == "standard mirror") and self.size:
size_in_bytes = int(self.size) * 1024 * 1024
existing_size = int(data[0]['capacity'])
if size_in_bytes != existing_size:
resizevolume_flag = True
props += ['resizevolume']
if size_in_bytes > existing_size:
self.changebysize = size_in_bytes - existing_size
self.expand_flag = True
elif size_in_bytes < existing_size:
self.changebysize = existing_size - size_in_bytes
self.shrink_flag = True
if self.poolA and self.poolB:
if self.vdisk_type == "local hyperswap" and self.type == "standard":
self.module.fail_json(msg="HyperSwap Volume cannot be converted to standard mirror")
if self.vdisk_type == "standard mirror" or self.vdisk_type == "local hyperswap":
if (self.poolA == self.discovered_poolA or self.poolA == self.discovered_poolB)\
and (self.poolB == self.discovered_poolA or self.poolB == self.discovered_poolB) and not resizevolume_flag:
return props
elif not resizevolume_flag:
self.module.fail_json(msg="Pools for Standard Mirror or HyperSwap volume cannot be updated")
elif self.vdisk_type == "standard" and self.type == "local hyperswap":
# input poolA or poolB must belong to given Volume
if self.poolA == self.discovered_standard_vol_pool or self.poolB == self.discovered_standard_vol_pool:
props += ['addvolumecopy']
else:
self.module.fail_json(msg="One of the input pools must belong to the Volume")
elif self.vdisk_type == "standard" and self.type == "standard":
if self.poolA == self.discovered_standard_vol_pool or self.poolB == self.discovered_standard_vol_pool:
props += ['addvdiskcopy']
else:
self.module.fail_json(msg="One of the input pools must belong to the Volume")
elif self.vdisk_type and not self.type:
self.module.fail_json(msg="missing required argument: type")
elif not self.poolA or not self.poolB:
if self.vdisk_type == "standard":
if self.poolA == self.discovered_standard_vol_pool or self.poolB == self.discovered_standard_vol_pool:
self.log("Standard Volume already exists, no modifications done")
return props
if self.poolA:
if self.poolA == self.discovered_poolA or self.poolA == self.discovered_poolB:
props += ['rmvolumecopy']
else:
self.module.fail_json(msg="One of the input pools must belong to the Volume")
elif self.poolB:
if self.poolB == self.discovered_poolA or self.poolB == self.discovered_poolB:
props += ['rmvolumecopy']
else:
self.module.fail_json(msg="One of the input pools must belong to the Volume")
if (not self.poolA or not self.poolB) and not self.size:
if (self.system_topology == "hyperswap" and self.type == "local hyperswap"):
self.module.fail_json(msg="Type must be standard if either PoolA or PoolB is not specified.")
return props
def resizevolume(self):
if self.thin is not None or self.deduplicated is not None or self.rsize is not None or self.grainsize is not None \
or self.compressed is not None or self.poolA is not None or self.poolB is not None or self.type is not None:
self.module.fail_json(msg="Volume already exists, Parameter 'thin', 'deduplicated', 'rsize', 'grainsize', 'compressed' \
'PoolA', 'PoolB' or 'type' cannot be passed while resizing the volume.")
if self.module.check_mode:
self.changed = True
return
cmd = ""
cmdopts = {}
if self.vdisk_type == "local hyperswap" and self.expand_flag:
cmd = "expandvolume"
elif self.vdisk_type == "local hyperswap" and self.shrink_flag:
self.module.fail_json(msg="Size of a HyperSwap Volume cannot be shrinked")
elif self.vdisk_type == "standard mirror" and self.expand_flag:
cmd = "expandvdisksize"
elif self.vdisk_type == "standard mirror" and self.shrink_flag:
cmd = "shrinkvdisksize"
elif self.vdisk_type != "standard mirror" or self.vdisk_type != "local hyperswap":
self.module.fail_json(msg="The volume is not a mirror volume, Please use ibm_svc_vdisk module for resizing standard volumes")
cmdopts["size"] = str(self.changebysize)
cmdopts["unit"] = "b"
self.restapi.svc_run_command(cmd, cmdopts, cmdargs=[self.name])
self.changed = True
def volume_create(self):
self.log("Entering function volume_create")
if not self.size:
self.module.fail_json(msg="You must pass in size to the module.")
if not self.type:
self.module.fail_json(msg="You must pass type to the module.")
self.log("creating Volume '%s'", self.name)
if self.module.check_mode:
self.changed = True
return
# Make command
cmd = 'mkvolume'
cmdopts = {}
if self.poolA and self.poolB:
cmdopts['pool'] = self.poolA + ":" + self.poolB
if self.size:
cmdopts['size'] = self.size
cmdopts['unit'] = "mb"
if self.grainsize:
cmdopts['grainsize'] = self.grainsize
if self.thin and self.rsize:
cmdopts['thin'] = self.thin
cmdopts['buffersize'] = self.rsize
elif self.thin:
cmdopts['thin'] = self.thin
elif self.rsize and not self.thin:
self.module.fail_json(msg="To configure 'rsize', parameter 'thin' should be passed and the value should be 'true'.")
if self.compressed:
cmdopts['compressed'] = self.compressed
if self.thin:
cmdopts['thin'] = self.thin
if self.deduplicated:
cmdopts['deduplicated'] = self.deduplicated
cmdopts['name'] = self.name
self.log("creating volume command %s opts %s", cmd, cmdopts)
# Run command
result = self.restapi.svc_run_command(cmd, cmdopts, cmdargs=None)
self.log("create volume result %s", result)
if 'message' in result:
self.changed = True
self.log("create volume result message %s", result['message'])
else:
self.module.fail_json(
msg="Failed to create volume [%s]" % self.name)
def vdisk_create(self):
self.log("Entering function vdisk_create")
if not self.size:
self.module.fail_json(msg="You must pass in size to the module.")
if not self.type:
self.module.fail_json(msg="You must pass type to the module.")
self.log("creating Volume '%s'", self.name)
# Make command
cmd = 'mkvdisk'
cmdopts = {}
if self.poolA and self.poolB:
cmdopts['mdiskgrp'] = self.poolA + ":" + self.poolB
if self.size:
cmdopts['size'] = self.size
cmdopts['unit'] = "mb"
if self.compressed:
cmdopts['compressed'] = self.compressed
if self.thin and self.rsize:
cmdopts['rsize'] = self.rsize
elif self.thin:
cmdopts['rsize'] = "2%"
elif self.rsize and not self.thin:
self.module.fail_json(msg="To configure 'rsize', parameter 'thin' should be passed and the value should be 'true.'")
if self.grainsize:
cmdopts['grainsize'] = self.grainsize
if self.deduplicated:
if self.thin:
cmdopts['autoexpand'] = True
cmdopts['deduplicated'] = self.deduplicated
else:
self.module.fail_json(msg="To configure 'deduplicated', parameter 'thin' should be passed and the value should be 'true.'")
cmdopts['name'] = self.name
cmdopts['copies'] = 2
if self.isdrp and self.thin:
cmdopts['autoexpand'] = True
self.log("creating volume command %s opts %s", cmd, cmdopts)
if self.module.check_mode:
self.changed = True
return
# Run command
result = self.restapi.svc_run_command(cmd, cmdopts, cmdargs=None)
self.log("create volume result %s", result)
if 'message' in result:
self.changed = True
self.log("create volume result message %s", result['message'])
else:
self.module.fail_json(
msg="Failed to create Volume [%s]" % self.name)
def addvolumecopy(self):
self.log("Entering function addvolumecopy")
cmd = 'addvolumecopy'
cmdopts = {}
if self.compressed:
cmdopts['compressed'] = self.compressed
if self.grainsize:
cmdopts['grainsize'] = self.grainsize
if self.thin and self.rsize:
cmdopts['thin'] = self.thin
cmdopts['buffersize'] = self.rsize
elif self.thin:
cmdopts['thin'] = self.thin
elif self.rsize and not self.thin:
self.module.fail_json(msg="To configure 'rsize', parameter 'thin' should be passed and the value should be 'true'.")
if self.deduplicated:
cmdopts['deduplicated'] = self.deduplicated
if self.size:
self.module.fail_json(msg="Parameter 'size' cannot be passed while converting a standard volume to Mirror Volume")
if self.poolA and (self.poolB == self.discovered_standard_vol_pool and self.poolA != self.discovered_standard_vol_pool):
cmdopts['pool'] = self.poolA
elif self.poolB and (self.poolA == self.discovered_standard_vol_pool and self.poolB != self.discovered_standard_vol_pool):
cmdopts['pool'] = self.poolB
if self.module.check_mode:
self.changed = True
return
cmdargs = [self.name]
self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
def addvdiskcopy(self):
self.log("Entering function addvdiskcopy")
cmd = 'addvdiskcopy'
cmdopts = {}
if self.size:
self.module.fail_json(msg="Parameter 'size' cannot be passed while converting a standard volume to Mirror Volume")
siteA, siteB = self.discover_site_from_pools()
if siteA != siteB:
self.module.fail_json(msg="To create Standard Mirrored volume, provide pools belonging to same site.")
if self.poolA and (self.poolB == self.discovered_standard_vol_pool and self.poolA != self.discovered_standard_vol_pool):
cmdopts['mdiskgrp'] = self.poolA
elif self.poolB and (self.poolA == self.discovered_standard_vol_pool and self.poolB != self.discovered_standard_vol_pool):
cmdopts['mdiskgrp'] = self.poolB
else:
self.module.fail_json(msg="One of the input pools must belong to the volume")
if self.compressed:
cmdopts['compressed'] = self.compressed
if self.grainsize:
cmdopts['grainsize'] = self.grainsize
if self.thin and self.rsize:
cmdopts['rsize'] = self.rsize
elif self.thin:
cmdopts['rsize'] = "2%"
elif self.rsize and not self.thin:
self.module.fail_json(msg="To configure 'rsize', parameter 'thin' should be passed and the value should be 'true'.")
if self.deduplicated:
if self.thin:
cmdopts['deduplicated'] = self.deduplicated
cmdopts['autoexpand'] = True
else:
self.module.fail_json(msg="To configure 'deduplicated', parameter 'thin' should be passed and the value should be 'true.'")
if self.isdrp and self.thin:
cmdopts['autoexpand'] = True
if self.module.check_mode:
self.changed = True
return
cmdargs = [self.name]
self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
def rmvolumecopy(self):
self.log("Entering function rmvolumecopy")
cmd = 'rmvolumecopy'
if self.size or self.thin or self.deduplicated or self.rsize or self.grainsize or self.compressed:
self.module.fail_json(msg="Parameter 'size', 'thin', 'deduplicated', 'rsize', 'grainsize' or 'compressed' \
cannot be passed while converting a Mirror Volume to Standard.")
if self.module.check_mode:
self.changed = True
return
cmdopts = {}
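# When only one pool is supplied, remove the copy sitting in the other
# (unspecified) pool so that the surviving copy remains in the requested pool.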
if not self.poolA:
if (self.poolB != self.discovered_poolA):
cmdopts['pool'] = self.discovered_poolA
else:
cmdopts['pool'] = self.discovered_poolB
elif not self.poolB:
if (self.poolA != self.discovered_poolB):
cmdopts['pool'] = self.discovered_poolB
else:
cmdopts['pool'] = self.discovered_poolA
cmdargs = [self.name]
self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
def vdisk_update(self, modify):
self.log("Entering function vdisk_update")
if 'addvdiskcopy' in modify and 'resizevolume' in modify:
self.module.fail_json(msg="You cannot resize the volume alongwith converting the volume to Standard Mirror")
if 'addvolumecopy' in modify and 'resizevolume' in modify:
self.module.fail_json(msg="You cannot resize the volume alongwith converting the volume to Local HyperSwap")
if 'rmvolumecopy' in modify and 'resizevolume' in modify:
self.module.fail_json(msg="You cannot resize the volume alongwith converting the Mirror volume to Standard")
if 'addvolumecopy' in modify:
self.addvolumecopy()
elif 'addvdiskcopy' in modify:
self.isdrpool()
self.addvdiskcopy()
elif 'rmvolumecopy' in modify:
self.rmvolumecopy()
elif 'resizevolume' in modify:
self.resizevolume()
def isdrpool(self):
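# Records whether either pool is a data reduction pool; when true, thin copies
# are created with the 'autoexpand' option (see vdisk_create and addvdiskcopy).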
poolA_drp = self.poolA_data['data_reduction']
poolB_drp = self.poolB_data['data_reduction']
isdrpool_list = [poolA_drp, poolB_drp]
if "yes" in isdrpool_list:
self.isdrp = True
def volume_delete(self):
self.log("Entering function volume_delete")
self.log("deleting volume '%s'", self.name)
if self.module.check_mode:
self.changed = True
return
cmd = 'rmvolume'
cmdopts = None
cmdargs = [self.name]
self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
# Any error will have been raised in svc_run_command
# rmvolume does not output anything when successful.
self.changed = True
def discover_system_topology(self):
self.log("Entering function discover_system_topology")
system_data = self.restapi.svc_obj_info(cmd='lssystem', cmdopts=None, cmdargs=None)
sys_topology = system_data['topology']
return sys_topology
def apply(self):
self.log("Entering function apply")
changed = False
msg = None
modify = []
vdisk_data = self.get_existing_vdisk()
# Perform basic checks and fail the module with appropriate error msg if requirements are not satisfied
self.basic_checks(vdisk_data)
# Discover System Topology
self.system_topology = self.discover_system_topology()
if self.system_topology == "standard" and self.type == "local hyperswap":
self.module.fail_json(msg="The system topology is Standard, HyperSwap actions are not supported.")
if vdisk_data:
if self.state == 'absent':
self.log("CHANGED: volume exists, but requested state is 'absent'")
changed = True
elif self.state == 'present':
# Discover the existing vdisk type.
self.vdisk_type = self.discover_vdisk_type(vdisk_data)
# Check if there is change in configuration
modify = self.vdisk_probe(vdisk_data)
if modify:
changed = True
else:
if self.state == 'present':
if self.poolA and self.poolB:
self.log("CHANGED: volume does not exist, but requested state is 'present'")
changed = True
else:
self.module.fail_json(msg="Volume does not exist, To create a Mirrored volume (standard mirror or HyperSwap), \
You must pass in poolA and poolB to the module.")
if changed:
if self.state == 'present':
if not vdisk_data:
if not self.type:
self.module.fail_json(msg="missing required argument: type")
# create_vdisk_flag = self.discover_site_from_pools()
if self.type == "standard":
self.isdrpool()
self.vdisk_create()
msg = "Standard Mirrored Volume %s has been created." % self.name
changed = True
elif self.type == "local hyperswap":
# if not create_vdisk_flag:
self.volume_create()
msg = "HyperSwap Volume %s has been created." % self.name
changed = True
else:
# This is where we would modify if required
self.vdisk_update(modify)
msg = "Volume [%s] has been modified." % self.name
changed = True
elif self.state == 'absent':
self.volume_delete()
msg = "Volume [%s] has been deleted." % self.name
changed = True
if self.module.check_mode:
msg = 'skipping changes due to check mode'
else:
self.log("exiting with no changes")
if self.state == 'absent':
msg = "Volume %s did not exist." % self.name
else:
msg = self.vdisk_type + " Volume [%s] already exists, no modifications done" % self.name
self.module.exit_json(msg=msg, changed=changed)
def main():
v = IBMSVCvolume()
try:
v.apply()
except Exception as e:
v.log("Exception in apply(): \n%s", format_exc())
v.module.fail_json(msg="Module failed. Error [%s]." % to_native(e))
if __name__ == '__main__':
main()

View File

@@ -0,0 +1,244 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2021 IBM CORPORATION
# Author(s): Sanjaikumaar <sanjaikumaar.m@ibm.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: ibm_svc_manage_ownershipgroup
short_description: This module manages ownership group on IBM Spectrum Virtualize family storage systems
version_added: "1.7.0"
description:
- Ansible interface to manage 'mkownershipgroup' and 'rmownershipgroup' commands.
options:
name:
description:
- Specifies the name or label for the new ownership group object.
required: true
type: str
state:
description:
- Creates (C(present)) or removes (C(absent)) an ownership group.
choices: [ absent, present ]
required: true
type: str
clustername:
description:
- The hostname or management IP of the Spectrum Virtualize storage system.
required: true
type: str
domain:
description:
- Domain for the Spectrum Virtualize storage system.
- Valid when hostname is used for the parameter I(clustername).
type: str
username:
description:
- REST API username for the Spectrum Virtualize storage system.
- The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
type: str
password:
description:
- REST API password for the Spectrum Virtualize storage system.
- The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
type: str
token:
description:
- The authentication token to verify a user on the Spectrum Virtualize storage system.
- To generate a token, use the M(ibm.spectrum_virtualize.ibm_svc_auth) module.
type: str
keepobjects:
description:
- If specified, the objects that currently belong to the ownership group will be kept but will be moved to noownershipgroup.
- Applies when I(state=absent).
type: bool
log_path:
description:
- Path of debug log file.
type: str
validate_certs:
description:
- Validates certification.
default: false
type: bool
author:
- Sanjaikumaar M (@sanjaikumaar)
notes:
- This module supports C(check_mode).
'''
EXAMPLES = '''
- name: Create ownership group
ibm.spectrum_virtualize.ibm_svc_manage_ownershipgroup:
clustername: "{{ clustername }}"
domain: "{{ domain }}"
username: "{{ username }}"
password: "{{ password }}"
log_path: /tmp/playbook.debug
name: newOwner
state: present
- name: Delete ownership group
ibm.spectrum_virtualize.ibm_svc_manage_ownershipgroup:
clustername: "{{ clustername }}"
domain: "{{ domain }}"
username: "{{ username }}"
password: "{{ password }}"
log_path: /tmp/playbook.debug
name: newOwner
state: absent
keepobjects: true
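# Illustrative sketch (not part of the original examples): the same create
# operation authenticated with a token generated beforehand by the ibm_svc_auth
# module; the registered variable name 'svc_token' is an assumption.
- name: Create ownership group using an auth token
  ibm.spectrum_virtualize.ibm_svc_manage_ownershipgroup:
    clustername: "{{ clustername }}"
    domain: "{{ domain }}"
    token: "{{ svc_token.token }}"
    log_path: /tmp/playbook.debug
    name: newOwner
    state: present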
'''
RETURN = '''#'''
from traceback import format_exc
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible_collections.ibm.spectrum_virtualize.plugins.module_utils.ibm_svc_utils import (
IBMSVCRestApi,
svc_argument_spec,
get_logger
)
class IBMSVCOwnershipgroup:
def __init__(self):
# Gathering required arguments for the module
argument_spec = svc_argument_spec()
argument_spec.update(
dict(
name=dict(type='str', required=True),
state=dict(
type='str',
required=True,
choices=['present', 'absent']
),
keepobjects=dict(type='bool')
)
)
# Initializing ansible module
self.module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True
)
# Required parameters
self.name = self.module.params['name']
self.state = self.module.params['state']
# Optional parameters
self.keepobjects = self.module.params.get('keepobjects')
if not self.name:
self.module.fail_json(msg='Missing mandatory parameter: name')
# logging setup
log_path = self.module.params['log_path']
logger = get_logger(self.__class__.__name__, log_path)
self.log = logger.info
self.changed = False
self.msg = None
self.restapi = IBMSVCRestApi(
module=self.module,
clustername=self.module.params['clustername'],
domain=self.module.params['domain'],
username=self.module.params['username'],
password=self.module.params['password'],
validate_certs=self.module.params['validate_certs'],
log_path=log_path,
token=self.module.params['token']
)
def check_existing_owgroups(self):
merged_result = {}
data = self.restapi.svc_obj_info(cmd='lsownershipgroup', cmdopts=None,
cmdargs=[self.name])
if isinstance(data, list):
for d in data:
merged_result.update(d)
else:
merged_result = data
return merged_result
def create_ownershipgroup(self):
if self.module.check_mode:
self.changed = True
return
if self.keepobjects:
self.module.fail_json(
msg='Keepobjects should only be passed while deleting ownershipgroup'
)
cmd = 'mkownershipgroup'
cmdopts = None
cmdargs = ['-name', self.name]
result = self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
self.changed = True
self.log('Create ownership group result: %s', result)
def delete_ownershipgroup(self):
if self.module.check_mode:
self.changed = True
return
cmd = 'rmownershipgroup'
cmdopts = None
cmdargs = [self.name]
if self.keepobjects:
cmdargs.insert(0, '-keepobjects')
result = self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
self.changed = True
self.log('Delete ownership group result: %s', result)
def apply(self):
if self.check_existing_owgroups():
if self.state == 'present':
self.msg = 'Ownership group (%s) already exists.' % (self.name)
else:
self.delete_ownershipgroup()
self.msg = 'Ownership group (%s) deleted.' % (self.name)
else:
if self.state == 'absent':
self.msg = 'Ownership group (%s) does not exist.' % (self.name)
else:
self.create_ownershipgroup()
self.msg = 'Ownership group (%s) created.' % \
(self.name)
if self.module.check_mode:
self.msg = 'skipping changes due to check mode.'
self.module.exit_json(
changed=self.changed,
msg=self.msg
)
def main():
v = IBMSVCOwnershipgroup()
try:
v.apply()
except Exception as e:
v.log('Exception in apply(): \n%s', format_exc())
v.module.fail_json(msg='Module failed. Error [{0}].'.format(to_native(e)))
if __name__ == '__main__':
main()

View File

@@ -0,0 +1,315 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2022 IBM CORPORATION
# Author(s): Sanjaikumaar M <sanjaikumaar.m@ibm.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: ibm_svc_manage_portset
short_description: This module manages portset configuration on IBM Spectrum Virtualize family storage systems
version_added: "1.8.0"
description:
- Ansible interface to manage IP portsets through the 'mkportset', 'chportset', and 'rmportset' commands.
options:
clustername:
description:
- The hostname or management IP of the Spectrum Virtualize storage system.
required: true
type: str
domain:
description:
- Domain for the Spectrum Virtualize storage system.
- Valid when hostname is used for the parameter I(clustername).
type: str
username:
description:
- REST API username for the Spectrum Virtualize storage system.
- The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
type: str
password:
description:
- REST API password for the Spectrum Virtualize storage system.
- The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
type: str
token:
description:
- The authentication token to verify a user on the Spectrum Virtualize storage system.
- To generate a token, use the M(ibm.spectrum_virtualize.ibm_svc_auth) module.
type: str
log_path:
description:
- Path of debug log file.
type: str
state:
description:
- Creates (C(present)) or Deletes (C(absent)) the IP portset.
choices: [ present, absent ]
required: true
type: str
name:
description:
- Specifies the name of portset.
type: str
required: true
portset_type:
description:
- Specifies the type for the portset.
- Applies only during creation of a portset.
choices: [ host, replication ]
default: host
type: str
ownershipgroup:
description:
- The name of the ownership group to which the portset object is being mapped.
- Parameters I(ownershipgroup) and I(noownershipgroup) are mutually exclusive.
- Applies when I(state=present).
type: str
noownershipgroup:
description:
- Specify to remove the ownership group from portset.
- Parameters I(ownershipgroup) and I(noownershipgroup) are mutually exclusive.
- Applies only while updating a portset.
type: bool
validate_certs:
description:
- Validates certification.
default: false
type: bool
author:
- Sanjaikumaar M (@sanjaikumaar)
notes:
- This module supports C(check_mode).
'''
EXAMPLES = '''
- name: Create a portset
ibm.spectrum_virtualize.ibm_svc_manage_portset:
clustername: "{{cluster}}"
username: "{{username}}"
password: "{{password}}"
name: portset1
portset_type: host
ownershipgroup: owner1
state: present
- name: Update a portset
ibm.spectrum_virtualize.ibm_svc_manage_portset:
clustername: "{{cluster}}"
username: "{{username}}"
password: "{{password}}"
name: portset1
noownershipgroup: true
state: present
- name: Delete a portset
ibm.spectrum_virtualize.ibm_svc_manage_portset:
clustername: "{{cluster}}"
username: "{{username}}"
password: "{{password}}"
name: portset1
state: absent
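# Illustrative sketch (not in the original examples): create a replication-type
# portset; the portset name is a placeholder.
- name: Create a replication portset
  ibm.spectrum_virtualize.ibm_svc_manage_portset:
    clustername: "{{cluster}}"
    username: "{{username}}"
    password: "{{password}}"
    name: replportset0
    portset_type: replication
    state: present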
'''
RETURN = '''#'''
from traceback import format_exc
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.ibm.spectrum_virtualize.plugins.module_utils.ibm_svc_utils import (
IBMSVCRestApi, svc_argument_spec,
get_logger
)
from ansible.module_utils._text import to_native
class IBMSVCPortset:
def __init__(self):
argument_spec = svc_argument_spec()
argument_spec.update(
dict(
state=dict(
type='str',
required=True,
choices=['present', 'absent']
),
name=dict(
type='str',
required=True,
),
portset_type=dict(
type='str',
default='host',
choices=['host', 'replication']
),
ownershipgroup=dict(
type='str',
),
noownershipgroup=dict(
type='bool',
)
)
)
self.module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
# Required parameters
self.name = self.module.params['name']
self.state = self.module.params['state']
# Optional parameters
self.portset_type = self.module.params.get('portset_type', '')
self.ownershipgroup = self.module.params.get('ownershipgroup', '')
self.noownershipgroup = self.module.params.get('noownershipgroup', '')
self.basic_checks()
# Variable to cache data
self.portset_details = None
# logging setup
self.log_path = self.module.params['log_path']
log = get_logger(self.__class__.__name__, self.log_path)
self.log = log.info
self.changed = False
self.msg = ''
self.restapi = IBMSVCRestApi(
module=self.module,
clustername=self.module.params['clustername'],
domain=self.module.params['domain'],
username=self.module.params['username'],
password=self.module.params['password'],
validate_certs=self.module.params['validate_certs'],
log_path=self.log_path,
token=self.module.params['token']
)
def basic_checks(self):
if self.state == 'present':
if not self.name:
self.module.fail_json(msg='Missing mandatory parameter: name')
if self.ownershipgroup and self.noownershipgroup:
self.module.fail_json(msg='Mutually exclusive parameter: ownershipgroup, noownershipgroup')
else:
fields = [f for f in ['ownershipgroup', 'noownershipgroup'] if getattr(self, f)]
if any(fields):
self.module.fail_json(msg='{0} should not be passed when state=absent'.format(', '.join(fields)))
def is_portset_exists(self):
merged_result = {}
data = self.restapi.svc_obj_info(
cmd='lsportset',
cmdopts=None,
cmdargs=[self.name]
)
if isinstance(data, list):
for d in data:
merged_result.update(d)
else:
merged_result = data
self.portset_details = merged_result
return merged_result
def create_portset(self):
if self.module.check_mode:
self.changed = True
return
cmd = 'mkportset'
cmdopts = {
'name': self.name,
'type': self.portset_type if self.portset_type else 'host'
}
if self.ownershipgroup:
cmdopts['ownershipgroup'] = self.ownershipgroup
self.restapi.svc_run_command(cmd, cmdopts, cmdargs=None)
self.log('Portset (%s) created', self.name)
self.changed = True
def portset_probe(self):
updates = []
if self.ownershipgroup and self.ownershipgroup != self.portset_details['owner_name']:
updates.append('ownershipgroup')
if self.noownershipgroup:
updates.append('noownershipgroup')
self.log("Modifications to be done: %s", updates)
return updates
def update_portset(self, updates):
if self.module.check_mode:
self.changed = True
return
cmd = 'chportset'
cmdopts = dict((k, getattr(self, k)) for k in updates)
cmdargs = [self.name]
self.restapi.svc_run_command(cmd, cmdopts=cmdopts, cmdargs=cmdargs)
self.log('Portset (%s) updated', self.name)
self.changed = True
def delete_portset(self):
if self.module.check_mode:
self.changed = True
return
cmd = 'rmportset'
cmdargs = [self.name]
self.restapi.svc_run_command(cmd, cmdopts=None, cmdargs=cmdargs)
self.log('Portset (%s) deleted', self.name)
self.changed = True
def apply(self):
if self.is_portset_exists():
if self.state == 'present':
modifications = self.portset_probe()
if any(modifications):
self.update_portset(modifications)
self.msg = 'Portset ({0}) updated.'.format(self.name)
else:
self.msg = 'Portset ({0}) already exists. No modifications done.'.format(self.name)
else:
self.delete_portset()
self.msg = 'Portset ({0}) deleted.'.format(self.name)
else:
if self.state == 'absent':
self.msg = 'Portset ({0}) does not exist. No modifications done.'.format(self.name)
else:
self.create_portset()
self.msg = 'Portset ({0}) created.'.format(self.name)
if self.module.check_mode:
self.msg = 'skipping changes due to check mode.'
self.module.exit_json(
changed=self.changed,
msg=self.msg
)
def main():
v = IBMSVCPortset()
try:
v.apply()
except Exception as e:
v.log("Exception in apply(): \n%s", format_exc())
v.module.fail_json(msg="Module failed. Error [%s]." % to_native(e))
if __name__ == '__main__':
main()

View File

@@ -0,0 +1,542 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2020 IBM CORPORATION
# Author(s): Rohit Kumar <rohit.kumar6@ibm.com>
# Shilpi Jain <shilpi.jain1@ibm.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: ibm_svc_manage_replication
short_description: This module manages remote copies (or rcrelationship) on
IBM Spectrum Virtualize family storage systems
version_added: "1.3.0"
description:
- Ansible interface to manage remote copy replication.
options:
name:
description:
- Specifies the name to assign to the new remote copy relationship or to operate on the existing remote copy.
type: str
state:
description:
- Creates or updates (C(present)), or removes (C(absent)) a remote copy relationship.
choices: [absent, present]
required: true
type: str
clustername:
description:
- The hostname or management IP of the Spectrum Virtualize storage system.
type: str
required: true
domain:
description:
- Domain for the Spectrum Virtualize storage system.
- Valid when hostname is used for the parameter I(clustername).
type: str
username:
description:
- REST API username for the Spectrum Virtualize storage system.
- The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
type: str
password:
description:
- REST API password for the Spectrum Virtualize storage system.
- The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
type: str
token:
description:
- The authentication token to verify a user on the Spectrum Virtualize storage system.
- To generate a token, use the ibm_svc_auth module.
type: str
version_added: '1.5.0'
copytype:
description:
- Specifies the mirror type of the remote copy. 'metro' means MetroMirror,
'global' means GlobalMirror, and 'GMCV' means GlobalMirror with change volume.
- If not specified, a MetroMirror remote copy will be created when creating a remote copy I(state=present).
type: str
choices: [ 'metro', 'global' , 'GMCV']
master:
description:
- Specifies the master volume name when creating a remote copy.
type: str
aux:
description:
- Specifies the auxiliary volume name when creating a remote copy.
type: str
cyclingperiod:
description:
- Specifies the cycle period in seconds. The default cycle is of 300 seconds.
type: int
remotecluster:
description:
- Specifies the name of remote cluster when creating a remote copy.
type: str
sync:
description:
- Specifies whether to create a synchronized relationship.
default: false
type: bool
force:
description:
- Specifies that the relationship must be deleted even if it results in the secondary volume containing inconsistent data.
type: bool
consistgrp:
description:
- Specifies a consistency group that this relationship will join. If not specified by user, the relationship is created as a stand-alone relationship.
- Applies when I(state=present).
type: str
noconsistgrp:
description:
- Specifies whether to remove the specified relationship from a consistency
group, making the relationship a stand-alone relationship.
- Applies when I(state=present).
default: false
type: bool
validate_certs:
description:
- Validates certification.
default: false
type: bool
log_path:
description:
- Path of debug log file.
type: str
notes:
- The parameters I(master) and I(aux) are mandatory only when a remote copy relationship does not exist.
- This module supports C(check_mode).
author:
- rohit(@rohitk-github)
- Shilpi Jain (@Shilpi-Jain1)
'''
EXAMPLES = '''
- name: Create remote copy
ibm.spectrum_virtualize.ibm_svc_manage_replication:
name: sample_rcopy
clustername: "{{clustername}}"
username: "{{username}}"
password: "{{password}}"
log_path: /tmp/ansible.log
state: present
remotecluster: "{{remotecluster}}"
master: SourceVolume0
aux: TargetVolume0
copytype: global
sync: true
consistgrp: sample_rccg
register: result
- name: Exclude the remote copy from consistency group
ibm.spectrum_virtualize.ibm_svc_manage_replication:
name: sample_rcopy2
clustername: "{{clustername}}"
username: "{{username}}"
password: "{{password}}"
log_path: /tmp/ansible.log
state: present
noconsistgrp: true
- name: Delete remote copy
ibm.spectrum_virtualize.ibm_svc_manage_replication:
name: sample_rcopy3
clustername: "{{clustername}}"
username: "{{username}}"
password: "{{password}}"
log_path: /tmp/ansible.log
state: absent
- name: Create GlobalMirror remote copy relationship with change volume
ibm.spectrum_virtualize.ibm_svc_manage_replication:
name: sample_rcopy4
clustername: "{{clustername}}"
username: "{{username}}"
password: "{{password}}"
log_path: /tmp/ansible.log
state: present
remotecluster: "{{remotecluster}}"
master: SourceVolume1
aux: TargetVolume1
copytype: GMCV
sync: true
register: result
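# Illustrative sketch (not in the original examples): a GMCV relationship with an
# explicit cycling period; the volume names are placeholders and 600 is an
# arbitrary number of seconds (the default is 300).
- name: Create GMCV remote copy relationship with a custom cycling period
  ibm.spectrum_virtualize.ibm_svc_manage_replication:
    name: sample_rcopy5
    clustername: "{{clustername}}"
    username: "{{username}}"
    password: "{{password}}"
    log_path: /tmp/ansible.log
    state: present
    remotecluster: "{{remotecluster}}"
    master: SourceVolume2
    aux: TargetVolume2
    copytype: GMCV
    cyclingperiod: 600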
'''
RETURN = '''#'''
from ansible.module_utils._text import to_native
from ansible_collections.ibm.spectrum_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi, svc_argument_spec, get_logger
from ansible.module_utils.basic import AnsibleModule
from traceback import format_exc
class IBMSVCManageReplication(object):
def __init__(self):
argument_spec = svc_argument_spec()
argument_spec.update(
dict(
name=dict(type='str'),
state=dict(type='str',
required=True,
choices=['present', 'absent']),
remotecluster=dict(type='str'),
copytype=dict(type='str', choices=['metro', 'global', 'GMCV']),
master=dict(type='str'),
aux=dict(type='str'),
force=dict(type='bool', required=False),
consistgrp=dict(type='str'),
noconsistgrp=dict(type='bool', default=False),
sync=dict(type='bool', default=False),
cyclingperiod=dict(type='int')
)
)
self.module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
# logging setup
log_path = self.module.params['log_path']
log = get_logger(self.__class__.__name__, log_path)
self.log = log.info
# Required
self.name = self.module.params['name']
self.state = self.module.params['state']
self.remotecluster = self.module.params['remotecluster']
# Optional
self.consistgrp = self.module.params.get('consistgrp', None)
self.aux = self.module.params.get('aux')
self.master = self.module.params.get('master')
self.sync = self.module.params.get('sync', False)
self.noconsistgrp = self.module.params.get('noconsistgrp', False)
self.copytype = self.module.params.get('copytype', None)
self.force = self.module.params.get('force', False)
self.cyclingperiod = self.module.params.get('cyclingperiod')
# Handling missing mandatory parameter name
if not self.name:
self.module.fail_json(msg='Missing mandatory parameter: name')
self.restapi = IBMSVCRestApi(
module=self.module,
clustername=self.module.params['clustername'],
domain=self.module.params['domain'],
username=self.module.params['username'],
password=self.module.params['password'],
validate_certs=self.module.params['validate_certs'],
log_path=log_path,
token=self.module.params['token']
)
def existing_vdisk(self, volname):
merged_result = {}
data = self.restapi.svc_obj_info(cmd='lsvdisk', cmdopts={'bytes': True},
cmdargs=[volname])
if not data:
self.log("source volume %s does not exist", volname)
return
if isinstance(data, list):
for d in data:
merged_result.update(d)
else:
merged_result = data
return merged_result
def cycleperiod_update(self):
"""
Use the chrcrelationship command to update cycling period in remote copy
relationship.
"""
if self.module.check_mode:
self.changed = True
return
if (self.copytype == 'GMCV') and (self.cyclingperiod):
cmd = 'chrcrelationship'
cmdopts = {}
cmdopts['cycleperiodseconds'] = self.cyclingperiod
cmdargs = [self.name]
self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
else:
self.log("not updating chrcrelationship with cyclingperiod %s", self.cyclingperiod)
def cyclemode_update(self):
"""
Use the chrcrelationship command to update cycling mode in remote copy
relationship.
"""
if self.module.check_mode:
self.changed = True
return
cmd = 'chrcrelationship'
cmdopts = {}
cmdargs = [self.name]
if self.copytype == 'GMCV':
self.log("updating chrcrelationship with cyclingmode multi")
cmdopts['cyclingmode'] = 'multi'
else:
self.log("updating chrcrelationship with no cyclingmode")
cmdopts['cyclingmode'] = 'none'
self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
def existing_rc(self):
"""
Find the remote copy relationships, such as Metro Mirror and Global Mirror relationships, visible to the system.
Returns:
None if no matching instances or a list including all the matching
instances
"""
self.log('Trying to get the remote copy relationship %s', self.name)
data = self.restapi.svc_obj_info(cmd='lsrcrelationship',
cmdopts=None, cmdargs=[self.name])
return data
def rcrelationship_probe(self, data):
props = {}
propscv = {}
if data['consistency_group_name'] and self.noconsistgrp:
props['noconsistgrp'] = self.noconsistgrp
if self.consistgrp is not None and self.consistgrp != data['consistency_group_name']:
props['consistgrp'] = self.consistgrp
if self.master is not None and self.master != data['master_vdisk_name']:
props['master'] = self.master
if self.aux is not None and self.aux != data['aux_vdisk_name']:
props['aux'] = self.aux
if self.copytype == 'global' and data['copy_type'] == 'metro':
props['global'] = True
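# A 'global' relationship whose cycling_mode is 'multi' is a GMCV relationship;
# converting it directly to metro is rejected below.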
if (self.copytype == 'metro' or self.copytype is None) and (data['copy_type'] == 'global' and data['cycling_mode'] == 'multi'):
self.module.fail_json(msg="Changing relationship type from GMCV to metro is not allowed")
elif (self.copytype == 'metro' or self.copytype is None) and data['copy_type'] == 'global':
props['metro'] = True
if self.copytype == 'GMCV' and data['copy_type'] == 'global' and self.consistgrp is None:
if data['cycling_mode'] != 'multi':
propscv['cyclingmode'] = 'multi'
if self.cyclingperiod is not None and self.cyclingperiod != int(data['cycle_period_seconds']):
propscv['cycleperiodseconds'] = self.cyclingperiod
if self.copytype == 'global' and (data['copy_type'] == 'global' and (data['master_change_vdisk_name'] or data['aux_change_vdisk_name'])):
propscv['cyclingmode'] = 'none'
if self.copytype == 'GMCV' and data['copy_type'] == 'metro':
self.module.fail_json(msg="Changing relationship type from metro to GMCV is not allowed")
if self.copytype != 'metro' and self.copytype != 'global' and self.copytype != 'GMCV' and self.copytype is not None:
self.module.fail_json(msg="Unsupported mirror type: %s. Only 'global', 'metro' and 'GMCV' are supported when modifying" % self.copytype)
return props, propscv
def rcrelationship_update(self, modify, modifycv):
"""
Use the chrcrelationship command to modify certain attributes of an
existing relationship, such as to add a relationship to a consistency
group or to remove a relationship from a consistency group.
You can change one attribute at a time.
"""
if self.module.check_mode:
self.changed = True
return
if modify:
self.log("updating chrcrelationship with properties %s", modify)
cmd = 'chrcrelationship'
cmdopts = {}
for prop in modify:
cmdopts[prop] = modify[prop]
cmdargs = [self.name]
self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
# Error(if any) will be raised in svc_run_command
self.changed = True
if modifycv:
if 'cycleperiodseconds' in modifycv:
self.cycleperiod_update()
self.log("cyclingperiod in change volume updated")
if 'cyclingmode' in modifycv:
self.cyclemode_update()
self.log("cyclingmode in change volume updated")
# Error(if any) will be raised in svc_run_command
self.changed = True
if not modify and not modifycv:
self.log("There is no property need to be updated")
self.changed = False
def create(self):
"""
Use the mkrcrelationship command to create a new Global Mirror or Metro Mirror
relationship, either intrasystem (within a single system) or intersystem
(if it involves more than one system).
Returns:
a remote copy instance
"""
if not self.name:
self.module.fail_json(msg="You must pass in name to the module.")
if not self.master:
self.module.fail_json(msg="You must pass in master to the module.")
if not self.aux:
self.module.fail_json(msg="You must pass in aux to the module.")
if not self.remotecluster:
self.module.fail_json(msg="You must pass in remotecluster to the module.")
if self.module.check_mode:
self.changed = True
return
self.log("Creating remote copy '%s'", self.name)
# Make command
cmd = 'mkrcrelationship'
cmdopts = {}
if self.remotecluster:
cmdopts['cluster'] = self.remotecluster
if self.master:
cmdopts['master'] = self.master
if self.aux:
cmdopts['aux'] = self.aux
if self.name:
cmdopts['name'] = self.name
if self.copytype:
if self.copytype == 'global' or self.copytype == 'GMCV':
cmdopts['global'] = True
elif self.copytype == 'metro' or self.copytype == 'blank':
pass
else:
msg = "Invalid parameter specified as the Copy Type(%s) when creating Remotecopy" % self.copytype
self.module.fail_json(msg=msg)
if self.copytype != 'GMCV' and self.cyclingperiod is not None:
msg = "Provided copytype is %s. Copy Type must be GMCV when creating Remotecopy relationship with change volumes and cycling period" % self.copytype
self.module.fail_json(msg=msg)
if self.consistgrp:
cmdopts['consistgrp'] = self.consistgrp
if self.sync:
cmdopts['sync'] = self.sync
# Run command
self.log("Command %s opts %s", cmd, cmdopts)
result = self.restapi.svc_run_command(cmd, cmdopts, cmdargs=None)
self.log("create remote copy result %s", result)
if 'message' in result:
self.changed = True
data = self.existing_rc()
self.log("Succeeded to create remote copy result message %s",
result['message'])
return data
else:
msg = "Failed to create remote copy [%s]" % self.name
self.module.fail_json(msg=msg)
def delete(self):
"""
Use the rmrcrelationship command to delete an existing remote copy
relationship.
"""
if self.module.check_mode:
self.changed = True
return
cmd = 'rmrcrelationship'
cmdopts = {}
if self.force:
cmdopts['force'] = self.force
cmdargs = [self.name]
result = self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
# Any error will have been raised in svc_run_command
# Command does not output anything when successful.
if result == '':
self.changed = True
self.log("succeeded to delete the remote copy %s", self.name)
elif 'message' in result:
self.changed = True
self.log("delete the remote copy %s with result message %s",
self.name, result['message'])
else:
self.module.fail_json(
msg="Failed to delete the remote copy [%s]" % self.name)
def apply(self):
changed = False
msg = None
modify = {}
modifycv = {}
rcrelationship_data = self.existing_rc()
if rcrelationship_data:
if self.state == 'absent':
self.log(
"CHANGED: RemoteCopy relationship exists, requested state is 'absent'")
changed = True
elif self.state == 'present':
modify, modifycv = self.rcrelationship_probe(rcrelationship_data)
if modify or modifycv:
changed = True
else:
if self.state == 'present':
changed = True
self.log(
"CHANGED: Remotecopy relationship does not exist, but requested state is '%s'", self.state)
if changed:
if self.state == 'present':
if not rcrelationship_data:
self.create()
if self.copytype == 'GMCV' and self.consistgrp is None:
self.cycleperiod_update()
self.cyclemode_update()
msg = "remote copy relationship with change volume %s has been created." % self.name
else:
msg = "remote copy relationship %s has been created." % self.name
else:
self.rcrelationship_update(modify, modifycv)
msg = "remote copy relationship [%s] has been modified." % self.name
elif self.state == 'absent':
self.delete()
msg = "remote copy relationship [%s] has been deleted." % self.name
if self.module.check_mode:
msg = 'skipping changes due to check mode.'
else:
self.log("exiting with no changes")
if self.state in ['absent']:
msg = "Remotecopy relationship [%s] does not exist." % self.name
else:
msg = "No Modifications detected, Remotecopy relationship [%s] already exists." % self.name
self.module.exit_json(msg=msg, changed=changed)
def main():
v = IBMSVCManageReplication()
try:
v.apply()
except Exception as e:
v.log("Exception in apply(): \n%s", format_exc())
v.module.fail_json(msg="Module failed. Error [%s]." % to_native(e))
if __name__ == '__main__':
main()

View File

@@ -0,0 +1,379 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2020 IBM CORPORATION
# Author(s): Rohit Kumar <rohit.kumar6@ibm.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: ibm_svc_manage_replicationgroup
short_description: This module manages remote copy consistency group on
IBM Spectrum Virtualize family storage systems
version_added: "1.3.0"
description:
- Ansible interface to manage 'mkrcconsistgrp', 'chrcconsistgrp', and 'rmrcconsistgrp'
remote copy consistency group commands.
options:
name:
description:
- Specifies the name for the new consistency group.
required: true
type: str
state:
description:
- Creates or updates (C(present)) removes (C(absent))
a consistency group.
choices: [ absent, present ]
required: true
type: str
clustername:
description:
- The hostname or management IP of the Spectrum Virtualize storage system.
type: str
required: true
domain:
description:
- Domain for the Spectrum Virtualize storage system.
- Valid when hostname is used for the parameter I(clustername).
type: str
username:
description:
- REST API username for the Spectrum Virtualize storage system.
- The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
type: str
password:
description:
- REST API password for the Spectrum Virtualize storage system.
- The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
type: str
token:
description:
- The authentication token to verify a user on the Spectrum Virtualize storage system.
- To generate a token, use the ibm_svc_auth module.
type: str
version_added: '1.5.0'
log_path:
description:
- Path of debug log file.
type: str
validate_certs:
description:
- Validates certification.
default: false
type: bool
remotecluster:
description:
- Specifies the name of the remote system.
Only used while creating a consistency group.
type: str
force:
description:
- If used to delete a consistency group,
it specifies that you want the system to remove any
relationship that belongs to the consistency
group before the group is deleted.
- If used to start a consistency group,
it specifies that you want the system to process the
copy operation even if it causes a temporary loss of
consistency during synchronization.
- It is required if the consistency group is in the ConsistentStopped
state, but is not synchronized or is in the idling state -
except if consistency protection is configured.
type: bool
copytype:
description:
- Specifies the mirror type of the remote copy. 'metro' means MetroMirror, 'global' means GlobalMirror.
- If not specified, a MetroMirror remote copy will be created when creating a remote copy I(state=present).
type: str
choices: [ 'metro', 'global' ]
cyclingmode:
description:
- Specifies the behavior of Global Mirror for the relationship.
- Active-active relationships and relationships with cycling modes set to Multiple must always be configured with change volumes.
- Applies when I(state=present) and I(copytype=global).
type: str
choices: [ 'multi', 'none' ]
cyclingperiod:
description:
- Specifies the cycle period in seconds.
type: int
author:
- rohit(@rohitk-github)
notes:
- This module supports C(check_mode).
'''
EXAMPLES = '''
- name: Define a new rc consistency group
ibm_svc_manage_replicationgroup:
clustername: "{{clustername}}"
domain: "{{domain}}"
username: "{{username}}"
password: "{{password}}"
log_path: /tmp/playbook.debug
name: rccg4test
remotecluster: remotecluster
state: present
- name: Delete rc consistency group
ibm_svc_manage_replicationgroup:
clustername: "{{clustername}}"
domain: "{{domain}}"
username: "{{username}}"
password: "{{password}}"
log_path: /tmp/playbook.debug
name: rccg4test
force: true
state: absent
- name: Update rc consistency group
ibm_svc_manage_replicationgroup:
clustername: "{{clustername}}"
domain: "{{domain}}"
username: "{{username}}"
password: "{{password}}"
log_path: /tmp/playbook.debug
name: rccg4test
cyclingperiod: 60
state: present
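# Illustrative sketch (not in the original examples): switch an existing
# consistency group to Global Mirror with multiple-cycling mode; all values are placeholders.
- name: Change rc consistency group to Global Mirror with cycling
  ibm_svc_manage_replicationgroup:
    clustername: "{{clustername}}"
    domain: "{{domain}}"
    username: "{{username}}"
    password: "{{password}}"
    log_path: /tmp/playbook.debug
    name: rccg4test
    copytype: global
    cyclingmode: multi
    cyclingperiod: 300
    state: present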
'''
RETURN = '''#'''
from traceback import format_exc
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.ibm.spectrum_virtualize.plugins.module_utils.ibm_svc_utils import \
IBMSVCRestApi, svc_argument_spec, get_logger
from ansible.module_utils._text import to_native
class IBMSVCRCCG(object):
def __init__(self):
argument_spec = svc_argument_spec()
argument_spec.update(
dict(
name=dict(type='str', required=True),
state=dict(type='str', required=True, choices=['absent',
'present']),
remotecluster=dict(type='str', required=False),
force=dict(type='bool', required=False),
copytype=dict(type='str', choices=['metro', 'global']),
cyclingmode=dict(type='str', required=False, choices=['multi', 'none']),
cyclingperiod=dict(type='int', required=False)
)
)
self.module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
# logging setup
log_path = self.module.params['log_path']
log = get_logger(self.__class__.__name__, log_path)
self.log = log.info
# Required
self.name = self.module.params['name']
self.state = self.module.params['state']
# Optional
self.cluster = self.module.params.get('remotecluster', None)
self.force = self.module.params.get('force', False)
self.copytype = self.module.params.get('copytype', None)
self.cyclingmode = self.module.params.get('cyclingmode', None)
self.cyclingperiod = self.module.params.get('cyclingperiod', None)
# Handling missing mandatory parameter name
if not self.name:
self.module.fail_json(msg='Missing mandatory parameter: name')
self.restapi = IBMSVCRestApi(
module=self.module,
clustername=self.module.params['clustername'],
domain=self.module.params['domain'],
username=self.module.params['username'],
password=self.module.params['password'],
validate_certs=self.module.params['validate_certs'],
log_path=log_path,
token=self.module.params['token']
)
def get_existing_rccg(self):
merged_result = {}
data = self.restapi.svc_obj_info(cmd='lsrcconsistgrp', cmdopts=None,
cmdargs=[self.name])
if isinstance(data, list):
for d in data:
merged_result.update(d)
else:
merged_result = data
return merged_result
def rccg_probe(self, data):
props = {}
propscv = {}
if self.copytype and self.copytype != data['copy_type']:
if self.copytype == 'global':
props['global'] = True
elif self.copytype == 'metro':
props['metro'] = True
else:
self.module.fail_json(msg="Unsupported mirror type: %s. Only 'global' and 'metro' are supported when modifying" % self.copytype)
if self.copytype == 'global' and self.cyclingperiod and self.cyclingperiod != int(data['cycle_period_seconds']):
propscv['cycleperiodseconds'] = self.cyclingperiod
if self.copytype == 'global' and self.cyclingmode and self.cyclingmode != data['cycling_mode']:
propscv['cyclingmode'] = self.cyclingmode
return props, propscv
def rccg_create(self):
if self.module.check_mode:
self.changed = True
return
rccg_data = self.get_existing_rccg()
if rccg_data:
self.rccg_update(rccg_data)
self.log("creating rc consistgrp '%s'", self.name)
# Make command
cmd = 'mkrcconsistgrp'
cmdopts = {'name': self.name}
if self.cluster:
cmdopts['cluster'] = self.cluster
self.log("creating rc consistgrp command '%s' opts", self.cluster)
# Run command
result = self.restapi.svc_run_command(cmd, cmdopts, cmdargs=None)
self.log("create rc consistgrp result '%s'", result)
msg = "succeeded to create rc consistgrp '%s'" % self.name
self.log(msg)
if 'message' in result:
self.log("create rc consistgrp result message '%s'",
result['message'])
self.module.exit_json(msg="rc consistgrp '%s' is created" %
self.name, changed=True)
else:
self.module.fail_json(msg=result)
def rccg_update(self, modify, modifycv):
if modify:
self.log("updating chrcconsistgrp with properties %s", modify)
cmd = 'chrcconsistgrp'
cmdopts = {}
for prop in modify:
cmdopts[prop] = modify[prop]
cmdargs = [self.name]
self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
# Any error would have been raised in svc_run_command
# chrcconsistgrp does not output anything when successful.
self.changed = True
if modifycv:
self.log("updating chrcconsistgrp with properties %s", modifycv)
cmd = 'chrcconsistgrp'
cmdargs = [self.name]
for prop in modifycv:
cmdopts = {}
cmdopts[prop] = modifycv[prop]
self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
# Any error would have been raised in svc_run_command
# chrcconsistgrp does not output anything when successful.
self.changed = True
if not modify and not modifycv:
self.log("There is no property to be updated")
self.changed = False
def rccg_delete(self):
rccg_data = self.get_existing_rccg()
if not rccg_data:
self.module.exit_json(msg="rc consistgrp '%s' did not exist" %
self.name, changed=False)
if self.module.check_mode:
self.changed = True
return
self.log("deleting rc consistgrp '%s'", self.name)
cmd = 'rmrcconsistgrp'
cmdopts = {'force': True} if self.force else None
cmdargs = [self.name]
self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
# Any error will have been raised in svc_run_command
# rmrcconsistgrp does not output anything when successful.
msg = "rc consistgrp '%s' is deleted" % self.name
self.log(msg)
self.module.exit_json(msg=msg, changed=True)
def apply(self):
changed = False
msg = None
modify = {}
rccg_data = self.get_existing_rccg()
if rccg_data:
if self.state == 'absent':
self.log(
"CHANGED: RemoteCopy group exists, requested state is 'absent'")
changed = True
elif self.state == 'present':
modify, modifycv = self.rccg_probe(rccg_data)
if modify or modifycv:
changed = True
else:
if self.state == 'present':
if self.copytype:
self.module.fail_json(msg="copytype cannot be passed while creating a consistency group")
changed = True
self.log(
"CHANGED: Remotecopy group does not exist, but requested state is '%s'", self.state)
if changed:
if self.state == 'present':
if not rccg_data:
self.rccg_create()
msg = "remote copy group %s has been created." % self.name
else:
self.rccg_update(modify, modifycv)
msg = "remote copy group [%s] has been modified." % self.name
elif self.state == 'absent':
self.rccg_delete()
msg = "remote copy group [%s] has been deleted." % self.name
if self.module.check_mode:
msg = 'skipping changes due to check mode.'
else:
self.log("exiting with no changes")
if self.state in ['absent']:
msg = "Remotecopy group [%s] does not exist." % self.name
else:
msg = "No Modifications detected, Remotecopy group [%s] already exists." % self.name
self.module.exit_json(msg=msg, changed=changed)
def main():
v = IBMSVCRCCG()
try:
v.apply()
except Exception as e:
v.log("Exception in apply(): \n%s", format_exc())
v.module.fail_json(msg="Module failed. Error [%s]." % to_native(e))
if __name__ == '__main__':
main()

View File

@@ -0,0 +1,342 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2022 IBM CORPORATION
# Author(s): Sanjaikumaar M <sanjaikumaar.m@ibm.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: ibm_svc_manage_safeguarded_policy
short_description: This module manages safeguarded policy configuration on IBM Spectrum Virtualize family storage systems
version_added: "1.8.0"
description:
- Ansible interface to manage 'mksafeguardedpolicy' and 'rmsafeguardedpolicy' safeguarded policy commands.
- Safeguarded copy functionality is introduced in IBM Spectrum Virtualize 8.4.2.
options:
clustername:
description:
- The hostname or management IP of the Spectrum Virtualize storage system.
required: true
type: str
domain:
description:
- Domain for the Spectrum Virtualize storage system.
- Valid when hostname is used for the parameter I(clustername).
type: str
username:
description:
- REST API username for the Spectrum Virtualize storage system.
- The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
type: str
password:
description:
- REST API password for the Spectrum Virtualize storage system.
- The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
type: str
token:
description:
- The authentication token to verify a user on the Spectrum Virtualize storage system.
- To generate a token, use the M(ibm.spectrum_virtualize.ibm_svc_auth) module.
type: str
log_path:
description:
- Path of debug log file.
type: str
state:
description:
- Creates (C(present)) or deletes (C(absent)) a safeguarded policy.
- Resume (C(resume)) or suspend (C(suspend)) the safeguarded copy functionality system wide.
choices: [ present, absent, suspend, resume ]
required: true
type: str
name:
description:
- Specifies the name of safeguarded policy.
- Not applicable when I(state=suspend) or I(state=resume).
type: str
backupunit:
description:
- Specify the backup unit in mentioned metric.
- Applies when I(state=present).
choices: [ minute, hour, day, week, month ]
type: str
backupinterval:
description:
- Specifies the interval of backup.
- Applies when I(state=present).
type: str
backupstarttime:
description:
- Specifies the start time of backup in the format YYMMDDHHMM.
- Applies when I(state=present).
type: str
retentiondays:
description:
- Specifies the retention days for the backup.
- Applies when I(state=present).
type: str
validate_certs:
description:
- Validates certification.
default: false
type: bool
author:
- Sanjaikumaar M (@sanjaikumaar)
notes:
- This module supports C(check_mode).
'''
EXAMPLES = '''
- name: Create safeguarded policy
ibm.spectrum_virtualize.ibm_svc_manage_safeguarded_policy:
clustername: "{{cluster}}"
username: "{{username}}"
password: "{{password}}"
name: sgpolicy0
backupunit: day
backupinterval: 1
backupstarttime: 2102281800
retentiondays: 15
state: present
- name: Suspend safeguarded copy functionality
ibm.spectrum_virtualize.ibm_svc_manage_safeguarded_policy:
clustername: "{{cluster}}"
username: "{{username}}"
password: "{{password}}"
state: suspend
- name: Resume safeguarded copy functionality
ibm.spectrum_virtualize.ibm_svc_manage_safeguarded_policy:
clustername: "{{cluster}}"
username: "{{username}}"
password: "{{password}}"
state: resume
- name: Delete safeguarded policy
ibm.spectrum_virtualize.ibm_svc_manage_safeguarded_policy:
clustername: "{{cluster}}"
username: "{{username}}"
password: "{{password}}"
name: sgpolicy0
state: absent
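# Illustrative sketch (not in the original examples): a weekly policy retained
# for 30 days; the name, start time, and retention values are placeholders.
- name: Create weekly safeguarded policy
  ibm.spectrum_virtualize.ibm_svc_manage_safeguarded_policy:
    clustername: "{{cluster}}"
    username: "{{username}}"
    password: "{{password}}"
    name: sgpolicy_weekly
    backupunit: week
    backupinterval: 1
    backupstarttime: 2102281800
    retentiondays: 30
    state: present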
'''
RETURN = '''#'''
from traceback import format_exc
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.ibm.spectrum_virtualize.plugins.module_utils.ibm_svc_utils import (
IBMSVCRestApi, svc_argument_spec,
get_logger
)
from ansible.module_utils._text import to_native
class IBMSVCSafeguardedPolicy:
def __init__(self):
argument_spec = svc_argument_spec()
argument_spec.update(
dict(
state=dict(
type='str',
required=True,
choices=['present', 'absent', 'suspend', 'resume']
),
name=dict(
type='str',
),
backupunit=dict(
type='str',
choices=['minute', 'hour', 'day', 'week', 'month'],
),
backupinterval=dict(
type='str',
),
backupstarttime=dict(
type='str',
),
retentiondays=dict(
type='str',
),
)
)
self.module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
# Required parameters
self.name = self.module.params['name']
self.state = self.module.params['state']
self.backupunit = self.module.params.get('backupunit', '')
self.backupinterval = self.module.params.get('backupinterval', '')
self.backupstarttime = self.module.params.get('backupstarttime', '')
self.retentiondays = self.module.params.get('retentiondays', '')
self.basic_checks()
# Variable to cache data
self.sg_policy_details = None
# logging setup
self.log_path = self.module.params['log_path']
log = get_logger(self.__class__.__name__, self.log_path)
self.log = log.info
self.changed = False
self.msg = ''
self.restapi = IBMSVCRestApi(
module=self.module,
clustername=self.module.params['clustername'],
domain=self.module.params['domain'],
username=self.module.params['username'],
password=self.module.params['password'],
validate_certs=self.module.params['validate_certs'],
log_path=self.log_path,
token=self.module.params['token']
)
def basic_checks(self):
if self.state == 'present':
fields = ['name', 'backupinterval', 'backupstarttime', 'retentiondays', 'backupunit']
exists = list(filter(lambda x: not getattr(self, x), fields))
if any(exists):
self.module.fail_json(msg="State is present but following parameters are missing: {0}".format(', '.join(exists)))
elif self.state == 'absent':
if not self.name:
self.module.fail_json(msg="Missing mandatory parameter: name")
fields = ['backupinterval', 'backupstarttime', 'retentiondays', 'backupunit']
exists = list(filter(lambda x: getattr(self, x) or getattr(self, x) == '', fields))
if any(exists):
self.module.fail_json(msg='{0} should not be passed when state=absent'.format(', '.join(exists)))
elif self.state in ['suspend', 'resume']:
fields = ['name', 'backupinterval', 'backupstarttime', 'retentiondays', 'backupunit']
exists = list(filter(lambda x: getattr(self, x) or getattr(self, x) == '', fields))
if any(exists):
self.module.fail_json(msg='{0} should not be passed when state={1}'.format(', '.join(exists), self.state))
def is_sg_exists(self):
merged_result = {}
data = self.restapi.svc_obj_info(
cmd='lssafeguardedschedule',
cmdopts=None,
cmdargs=[self.name]
)
if isinstance(data, list):
for d in data:
merged_result.update(d)
else:
merged_result = data
self.sg_policy_details = merged_result
return merged_result
def create_sg_policy(self):
if self.module.check_mode:
self.changed = True
return
cmd = 'mksafeguardedpolicy'
cmdopts = {
'name': self.name,
'backupstarttime': self.backupstarttime,
'backupinterval': self.backupinterval,
'backupunit': self.backupunit,
'retentiondays': self.retentiondays
}
self.restapi.svc_run_command(cmd, cmdopts, cmdargs=None)
self.log('Safeguarded policy (%s) created', self.name)
self.changed = True
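# function to compare requested parameters with the existing policy; returns a list of booleans, True where a difference is detected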
def sg_probe(self):
field_mappings = (
('backupinterval', self.sg_policy_details['backup_interval']),
('backupstarttime', self.sg_policy_details['backup_start_time']),
('retentiondays', self.sg_policy_details['retention_days']),
('backupunit', self.sg_policy_details['backup_unit'])
)
updates = []
for field, existing_value in field_mappings:
if field == 'backupstarttime':
updates.append(existing_value != '{0}00'.format(getattr(self, field)))
else:
updates.append(existing_value != getattr(self, field))
return updates
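# function to delete an existing safeguarded policy using 'rmsafeguardedpolicy'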
def delete_sg_policy(self):
if self.module.check_mode:
self.changed = True
return
cmd = 'rmsafeguardedpolicy'
cmdargs = [self.name]
self.restapi.svc_run_command(cmd, cmdopts=None, cmdargs=cmdargs)
self.log('Safeguarded policy (%s) deleted', self.name)
self.changed = True
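# function to suspend or resume the safeguarded copy functionality using 'chsystem'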
def update_safeguarded_copy_functionality(self):
if self.module.check_mode:
self.changed = True
return
cmd = 'chsystem'
cmdopts = {'safeguardedcopysuspended': 'yes' if self.state == 'suspend' else 'no'}
self.restapi.svc_run_command(cmd, cmdopts=cmdopts, cmdargs=None)
self.log('Safeguarded copy functionality status changed: %s', self.state)
self.changed = True
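# function to apply the requested state (create, delete, suspend, or resume)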
def apply(self):
if self.state in ['resume', 'suspend']:
self.update_safeguarded_copy_functionality()
self.msg = 'Safeguarded copy functionality {0}ed'.format(self.state.rstrip('e'))
else:
if self.is_sg_exists():
if self.state == 'present':
modifications = self.sg_probe()
if any(modifications):
self.msg = 'Policy modification is not supported in Ansible. Please delete and recreate the policy.'
else:
self.msg = 'Safeguarded policy ({0}) already exists. No modifications done.'.format(self.name)
else:
self.delete_sg_policy()
self.msg = 'Safeguarded policy ({0}) deleted.'.format(self.name)
else:
if self.state == 'absent':
self.msg = 'Safeguarded policy ({0}) does not exist. No modifications done.'.format(self.name)
else:
self.create_sg_policy()
self.msg = 'Safeguarded policy ({0}) created.'.format(self.name)
if self.module.check_mode:
self.msg = 'skipping changes due to check mode.'
self.module.exit_json(
changed=self.changed,
msg=self.msg
)
def main():
v = IBMSVCSafeguardedPolicy()
try:
v.apply()
except Exception as e:
v.log("Exception in apply(): \n%s", format_exc())
v.module.fail_json(msg="Module failed. Error [%s]." % to_native(e))
if __name__ == '__main__':
main()

View File

@@ -0,0 +1,412 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2021 IBM CORPORATION
# Author(s): Sanjaikumaar M <sanjaikumaar.m@ibm.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: ibm_svc_manage_sra
short_description: This module manages remote support assistance configuration on IBM Spectrum Virtualize family storage systems
version_added: "1.7.0"
description:
- Ansible interface to manage 'chsra' support remote assistance command.
options:
clustername:
description:
- The hostname or management IP of the Spectrum Virtualize storage system.
required: true
type: str
domain:
description:
- Domain for the Spectrum Virtualize storage system.
- Valid when hostname is used for the parameter I(clustername).
type: str
username:
description:
- REST API username for the Spectrum Virtualize storage system.
- The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
type: str
password:
description:
- REST API password for the Spectrum Virtualize storage system.
- The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
type: str
token:
description:
- The authentication token to verify a user on the Spectrum Virtualize storage system.
- To generate a token, use the M(ibm.spectrum_virtualize.ibm_svc_auth) module.
type: str
log_path:
description:
- Path of debug log file.
type: str
state:
description:
- Enables (C(enabled)) or disables (C(disabled)) the remote support assistance.
choices: [ enabled, disabled ]
required: true
type: str
support:
description:
- Specifies the support assistance through C(remote) or C(onsite).
choices: [ remote, onsite ]
type: str
required: true
name:
description:
- Specifies the list of unique names for the support center or proxy to be defined.
- Required when I(support=remote), to enable remote support assistance.
type: list
elements: str
sra_ip:
description:
- Specifies the list of IP addresses or fully qualified domain names for the new support center or proxy server.
- Required when I(support=remote) and I(state=enabled), to enable support remote assistance.
type: list
elements: str
sra_port:
description:
- Specifies the list of port numbers for the new support center or proxy server.
- Required when I(support=remote) and I(state=enabled), to enable support remote assistance.
type: list
elements: str
validate_certs:
description:
- Validates certification.
default: false
type: bool
author:
- Sanjaikumaar M (@sanjaikumaar)
notes:
- This module supports C(check_mode).
'''
EXAMPLES = '''
- name: Enable support remote assistance
ibm.spectrum_virtualize.ibm_svc_manage_sra:
clustername: "{{ clustername }}"
domain: "{{ domain }}"
username: "{{ username }}"
password: "{{ password }}"
log_path: "{{ log_path }}"
support: remote
state: enabled
name:
- proxy_1
- proxy_2
- proxy_3
sra_ip:
- '0.0.0.0'
- '1.1.1.1'
- '2.1.2.2'
sra_port:
- 8888
- 9999
- 8800
- name: Disable support remote assistance
ibm.spectrum_virtualize.ibm_svc_manage_sra:
clustername: "{{ clustername }}"
domain: "{{ domain }}"
username: "{{ username }}"
password: "{{ password }}"
log_path: "{{ log_path }}"
support: remote
state: disabled
name:
- proxy_1
- proxy_2
- proxy_3
'''
RETURN = '''#'''
from traceback import format_exc
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.ibm.spectrum_virtualize.plugins.module_utils.ibm_svc_utils import (
IBMSVCRestApi, svc_argument_spec,
get_logger
)
from ansible.module_utils._text import to_native
class IBMSVCSupportRemoteAssistance:
def __init__(self):
argument_spec = svc_argument_spec()
argument_spec.update(
dict(
state=dict(
type='str',
required=True,
choices=['enabled', 'disabled']
),
support=dict(
type='str',
required=True,
choices=['remote', 'onsite']
),
name=dict(type='list', elements='str'),
sra_ip=dict(type='list', elements='str'),
sra_port=dict(type='list', elements='str')
)
)
self.module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
# Required parameters
self.support = self.module.params['support']
self.state = self.module.params['state']
# Optional parameters
self.name = self.module.params.get('name', [])
self.sra_ip = self.module.params.get('sra_ip', [])
self.sra_port = self.module.params.get('sra_port', [])
self.basic_checks()
# Variable to store some frequently used data
self.sra_status_detail = None
# logging setup
self.log_path = self.module.params['log_path']
log = get_logger(self.__class__.__name__, self.log_path)
self.log = log.info
self.changed = False
self.msg = ''
self.restapi = IBMSVCRestApi(
module=self.module,
clustername=self.module.params['clustername'],
domain=self.module.params['domain'],
username=self.module.params['username'],
password=self.module.params['password'],
validate_certs=self.module.params['validate_certs'],
log_path=self.log_path,
token=self.module.params['token']
)
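# perform some basic checks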
def basic_checks(self):
self.filtered_params = dict(
filter(
lambda item: item[0] in ['name', 'sra_ip', 'sra_port'],
self.module.params.items()
)
)
if self.support == 'remote' and self.state == 'enabled':
if self.name and self.sra_ip and self.sra_port:
if len(self.name) == len(self.sra_ip) == len(self.sra_port):
if not all([all(self.name), all(self.sra_ip), all(self.sra_port)]):
missing_params = ', '.join([k for k, v in self.filtered_params.items() if not all(v)])
self.module.fail_json(
msg='{0} should not contain blank values'.format(missing_params)
)
else:
self.module.fail_json(
msg='Name, sra_ip and sra_port parameters should contain the same number of arguments'
)
else:
missing_params = ', '.join([k for k, v in self.filtered_params.items() if not v])
self.module.fail_json(
msg='support is remote and state is enabled but the following parameters are missing: {0}'.format(missing_params)
)
elif self.support == 'remote' and self.state == 'disabled':
if self.sra_ip or self.sra_port:
invalid_params = ', '.join([k for k, v in self.filtered_params.items() if k in ['sra_ip', 'sra_port'] and v])
self.module.fail_json(
msg='{0} should not be passed when support=remote and state=disabled'.format(invalid_params)
)
elif self.support == 'onsite':
if self.name or self.sra_ip or self.sra_port:
invalid_params = ', '.join([k for k, v in self.filtered_params.items()])
self.module.fail_json(
msg='{0} should not be passed when support=onsite'.format(invalid_params)
)
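# function to check whether support assistance is enabled, using 'lssra'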
def is_sra_enabled(self):
if self.sra_status_detail:
return self.sra_status_detail['status'] == 'enabled'
result = self.restapi.svc_obj_info(
cmd='lssra',
cmdopts=None,
cmdargs=None
)
self.sra_status_detail = result
return result['status'] == 'enabled'
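# function to check whether remote support is enabled, using 'lssra'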
def is_remote_support_enabled(self):
if self.sra_status_detail:
return self.sra_status_detail['remote_support_enabled'] == 'yes'
result = self.restapi.svc_obj_info(
cmd='lssra',
cmdopts=None,
cmdargs=None
)
return result['remote_support_enabled'] == 'yes'
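# function to get support center/proxy details using 'lssystemsupportcenter'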
def is_proxy_exist(self, obj_name):
obj = {}
result = self.restapi.svc_obj_info(
cmd='lssystemsupportcenter',
cmdopts=None,
cmdargs=[obj_name]
)
if isinstance(result, list):
for d in result:
obj.update(d)
else:
obj = result
return obj
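# function to check for configuration changes while SRA is enabled; such changes require disabling SRA first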
def sra_probe(self):
if self.module.check_mode:
self.changed = True
return
message = ''
if (self.support == 'remote' and not self.is_remote_support_enabled()) \
or (self.support == 'onsite' and self.is_remote_support_enabled()):
message += 'SRA configuration cannot be updated right now. '
if any(self.add_proxy_details()):
message += 'Proxy server details cannot be updated when SRA is enabled. '
message += 'Please disable SRA and try to update.' if message else ''
self.msg = message if message else self.msg
return self.msg
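# function to add proxy server details using 'mksystemsupportcenter'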
def add_proxy_details(self):
existed = []
if self.support == 'remote':
cmd = 'mksystemsupportcenter'
cmdargs = []
for nm, ip, port in zip(self.name, self.sra_ip, self.sra_port):
if nm != 'None' and ip != 'None' and port != 'None':
if not self.is_proxy_exist(nm):
existed.append(True)
if not self.is_sra_enabled():
cmdopts = {
'name': nm,
'ip': ip,
'port': port,
'proxy': 'yes'
}
self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
self.log('Proxy server(%s) details added', nm)
else:
self.log('Skipping, Proxy server(%s) already exists', nm)
else:
missing_params = ', '.join([k for k, v in self.filtered_params.items() if 'None' in v])
self.module.fail_json(
msg='support is remote and state is enabled but the following parameters are missing: {0}'.format(missing_params)
)
return existed
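# function to remove proxy server details using 'rmsystemsupportcenter'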
def remove_proxy_details(self):
if self.support == 'remote':
cmd = 'rmsystemsupportcenter'
cmdopts = {}
for nm in self.name:
if nm and nm != 'None':
if self.is_proxy_exist(nm):
cmdargs = [nm]
self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
self.log('Proxy server(%s) details removed', nm)
else:
self.log('Proxy server(%s) does not exist', nm)
else:
self.module.fail_json(
msg='support is remote and state is disabled but the following parameter is blank: name'
)
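# function to enable support assistance (and remote support, if requested) using 'chsra'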
def enable_sra(self):
if self.module.check_mode:
self.changed = True
return
self.add_proxy_details()
cmd = 'chsra'
cmdopts = {}
cmdargs = ['-enable']
self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
if self.support == 'remote':
cmdargs = ['-remotesupport', 'enable']
self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
self.log('%s support assistance enabled', self.support.capitalize())
self.changed = True
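# function to disable support assistance using 'chsra' and remove proxy details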
def disable_sra(self):
if self.module.check_mode:
self.changed = True
return
cmd = 'chsra'
cmdopts = {}
if self.support == 'remote':
cmdargs = ['-remotesupport', 'disable']
self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
cmdargs = ['-disable']
self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
self.log('%s support assistance disabled', self.support.capitalize())
self.remove_proxy_details()
self.changed = True
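# function to apply the requested state (enable or disable support assistance)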
def apply(self):
if self.is_sra_enabled():
if self.state == 'enabled':
if not self.sra_probe():
self.msg = 'Support remote assistance already enabled. '\
'No modifications done.'
else:
self.disable_sra()
self.msg = 'Support remote assistance disabled.'
else:
if self.state == 'disabled':
self.msg = 'Support remote assistance is already disabled.'
else:
self.enable_sra()
self.msg = 'Support remote assistance({0}) enabled.'.format(
self.support
)
if self.module.check_mode:
self.msg = 'skipping changes due to check mode.'
self.module.exit_json(msg=self.msg, changed=self.changed)
def main():
v = IBMSVCSupportRemoteAssistance()
try:
v.apply()
except Exception as e:
v.log('Exception in apply(): \n%s', format_exc())
v.module.fail_json(msg='Module failed. Error [%s].' % to_native(e))
if __name__ == '__main__':
main()

View File

@@ -0,0 +1,385 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2021 IBM CORPORATION
# Author(s): Sreshtant Bohidar <sreshtant.bohidar@ibm.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: ibm_svc_manage_user
short_description: This module manages user on IBM Spectrum Virtualize family storage systems
description:
- Ansible interface to manage 'mkuser', 'rmuser', and 'chuser' commands.
version_added: "1.7.0"
options:
name:
description:
- Specifies the unique username.
required: true
type: str
state:
description:
- Creates or updates (C(present)) or removes (C(absent)) a user.
choices: [ present, absent ]
required: true
type: str
clustername:
description:
- The hostname or management IP of the Spectrum Virtualize storage system.
type: str
required: true
domain:
description:
- Domain for the Spectrum Virtualize storage system.
- Valid when hostname is used for the parameter I(clustername).
type: str
username:
description:
- REST API username for the Spectrum Virtualize storage system.
- The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
type: str
password:
description:
- REST API password for the Spectrum Virtualize storage system.
- The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
type: str
token:
description:
- The authentication token to verify a user on the Spectrum Virtualize storage system.
- To generate a token, use the ibm_svc_auth module.
type: str
user_password:
description:
- Specifies the password associated with the user.
- Applies when I(state=present).
type: str
nopassword:
description:
- Specifies that the user's password is to be deleted.
- Applies when I(state=present), to modify a user.
type: bool
keyfile:
description:
- Specifies the name of the file containing the Secure Shell (SSH) public key.
- Applies when I(state=present).
type: str
nokey:
description:
- Specifies that the user's SSH key is to be deleted.
- Applies when I(state=present), to modify a user.
type: bool
auth_type:
description:
- Specifies whether the user authenticates to the system using a remote authentication service or system authentication methods.
- Only supported value is 'usergrp'.
- Required when I(state=present), to create a user.
choices: [ usergrp ]
type: str
usergroup:
description:
- Specifies the name of the user group with which the local user is to be associated.
- Applies when I(state=present) and I(auth_type=usergrp).
type: str
forcepasswordchange:
description:
- Specifies that the password is to be changed on next login.
- Applies when I(state=present), to modify a user.
type: bool
lock:
description:
- Specifies to lock the account indefinitely. The user cannot log in unless unlocked again with the parameter I(unlock).
- Applies when I(state=present), to modify a user.
- Parameters I(lock) and I(unlock) are mutually exclusive.
type: bool
unlock:
description:
- Specifies to unlock the account so it can be logged in to again.
- Applies when I(state=present), to modify a user.
- Parameters I(lock) and I(unlock) are mutually exclusive.
type: bool
validate_certs:
description:
- Validates certification.
default: false
type: bool
log_path:
description:
- Path of debug log file.
type: str
author:
- Sreshtant Bohidar(@Sreshtant-Bohidar)
notes:
- This module supports C(check_mode).
'''
EXAMPLES = '''
- name: Create a user
ibm.spectrum_virtualize.ibm_svc_manage_user:
clustername: "{{clustername}}"
domain: "{{domain}}"
username: "{{username}}"
password: "{{password}}"
log_path: /tmp/playbook.debug
state: present
name: user-name
user_password: user-password
auth_type: usergrp
usergroup: usergroup-name
- name: Remove a user
ibm.spectrum_virtualize.ibm_svc_manage_user:
clustername: "{{clustername}}"
domain: "{{domain}}"
username: "{{username}}"
password: "{{password}}"
log_path: /tmp/playbook.debug
state: absent
name: user-name
'''
RETURN = '''#'''
from traceback import format_exc
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.ibm.spectrum_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi, svc_argument_spec, get_logger
from ansible.module_utils._text import to_native
class IBMSVCUser(object):
def __init__(self):
argument_spec = svc_argument_spec()
argument_spec.update(
dict(
name=dict(type='str', required=True),
state=dict(type='str', required=True, choices=['present', 'absent']),
auth_type=dict(type='str', required=False, choices=['usergrp']),
user_password=dict(type='str', required=False, no_log=True),
nopassword=dict(type='bool', required=False),
keyfile=dict(type='str', required=False, no_log=True),
nokey=dict(type='bool', required=False),
forcepasswordchange=dict(type='bool', required=False),
lock=dict(type='bool', required=False),
unlock=dict(type='bool', required=False),
usergroup=dict(type='str', required=False),
)
)
self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
# logging setup
log_path = self.module.params['log_path']
log = get_logger(self.__class__.__name__, log_path)
self.log = log.info
# Required
self.name = self.module.params['name']
self.state = self.module.params['state']
# Required during creation of user
self.auth_type = self.module.params['auth_type']
self.usergroup = self.module.params['usergroup']
# Optional
self.user_password = self.module.params.get('user_password', False)
self.nopassword = self.module.params.get('nopassword', False)
self.keyfile = self.module.params.get('keyfile', False)
self.nokey = self.module.params.get('nokey', False)
self.forcepasswordchange = self.module.params.get('forcepasswordchange', False)
self.lock = self.module.params.get('lock', False)
self.unlock = self.module.params.get('unlock', False)
# creating an instance of IBMSVCRestApi
self.restapi = IBMSVCRestApi(
module=self.module,
clustername=self.module.params['clustername'],
domain=self.module.params['domain'],
username=self.module.params['username'],
password=self.module.params['password'],
validate_certs=self.module.params['validate_certs'],
log_path=log_path,
token=self.module.params['token']
)
# perform some basic checks
def basic_checks(self):
# Handling for mandatory parameter name
if not self.name:
self.module.fail_json(msg="Missing mandatory parameter: name")
# Handling for mandatory parameter state
if not self.state:
self.module.fail_json(msg="Missing mandatory parameter: state")
# Handling mutually exclusive cases among parameters
if self.user_password and self.nopassword:
self.module.fail_json(msg="Mutually exclusive parameter: user_password, nopassword")
if self.lock and self.unlock:
self.module.fail_json(msg="Mutually exclusive parameter: lock, unlock")
if self.keyfile and self.nokey:
self.module.fail_json(msg="Mutually exclusive parameter: keyfile, nokey")
if self.auth_type == 'usergrp' and not self.usergroup:
self.module.fail_json(msg="Parameter [usergroup] is required when auth_type is usergrp")
# function to get user data
def get_existing_user(self):
merged_result = {}
data = self.restapi.svc_obj_info(cmd='lsuser', cmdopts=None, cmdargs=[self.name])
self.log('GET: user data: %s', data)
if isinstance(data, list):
for d in data:
merged_result.update(d)
else:
merged_result = data
return merged_result
# function for creating new user
def create_user(self):
# Handling unsupported parameter during user creation
if self.nokey or self.nopassword or self.lock or self.unlock or self.forcepasswordchange:
self.module.fail_json(msg="Parameters [nokey, nopassword, lock, unlock, forcepasswordchange] not applicable while creating a user")
# Handling for mandatory parameter role
if not self.auth_type:
self.module.fail_json(msg="Missing required parameter: auth_type")
if self.auth_type == 'usergrp' and not self.usergroup:
self.module.fail_json(msg="Missing required parameter: usergroup")
if self.module.check_mode:
self.changed = True
return
command = 'mkuser'
command_options = {
'name': self.name,
}
if self.user_password:
command_options['password'] = self.user_password
if self.keyfile:
command_options['keyfile'] = self.keyfile
if self.usergroup:
command_options['usergrp'] = self.usergroup
if self.forcepasswordchange:
command_options['forcepasswordchange'] = self.forcepasswordchange
result = self.restapi.svc_run_command(command, command_options, cmdargs=None)
self.log("create user result %s", result)
if 'message' in result:
self.changed = True
self.log("create user result message %s", result['message'])
else:
self.module.fail_json(
msg="Failed to create user [%s]" % self.name)
# function for probing an existing user
def probe_user(self, data):
properties = {}
if self.usergroup:
if self.usergroup != data['usergrp_name']:
properties['usergrp'] = self.usergroup
if self.user_password:
properties['password'] = self.user_password
if self.nopassword:
if data['password'] == 'yes':
properties['nopassword'] = True
if self.keyfile:
properties['keyfile'] = self.keyfile
if self.nokey:
if data['ssh_key'] == "yes":
properties['nokey'] = True
if self.lock:
properties['lock'] = True
if self.unlock:
properties['unlock'] = True
if self.forcepasswordchange:
properties['forcepasswordchange'] = True
return properties
# function for updating an existing user
def update_user(self, data):
if self.module.check_mode:
self.changed = True
return
self.log("updating user '%s'", self.name)
command = 'chuser'
for parameter in data:
command_options = {
parameter: data[parameter]
}
self.restapi.svc_run_command(command, command_options, [self.name])
self.changed = True
# function for removing an existing user
def remove_user(self):
# Handling unsupported parameter during user removal
if self.nokey or self.nopassword or self.lock or self.unlock or self.forcepasswordchange:
self.module.fail_json(msg="Parameters [nokey, nopassword, lock, unlock, forcepasswordchange] not applicable while removing a user")
if self.module.check_mode:
self.changed = True
return
self.log("deleting user '%s'", self.name)
command = 'rmuser'
command_options = None
cmdargs = [self.name]
self.restapi.svc_run_command(command, command_options, cmdargs)
self.changed = True
def apply(self):
changed = False
msg = None
modify = {}
self.basic_checks()
user_data = self.get_existing_user()
if user_data:
if self.state == 'absent':
self.log("CHANGED: user exists, but requested state is 'absent'")
changed = True
elif self.state == 'present':
# initiate probing of an existing user
modify = self.probe_user(user_data)
if modify:
self.log("CHANGED: user exists, but probe detected changes")
changed = True
else:
if self.state == 'present':
self.log("CHANGED: user does not exist, but requested state is 'present'")
changed = True
if changed:
if self.state == 'present':
if not user_data:
# initiate creation of new user
self.create_user()
msg = "User [%s] has been created." % self.name
else:
# initiate update of an existing user
self.update_user(modify)
msg = "User [%s] has been modified." % self.name
elif self.state == 'absent':
# initiate deletion of an existing user
self.remove_user()
msg = "User [%s] has been removed." % self.name
if self.module.check_mode:
msg = "Skipping changes due to check mode."
else:
if self.state == 'absent':
msg = "User [%s] does not exist." % self.name
elif self.state == 'present':
msg = "User [%s] already exist (no modificationes detected)." % self.name
self.module.exit_json(msg=msg, changed=changed)
def main():
v = IBMSVCUser()
try:
v.apply()
except Exception as e:
v.log("Exception in apply(): \n%s", format_exc())
v.module.fail_json(msg="Module failed. Error [%s]." % to_native(e))
if __name__ == '__main__':
main()

View File

@@ -0,0 +1,321 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2021 IBM CORPORATION
# Author(s): Sreshtant Bohidar <sreshtant.bohidar@ibm.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: ibm_svc_manage_usergroup
short_description: This module manages user group on IBM Spectrum Virtualize family storage systems
description:
- Ansible interface to manage 'mkusergrp', 'rmusergrp', and 'chusergrp' commands.
version_added: "1.7.0"
options:
name:
description:
- Specifies the name of the user group.
required: true
type: str
state:
description:
- Creates or updates (C(present)) or removes (C(absent)) a user group.
choices: [ present, absent ]
required: true
type: str
clustername:
description:
- The hostname or management IP of the Spectrum Virtualize storage system.
type: str
required: true
domain:
description:
- Domain for the Spectrum Virtualize storage system.
- Valid when hostname is used for the parameter I(clustername).
type: str
username:
description:
- REST API username for the Spectrum Virtualize storage system.
- The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
type: str
password:
description:
- REST API password for the Spectrum Virtualize storage system.
- The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
type: str
token:
description:
- The authentication token to verify a user on the Spectrum Virtualize storage system.
- To generate a token, use the ibm_svc_auth module.
type: str
role:
description:
- Specifies the role associated with all users that belong to this user group.
- Required when I(state=present).
choices: [ Monitor, CopyOperator, Service, FlashCopyAdmin, Administrator, SecurityAdmin, VasaProvider, RestrictedAdmin, 3SiteAdmin ]
type: str
ownershipgroup:
description:
- Specifies the name of the ownership group.
- Applies when I(state=present).
- Parameters I(ownershipgroup) and I(noownershipgroup) are mutually exclusive.
type: str
noownershipgroup:
description:
- Specifies that the usergroup is removed from the ownership group it belonged to.
- Applies when I(state=present), to modify a user group.
- Parameters I(ownershipgroup) and I(noownershipgroup) are mutually exclusive.
type: bool
validate_certs:
description:
- Validates certification.
default: false
type: bool
log_path:
description:
- Path of debug log file.
type: str
author:
- Sreshtant Bohidar(@Sreshtant-Bohidar)
notes:
- This module supports C(check_mode).
'''
EXAMPLES = '''
- name: Create a user group
ibm.spectrum_virtualize.ibm_svc_manage_usergroup:
clustername: "{{clustername}}"
domain: "{{domain}}"
username: "{{username}}"
password: "{{password}}"
log_path: /tmp/playbook.debug
state: present
name: user-group-name
role: Monitor
ownershipgroup: ownershipgroup-name
- name: Remove a user group
ibm.spectrum_virtualize.ibm_svc_manage_usergroup:
clustername: "{{clustername}}"
domain: "{{domain}}"
username: "{{username}}"
password: "{{password}}"
log_path: /tmp/playbook.debug
state: absent
name: user-group-name
'''
RETURN = '''#'''
from traceback import format_exc
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.ibm.spectrum_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi, svc_argument_spec, get_logger
from ansible.module_utils._text import to_native
class IBMSVCUsergroup(object):
def __init__(self):
argument_spec = svc_argument_spec()
argument_spec.update(
dict(
name=dict(type='str', required=True),
role=dict(type='str', required=False, choices=[
'Monitor', 'CopyOperator', 'Service', 'FlashCopyAdmin',
'Administrator', 'SecurityAdmin', 'VasaProvider',
'RestrictedAdmin', '3SiteAdmin'
]),
ownershipgroup=dict(type='str', required=False),
noownershipgroup=dict(type='bool', required=False),
state=dict(type='str', required=True, choices=['present', 'absent'])
)
)
self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
# logging setup
log_path = self.module.params['log_path']
log = get_logger(self.__class__.__name__, log_path)
self.log = log.info
# Required
self.name = self.module.params['name']
self.state = self.module.params['state']
# Required during creation of user group
self.role = self.module.params['role']
# Optional
self.ownershipgroup = self.module.params.get('ownershipgroup', False)
self.noownershipgroup = self.module.params.get('noownershipgroup', False)
# creating an instance of IBMSVCRestApi
self.restapi = IBMSVCRestApi(
module=self.module,
clustername=self.module.params['clustername'],
domain=self.module.params['domain'],
username=self.module.params['username'],
password=self.module.params['password'],
validate_certs=self.module.params['validate_certs'],
log_path=log_path,
token=self.module.params['token']
)
# perform some basic checks
def basic_checks(self):
# Handling for mandatory parameter name
if not self.name:
self.module.fail_json(msg="Missing mandatory parameter: name")
# Handling for mandatory parameter state
if not self.state:
self.module.fail_json(msg="Missing mandatory parameter: state")
# Handling mutually exclusive cases
if self.ownershipgroup and self.noownershipgroup:
self.module.fail_json(msg="Mutually exclusive parameter: ownershipgroup, noownershipgroup")
# Handling unsupported parameters while removing a usergroup
if self.state == 'absent' and (self.role or self.ownershipgroup or self.noownershipgroup):
self.module.fail_json(msg="Parameters [role, ownershipgroup, noownershipgroup] are not applicable while removing a usergroup")
# function to get user group data
def get_existing_usergroup(self):
merged_result = {}
data = self.restapi.svc_obj_info(cmd='lsusergrp', cmdopts=None, cmdargs=[self.name])
self.log('GET: user group data: %s', data)
if isinstance(data, list):
for d in data:
merged_result.update(d)
else:
merged_result = data
return merged_result
# function for creating new user group
def create_user_group(self):
# Handling unsupported parameter during usergroup creation
if self.noownershipgroup:
self.module.fail_json(msg="Parameter [noownershipgroup] is not applicable while creating a usergroup")
# Handling for mandatory parameter role
if not self.role:
self.module.fail_json(msg="Missing mandatory parameter: role")
if self.module.check_mode:
self.changed = True
return
command = 'mkusergrp'
command_options = {
'name': self.name,
}
if self.role:
command_options['role'] = self.role
if self.ownershipgroup:
command_options['ownershipgroup'] = self.ownershipgroup
result = self.restapi.svc_run_command(command, command_options, cmdargs=None)
self.log("create user group result %s", result)
if 'message' in result:
self.changed = True
self.log("create user group result message %s", result['message'])
else:
self.module.fail_json(
msg="Failed to user volume group [%s]" % self.name)
# function for probing an existing user group
def probe_user_group(self, data):
properties = {}
if self.role:
if self.role != data['role']:
properties['role'] = self.role
if self.ownershipgroup:
if self.ownershipgroup != data['owner_name']:
properties['ownershipgroup'] = self.ownershipgroup
if self.noownershipgroup:
if data['owner_name']:
properties['noownershipgroup'] = True
return properties
# function for updating an existing user group
def update_user_group(self, data):
if self.module.check_mode:
self.changed = True
return
self.log("updating user group '%s'", self.name)
command = 'chusergrp'
command_options = {}
if 'role' in data:
command_options['role'] = data['role']
if 'ownershipgroup' in data:
command_options['ownershipgroup'] = data['ownershipgroup']
if 'noownershipgroup' in data:
command_options['noownershipgroup'] = True
cmdargs = [self.name]
self.restapi.svc_run_command(command, command_options, cmdargs)
self.changed = True
# function for removing an existing user group
def remove_user_group(self):
if self.module.check_mode:
self.changed = True
return
self.log("deleting user group '%s'", self.name)
command = 'rmusergrp'
command_options = None
cmdargs = [self.name]
self.restapi.svc_run_command(command, command_options, cmdargs)
self.changed = True
def apply(self):
changed = False
msg = None
modify = {}
self.basic_checks()
user_group_data = self.get_existing_usergroup()
if user_group_data:
if self.state == 'absent':
self.log("CHANGED: user group exists, but requested state is 'absent'")
changed = True
elif self.state == 'present':
# initiate probing
modify = self.probe_user_group(user_group_data)
if modify:
self.log("CHANGED: user group exists, but probe detected changes")
changed = True
else:
if self.state == 'present':
self.log("CHANGED: user group does not exist, but requested state is 'present'")
changed = True
if changed:
if self.state == 'present':
if not user_group_data:
self.create_user_group()
msg = "User group [%s] has been created." % self.name
else:
self.update_user_group(modify)
msg = "User group [%s] has been modified." % self.name
elif self.state == 'absent':
self.remove_user_group()
msg = "User group [%s] has been removed." % self.name
if self.module.check_mode:
msg = "Skipping changes due to check mode."
else:
if self.state == 'absent':
msg = "User group [%s] does not exist." % self.name
elif self.state == 'present':
msg = "User group [%s] already exist (no modificationes detected)." % self.name
self.module.exit_json(msg=msg, changed=changed)
def main():
v = IBMSVCUsergroup()
try:
v.apply()
except Exception as e:
v.log("Exception in apply(): \n%s", format_exc())
v.module.fail_json(msg="Module failed. Error [%s]." % to_native(e))
if __name__ == '__main__':
main()

View File

@@ -0,0 +1,747 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2021 IBM CORPORATION
# Author(s): Sreshtant Bohidar <sreshtant.bohidar@ibm.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: ibm_svc_manage_volume
short_description: This module manages standard volumes on IBM Spectrum Virtualize family storage systems
description:
- Ansible interface to manage 'mkvolume', 'rmvolume', and 'chvdisk' volume commands.
version_added: "1.6.0"
options:
name:
description:
- Specifies the name to assign to the new volume.
required: true
type: str
state:
description:
- Creates or updates (C(present)) or removes (C(absent)) a volume.
choices: [ absent, present ]
required: true
type: str
clustername:
description:
- The hostname or management IP of the Spectrum Virtualize storage system.
required: true
type: str
domain:
description:
- Domain for the Spectrum Virtualize storage system.
- Valid when hostname is used for the parameter I(clustername).
type: str
username:
description:
- REST API username for the Spectrum Virtualize storage system.
- The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
type: str
password:
description:
- REST API password for the Spectrum Virtualize storage system.
- The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
type: str
token:
description:
- The authentication token to verify a user on the Spectrum Virtualize storage system.
- To generate a token, use the ibm_svc_auth module.
type: str
pool:
description:
- Specifies the name of the storage pool to use while creating the volume.
- This parameter is required when I(state=present), to create a volume.
type: str
size:
description:
- Defines the size of the volume. This parameter can also be used to resize an existing volume.
- Required when I(state=present), to create or modify a volume.
type: str
unit:
description:
- Specifies the data units to use with the capacity that is specified by the 'size' parameter.
- I(size) is required when using I(unit).
type: str
choices: [ b, kb, mb, gb, tb, pb ]
default: mb
iogrp:
description:
- Specifies the list of I/O group names. Group names in the list must be separated by using a comma.
- While creating a new volume, the first I/O group in the list is added as both cached & access I/O group,
while remaining I/O groups are added as access I/O groups.
- This parameter supports update functionality.
- Valid when I(state=present), to create or modify a volume.
type: str
thin:
description:
- Specifies that a thin-provisioned volume is to be created.
- Parameters 'thin' and 'compressed' are mutually exclusive.
- Valid when I(state=present), to create a thin-provisioned volume.
type: bool
compressed:
description:
- Specifies that a compressed volume is to be created.
- Parameters 'compressed' and 'thin' are mutually exclusive.
- Valid when I(state=present), to create a compressed volume.
type: bool
buffersize:
description:
- Specifies the pool capacity that the volume will reserve as a buffer for thin-provisioned and compressed volumes.
- Parameter 'thin' or 'compressed' must be specified to use this parameter.
- The default buffer size is 2%.
- I(thin) or I(compressed) is required when using I(buffersize).
- Valid when I(state=present), to create a volume.
type: str
deduplicated:
description:
- Specifies that a deduplicated volume is to be created.
- Required when I(state=present), to create a deduplicated volume.
type: bool
volumegroup:
description:
- Specifies the name of the volumegroup to which the volume is to be added.
- Parameters 'volumegroup' and 'novolumegroup' are mutually exclusive.
- Valid when I(state=present), to create or modify a volume.
type: str
novolumegroup:
description:
- If specified `True`, the volume is removed from its associated volumegroup.
- Parameters 'novolumegroup' and 'volumegroup' are mutually exclusive.
- Valid when I(state=present), to modify a volume.
type: bool
old_name:
description:
- Specifies the old name of the volume during renaming.
- Valid when I(state=present), to rename an existing volume.
type: str
version_added: '1.9.0'
enable_cloud_snapshot:
description:
- Specify to enable or disable cloud snapshot.
- Valid when I(state=present), to modify an existing volume.
type: bool
version_added: '1.11.0'
cloud_account_name:
description:
- Specifies the name of the cloud account.
- Valid when I(enable_cloud_snapshot=true).
type: str
version_added: '1.11.0'
validate_certs:
description:
- Validates certification.
default: false
type: bool
log_path:
description:
- Path of debug log file.
type: str
author:
- Sreshtant Bohidar(@Sreshtant-Bohidar)
notes:
- This module supports C(check_mode).
'''
EXAMPLES = '''
- name: Create a volume
ibm.spectrum_virtualize.ibm_svc_manage_volume:
clustername: "{{ clustername }}"
domain: "{{domain}}"
username: "{{ username }}"
password: "{{ password }}"
log_path: "{{ log_path }}"
name: "volume_name"
state: "present"
pool: "pool_name"
size: "1"
unit: "gb"
iogrp: "io_grp0, io_grp1"
volumegroup: "test_volumegroup"
- name: Create a thin-provisioned volume
ibm.spectrum_virtualize.ibm_svc_manage_volume:
clustername: "{{ clustername }}"
domain: "{{ domain }}"
username: "{{ username }}"
password: "{{ password }}"
log_path: "{{ log_path }}"
name: "volume_name"
state: "present"
pool: "pool_name"
size: "1"
unit: "gb"
iogrp: "io_grp0, io_grp1"
thin: true
buffersize: 10%
- name: Create a compressed volume
ibm.spectrum_virtualize.ibm_svc_manage_volume:
clustername: "{{ clustername }}"
domain: "{{ domain }}"
username: "{{ username }}"
password: "{{ password }}"
log_path: "{{ log_path }}"
name: "volume_name"
state: "present"
pool: "pool_name"
size: "1"
unit: "gb"
iogrp: "io_grp0, io_grp1"
compressed: true
buffersize: 10%
- name: Creating a volume with iogrp- io_grp0
ibm.spectrum_virtualize.ibm_svc_manage_volume:
clustername: "{{ clustername }}"
domain: "{{ domain}}"
username: "{{ username }}"
password: "{{ password }}"
log_path: "{{ log_path }}"
name: "volume_name"
state: "present"
pool: "pool_name"
size: "1"
unit: "gb"
iogrp: "io_grp0"
- name: Adding a new iogrp- io_grp1
ibm.spectrum_virtualize.ibm_svc_manage_volume:
clustername: "{{ clustername }}"
domain: "{{ domain }}"
username: "{{ username }}"
password: "{{ password }}"
log_path: "{{ log_path }}"
name: "volume_name"
state: "present"
pool: "pool_name"
size: "1"
unit: "gb"
iogrp: "io_grp0, iogrp1"
- name: Rename an existing volume
ibm.spectrum_virtualize.ibm_svc_manage_volume:
clustername: "{{ clustername }}"
domain: "{{ domain }}"
username: "{{ username }}"
password: "{{ password }}"
old_name: "volume_name"
name: "new_volume_name"
state: "present"
- name: Enable cloud backup in an existing volume
ibm.spectrum_virtualize.ibm_svc_manage_volume:
clustername: "{{ clustername }}"
domain: "{{ domain }}"
username: "{{ username }}"
password: "{{ password }}"
name: "volume_name"
enable_cloud_snapshot: true
cloud_account_name: "aws_acc"
state: "present"
- name: Delete a volume
ibm.spectrum_virtualize.ibm_svc_manage_volume:
clustername: "{{ clustername }}"
domain: "{{ domain }}"
username: "{{ username }}"
password: "{{ password }}"
log_path: "{{ log_path }}"
name: "new_volume_name"
state: "absent"
'''
RETURN = '''#'''
from traceback import format_exc
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.ibm.spectrum_virtualize.plugins.module_utils.ibm_svc_utils import (
IBMSVCRestApi,
svc_argument_spec,
get_logger,
strtobool
)
from ansible.module_utils._text import to_native
class IBMSVCvolume(object):
def __init__(self):
argument_spec = svc_argument_spec()
argument_spec.update(
dict(
name=dict(type='str', required=True),
state=dict(type='str', required=True, choices=['absent', 'present']),
pool=dict(type='str', required=False),
size=dict(type='str', required=False),
unit=dict(type='str', default='mb', choices=['b', 'kb',
'mb', 'gb',
'tb', 'pb']),
buffersize=dict(type='str', required=False),
iogrp=dict(type='str', required=False),
volumegroup=dict(type='str', required=False),
novolumegroup=dict(type='bool', required=False),
thin=dict(type='bool', required=False),
compressed=dict(type='bool', required=False),
deduplicated=dict(type='bool', required=False),
old_name=dict(type='str', required=False),
enable_cloud_snapshot=dict(type='bool'),
cloud_account_name=dict(type='str')
)
)
self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
# logging setup
log_path = self.module.params['log_path']
log = get_logger(self.__class__.__name__, log_path)
self.log = log.info
# Required Parameters
self.name = self.module.params['name']
self.state = self.module.params['state']
# Optional Parameters
self.pool = self.module.params['pool']
self.size = self.module.params['size']
self.unit = self.module.params['unit']
self.iogrp = self.module.params['iogrp']
self.buffersize = self.module.params['buffersize']
self.volumegroup = self.module.params['volumegroup']
self.novolumegroup = self.module.params['novolumegroup']
self.thin = self.module.params['thin']
self.compressed = self.module.params['compressed']
self.deduplicated = self.module.params['deduplicated']
self.old_name = self.module.params['old_name']
self.enable_cloud_snapshot = self.module.params['enable_cloud_snapshot']
self.cloud_account_name = self.module.params['cloud_account_name']
# internal variable
self.changed = False
self.restapi = IBMSVCRestApi(
module=self.module,
clustername=self.module.params['clustername'],
domain=self.module.params['domain'],
username=self.module.params['username'],
password=self.module.params['password'],
validate_certs=self.module.params['validate_certs'],
log_path=log_path,
token=self.module.params['token']
)
# assemble iogrp
def assemble_iogrp(self):
if self.iogrp:
temp = []
invalid = []
active_iogrp = []
existing_iogrp = []
if self.iogrp:
existing_iogrp = [item.strip() for item in self.iogrp.split(',') if item]
uni_exi_iogrp = set(existing_iogrp)
if len(existing_iogrp) != len(uni_exi_iogrp):
self.module.fail_json(msg='Duplicate iogrp detected.')
active_iogrp = [item['name'] for item in self.restapi.svc_obj_info('lsiogrp', None, None) if int(item['node_count']) > 0]
for item in existing_iogrp:
item = item.strip()
if item not in active_iogrp:
invalid.append(item)
else:
temp.append(item)
if invalid:
self.module.fail_json(msg='Empty or non-existing iogrp detected: %s' % invalid)
self.iogrp = temp
# for validating mandatory parameters of the module
def mandatory_parameter_validation(self):
missing = [item[0] for item in [('name', self.name), ('state', self.state)] if not item[1]]
if missing:
self.module.fail_json(msg='Missing mandatory parameter: [{0}]'.format(', '.join(missing)))
if self.volumegroup and self.novolumegroup:
self.module.fail_json(msg='Mutually exclusive parameters detected: [volumegroup] and [novolumegroup]')
# for validating parameter while removing an existing volume
def volume_deletion_parameter_validation(self):
if self.old_name:
self.module.fail_json(msg='Parameter [old_name] is not supported during volume deletion.')
# for validating parameter while creating a volume
def volume_creation_parameter_validation(self):
if self.enable_cloud_snapshot in {True, False}:
self.module.fail_json(msg='Following parameter not applicable for creation: enable_cloud_snapshot')
if self.cloud_account_name:
self.module.fail_json(msg='Following parameter not applicable for creation: cloud_account_name')
if self.old_name:
self.module.fail_json(msg='Parameter [old_name] is not supported during volume creation.')
missing = [item[0] for item in [('pool', self.pool), ('size', self.size)] if not item[1]]
if missing:
self.module.fail_json(msg='Missing required parameter while creating: [{0}]'.format(', '.join(missing)))
# for validating parameter while renaming a volume
def parameter_handling_while_renaming(self):
if not self.old_name:
self.module.fail_json(msg="Parameter is required while renaming: old_name")
parameters = {
"pool": self.pool,
"size": self.size,
"iogrp": self.iogrp,
"buffersize": self.buffersize,
"volumegroup": self.volumegroup,
"novolumegroup": self.novolumegroup,
"thin": self.thin,
"compressed": self.compressed,
"deduplicated": self.deduplicated
}
parameters_exists = [parameter for parameter, value in parameters.items() if value]
if parameters_exists:
self.module.fail_json(msg="Parameters {0} not supported while renaming a volume.".format(parameters_exists))
# for validating if volume type is supported or not
def validate_volume_type(self, data):
unsupported_volume = False
if data[0]['type'] == "many":
unsupported_volume = True
if not unsupported_volume:
relationship_name = data[0]['RC_name']
if relationship_name:
rel_data = self.restapi.svc_obj_info(cmd='lsrcrelationship', cmdopts=None, cmdargs=[relationship_name])
if rel_data['copy_type'] == "activeactive":
unsupported_volume = True
if unsupported_volume:
self.module.fail_json(msg="The module cannot be used for managing Mirrored volume.")
# function to get existing volume data
def get_existing_volume(self, volume_name):
return self.restapi.svc_obj_info(
'lsvdisk', {'bytes': True}, [volume_name]
)
# function to get list of associated iogrp to a volume
def get_existing_iogrp(self):
response = []
data = self.restapi.svc_obj_info(
'lsvdiskaccess', None, [self.name]
)
if data:
for item in data:
response.append(item['IO_group_name'])
return response
# function to create a new volume
def create_volume(self):
self.volume_creation_parameter_validation()
if self.module.check_mode:
self.changed = True
return
cmd = 'mkvolume'
cmdopts = {}
if self.pool:
cmdopts['pool'] = self.pool
if self.size:
cmdopts['size'] = self.size
if self.unit:
cmdopts['unit'] = self.unit
if self.iogrp:
cmdopts['iogrp'] = self.iogrp[0]
if self.volumegroup:
cmdopts['volumegroup'] = self.volumegroup
if self.thin:
cmdopts['thin'] = self.thin
if self.compressed:
cmdopts['compressed'] = self.compressed
if self.deduplicated:
cmdopts['deduplicated'] = self.deduplicated
if self.buffersize:
cmdopts['buffersize'] = self.buffersize
if self.name:
cmdopts['name'] = self.name
result = self.restapi.svc_run_command(cmd, cmdopts, cmdargs=None)
if result and 'message' in result:
self.changed = True
self.log("create volume result message %s", result['message'])
else:
self.module.fail_json(
msg="Failed to create volume [%s]" % self.name)
# function to remove an existing volume
def remove_volume(self):
self.volume_deletion_parameter_validation()
if self.module.check_mode:
self.changed = True
return
self.restapi.svc_run_command(
'rmvolume', None, [self.name]
)
self.changed = True
# function that converts the given size in other units to bytes
def convert_to_bytes(self):
return int(self.size) * (1024 ** (['b', 'kb', 'mb', 'gb', 'tb', 'pb'].index((self.unit).lower())))
# function to probe an existing volume
def probe_volume(self, data):
props = {}
# check for changes in iogrp
if self.iogrp:
input_iogrp = set(self.iogrp)
existing_iogrp = set(self.get_existing_iogrp())
if input_iogrp ^ existing_iogrp:
iogrp_to_add = input_iogrp - existing_iogrp
iogrp_to_remove = existing_iogrp - input_iogrp
if iogrp_to_add:
props['iogrp'] = {
'add': list(iogrp_to_add)
}
if iogrp_to_remove:
props['iogrp'] = {
'remove': list(iogrp_to_remove)
}
# check for changes in volume size
if self.size:
input_size = self.convert_to_bytes()
existing_size = int(data[0]['capacity'])
if input_size != existing_size:
if input_size > existing_size:
props['size'] = {
'expand': input_size - existing_size
}
elif existing_size > input_size:
props['size'] = {
'shrink': existing_size - input_size
}
# check for changes in volumegroup
if self.volumegroup:
if self.volumegroup != data[0]['volume_group_name']:
props['volumegroup'] = {
'name': self.volumegroup
}
# check for presence of novolumegroup
if self.novolumegroup:
if data[0]['volume_group_name']:
props['novolumegroup'] = {
'status': True
}
# check for change in -thin parameter
if self.thin is True:
# a standard volume or a compressed volume
if (data[0]['capacity'] == data[1]['real_capacity']) or (data[1]['compressed_copy'] == 'yes'):
props['thin'] = {
'status': True
}
# check for change in -compressed parameter
if self.compressed is True:
# not a compressed volume
if data[1]['compressed_copy'] == 'no':
props['compressed'] = {
'status': True
}
# check for change in -deduplicated parameter
if self.deduplicated is True:
# not a deduplicated volume
if data[1]['deduplicated_copy'] == 'no':
props['deduplicated'] = {
'status': True
}
# check for change in pool
if self.pool:
if self.pool != data[0]['mdisk_grp_name']:
props['pool'] = {
'status': True
}
# Check for change in cloud backup
if self.enable_cloud_snapshot is True:
if not strtobool(data[0].get('cloud_backup_enabled')):
props['cloud_backup'] = {'status': True}
elif self.enable_cloud_snapshot is False:
if strtobool(data[0].get('cloud_backup_enabled')):
props['cloud_backup'] = {'status': True}
if self.cloud_account_name:
if self.cloud_account_name != data[0].get('cloud_account_name'):
props['cloud_backup'] = {'status': True}
return props
# function to expand an existing volume size
def expand_volume(self, expand_size):
self.restapi.svc_run_command(
'expandvdisksize',
{'size': expand_size, 'unit': 'b'},
[self.name]
)
self.changed = True
# function to shrink an existing volume size
def shrink_volume(self, shrink_size):
self.restapi.svc_run_command(
'shrinkvdisksize',
{'size': shrink_size, 'unit': 'b'},
[self.name]
)
self.changed = True
# add iogrp
def add_iogrp(self, list_of_iogrp):
self.restapi.svc_run_command(
'addvdiskaccess',
{'iogrp': ':'.join(list_of_iogrp)},
[self.name]
)
self.changed = True
# remove iogrp
def remove_iogrp(self, list_of_iogrp):
self.restapi.svc_run_command(
'rmvdiskaccess',
{'iogrp': ':'.join(list_of_iogrp)},
[self.name]
)
self.changed = True
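# function to enable or disable cloud snapshot on a volume using 'chvdisk'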
def update_cloud_backup(self):
cmdopts = {}
if self.enable_cloud_snapshot is True:
cmdopts['backup'] = 'cloud'
cmdopts['enable'] = True
if self.enable_cloud_snapshot is False:
cmdopts['backup'] = 'cloud'
cmdopts['disable'] = True
if self.cloud_account_name:
cmdopts['account'] = self.cloud_account_name
self.restapi.svc_run_command(
'chvdisk',
cmdopts,
[self.name]
)
self.changed = True
# function to update an existing volume
def update_volume(self, modify):
# raise error for unsupported parameter
unsupported_parameters = ['pool', 'thin', 'compressed', 'deduplicated']
unsupported_exists = []
for parameter in unsupported_parameters:
if parameter in modify:
unsupported_exists.append(parameter)
if unsupported_exists:
self.module.fail_json(msg='Update not supported for parameter: {0}'.format(unsupported_exists))
# when check_mode is enabled
if self.module.check_mode:
self.changed = True
return
# updating iogrps of a volume
if 'iogrp' in modify:
if 'add' in modify['iogrp']:
self.add_iogrp(modify['iogrp']['add'])
if 'remove' in modify['iogrp']:
self.remove_iogrp(modify['iogrp']['remove'])
# updating size of a volume
if 'size' in modify:
if 'expand' in modify['size']:
self.expand_volume(modify['size']['expand'])
elif 'shrink' in modify['size']:
self.shrink_volume(modify['size']['shrink'])
if 'cloud_backup' in modify:
self.update_cloud_backup()
# updating volumegroup, novolumegroup of a volume
cmdopts = {}
if 'volumegroup' in modify:
cmdopts['volumegroup'] = modify['volumegroup']['name']
if 'novolumegroup' in modify:
cmdopts['novolumegroup'] = modify['novolumegroup']['status']
if cmdopts:
self.restapi.svc_run_command(
'chvdisk',
cmdopts,
[self.name]
)
self.changed = True
# function for renaming an existing volume with a new name
def volume_rename(self, volume_data):
msg = None
self.parameter_handling_while_renaming()
old_volume_data = self.get_existing_volume(self.old_name)
if not old_volume_data and not volume_data:
self.module.fail_json(msg="Volume [{0}] does not exists.".format(self.old_name))
elif old_volume_data and volume_data:
self.module.fail_json(msg="Volume [{0}] already exists.".format(self.name))
elif not old_volume_data and volume_data:
msg = "Volume with name [{0}] already exists.".format(self.name)
elif old_volume_data and not volume_data:
# when check_mode is enabled
if self.module.check_mode:
self.changed = True
return
self.restapi.svc_run_command('chvdisk', {'name': self.name}, [self.old_name])
self.changed = True
msg = "Volume [{0}] has been successfully rename to [{1}]".format(self.old_name, self.name)
return msg
def apply(self):
changed, msg, modify = False, None, {}
self.mandatory_parameter_validation()
volume_data = self.get_existing_volume(self.name)
if self.state == "present" and self.old_name:
msg = self.volume_rename(volume_data)
elif self.state == "absent" and self.old_name:
self.module.fail_json(msg="Rename functionality is not supported when 'state' is absent.")
else:
if self.state == 'present':
self.assemble_iogrp()
if volume_data:
self.validate_volume_type(volume_data)
if self.state == 'absent':
changed = True
elif self.state == 'present':
modify = self.probe_volume(volume_data)
if modify:
changed = True
else:
if self.state == 'present':
changed = True
if changed:
if self.state == 'present':
if not volume_data:
self.create_volume()
if isinstance(self.iogrp, list):
if len(self.iogrp) > 1:
self.add_iogrp(self.iogrp[1:])
msg = 'volume [%s] has been created' % self.name
else:
if modify:
self.update_volume(modify)
msg = 'volume [%s] has been modified' % self.name
elif self.state == 'absent':
self.remove_volume()
msg = 'volume [%s] has been deleted.' % self.name
else:
if self.state == 'absent':
msg = "volume [%s] did not exist." % self.name
else:
msg = "volume [%s] already exists." % self.name
if self.module.check_mode:
msg = 'Skipping changes due to check mode.'
self.log('skipping changes due to check mode.')
self.module.exit_json(msg=msg, changed=self.changed)
def main():
v = IBMSVCvolume()
try:
v.apply()
except Exception as e:
v.log("Exception in apply(): \n%s", format_exc())
v.module.fail_json(msg="Module failed. Error [%s]." % to_native(e))
if __name__ == '__main__':
main()

View File

@@ -0,0 +1,690 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2021 IBM CORPORATION
# Author(s): Shilpi Jain <shilpi.jain1@ibm.com>
# Sanjaikumaar M <sanjaikumaar.m@ibm.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: ibm_svc_manage_volumegroup
short_description: This module manages volume groups on IBM Spectrum Virtualize family storage systems
version_added: "1.6.0"
description:
- Ansible interface to manage 'mkvolumegroup', 'chvolumegroup', and 'rmvolumegroup'
commands.
options:
name:
description:
- Specifies the name for the volume group.
required: true
type: str
state:
description:
- Creates or updates (C(present)) or removes (C(absent)) a volume group.
choices: [ absent, present ]
required: true
type: str
clustername:
description:
- The hostname or management IP of the Spectrum Virtualize storage system.
required: true
type: str
domain:
description:
- Domain for the Spectrum Virtualize storage system.
- Valid when hostname is used for the parameter I(clustername).
type: str
username:
description:
- REST API username for the Spectrum Virtualize storage system.
- The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
type: str
password:
description:
- REST API password for the Spectrum Virtualize storage system.
- The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
type: str
token:
description:
- The authentication token to verify a user on the Spectrum Virtualize storage system.
- To generate a token, use the M(ibm.spectrum_virtualize.ibm_svc_auth) module.
type: str
log_path:
description:
- Path of debug log file.
type: str
validate_certs:
description:
- Validates certification.
default: false
type: bool
ownershipgroup:
description:
- Specifies the name of the ownership group to which the object is being added.
- I(ownershipgroup) is mutually exclusive with parameters I(safeguardpolicyname) and I(noownershipgroup).
- Applies when I(state=present).
type: str
noownershipgroup:
description:
- If specified `True`, the object is removed from the ownership group to which it belongs.
- Parameters I(ownershipgroup) and I(noownershipgroup) are mutually exclusive.
- Applies when I(state=present) to modify an existing volume group.
type: bool
safeguardpolicyname:
description:
- The name of the Safeguarded policy to be assigned to the volume group.
- I(safeguardpolicyname) is mutually exclusive with parameters I(nosafeguardpolicy) and I(ownershipgroup).
- Applies when I(state=present).
type: str
nosafeguardpolicy:
description:
- If specified `True`, removes the Safeguarded policy assigned to the volume group.
- Parameters I(safeguardpolicyname) and I(nosafeguardpolicy) are mutually exclusive.
- Applies when I(state=present) to modify an existing volume group.
type: bool
snapshotpolicy:
description:
- The name of the snapshot policy to be assigned to the volume group.
- I(snapshotpolicy) is mutually exclusive with parameters I(nosnapshotpolicy) and I(ownershipgroup).
- Applies when I(state=present).
type: str
version_added: 1.9.0
nosnapshotpolicy:
description:
- If specified `True`, removes the snapshot policy assigned to the volume group.
- Parameters I(snapshotpolicy) and I(nosnapshotpolicy) are mutually exclusive.
- Applies when I(state=present) to modify an existing volume group.
type: bool
version_added: 1.9.0
snapshotpolicysuspended:
description:
- Specifies whether to suspend (C(yes)) or resume (C(no)) the snapshot policy on this volume group.
- Applies when I(state=present) to modify an existing volume group.
choices: [ 'yes', 'no' ]
type: str
version_added: 1.9.0
policystarttime:
description:
- Specifies the time when the first Safeguarded backup is to be taken.
- This parameter can also be associated with snapshot policy.
- Either I(snapshotpolicy) or I(safeguardpolicyname) is required when using I(policystarttime).
- The accepted format is YYMMDDHHMM.
- Applies when I(state=present).
type: str
type:
description:
- Specifies the type of volume group to be created from the snapshot.
- Valid during creation of host accessible volume group from an existing snapshot.
choices: [ clone, thinclone ]
type: str
version_added: 1.9.0
snapshot:
description:
- Specifies the name of the snapshot used to prepopulate the new volumes in the new volume group.
- Required when creating a host accessible volume group from an existing snapshot.
type: str
version_added: 1.9.0
fromsourcegroup:
description:
- Specifies the parent volume group of the snapshot. This is used to prepopulate the new volume in the
new volume group.
- Valid during creation of host accessible volume group from an existing snapshot.
type: str
version_added: 1.9.0
pool:
description:
- Specifies the pool name where the target volumes are to be created.
- Valid during creation of host accessible volume group from an existing snapshot.
type: str
version_added: 1.9.0
iogrp:
description:
- Specifies the I/O group for new volumes.
- Valid during creation of host accessible volume group from an existing snapshot.
type: str
version_added: 1.9.0
safeguarded:
description:
- If specified, the snapshot policy creates safeguarded snapshots.
- Should be specified along with I(snapshotpolicy).
- Valid during creation and update of a volume group.
- Supported from Spectrum Virtualize family storage systems 8.5.2.0 or later.
default: false
type: bool
version_added: 1.10.0
ignoreuserfcmaps:
description:
- Allows the user to create snapshots through the scheduler or manually with `addsnapshot`.
This can only be used if a volume in the volume group is used as a source of a user legacy
FlashCopy mapping.
- Valid during creation and update of a volume group.
- Supported from Spectrum Virtualize family storage systems 8.5.2.0 or later.
choices: [ 'yes', 'no' ]
type: str
version_added: 1.10.0
replicationpolicy:
description:
- Specifies the name of the replication policy to be assigned to the volume group.
- Applies when I(state=present).
- Supported from Spectrum Virtualize family storage systems 8.5.2.1 or later.
type: str
version_added: 1.10.0
noreplicationpolicy:
description:
- If specified `True`, removes the replication policy assigned to the volume group.
- Parameters I(replicationpolicy) and I(noreplicationpolicy) are mutually exclusive.
- Applies when I(state=present) to modify an existing volume group.
- Supported from Spectrum Virtualize family storage systems 8.5.2.1 or later.
type: bool
version_added: 1.10.0
author:
- Shilpi Jain(@Shilpi-J)
- Sanjaikumaar M (@sanjaikumaar)
notes:
- This module supports C(check_mode).
- Safeguarded policy and snapshot policy cannot be used at the same time.
Therefore, the parameters I(snapshotpolicy) and I(safeguardpolicyname) are mutually exclusive.
'''
EXAMPLES = '''
- name: Create a new volume group
ibm.spectrum_virtualize.ibm_svc_manage_volumegroup:
clustername: "{{ clustername }}"
domain: "{{ domain }}"
username: "{{ username }}"
password: "{{ password }}"
log_path: /tmp/playbook.debug
name: vg0
state: present
- name: Delete a volume group
ibm.spectrum_virtualize.ibm_svc_manage_volumegroup:
clustername: "{{ clustername }}"
domain: "{{ domain }}"
username: "{{ username }}"
password: "{{ password }}"
log_path: /tmp/playbook.debug
name: vg0
state: absent
- name: Update existing volume group to remove ownershipgroup and attach a safeguardpolicy to it
ibm.spectrum_virtualize.ibm_svc_manage_volumegroup:
clustername: "{{ clustername }}"
domain: "{{ domain }}"
username: "{{ username }}"
password: "{{ password }}"
log_path: /tmp/playbook.debug
name: vg0
state: present
noownershipgroup: True
safeguardpolicyname: sg1
- name: Update volumegroup with snapshot policy and remove safeguarded policy
ibm.spectrum_virtualize.ibm_svc_manage_volumegroup:
clustername: "{{ clustername }}"
domain: "{{ domain }}"
username: "{{ username }}"
password: "{{ password }}"
log_path: /tmp/playbook.debug
name: vg0
nosafeguardpolicy: true
snapshotpolicy: sp1
state: present
- name: Update volumegroup with safeguarded snapshot policy and ignoreuserfcmaps
ibm.spectrum_virtualize.ibm_svc_manage_volumegroup:
clustername: "{{ clustername }}"
domain: "{{ domain }}"
username: "{{ username }}"
password: "{{ password }}"
log_path: /tmp/playbook.debug
name: vg0
safeguarded: true
snapshotpolicy: sp1
ignoreuserfcmaps: yes
state: present
- name: Suspend snapshot policy in an existing volume group
ibm.spectrum_virtualize.ibm_svc_manage_volumegroup:
clustername: "{{ clustername }}"
domain: "{{ domain }}"
username: "{{ username }}"
password: "{{ password }}"
log_path: /tmp/playbook.debug
name: vg0
snapshotpolicysuspended: 'yes'
state: present
- name: Create host accessible volume group from an existing snapshot
ibm.spectrum_virtualize.ibm_svc_manage_volumegroup:
clustername: "{{ clustername }}"
domain: "{{ domain }}"
username: "{{ username }}"
password: "{{ password }}"
log_path: /tmp/playbook.debug
name: host_accessible_vg
type: clone
snapshot: snapshot0
fromsourcegroup: vg0
pool: Pool0
state: present
'''
RETURN = '''#'''
from traceback import format_exc
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.ibm.spectrum_virtualize.plugins.module_utils.ibm_svc_utils import \
IBMSVCRestApi, svc_argument_spec, get_logger, strtobool
from ansible.module_utils._text import to_native
class IBMSVCVG(object):
def __init__(self):
argument_spec = svc_argument_spec()
argument_spec.update(
dict(
name=dict(type='str', required=True),
state=dict(type='str', required=True, choices=['absent',
'present']),
ownershipgroup=dict(type='str', required=False),
noownershipgroup=dict(type='bool', required=False),
safeguardpolicyname=dict(type='str', required=False),
nosafeguardpolicy=dict(type='bool', required=False),
policystarttime=dict(type='str', required=False),
snapshotpolicy=dict(type='str', required=False),
nosnapshotpolicy=dict(type='bool', required=False),
snapshotpolicysuspended=dict(type='str', choices=['yes', 'no']),
type=dict(type='str', choices=['clone', 'thinclone']),
snapshot=dict(type='str'),
fromsourcegroup=dict(type='str'),
pool=dict(type='str'),
iogrp=dict(type='str'),
safeguarded=dict(type='bool', default=False),
ignoreuserfcmaps=dict(type='str', choices=['yes', 'no']),
replicationpolicy=dict(type='str'),
noreplicationpolicy=dict(type='bool')
)
)
self.module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
# logging setup
log_path = self.module.params['log_path']
log = get_logger(self.__class__.__name__, log_path)
self.log = log.info
# Required
self.name = self.module.params['name']
self.state = self.module.params['state']
# Optional
self.ownershipgroup = self.module.params.get('ownershipgroup', '')
self.noownershipgroup = self.module.params.get('noownershipgroup', False)
self.policystarttime = self.module.params.get('policystarttime', '')
self.snapshotpolicy = self.module.params.get('snapshotpolicy', '')
self.nosnapshotpolicy = self.module.params.get('nosnapshotpolicy', False)
self.snapshotpolicysuspended = self.module.params.get('snapshotpolicysuspended', '')
self.type = self.module.params.get('type', '')
self.snapshot = self.module.params.get('snapshot', '')
self.fromsourcegroup = self.module.params.get('fromsourcegroup', '')
self.pool = self.module.params.get('pool', '')
self.iogrp = self.module.params.get('iogrp', '')
self.safeguardpolicyname = self.module.params.get('safeguardpolicyname', '')
self.nosafeguardpolicy = self.module.params.get('nosafeguardpolicy', False)
self.safeguarded = self.module.params.get('safeguarded', False)
self.ignoreuserfcmaps = self.module.params.get('ignoreuserfcmaps', '')
self.replicationpolicy = self.module.params.get('replicationpolicy', '')
self.noreplicationpolicy = self.module.params.get('noreplicationpolicy', False)
# Dynamic variable
self.parentuid = None
self.changed = False
self.msg = ''
self.basic_checks()
self.restapi = IBMSVCRestApi(
module=self.module,
clustername=self.module.params['clustername'],
domain=self.module.params['domain'],
username=self.module.params['username'],
password=self.module.params['password'],
validate_certs=self.module.params['validate_certs'],
log_path=log_path,
token=self.module.params['token']
)
def basic_checks(self):
if not self.name:
self.module.fail_json(msg='Missing mandatory parameter: name')
if self.state == 'present':
if self.policystarttime:
if not self.snapshotpolicy and not self.safeguardpolicyname:
self.module.fail_json(
msg='Either `snapshotpolicy` or `safeguardpolicyname` should be passed along with `policystarttime`.'
)
if self.safeguarded:
if not self.snapshotpolicy:
self.module.fail_json(
msg='Parameter `safeguarded` should be passed along with `snapshotpolicy`'
)
else:
unwanted = ('ownershipgroup', 'noownershipgroup', 'safeguardpolicyname',
'nosafeguardpolicy', 'snapshotpolicy', 'nosnapshotpolicy',
'policystarttime', 'type', 'fromsourcegroup', 'pool', 'iogrp',
'safeguarded', 'ignoreuserfcmaps', 'replicationpolicy',
'noreplicationpolicy')
param_exists = ', '.join((param for param in unwanted if getattr(self, param)))
if param_exists:
self.module.fail_json(
msg='State=absent but the following parameters exist: {0}'.format(param_exists)
)
def create_validation(self):
mutually_exclusive = (
('ownershipgroup', 'safeguardpolicyname'),
('ownershipgroup', 'snapshotpolicy'),
('ownershipgroup', 'policystarttime'),
('snapshotpolicy', 'safeguardpolicyname'),
('replicationpolicy', 'noreplicationpolicy')
)
for param1, param2 in mutually_exclusive:
if getattr(self, param1) and getattr(self, param2):
self.module.fail_json(
msg='Mutually exclusive parameters: {0}, {1}'.format(param1, param2)
)
unsupported = ('nosafeguardpolicy', 'noownershipgroup', 'nosnapshotpolicy',
'snapshotpolicysuspended', 'noreplicationpolicy')
unsupported_exists = ', '.join((field for field in unsupported if getattr(self, field)))
if unsupported_exists:
self.module.fail_json(
msg='The following parameters are not supported during creation: {0}'.format(unsupported_exists)
)
if self.type and not self.snapshot:
self.module.fail_json(
msg='type={0} but following parameter is missing: snapshot'.format(self.type)
)
def update_validation(self, data):
mutually_exclusive = (
('ownershipgroup', 'noownershipgroup'),
('safeguardpolicyname', 'nosafeguardpolicy'),
('ownershipgroup', 'safeguardpolicyname'),
('ownershipgroup', 'snapshotpolicy'),
('ownershipgroup', 'policystarttime'),
('nosafeguardpolicy', 'nosnapshotpolicy'),
('snapshotpolicy', 'nosnapshotpolicy'),
('snapshotpolicy', 'safeguardpolicyname'),
('replicationpolicy', 'noreplicationpolicy')
)
for param1, param2 in mutually_exclusive:
if getattr(self, param1) and getattr(self, param2):
self.module.fail_json(
msg='Mutually exclusive parameters: {0}, {1}'.format(param1, param2)
)
unsupported_maps = (
('type', data.get('volume_group_type', '')),
('snapshot', data.get('source_snapshot', '')),
('fromsourcegroup', data.get('source_volume_group_name', ''))
)
unsupported = (
fields[0] for fields in unsupported_maps if getattr(self, fields[0]) and getattr(self, fields[0]) != fields[1]
)
unsupported_exists = ', '.join(unsupported)
if unsupported_exists:
self.module.fail_json(
msg='The following parameters are not supported during update: {0}'.format(unsupported_exists)
)
def get_existing_vg(self):
merged_result = {}
data = self.restapi.svc_obj_info(cmd='lsvolumegroup', cmdopts=None,
cmdargs=['-gui', self.name])
if isinstance(data, list):
for d in data:
merged_result.update(d)
else:
merged_result = data
if merged_result and ((self.snapshotpolicy and self.policystarttime) or self.snapshotpolicysuspended):
# Make a separate call because snapshot_policy_start_time is not available in lsvolumegroup output
SP_data = self.restapi.svc_obj_info(
cmd='lsvolumegroupsnapshotpolicy',
cmdopts=None,
cmdargs=[self.name]
)
merged_result['snapshot_policy_start_time'] = SP_data['snapshot_policy_start_time']
merged_result['snapshot_policy_suspended'] = SP_data['snapshot_policy_suspended']
return merged_result
def set_parentuid(self):
if self.snapshot and not self.fromsourcegroup:
cmdopts = {
"filtervalue": "snapshot_name={0}".format(self.snapshot)
}
data = self.restapi.svc_obj_info(
cmd='lsvolumesnapshot',
cmdopts=cmdopts,
cmdargs=None
)
try:
result = next(
filter(
lambda obj: obj['volume_group_name'] == '',
data
)
)
except StopIteration:
self.module.fail_json(
msg='Orphan snapshot ({0}) does not exist for the given name.'.format(self.snapshot)
)
else:
self.parentuid = result['parent_uid']
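# set_parentuid() resolves the parent UID only when a snapshot is given without
# fromsourcegroup: lsvolumesnapshot is filtered by snapshot_name, the entry with
# an empty volume_group_name (an orphan snapshot) supplies parent_uid, and
# vg_create() then passes it as 'fromsourceuid'.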
def vg_probe(self, data):
self.update_validation(data)
# Mapping the parameters with the existing data for comparison
params_mapping = (
('ownershipgroup', data.get('owner_name', '')),
('ignoreuserfcmaps', data.get('ignore_user_flash_copy_maps', '')),
('replicationpolicy', data.get('replication_policy_name', '')),
('noownershipgroup', not bool(data.get('owner_name', ''))),
('nosafeguardpolicy', not bool(data.get('safeguarded_policy_name', ''))),
('nosnapshotpolicy', not bool(data.get('snapshot_policy_name', ''))),
('noreplicationpolicy', not bool(data.get('replication_policy_name', '')))
)
props = dict((k, getattr(self, k)) for k, v in params_mapping if getattr(self, k) and getattr(self, k) != v)
if self.safeguardpolicyname and self.safeguardpolicyname != data.get('safeguarded_policy_name', ''):
props['safeguardedpolicy'] = self.safeguardpolicyname
# If the policy is changed, the existing policystarttime is erased, so set the time without any comparison
if self.policystarttime:
props['policystarttime'] = self.policystarttime
elif self.safeguardpolicyname:
if self.policystarttime and self.policystarttime + '00' != data.get('safeguarded_policy_start_time', ''):
props['safeguardedpolicy'] = self.safeguardpolicyname
props['policystarttime'] = self.policystarttime
elif self.snapshotpolicy and self.snapshotpolicy != data.get('snapshot_policy_name', ''):
props['snapshotpolicy'] = self.snapshotpolicy
props['safeguarded'] = self.safeguarded
if self.policystarttime:
props['policystarttime'] = self.policystarttime
elif self.snapshotpolicy:
if self.policystarttime and self.policystarttime + '00' != data.get('snapshot_policy_start_time', ''):
props['snapshotpolicy'] = self.snapshotpolicy
props['policystarttime'] = self.policystarttime
if self.safeguarded not in ('', None) and self.safeguarded != strtobool(data.get('snapshot_policy_safeguarded', 0)):
props['snapshotpolicy'] = self.snapshotpolicy
props['safeguarded'] = self.safeguarded
# Adding snapshotpolicysuspended to props
if self.snapshotpolicysuspended and self.snapshotpolicysuspended != data.get('snapshot_policy_suspended', ''):
props['snapshotpolicysuspended'] = self.snapshotpolicysuspended
self.log("volumegroup props = %s", props)
return props
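# Note on the policystarttime comparisons above: the module accepts YYMMDDHHMM
# while the system appears to report start times with seconds appended
# (YYMMDDHHMMSS), hence the "+ '00'" before comparing; e.g. a hypothetical
# '2310150900' is matched against '231015090000'.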
def vg_create(self):
self.create_validation()
if self.module.check_mode:
self.changed = True
return
self.log("creating volume group '%s'", self.name)
# Make command
cmd = 'mkvolumegroup'
cmdopts = {
'name': self.name,
'safeguarded': self.safeguarded
}
if self.type:
optional_params = ('type', 'snapshot', 'pool')
cmdopts.update(
dict(
(param, getattr(self, param)) for param in optional_params if getattr(self, param)
)
)
if self.iogrp:
cmdopts['iogroup'] = self.iogrp
self.set_parentuid()
if self.parentuid:
cmdopts['fromsourceuid'] = self.parentuid
else:
cmdopts['fromsourcegroup'] = self.fromsourcegroup
if self.ignoreuserfcmaps:
if self.ignoreuserfcmaps == 'yes':
cmdopts['ignoreuserfcmaps'] = True
else:
cmdopts['ignoreuserfcmaps'] = False
if self.replicationpolicy:
cmdopts['replicationpolicy'] = self.replicationpolicy
if self.ownershipgroup:
cmdopts['ownershipgroup'] = self.ownershipgroup
elif self.safeguardpolicyname:
cmdopts['safeguardedpolicy'] = self.safeguardpolicyname
if self.policystarttime:
cmdopts['policystarttime'] = self.policystarttime
elif self.snapshotpolicy:
cmdopts['snapshotpolicy'] = self.snapshotpolicy
if self.policystarttime:
cmdopts['policystarttime'] = self.policystarttime
self.log("creating volumegroup '%s'", cmdopts)
# Run command
result = self.restapi.svc_run_command(cmd, cmdopts, cmdargs=None)
self.log("create volume group result %s", result)
# Any error would have been raised in svc_run_command
self.changed = True
def vg_update(self, modify):
if self.module.check_mode:
self.changed = True
return
# update the volume group
self.log("updating volume group '%s' ", self.name)
cmdargs = [self.name]
try:
del modify['snapshotpolicysuspended']
except KeyError:
self.log("snapshotpolicysuspended modification not reqiured!!")
else:
cmd = 'chvolumegroupsnapshotpolicy'
cmdopts = {'snapshotpolicysuspended': self.snapshotpolicysuspended}
self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
cmd = 'chvolumegroup'
unmaps = ('noownershipgroup', 'nosafeguardpolicy', 'nosnapshotpolicy', 'noreplicationpolicy')
for field in unmaps:
cmdopts = {}
if field == 'nosafeguardpolicy' and field in modify:
cmdopts['nosafeguardedpolicy'] = modify.pop('nosafeguardpolicy')
self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
elif field in modify:
cmdopts[field] = modify.pop(field)
self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
if modify:
cmdopts = modify
self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
# Any error would have been raised in svc_run_command
self.changed = True
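# vg_update() applies the changes in stages: snapshotpolicysuspended (if any)
# goes through chvolumegroupsnapshotpolicy, each unmap flag (noownershipgroup,
# nosafeguardpolicy -> nosafeguardedpolicy, nosnapshotpolicy, noreplicationpolicy)
# gets its own chvolumegroup call, and the remaining attributes are sent in one
# final chvolumegroup call.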
def vg_delete(self):
if self.module.check_mode:
self.changed = True
return
self.log("deleting volume group '%s'", self.name)
cmd = 'rmvolumegroup'
cmdopts = None
cmdargs = [self.name]
self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
# Any error will have been raised in svc_run_command
self.changed = True
def apply(self):
vg_data = self.get_existing_vg()
if vg_data:
if self.state == 'present':
modify = self.vg_probe(vg_data)
if modify:
self.vg_update(modify)
self.msg = "volume group [%s] has been modified." % self.name
else:
self.msg = "No Modifications detected, Volume group already exists."
else:
self.vg_delete()
self.msg = "volume group [%s] has been deleted." % self.name
else:
if self.state == 'absent':
self.msg = "Volume group [%s] does not exist." % self.name
else:
self.vg_create()
self.msg = "volume group [%s] has been created." % self.name
if self.module.check_mode:
self.msg = 'skipping changes due to check mode.'
self.module.exit_json(msg=self.msg, changed=self.changed)
def main():
v = IBMSVCVG()
try:
v.apply()
except Exception as e:
v.log("Exception in apply(): \n%s", format_exc())
v.module.fail_json(msg="Module failed. Error [%s]." % to_native(e))
if __name__ == '__main__':
main()

View File

@@ -0,0 +1,328 @@
#!/usr/bin/python
# Copyright (C) 2020 IBM CORPORATION
# Author(s): Peng Wang <wangpww@cn.ibm.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: ibm_svc_mdisk
short_description: This module manages MDisks on IBM Spectrum Virtualize family storage systems
description:
- Ansible interface to manage 'mkarray' and 'rmmdisk' MDisk commands.
version_added: "1.0.0"
options:
name:
description:
- The MDisk name.
required: true
type: str
state:
description:
- Creates (C(present)) or removes (C(absent)) the MDisk.
choices: [ absent, present ]
required: true
type: str
clustername:
description:
- The hostname or management IP of the Spectrum Virtualize storage system.
type: str
required: true
domain:
description:
- Domain for the Spectrum Virtualize storage system.
- Valid when hostname is used for the parameter I(clustername).
type: str
username:
description:
- REST API username for the Spectrum Virtualize storage system.
- The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
type: str
password:
description:
- REST API password for the Spectrum Virtualize storage system.
- The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
type: str
token:
description:
- The authentication token to verify a user on the Spectrum Virtualize storage system.
- To generate a token, use the ibm_svc_auth module.
type: str
version_added: '1.5.0'
drive:
description:
- Drive(s) to use as members of the RAID array.
- Required when I(state=present), to create an MDisk array.
type: str
mdiskgrp:
description:
- The storage pool (mdiskgrp) to which you want to add the MDisk.
type: str
required: true
log_path:
description:
- Path of debug log file.
type: str
validate_certs:
description:
- Validates certification.
default: false
type: bool
level:
description:
- Specifies the RAID level.
- Required when I(state=present), to create an MDisk array.
type: str
choices: ['raid0', 'raid1', 'raid5', 'raid6', 'raid10']
encrypt:
description:
- Defines use of encryption with the MDisk group.
- Applies when I(state=present).
type: str
default: 'no'
choices: ['yes', 'no']
author:
- Peng Wang(@wangpww)
notes:
- This module supports C(check_mode).
'''
EXAMPLES = '''
- name: Create MDisk and name as mdisk20
ibm.spectrum_virtualize.ibm_svc_mdisk:
clustername: "{{clustername}}"
domain: "{{domain}}"
username: "{{username}}"
password: "{{password}}"
name: mdisk20
state: present
level: raid0
drive: '5:6'
encrypt: no
mdiskgrp: pool20
- name: Delete MDisk named mdisk20
ibm.spectrum_virtualize.ibm_svc_mdisk:
clustername: "{{clustername}}"
domain: "{{domain}}"
username: "{{username}}"
password: "{{password}}"
name: mdisk20
state: absent
mdiskgrp: pool20
'''
RETURN = '''#'''
from traceback import format_exc
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible_collections.ibm.spectrum_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi, svc_argument_spec, get_logger
class IBMSVCmdisk(object):
def __init__(self):
argument_spec = svc_argument_spec()
argument_spec.update(
dict(
name=dict(type='str', required=True),
state=dict(type='str', required=True, choices=['absent',
'present']),
level=dict(type='str', choices=['raid0', 'raid1', 'raid5',
'raid6', 'raid10']),
drive=dict(type='str', default=None),
encrypt=dict(type='str', default='no', choices=['yes', 'no']),
mdiskgrp=dict(type='str', required=True)
)
)
mutually_exclusive = []
self.module = AnsibleModule(argument_spec=argument_spec,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True)
# logging setup
log_path = self.module.params['log_path']
log = get_logger(self.__class__.__name__, log_path)
self.log = log.info
# Required
self.name = self.module.params['name']
self.state = self.module.params['state']
# Optional
self.level = self.module.params.get('level', None)
self.drive = self.module.params.get('drive', None)
self.encrypt = self.module.params.get('encrypt', None)
self.mdiskgrp = self.module.params.get('mdiskgrp', None)
# Handling missing mandatory parameters name
if not self.name:
self.module.fail_json(msg='Missing mandatory parameter: name')
self.restapi = IBMSVCRestApi(
module=self.module,
clustername=self.module.params['clustername'],
domain=self.module.params['domain'],
username=self.module.params['username'],
password=self.module.params['password'],
validate_certs=self.module.params['validate_certs'],
log_path=log_path,
token=self.module.params['token']
)
def mdisk_exists(self):
return self.restapi.svc_obj_info(cmd='lsmdisk', cmdopts=None,
cmdargs=[self.name])
def mdisk_create(self):
# For now we create mdisk through mkarray which needs these options
# level, drive, mdiskgrp
if not self.level:
self.module.fail_json(msg="You must pass in level to the module.")
if not self.drive:
self.module.fail_json(msg="You must pass in drive to the module.")
if not self.mdiskgrp:
self.module.fail_json(msg="You must pass in "
"mdiskgrp to the module.")
if self.module.check_mode:
self.changed = True
return
self.log("creating mdisk '%s'", self.name)
# Make command
cmd = 'mkarray'
cmdopts = {}
if self.level:
cmdopts['level'] = self.level
if self.drive:
cmdopts['drive'] = self.drive
if self.encrypt:
cmdopts['encrypt'] = self.encrypt
cmdopts['name'] = self.name
cmdargs = [self.mdiskgrp]
self.log("creating mdisk command=%s opts=%s args=%s",
cmd, cmdopts, cmdargs)
# Run command
result = self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
self.log("create mdisk result %s", result)
if 'message' in result:
self.changed = True
self.log("create mdisk result message %s", result['message'])
else:
self.module.fail_json(
msg="Failed to create mdisk [%s]" % self.name)
def mdisk_delete(self):
if self.module.check_mode:
self.changed = True
return
self.log("deleting mdisk '%s'", self.name)
cmd = 'rmmdisk'
cmdopts = {}
cmdopts['mdisk'] = self.name
cmdargs = [self.mdiskgrp]
self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
# Any error will have been raised in svc_run_command
# rmmdisk does not output anything when successful.
self.changed = True
def mdisk_update(self, modify):
# update the mdisk
self.log("updating mdisk '%s'", self.name)
# cmd = 'chmdisk'
# cmdopts = {}
# chmdisk does not like mdisk arrays.
# cmdargs = [self.name]
# TBD: Implement changed logic.
# result = self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
# Any error will have been raised in svc_run_command
# chmdisk does not output anything when successful.
self.changed = True
# TBD: Implement a more generic way to check for properties to modify.
def mdisk_probe(self, data):
props = []
if self.encrypt:
if self.encrypt != data['encrypt']:
props += ['encrypt']
if not props:
props = None
self.log("mdisk_probe props='%s'", props)
return props
def apply(self):
changed = False
msg = None
modify = []
mdisk_data = self.mdisk_exists()
if mdisk_data:
if self.state == 'absent':
self.log("CHANGED: mdisk exists, but "
"requested state is 'absent'")
changed = True
elif self.state == 'present':
# This is where we detect if chmdisk should be called.
modify = self.mdisk_probe(mdisk_data)
if modify:
changed = True
else:
if self.state == 'present':
self.log("CHANGED: mdisk does not exist, "
"but requested state is 'present'")
changed = True
if changed:
if self.state == 'present':
if not mdisk_data:
self.mdisk_create()
msg = "Mdisk [%s] has been created." % self.name
else:
# This is where we would modify
self.mdisk_update(modify)
msg = "Mdisk [%s] has been modified." % self.name
elif self.state == 'absent':
self.mdisk_delete()
msg = "Volume [%s] has been deleted." % self.name
if self.module.check_mode:
msg = 'skipping changes due to check mode'
else:
self.log("exiting with no changes")
if self.state == 'absent':
msg = "Mdisk [%s] did not exist." % self.name
else:
msg = "Mdisk [%s] already exists." % self.name
self.module.exit_json(msg=msg, changed=changed)
def main():
v = IBMSVCmdisk()
try:
v.apply()
except Exception as e:
v.log("Exception in apply(): \n%s", format_exc())
v.module.fail_json(msg="Module failed. Error [%s]." % to_native(e))
if __name__ == '__main__':
main()

View File

@@ -0,0 +1,532 @@
#!/usr/bin/python
# Copyright (C) 2020 IBM CORPORATION
# Author(s): Peng Wang <wangpww@cn.ibm.com>
# Sanjaikumaar M <sanjaikumaar.m@ibm.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: ibm_svc_mdiskgrp
short_description: This module manages pools on IBM Spectrum Virtualize family storage systems
description:
- Ansible interface to manage 'mkmdiskgrp' and 'rmmdiskgrp' pool commands.
version_added: "1.0.0"
options:
name:
description:
- Specifies the name to assign to the new pool.
required: true
type: str
state:
description:
- Creates (C(present)) or removes (C(absent)) an MDisk group.
choices: [ absent, present ]
required: true
type: str
clustername:
description:
- The hostname or management IP of the Spectrum Virtualize storage system.
type: str
required: true
domain:
description:
- Domain for the Spectrum Virtualize storage system.
- Valid when hostname is used for the parameter I(clustername).
type: str
username:
description:
- REST API username for the Spectrum Virtualize storage system.
- The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
type: str
password:
description:
- REST API password for the Spectrum Virtualize storage system.
- The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
type: str
token:
description:
- The authentication token to verify a user on the Spectrum Virtualize storage system.
- To generate a token, use the M(ibm.spectrum_virtualize.ibm_svc_auth) module.
type: str
version_added: '1.5.0'
datareduction:
description:
- Defines use of data reduction pools (DRPs) on the MDisk group.
- Applies when I(state=present), to create a pool.
type: str
default: 'no'
choices: ['yes', 'no']
easytier:
description:
- Defines use of easytier with the MDisk group.
- Applies when I(state=present), to create a pool.
type: str
default: 'off'
choices: ['on', 'off', 'auto']
encrypt:
description:
- Defines use of encryption with the MDisk group.
- Applies when I(state=present), to create a pool.
type: str
default: 'no'
choices: ['yes', 'no']
ext:
description:
- Specifies the size of the extents for this group in MB.
- Applies when I(state=present), to create a pool.
type: int
log_path:
description:
- Path of debug log file.
type: str
validate_certs:
description:
- Validates certification.
default: false
type: bool
parentmdiskgrp:
description:
- Specifies the parent pool (parentmdiskgrp) when creating a child pool (subpool).
- Applies when I(state=present), to create a pool.
type: str
safeguarded:
description:
- Specify to create a safeguarded child pool.
- Applicable only during child pool creation.
type: bool
version_added: 1.8.0
noquota:
description:
- Specify to create a data reduction child pool.
- I(noquota) and I(size) parameters are mutually exclusive.
- I(noquota) parameter must be used with I(datareduction) set to yes to create a data reduction child pool.
- I(noquota) parameter must be used with I(parentmdiskgrp) in a parent data reduction storage pool.
type: bool
version_added: 1.8.0
unit:
description:
- Specifies the data unit to be used with the I(size) parameter when creating a child pool.
- Applies when I(state=present), to create a pool.
type: str
provisioningpolicy:
description:
- Specify the name of the provisioning policy to map it with the pool.
- Applies, when I(state=present).
type: str
version_added: 1.10.0
noprovisioningpolicy:
description:
- Specify to unmap provisioning policy from the pool.
- Applies, when I(state=present) to modify an existing pool.
type: bool
version_added: 1.10.0
replicationpoollinkuid:
description:
- Specifies the replication pool unique identifier, which should be the same as that of the pool present on the replication partner system.
- Applies, when I(state=present).
- Supported in SV build 8.5.2.1 or later.
type: str
version_added: 1.10.0
resetreplicationpoollinkuid:
description:
- If set, any links between this pool on the local system and pools on remote systems are removed.
- Applies, when I(state=present) to modify an existing pool.
- Supported in SV build 8.5.2.1 or later.
type: bool
version_added: 1.10.0
replication_partner_clusterid:
description:
- Specifies the ID or name of the partner cluster to be used for replication.
- Applies, when I(state=present).
- Supported in SV build 8.5.2.1 or later.
type: str
version_added: 1.10.0
size:
description:
- Specifies the child pool capacity. The value must be
a numeric value (and an integer multiple of the extent size).
- Applies when I(state=present), to create a pool.
type: int
author:
- Peng Wang(@wangpww)
- Sanjaikumaar M (@sanjaikumaar)
notes:
- This module supports C(check_mode).
'''
EXAMPLES = '''
- name: Create mdisk group
ibm.spectrum_virtualize.ibm_svc_mdiskgrp:
clustername: "{{clustername}}"
domain: "{{domain}}"
username: "{{username}}"
password: "{{password}}"
name: pool1
provisioningpolicy: pp0
replicationpoollinkuid: 000000000000000
replication_partner_clusterid: 000000000032432342
state: present
datareduction: no
easytier: auto
encrypt: no
ext: 1024
- name: Create a safeguarded backup location
ibm.spectrum_virtualize.ibm_svc_mdiskgrp:
clustername: "{{clustername}}"
token: "{{results.token}}"
log_path: "{{log_path}}"
parentmdiskgrp: Pool1
name: Pool1child1
datareduction: 'yes'
safeguarded: True
ext: 1024
noquota: True
state: present
- name: Delete mdisk group
ibm.spectrum_virtualize.ibm_svc_mdiskgrp:
clustername: "{{clustername}}"
domain: "{{domain}}"
username: "{{username}}"
password: "{{password}}"
name: pool1
state: absent
- name: Delete a safeguarded backup location
ibm.spectrum_virtualize.ibm_svc_mdiskgrp:
clustername: "{{clustername}}"
token: "{{results.token}}"
log_path: "{{log_path}}"
parentmdiskgrp: Pool1
name: Pool1child1
state: absent
'''
RETURN = '''#'''
from traceback import format_exc
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible_collections.ibm.spectrum_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi, svc_argument_spec, get_logger
class IBMSVCmdiskgrp(object):
def __init__(self):
argument_spec = svc_argument_spec()
argument_spec.update(
dict(
name=dict(type='str', required=True),
state=dict(type='str', required=True, choices=['absent',
'present']),
datareduction=dict(type='str', default='no', choices=['yes',
'no']),
easytier=dict(type='str', default='off', choices=['on', 'off',
'auto']),
encrypt=dict(type='str', default='no', choices=['yes', 'no']),
ext=dict(type='int'),
parentmdiskgrp=dict(type='str'),
safeguarded=dict(type='bool'),
noquota=dict(type='bool'),
size=dict(type='int'),
unit=dict(type='str'),
provisioningpolicy=dict(type='str'),
noprovisioningpolicy=dict(type='bool'),
replicationpoollinkuid=dict(type='str'),
resetreplicationpoollinkuid=dict(type='bool'),
replication_partner_clusterid=dict(type='str'),
)
)
mutually_exclusive = []
self.module = AnsibleModule(argument_spec=argument_spec,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True)
# logging setup
log_path = self.module.params['log_path']
log = get_logger(self.__class__.__name__, log_path)
self.log = log.info
# Required
self.name = self.module.params['name']
self.state = self.module.params['state']
# Optional
self.datareduction = self.module.params.get('datareduction', None)
self.easytier = self.module.params.get('easytier', None)
self.encrypt = self.module.params.get('encrypt', None)
self.ext = self.module.params.get('ext', None)
self.safeguarded = self.module.params.get('safeguarded', False)
self.noquota = self.module.params.get('noquota', False)
self.provisioningpolicy = self.module.params.get('provisioningpolicy', '')
self.noprovisioningpolicy = self.module.params.get('noprovisioningpolicy', False)
self.replicationpoollinkuid = self.module.params.get('replicationpoollinkuid', '')
self.resetreplicationpoollinkuid = self.module.params.get('resetreplicationpoollinkuid', False)
self.replication_partner_clusterid = self.module.params.get('replication_partner_clusterid', '')
self.parentmdiskgrp = self.module.params.get('parentmdiskgrp', None)
self.size = self.module.params.get('size', None)
self.unit = self.module.params.get('unit', None)
# Dynamic variable
self.partnership_index = None
self.basic_checks()
self.restapi = IBMSVCRestApi(
module=self.module,
clustername=self.module.params['clustername'],
domain=self.module.params['domain'],
username=self.module.params['username'],
password=self.module.params['password'],
validate_certs=self.module.params['validate_certs'],
log_path=log_path,
token=self.module.params['token']
)
def basic_checks(self):
if not self.name:
self.module.fail_json(msg='Missing mandatory parameter: name')
if self.state == 'present':
message = 'The following parameters are required together: replicationpoollinkuid, replication_partner_clusterid'
if self.replication_partner_clusterid:
if not self.replicationpoollinkuid:
self.module.fail_json(msg=message)
else:
if self.replicationpoollinkuid:
self.module.fail_json(msg=message)
if self.replicationpoollinkuid and self.resetreplicationpoollinkuid:
self.module.fail_json(
msg='Mutually exclusive parameters: replicationpoollinkuid, resetreplicationpoollinkuid'
)
def mdiskgrp_exists(self):
merged_result = {}
data = self.restapi.svc_obj_info(
cmd='lsmdiskgrp',
cmdopts=None,
cmdargs=['-gui', self.name]
)
if isinstance(data, list):
for d in data:
merged_result.update(d)
else:
merged_result = data
return merged_result
def mdiskgrp_create(self):
# ext is optional for mkmdiskgrp but is made required in Ansible
# until all options for create are implemented.
# if not self.ext:
# self.module.fail_json(msg="You must pass in ext to the module.")
self.log("creating mdisk group '%s'", self.name)
# Make command
cmd = 'mkmdiskgrp'
cmdopts = {}
if not self.ext:
self.module.fail_json(msg="You must pass the ext to the module.")
if self.noquota or self.safeguarded:
if not self.parentmdiskgrp:
self.module.fail_json(msg='Required parameter missing: parentmdiskgrp')
self.check_partnership()
if self.module.check_mode:
self.changed = True
return
if self.parentmdiskgrp:
cmdopts['parentmdiskgrp'] = self.parentmdiskgrp
if self.size:
cmdopts['size'] = self.size
if self.unit:
cmdopts['unit'] = self.unit
if self.safeguarded:
cmdopts['safeguarded'] = self.safeguarded
if self.noquota:
cmdopts['noquota'] = self.noquota
else:
if self.easytier:
cmdopts['easytier'] = self.easytier
if self.encrypt:
cmdopts['encrypt'] = self.encrypt
if self.ext:
cmdopts['ext'] = str(self.ext)
if self.provisioningpolicy:
cmdopts['provisioningpolicy'] = self.provisioningpolicy
if self.datareduction:
cmdopts['datareduction'] = self.datareduction
if self.replicationpoollinkuid:
cmdopts['replicationpoollinkuid'] = self.replicationpoollinkuid
cmdopts['name'] = self.name
self.log("creating mdisk group command %s opts %s", cmd, cmdopts)
# Run command
result = self.restapi.svc_run_command(cmd, cmdopts, cmdargs=None)
self.log("creating mdisk group result %s", result)
if self.replication_partner_clusterid:
self.set_bit_mask()
if 'message' in result:
self.changed = True
self.log("creating mdisk group command result message %s",
result['message'])
else:
self.module.fail_json(
msg="Failed to create mdisk group [%s]" % (self.name))
def check_partnership(self):
if self.replication_partner_clusterid:
merged_result = {}
result = self.restapi.svc_obj_info(
cmd='lspartnership',
cmdopts=None,
cmdargs=['-gui', self.replication_partner_clusterid]
)
if isinstance(result, list):
for res in result:
merged_result = res
else:
merged_result = result
if merged_result:
self.partnership_index = merged_result.get('partnership_index')
else:
self.module.fail_json(
msg='Partnership does not exist for the given cluster ({0}).'.format(self.replication_partner_clusterid)
)
def set_bit_mask(self, systemmask=None):
cmd = 'chmdiskgrp'
bit_mask = '1'.ljust(int(self.partnership_index) + 1, '0') if not systemmask else systemmask
cmdopts = {'replicationpoollinkedsystemsmask': bit_mask}
self.restapi.svc_run_command(cmd, cmdopts, cmdargs=[self.name])
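# Bit-mask example (hypothetical index): for partnership_index 2 the computed
# mask is '1'.ljust(3, '0') == '100', i.e. a 1 in bit position 2 counting from
# the right; mdiskgrp_probe() zero-fills this to 64 characters before comparing
# it with replication_pool_linked_systems_mask.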
def mdiskgrp_delete(self):
if self.module.check_mode:
self.changed = True
return
self.log("deleting mdiskgrp '%s'", self.name)
cmd = 'rmmdiskgrp'
cmdopts = None
cmdargs = [self.name]
self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
# Any error will have been raised in svc_run_command
# rmmdiskgrp does not output anything when successful.
self.changed = True
def mdiskgrp_update(self, modify):
# update the mdisk group
self.log("updating mdiskgrp '%s'", self.name)
systemmask = None
cmd = 'chmdiskgrp'
if 'replicationpoollinkedsystemsmask' in modify:
systemmask = modify.pop('replicationpoollinkedsystemsmask')
if modify:
cmdopts = modify
self.restapi.svc_run_command(cmd, cmdopts, cmdargs=[self.name])
if systemmask or 'replicationpoollinkuid' in modify:
self.set_bit_mask(systemmask)
self.changed = True
# TBD: Implement a more generic way to check for properties to modify.
def mdiskgrp_probe(self, data):
props = {}
if self.noprovisioningpolicy and data.get('provisioning_policy_name', ''):
props['noprovisioningpolicy'] = self.noprovisioningpolicy
if self.provisioningpolicy and self.provisioningpolicy != data.get('provisioning_policy_name', ''):
props['provisioningpolicy'] = self.provisioningpolicy
if self.replicationpoollinkuid and self.replicationpoollinkuid != data.get('replication_pool_link_uid', ''):
props['replicationpoollinkuid'] = self.replicationpoollinkuid
if self.resetreplicationpoollinkuid:
props['resetreplicationpoollinkuid'] = self.resetreplicationpoollinkuid
if self.replication_partner_clusterid:
self.check_partnership()
bit_mask = '1'.ljust(int(self.partnership_index) + 1, '0')
if bit_mask.zfill(64) != data.get('replication_pool_linked_systems_mask', ''):
props['replicationpoollinkedsystemsmask'] = bit_mask
self.log("mdiskgrp_probe props='%s'", props)
return props
def apply(self):
changed = False
msg = None
modify = []
mdiskgrp_data = self.mdiskgrp_exists()
if mdiskgrp_data:
if self.state == 'absent':
self.log("CHANGED: mdisk group exists, "
"but requested state is 'absent'")
changed = True
elif self.state == 'present':
# This is where we detect if chmdiskgrp should be called.
modify = self.mdiskgrp_probe(mdiskgrp_data)
if modify:
changed = True
else:
if self.state == 'present':
self.log("CHANGED: mdisk group does not exist, "
"but requested state is 'present'")
changed = True
if changed:
if self.state == 'present':
if not mdiskgrp_data:
self.mdiskgrp_create()
msg = "Mdisk group [%s] has been created." % self.name
else:
# This is where we would modify
self.mdiskgrp_update(modify)
msg = "Mdisk group [%s] has been modified." % self.name
elif self.state == 'absent':
self.mdiskgrp_delete()
msg = "Volume [%s] has been deleted." % self.name
if self.module.check_mode:
msg = 'skipping changes due to check mode'
else:
self.log("exiting with no changes")
if self.state == 'absent':
msg = "Mdisk group [%s] did not exist." % self.name
else:
msg = "Mdisk group [%s] already exists. No modifications done" % self.name
self.module.exit_json(msg=msg, changed=changed)
def main():
v = IBMSVCmdiskgrp()
try:
v.apply()
except Exception as e:
v.log("Exception in apply(): \n%s", format_exc())
v.module.fail_json(msg="Module failed. Error [%s]." % to_native(e))
if __name__ == '__main__':
main()

View File

@@ -0,0 +1,262 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2021 IBM CORPORATION
# Author(s): Sreshtant Bohidar <sreshtant.bohidar@ibm.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: ibm_svc_start_stop_flashcopy
short_description: This module starts or stops FlashCopy mapping and consistency groups on IBM Spectrum Virtualize family storage systems
description:
- Ansible interface to manage 'startfcmap', 'stopfcmap', 'startfcconsistgrp', and 'stopfcconsistgrp' commands.
version_added: "1.4.0"
options:
name:
description:
- Specifies the name of the FlashCopy mapping or FlashCopy consistency group.
required: true
type: str
state:
description:
- Starts (C(started)) or stops (C(stopped)) a FlashCopy mapping or FlashCopy consistency group.
choices: [ started, stopped ]
required: true
type: str
clustername:
description:
- The hostname or management IP of the Spectrum Virtualize storage system.
type: str
required: true
domain:
description:
- Domain for the Spectrum Virtualize storage system.
- Valid when hostname is used for the parameter I(clustername).
type: str
username:
description:
- REST API username for the Spectrum Virtualize storage system.
- The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
type: str
password:
description:
- REST API password for the Spectrum Virtualize storage system.
- The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
type: str
token:
description:
- The authentication token to verify a user on the Spectrum Virtualize storage system.
- To generate a token, use the ibm_svc_auth module.
type: str
version_added: '1.5.0'
isgroup:
description:
- If specified True, the I(name) parameter is treated as the name of a FlashCopy consistency group.
- If specified False, or unspecified, the I(name) parameter is treated as the name of a FlashCopy mapping.
required: false
type: bool
force:
description:
- Specifies that all processing associated with the FlashCopy mapping or FlashCopy consistency group be immediately stopped.
- Valid when I(state=stopped), to stop a FlashCopy mapping or FlashCopy consistency group.
required: false
type: bool
log_path:
description:
- Path of debug log file.
type: str
validate_certs:
description:
- Validates certification.
default: false
type: bool
author:
- Sreshtant Bohidar(@Sreshtant-Bohidar)
notes:
- This module supports C(check_mode).
'''
EXAMPLES = '''
- name: Start a FlashCopy mapping
ibm.spectrum_virtualize.ibm_svc_start_stop_flashcopy:
clustername: "{{clustername}}"
domain: "{{domain}}"
username: "{{username}}"
password: "{{password}}"
log_path: /tmp/playbook.debug
name: mapping-name
state: started
- name: Stop a FlashCopy mapping
ibm.spectrum_virtualize.ibm_svc_start_stop_flashcopy:
clustername: "{{clustername}}"
domain: "{{domain}}"
username: "{{username}}"
password: "{{password}}"
log_path: /tmp/playbook.debug
name: mapping-name
state: stopped
- name: Start a FlashCopy consistency group
ibm.spectrum_virtualize.ibm_svc_start_stop_flashcopy:
clustername: "{{clustername}}"
domain: "{{domain}}"
username: "{{username}}"
password: "{{password}}"
log_path: /tmp/playbook.debug
name: fcconsistgrp-name
isgroup: true
state: started
- name: Stop a FlashCopy consistency group
ibm.spectrum_virtualize.ibm_svc_start_stop_flashcopy:
clustername: "{{clustername}}"
domain: "{{domain}}"
username: "{{username}}"
password: "{{password}}"
log_path: /tmp/playbook.debug
name: fcconsistgrp-name
isgroup: true
state: stopped
'''
RETURN = '''#'''
from traceback import format_exc
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.ibm.spectrum_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi, svc_argument_spec, get_logger
from ansible.module_utils._text import to_native
class IBMSVCFlashcopyStartStop(object):
def __init__(self):
argument_spec = svc_argument_spec()
argument_spec.update(
dict(
name=dict(type='str', required=True),
state=dict(type='str', required=True, choices=['started', 'stopped']),
isgroup=dict(type='bool', required=False),
force=dict(type='bool', required=False),
)
)
self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
# logging setup
log_path = self.module.params['log_path']
log = get_logger(self.__class__.__name__, log_path)
self.log = log.info
# Required
self.name = self.module.params['name']
self.state = self.module.params['state']
# Optional
self.isgroup = self.module.params.get('isgroup', False)
self.force = self.module.params.get('force', False)
# Handling missing mandatory parameters
if not self.name:
self.module.fail_json(msg='Missing mandatory parameter: name')
self.restapi = IBMSVCRestApi(
module=self.module,
clustername=self.module.params['clustername'],
domain=self.module.params['domain'],
username=self.module.params['username'],
password=self.module.params['password'],
validate_certs=self.module.params['validate_certs'],
log_path=log_path,
token=self.module.params['token']
)
def get_existing_fcmapping(self):
merged_result = {}
data = {}
if self.isgroup:
data = self.restapi.svc_obj_info(cmd='lsfcconsistgrp', cmdopts=None, cmdargs=[self.name])
else:
data = self.restapi.svc_obj_info(cmd='lsfcmap', cmdopts=None, cmdargs=[self.name])
if isinstance(data, list):
for d in data:
merged_result.update(d)
else:
merged_result = data
return merged_result
def start_fc(self):
cmd = ''
if self.isgroup:
cmd = 'startfcconsistgrp'
else:
cmd = 'startfcmap'
cmdopts = {}
cmdopts['prep'] = True
if self.force:
cmdopts["force"] = self.force
self.log("Starting fc mapping.. Command %s opts %s", cmd, cmdopts)
self.restapi.svc_run_command(cmd, cmdopts, cmdargs=[self.name])
def stop_fc(self):
cmd = ''
if self.isgroup:
cmd = 'stopfcconsistgrp'
else:
cmd = 'stopfcmap'
cmdopts = {}
if self.force:
cmdopts["force"] = self.force
self.log("Stopping fc mapping.. Command %s opts %s", cmd, cmdopts)
self.restapi.svc_run_command(cmd, cmdopts, cmdargs=[self.name])
def apply(self):
changed = False
msg = None
fcdata = self.get_existing_fcmapping()
if fcdata:
if self.state == "started" and fcdata["start_time"] == "":
self.log("[%s] exists, but requested state is 'started'", self.name)
changed = True
elif self.state == "stopped" and fcdata["start_time"] != "":
self.log("[%s] exists, but requested state is 'stopped'", self.name)
changed = True
if changed:
if self.module.check_mode:
msg = 'skipping changes due to check mode.'
else:
if self.state == "started":
self.start_fc()
msg = "fc [%s] has been started" % self.name
elif self.state == "stopped":
self.stop_fc()
msg = "fc [%s] has been stopped" % self.name
else:
if fcdata:
if self.state == "started" or self.state == "stopped":
self.log("[%s] exists, but currently in [%s] state", self.name, fcdata["status"])
if self.isgroup:
msg = "FlashCopy Consistency Group [%s] is in [%s] state." % (self.name, fcdata["status"])
else:
msg = "FlashCopy Mapping [%s] is in [%s] state." % (self.name, fcdata["status"])
else:
if self.state == "started" or self.state == "stopped":
if self.isgroup:
msg = "FlashCopy Consistency Group [%s] does not exist." % self.name
else:
msg = "FlashCopy Mapping [%s] does not exist." % self.name
self.module.exit_json(msg=msg, changed=changed)
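# apply() treats an empty start_time as "not started": state=started triggers
# startfcmap/startfcconsistgrp only when start_time is empty, state=stopped
# triggers stopfcmap/stopfcconsistgrp only when start_time is set; otherwise the
# current status is reported without making changes.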
def main():
v = IBMSVCFlashcopyStartStop()
try:
v.apply()
except Exception as e:
v.log("Exception in apply(): \n%s", format_exc())
v.module.fail_json(msg="Module failed. Error [%s]." % to_native(e))
if __name__ == '__main__':
main()

Some files were not shown because too many files have changed in this diff.