Init: mediaserver

This commit is contained in:
2023-02-08 12:13:28 +01:00
parent 848bc9739c
commit f7c23d4ba9
31914 changed files with 6175775 additions and 0 deletions

View File

@@ -0,0 +1,332 @@
#!/usr/bin/python
# (c) 2021, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
'''
na_cloudmanager_aggregate
'''
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Ansible module documentation (YAML embedded in a Python string); rendered by
# ansible-doc and checked by the collection's sanity tests. YAML indentation is
# reconstructed here — it was flattened by a whitespace-mangling paste.
DOCUMENTATION = '''
module: na_cloudmanager_aggregate
short_description: NetApp Cloud Manager Aggregate
extends_documentation_fragment:
- netapp.cloudmanager.netapp.cloudmanager
version_added: '21.3.0'
author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
description:
- Create, Modify or Delete Aggregate on Cloud Manager.
options:
  state:
    description:
    - Whether the specified aggregate should exist or not.
    choices: ['present', 'absent']
    required: true
    type: str
  name:
    description:
    - The name of the new aggregate.
    required: true
    type: str
  working_environment_name:
    description:
    - The working environment name where the aggregate will be created.
    type: str
  working_environment_id:
    description:
    - The public ID of the working environment where the aggregate will be created.
    type: str
  client_id:
    description:
    - The connector ID of the Cloud Manager Connector.
    required: true
    type: str
  number_of_disks:
    description:
    - The required number of disks in the new aggregate.
    type: int
  disk_size_size:
    description:
    - The required size of the disks.
    type: int
  disk_size_unit:
    description:
    - The disk size unit ['GB' or 'TB']. The default is 'TB'.
    choices: ['GB', 'TB']
    default: 'TB'
    type: str
  home_node:
    description:
    - The home node that the new aggregate should belong to.
    type: str
  provider_volume_type:
    description:
    - The cloud provider volume type.
    type: str
  capacity_tier:
    description:
    - The aggregate's capacity tier for tiering cold data to object storage.
    - If the value is NONE, the capacity_tier will not be set on aggregate creation.
    choices: [ 'NONE', 'S3', 'Blob', 'cloudStorage']
    type: str
  iops:
    description:
    - Provisioned IOPS. Needed only when providerVolumeType is "io1".
    type: int
  throughput:
    description:
    - Unit is Mb/s. Valid range 125-1000.
    - Required only when provider_volume_type is 'gp3'.
    type: int
notes:
- Support check_mode.
'''
# Usage examples rendered by ansible-doc. The delete example name is corrected
# from "Delete Volume" (a copy/paste leftover from the volume module) to
# "Delete Aggregate"; YAML indentation is reconstructed.
EXAMPLES = '''
- name: Create Aggregate
  netapp.cloudmanager.na_cloudmanager_aggregate:
    state: present
    name: AnsibleAggregate
    working_environment_name: testAWS
    client_id: "{{ client_id }}"
    number_of_disks: 2
    refresh_token: xxx

- name: Delete Aggregate
  netapp.cloudmanager.na_cloudmanager_aggregate:
    state: absent
    name: AnsibleAggregate
    working_environment_name: testAWS
    client_id: "{{ client_id }}"
    refresh_token: xxx
'''
# Documents the module's return values for ansible-doc (YAML indentation restored).
RETURN = '''
msg:
  description: Success message.
  returned: success
  type: str
  sample: "Aggregate Created"
'''
from ansible.module_utils.basic import AnsibleModule
import ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp as netapp_utils
from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module import NetAppModule
from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp import CloudManagerRestAPI
class NetAppCloudmanagerAggregate(object):
    '''
    Contains methods to parse arguments,
    derive details of CloudmanagerAggregate objects
    and send requests to CloudmanagerAggregate via
    the restApi
    '''

    def __init__(self):
        '''
        Parse arguments, setup state variables,
        check parameters and ensure request module is installed
        '''
        self.argument_spec = netapp_utils.cloudmanager_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=True, choices=['present', 'absent']),
            name=dict(required=True, type='str'),
            working_environment_id=dict(required=False, type='str'),
            working_environment_name=dict(required=False, type='str'),
            client_id=dict(required=True, type='str'),
            number_of_disks=dict(required=False, type='int'),
            disk_size_size=dict(required=False, type='int'),
            disk_size_unit=dict(required=False, choices=['GB', 'TB'], default='TB'),
            home_node=dict(required=False, type='str'),
            provider_volume_type=dict(required=False, type='str'),
            capacity_tier=dict(required=False, choices=['NONE', 'S3', 'Blob', 'cloudStorage'], type='str'),
            iops=dict(required=False, type='int'),
            throughput=dict(required=False, type='int'),
        ))
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            # authenticate with either a refresh token or a service account;
            # identify the working environment by name or by public id
            required_one_of=[
                ['refresh_token', 'sa_client_id'],
                ['working_environment_name', 'working_environment_id'],
            ],
            required_together=[['sa_client_id', 'sa_secret_key']],
            # iops/throughput only apply to specific provider volume types
            required_if=[
                ['provider_volume_type', 'gp3', ['iops', 'throughput']],
                ['provider_volume_type', 'io1', ['iops']],
            ],
            supports_check_mode=True
        )
        self.na_helper = NetAppModule()
        # set up state variables
        self.parameters = self.na_helper.set_parameters(self.module.params)
        # Calling generic rest_api class
        self.rest_api = CloudManagerRestAPI(self.module)
        self.rest_api.url += self.rest_api.environment_data['CLOUD_MANAGER_HOST']
        # root path depends on the working environment type; resolved later
        # by set_api_root_path() in get_aggregate()
        self.rest_api.api_root_path = None
        # every request must identify the connector (agent) handling it
        self.headers = {
            'X-Agent-Id': self.rest_api.format_client_id(self.parameters['client_id'])
        }

    def get_aggregate(self):
        '''
        Look up the aggregate named in self.parameters['name'].

        Resolves the working environment first (by id or by name), caches its
        publicId in self.parameters['working_environment_id'] and sets the REST
        api_root_path. Returns the matching aggregate dict, or None if absent.
        Fails the module on any lookup error.
        '''
        working_environment_detail = None
        if 'working_environment_id' in self.parameters:
            working_environment_detail, error = self.na_helper.get_working_environment_details(self.rest_api, self.headers)
            if error is not None:
                self.module.fail_json(msg="Error: Cannot find working environment: %s" % str(error))
        elif 'working_environment_name' in self.parameters:
            working_environment_detail, error = self.na_helper.get_working_environment_details_by_name(self.rest_api,
                                                                                                       self.headers,
                                                                                                       self.parameters['working_environment_name'])
            if error is not None:
                self.module.fail_json(msg="Error: Cannot find working environment: %s" % str(error))
        else:
            self.module.fail_json(msg="Error: Missing working environment information")
        if working_environment_detail is not None:
            self.parameters['working_environment_id'] = working_environment_detail['publicId']
        self.na_helper.set_api_root_path(working_environment_detail, self.rest_api)
        api_root_path = self.rest_api.api_root_path
        # non-Amazon environments take the environment id as a path segment,
        # Amazon takes it as a query parameter — presumably mirroring the
        # CloudManager API; TODO confirm against the API reference
        if working_environment_detail['cloudProviderName'] != "Amazon":
            api = '%s/aggregates/%s' % (api_root_path, working_environment_detail['publicId'])
        else:
            api = '%s/aggregates?workingEnvironmentId=%s' % (api_root_path, working_environment_detail['publicId'])
        response, error, dummy = self.rest_api.get(api, header=self.headers)
        if error:
            self.module.fail_json(msg="Error: Failed to get aggregate list: %s, %s" % (str(error), str(response)))
        for aggr in response:
            if aggr['name'] == self.parameters['name']:
                return aggr
        return None

    def create_aggregate(self):
        '''
        Create aggregate

        POSTs a creation request built from the module parameters; optional
        parameters are only included when the user supplied them.
        Fails the module on any API error.
        '''
        api = '%s/aggregates' % self.rest_api.api_root_path
        # check if all the required parameters exist
        body = {
            'name': self.parameters['name'],
            'workingEnvironmentId': self.parameters['working_environment_id'],
            'numberOfDisks': self.parameters['number_of_disks'],
            'diskSize': {'size': self.parameters['disk_size_size'],
                         'unit': self.parameters['disk_size_unit']},
        }
        # optional parameters
        if 'home_node' in self.parameters:
            body['homeNode'] = self.parameters['home_node']
        if 'provider_volume_type' in self.parameters:
            body['providerVolumeType'] = self.parameters['provider_volume_type']
        # "NONE" is a sentinel meaning: do not set a capacity tier at all
        if 'capacity_tier' in self.parameters and self.parameters['capacity_tier'] != "NONE":
            body['capacityTier'] = self.parameters['capacity_tier']
        if 'iops' in self.parameters:
            body['iops'] = self.parameters['iops']
        if 'throughput' in self.parameters:
            body['throughput'] = self.parameters['throughput']
        response, error, dummy = self.rest_api.post(api, body, header=self.headers)
        if error is not None:
            self.module.fail_json(msg="Error: unexpected response on aggregate creation: %s, %s" % (str(error), str(response)))

    def update_aggregate(self, add_number_of_disks):
        '''
        Update aggregate with aggregate name and the parameters number_of_disks will be added

        :param add_number_of_disks: number of disks to add to the existing aggregate
        '''
        api = '%s/aggregates/%s/%s/disks' % (self.rest_api.api_root_path, self.parameters['working_environment_id'],
                                             self.parameters['name'])
        body = {
            'aggregateName': self.parameters['name'],
            'workingEnvironmentId': self.parameters['working_environment_id'],
            'numberOfDisks': add_number_of_disks
        }
        response, error, dummy = self.rest_api.post(api, body, header=self.headers)
        if error is not None:
            self.module.fail_json(msg="Error: unexpected response on aggregate adding disks: %s, %s" % (str(error), str(response)))

    def delete_aggregate(self):
        '''
        Delete aggregate with aggregate name
        '''
        api = '%s/aggregates/%s/%s' % (self.rest_api.api_root_path, self.parameters['working_environment_id'],
                                       self.parameters['name'])
        body = {
            'aggregateName': self.parameters['name'],
            'workingEnvironmentId': self.parameters['working_environment_id'],
        }
        response, error, dummy = self.rest_api.delete(api, body, header=self.headers)
        if error is not None:
            self.module.fail_json(msg="Error: unexpected response on aggregate deletion: %s, %s" % (str(error), str(response)))

    def apply(self):
        '''
        Check, process and initiate aggregate operation

        Determines create/delete from current vs. desired state; when the
        aggregate already exists and state is present, compares disk counts
        and grows the aggregate if needed (shrinking is rejected).
        Honors check_mode by skipping the actual API calls.
        '''
        # check if aggregate exists
        current = self.get_aggregate()
        # check the action
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        if self.na_helper.changed:
            action = cd_action + "_aggregate"
            have_all_required, missed_params = self.na_helper.have_required_parameters(action)
            if not have_all_required:
                self.module.fail_json(msg="Error: Missing required parameters (%s) on %s" % (str(missed_params), action))
        add_disks = 0
        if current and self.parameters['state'] != 'absent':
            have_all_required, missed_params = self.na_helper.have_required_parameters("update_aggregate")
            if not have_all_required:
                self.module.fail_json(msg="Error: Missing required parameters (%s) on update_aggregate" % str(missed_params))
            # only growing the aggregate is supported by the API
            if len(current['disks']) < self.parameters['number_of_disks']:
                add_disks = self.parameters['number_of_disks'] - len(current['disks'])
                self.na_helper.changed = True
            elif len(current['disks']) > self.parameters['number_of_disks']:
                self.module.fail_json(msg="Error: Only add disk support. number_of_disks cannot be reduced")
        result_message = ""
        if self.na_helper.changed and not self.module.check_mode:
            if cd_action == "create":
                self.create_aggregate()
                result_message = "Aggregate Created"
            elif cd_action == "delete":
                self.delete_aggregate()
                result_message = "Aggregate Deleted"
            else:  # modify
                self.update_aggregate(add_disks)
                result_message = "Aggregate Updated"
        self.module.exit_json(changed=self.na_helper.changed, msg=result_message)
def main():
    '''Instantiate the aggregate module object and run the requested operation.'''
    aggregate_module = NetAppCloudmanagerAggregate()
    aggregate_module.apply()


if __name__ == '__main__':
    main()

View File

@@ -0,0 +1,458 @@
#!/usr/bin/python
# (c) 2022, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
'''
na_cloudmanager_aws_fsx
'''
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Ansible module documentation (YAML embedded in a Python string); rendered by
# ansible-doc. YAML indentation is reconstructed (flattened by a paste), the
# garbled import_file_system sentence is repaired, and a missing period added.
DOCUMENTATION = '''
module: na_cloudmanager_aws_fsx
short_description: Cloud ONTAP file system(FSx) in AWS
extends_documentation_fragment:
- netapp.cloudmanager.netapp.cloudmanager
version_added: '21.13.0'
author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
description:
- Create or delete CVO/Working Environment for AWS FSx.
options:
  state:
    description:
    - Whether the specified FSx in AWS should exist or not.
    choices: ['present', 'absent']
    default: 'present'
    type: str
  name:
    required: true
    description:
    - The name of the CVO/Working Environment for AWS FSx to manage.
    type: str
  region:
    description:
    - The region where the working environment will be created.
    type: str
  aws_credentials_name:
    description:
    - The name of the AWS Credentials account name.
    type: str
  workspace_id:
    description:
    - The ID of the Cloud Manager workspace of working environment.
    type: str
  tenant_id:
    required: true
    description:
    - The NetApp account ID that the File System will be associated with.
    type: str
  working_environment_id:
    description:
    - The ID of the AWS FSx working environment used for delete.
    type: str
  storage_capacity_size:
    description:
    - volume size for the first data aggregate.
    - For GB, the value can be [100 or 500].
    - For TB, the value can be [1,2,4,8,16].
    type: int
  storage_capacity_size_unit:
    description:
    - The unit for volume size.
    choices: ['GiB', 'TiB']
    type: str
  fsx_admin_password:
    description:
    - The admin password for Cloud Volumes ONTAP fsxadmin user.
    type: str
  throughput_capacity:
    description:
    - The capacity of the throughput.
    choices: [512, 1024, 2048]
    type: int
  security_group_ids:
    description:
    - The IDs of the security groups for the working environment, multiple security groups can be provided separated by ','.
    type: list
    elements: str
  kms_key_id:
    description:
    - AWS encryption parameters. It is required if using aws encryption.
    type: str
  tags:
    description:
    - Additional tags for the FSx AWS working environment.
    type: list
    elements: dict
    suboptions:
      tag_key:
        description: The key of the tag.
        type: str
      tag_value:
        description: The tag value.
        type: str
  primary_subnet_id:
    description:
    - The subnet ID of the first node.
    type: str
  secondary_subnet_id:
    description:
    - The subnet ID of the second node.
    type: str
  route_table_ids:
    description:
    - The list of route table IDs that will be updated with the floating IPs.
    type: list
    elements: str
  minimum_ssd_iops:
    description:
    - Provisioned SSD IOPS.
    type: int
  endpoint_ip_address_range:
    description:
    - The endpoint IP address range.
    type: str
  import_file_system:
    description:
    - Bool option to import an existing AWS file system to CloudManager.
    type: bool
    default: false
    version_added: 21.17.0
  file_system_id:
    description:
    - The AWS file system ID to import to CloudManager. Required when import_file_system is 'True'.
    type: str
    version_added: 21.17.0
notes:
- Support check_mode.
'''
EXAMPLES = """
- name: Create NetApp AWS FSx
netapp.cloudmanager.na_cloudmanager_aws_fsx:
state: present
refresh_token: "{{ xxxxxxxxxxxxxxx }}"
name: fsxAnsible
region: us-east-2
workspace_id: workspace-xxxxx
tenant_id: account-xxxxx
storage_capacity_size: 1024
storage_capacity_size_unit: TiB
aws_credentials_name: xxxxxxx
primary_subnet_id: subnet-xxxxxx
secondary_subnet_id: subnet-xxxxx
throughput_capacity: 512
fsx_admin_password: xxxxxxx
tags: [
{tag_key: abcd,
tag_value: ABCD}]
- name: Import AWS FSX
na_cloudmanager_aws_fsx:
state: present
refresh_token: "{{ xxxxxxxxxxxxxxx }}"
name: fsxAnsible
region: us-west-2
workspace_id: workspace-xxxxx
import_file_system: True
file_system_id: "{{ xxxxxxxxxxxxxxx }}"
tenant_id: account-xxxxx
aws_credentials_name: xxxxxxx
- name: Delete NetApp AWS FSx
netapp.cloudmanager.na_cloudmanager_aws_fsx:
state: absent
refresh_token: "{{ xxxxxxxxxxxxxxx }}"
working_environment_id: fs-xxxxxx
name: fsxAnsible
tenant_id: account-xxxxx
"""
# Documents the module's return values for ansible-doc (YAML indentation restored).
RETURN = '''
working_environment_id:
  description: Newly created AWS FSx working_environment_id.
  type: str
  returned: success
'''
import time
from ansible.module_utils.basic import AnsibleModule
import ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp as netapp_utils
from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module import NetAppModule
from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp import CloudManagerRestAPI
class NetAppCloudManagerAWSFSX:
    ''' object initialize and class methods '''

    def __init__(self):
        '''
        Parse arguments, set up state variables and the REST client.

        For state=present the AWS credentials name is resolved to an id
        up front, so every later call can use self.aws_credentials_id.
        '''
        self.use_rest = False
        self.argument_spec = netapp_utils.cloudmanager_host_argument_spec()
        self.argument_spec.update(dict(
            name=dict(required=True, type='str'),
            state=dict(required=False, choices=['present', 'absent'], default='present'),
            region=dict(required=False, type='str'),
            aws_credentials_name=dict(required=False, type='str'),
            workspace_id=dict(required=False, type='str'),
            tenant_id=dict(required=True, type='str'),
            working_environment_id=dict(required=False, type='str'),
            storage_capacity_size=dict(required=False, type='int'),
            storage_capacity_size_unit=dict(required=False, type='str', choices=['GiB', 'TiB']),
            fsx_admin_password=dict(required=False, type='str', no_log=True),
            throughput_capacity=dict(required=False, type='int', choices=[512, 1024, 2048]),
            security_group_ids=dict(required=False, type='list', elements='str'),
            kms_key_id=dict(required=False, type='str', no_log=True),
            tags=dict(required=False, type='list', elements='dict', options=dict(
                tag_key=dict(type='str', no_log=False),
                tag_value=dict(type='str')
            )),
            primary_subnet_id=dict(required=False, type='str'),
            secondary_subnet_id=dict(required=False, type='str'),
            route_table_ids=dict(required=False, type='list', elements='str'),
            minimum_ssd_iops=dict(required=False, type='int'),
            endpoint_ip_address_range=dict(required=False, type='str'),
            import_file_system=dict(required=False, type='bool', default=False),
            file_system_id=dict(required=False, type='str'),
        ))
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            # creation needs the full provisioning parameter set;
            # import only needs the file system id
            required_if=[
                ['state', 'present', ['region', 'aws_credentials_name', 'workspace_id', 'fsx_admin_password', 'throughput_capacity',
                                      'primary_subnet_id', 'secondary_subnet_id', 'storage_capacity_size', 'storage_capacity_size_unit']],
                ['import_file_system', True, ['file_system_id']]
            ],
            required_one_of=[['refresh_token', 'sa_client_id']],
            required_together=[['sa_client_id', 'sa_secret_key'], ['storage_capacity_size', 'storage_capacity_size_unit']],
            supports_check_mode=True,
        )
        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        self.rest_api = CloudManagerRestAPI(self.module)
        self.rest_api.url += self.rest_api.environment_data['CLOUD_MANAGER_HOST']
        self.headers = None
        # the simulator backend is selected with a dedicated header
        if self.rest_api.simulator:
            self.headers = {
                'x-simulator': 'true'
            }
        if self.parameters['state'] == 'present':
            self.aws_credentials_id, error = self.get_aws_credentials_id()
            if error is not None:
                self.module.fail_json(msg=str(error))

    def get_aws_credentials_id(self):
        """
        Get aws_credentials_id
        :return: tuple (AWS Credentials ID, None) on success, or
                 (response-or-None, error message) on failure
        """
        api = "/fsx-ontap/aws-credentials/"
        api += self.parameters['tenant_id']
        response, error, dummy = self.rest_api.get(api, None, header=self.headers)
        if error:
            return response, "Error: getting aws_credentials_id %s" % error
        for each in response:
            if each['name'] == self.parameters['aws_credentials_name']:
                return each['id'], None
        return None, "Error: aws_credentials_name not found"

    def discover_aws_fsx(self):
        """
        discover aws_fsx

        Verifies that the user-supplied file_system_id exists among the file
        systems discoverable with the given credentials/workspace/region.
        :return: an error string on failure, None (implicitly) on success
        """
        api = "/fsx-ontap/working-environments/%s/discover?credentials-id=%s&workspace-id=%s&region=%s"\
              % (self.parameters['tenant_id'], self.aws_credentials_id, self.parameters['workspace_id'], self.parameters['region'])
        response, error, dummy = self.rest_api.get(api, None, header=self.headers)
        if error:
            return "Error: discovering aws_fsx %s" % error
        id_found = False
        for each in response:
            if each['id'] == self.parameters['file_system_id']:
                id_found = True
                break
        if not id_found:
            return "Error: file_system_id provided could not be found"

    def recover_aws_fsx(self):
        """
        recover aws_fsx

        Imports (recovers) an existing AWS file system into CloudManager.
        Fails the module on any API error.
        """
        json = {"name": self.parameters['name'],
                "region": self.parameters['region'],
                "workspaceId": self.parameters['workspace_id'],
                "credentialsId": self.aws_credentials_id,
                "fileSystemId": self.parameters['file_system_id'],
                }
        api_url = "/fsx-ontap/working-environments/%s/recover" % self.parameters['tenant_id']
        response, error, dummy = self.rest_api.post(api_url, json, header=self.headers)
        if error is not None:
            self.module.fail_json(
                msg="Error: unexpected response on recovering AWS FSx: %s, %s" % (error, response))

    def create_aws_fsx(self):
        """ Create AWS FSx

        POSTs the creation request, then polls until the file system reports
        available (or fails/times out).
        :return: the new working environment id
        """
        json = {"name": self.parameters['name'],
                "region": self.parameters['region'],
                "workspaceId": self.parameters['workspace_id'],
                "credentialsId": self.aws_credentials_id,
                "throughputCapacity": self.parameters['throughput_capacity'],
                "storageCapacity": {
                    "size": self.parameters['storage_capacity_size'],
                    "unit": self.parameters['storage_capacity_size_unit']},
                "fsxAdminPassword": self.parameters['fsx_admin_password'],
                "primarySubnetId": self.parameters['primary_subnet_id'],
                "secondarySubnetId": self.parameters['secondary_subnet_id'],
                }
        # optional parameters are only sent when supplied by the user
        if self.parameters.get('tags') is not None:
            tags = []
            for each_tag in self.parameters['tags']:
                tag = {
                    'key': each_tag['tag_key'],
                    'value': each_tag['tag_value']
                }
                tags.append(tag)
            json.update({"tags": tags})
        if self.parameters.get('security_group_ids'):
            json.update({"securityGroupIds": self.parameters['security_group_ids']})
        if self.parameters.get('route_table_ids'):
            json.update({"routeTableIds": self.parameters['route_table_ids']})
        if self.parameters.get('kms_key_id'):
            json.update({"kmsKeyId": self.parameters['kms_key_id']})
        if self.parameters.get('minimum_ssd_iops'):
            json.update({"minimumSsdIops": self.parameters['minimum_ssd_iops']})
        if self.parameters.get('endpoint_ip_address_range'):
            json.update({"endpointIpAddressRange": self.parameters['endpoint_ip_address_range']})
        api_url = '/fsx-ontap/working-environments/%s' % self.parameters['tenant_id']
        response, error, dummy = self.rest_api.post(api_url, json, header=self.headers)
        if error is not None:
            self.module.fail_json(
                msg="Error: unexpected response on creating AWS FSx: %s, %s" % (str(error), str(response)))
        working_environment_id = response['id']
        # poll up to 30 times, 30 seconds apart (~15 minutes)
        creation_wait_time = 30
        creation_retry_count = 30
        wait_on_completion_api_url = '/fsx-ontap/working-environments/%s/%s?provider-details=true' % (self.parameters['tenant_id'], working_environment_id)
        err = self.wait_on_completion_for_fsx(wait_on_completion_api_url, "AWS_FSX", "create", creation_retry_count, creation_wait_time)
        if err is not None:
            self.module.fail_json(msg="Error: unexpected response wait_on_completion for creating AWS FSX: %s" % str(err))
        return working_environment_id

    def wait_on_completion_for_fsx(self, api_url, action_name, task, retries, wait_interval):
        """
        Poll the FSx provider status until it is ON/AVAILABLE.

        :param api_url: status endpoint to poll
        :param action_name: label used in error messages (e.g. "AWS_FSX")
        :param task: operation label used in error messages (e.g. "create")
        :param retries: maximum number of polls before giving up
        :param wait_interval: seconds to sleep between polls
        :return: None on success, an error string on failure/timeout
        """
        while True:
            fsx_status, error = self.check_task_status_for_fsx(api_url)
            if error is not None:
                return error
            if fsx_status['status']['status'] == "ON" and fsx_status['status']['lifecycle'] == "AVAILABLE":
                return None
            elif fsx_status['status']['status'] == "FAILED":
                return 'Failed to %s %s' % (task, action_name)
            if retries == 0:
                return 'Taking too long for %s to %s or not properly setup' % (action_name, task)
            time.sleep(wait_interval)
            retries = retries - 1

    def check_task_status_for_fsx(self, api_url):
        """
        Fetch the FSx provider details, retrying transient network errors
        with exponential backoff (up to 3 retries).

        :return: tuple (providerDetails dict, None) on success,
                 (0, error) after retries are exhausted
        """
        network_retries = 3
        exponential_retry_time = 1
        while True:
            result, error, dummy = self.rest_api.get(api_url, None, header=self.headers)
            if error is not None:
                if network_retries > 0:
                    time.sleep(exponential_retry_time)
                    exponential_retry_time *= 2
                    network_retries = network_retries - 1
                else:
                    return 0, error
            else:
                response = result
                break
        return response['providerDetails'], None

    def delete_aws_fsx(self, id, tenant_id):
        """
        Delete AWS FSx

        :param id: working environment id to delete (NOTE(review): parameter
            name shadows the builtin id(); kept for interface compatibility)
        :param tenant_id: owning NetApp account id
        """
        api_url = '/fsx-ontap/working-environments/%s/%s' % (tenant_id, id)
        response, error, dummy = self.rest_api.delete(api_url, None, header=self.headers)
        if error is not None:
            self.module.fail_json(msg="Error: unexpected response on deleting AWS FSx: %s, %s" % (str(error), str(response)))

    def apply(self):
        """
        Apply action to the AWS FSx working Environment
        :return: None
        """
        working_environment_id = None
        current, error = self.na_helper.get_aws_fsx_details(self.rest_api, header=self.headers)
        if error is not None:
            self.module.fail_json(msg="Error: unexpected response on fetching AWS FSx: %s" % str(error))
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        # a "create" with import_file_system becomes an import of an
        # existing file system (after verifying the id is discoverable)
        if self.parameters['import_file_system'] and cd_action == "create":
            error = self.discover_aws_fsx()
            if error is not None:
                self.module.fail_json(msg="Error: unexpected response on discovering AWS FSx: %s" % str(error))
            cd_action = "import"
            self.na_helper.changed = True
        if self.na_helper.changed and not self.module.check_mode:
            if cd_action == "import":
                self.recover_aws_fsx()
                working_environment_id = self.parameters['file_system_id']
            elif cd_action == "create":
                working_environment_id = self.create_aws_fsx()
            elif cd_action == "delete":
                self.delete_aws_fsx(current['id'], self.parameters['tenant_id'])
        self.module.exit_json(changed=self.na_helper.changed, working_environment_id=working_environment_id)
def main():
    """Instantiate the AWS FSx module object and run the requested operation."""
    fsx_module = NetAppCloudManagerAWSFSX()
    fsx_module.apply()


if __name__ == '__main__':
    main()

View File

@@ -0,0 +1,265 @@
#!/usr/bin/python
# (c) 2021, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
'''
na_cloudmanager_cifs_server
'''
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Ansible module documentation (YAML embedded in a Python string); rendered by
# ansible-doc. YAML indentation is reconstructed — it was flattened by a
# whitespace-mangling paste.
DOCUMENTATION = '''
module: na_cloudmanager_cifs_server
short_description: NetApp Cloud Manager cifs server
extends_documentation_fragment:
- netapp.cloudmanager.netapp.cloudmanager
version_added: '21.3.0'
author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
description:
- Create or Delete a CIFS server on the Cloud Volume ONTAP system to support CIFS volumes, based on an Active Directory or Workgroup.
options:
  state:
    description:
    - Whether the specified cifs server should exist or not.
    choices: ['present', 'absent']
    default: 'present'
    type: str
  working_environment_name:
    description:
    - The working environment name where the cifs server will be created.
    type: str
  working_environment_id:
    description:
    - The public ID of the working environment where the cifs server will be created.
    type: str
  client_id:
    description:
    - The connector ID of the Cloud Manager Connector.
    required: true
    type: str
  domain:
    description:
    - The active directory domain name. For CIFS AD only.
    type: str
  dns_domain:
    description:
    - The DNS domain name. For CIFS AD only.
    type: str
  username:
    description:
    - The active directory admin user name. For CIFS AD only.
    type: str
  password:
    description:
    - The active directory admin password. For CIFS AD only.
    type: str
  ip_addresses:
    description:
    - The DNS server IP addresses. For CIFS AD only.
    type: list
    elements: str
  netbios:
    description:
    - The CIFS server NetBIOS name. For CIFS AD only.
    type: str
  organizational_unit:
    description:
    - The organizational unit in which to register the CIFS server. For CIFS AD only.
    type: str
  is_workgroup:
    description:
    - For CIFS workgroup operations, set to true.
    type: bool
  server_name:
    description:
    - The server name. For CIFS workgroup only.
    type: str
  workgroup_name:
    description:
    - The workgroup name. For CIFS workgroup only.
    type: str
notes:
- Support check_mode.
'''
# Usage example rendered by ansible-doc (YAML indentation restored).
EXAMPLES = '''
- name: Create cifs server with working_environment_id
  netapp.cloudmanager.na_cloudmanager_cifs_server:
    state: present
    working_environment_id: VsaWorkingEnvironment-abcdefgh
    client_id: your_client_id
    refresh_token: your_refresh_token
    domain: example.com
    username: admin
    password: pass
    dns_domain: example.com
    ip_addresses: ["1.0.0.0"]
    netbios: cvoname
    organizational_unit: CN=Computers
'''

# This module returns no documented values beyond the standard ones.
RETURN = r''' # '''
from ansible.module_utils.basic import AnsibleModule
import ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp as netapp_utils
from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module import NetAppModule
class NetAppCloudmanagerCifsServer:
def __init__(self):
"""
Parse arguments, setup state variables,
check parameters and ensure request module is installed
"""
self.argument_spec = netapp_utils.cloudmanager_host_argument_spec()
self.argument_spec.update(dict(
state=dict(required=False, choices=['present', 'absent'], default='present'),
working_environment_id=dict(required=False, type='str'),
working_environment_name=dict(required=False, type='str'),
client_id=dict(required=True, type='str'),
domain=dict(required=False, type='str'),
dns_domain=dict(required=False, type='str'),
username=dict(required=False, type='str'),
password=dict(required=False, type='str', no_log=True),
ip_addresses=dict(required=False, type='list', elements='str'),
netbios=dict(required=False, type='str'),
organizational_unit=dict(required=False, type='str'),
is_workgroup=dict(required=False, type='bool'),
server_name=dict(required=False, type='str'),
workgroup_name=dict(required=False, type='str'),
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
required_one_of=[
['refresh_token', 'sa_client_id'],
['working_environment_name', 'working_environment_id'],
],
required_together=[['sa_client_id', 'sa_secret_key']],
mutually_exclusive=[
('domain', 'server_name'),
('dns_domain', 'server_name'),
('username', 'server_name'),
('password', 'server_name'),
('ip_addresses', 'server_name'),
('netbios', 'server_name'),
('organizational_unit', 'server_name'),
('domain', 'workgroup_name'),
('dns_domain', 'workgroup_name'),
('username', 'workgroup_name'),
('password', 'workgroup_name'),
('ip_addresses', 'workgroup_name'),
('netbios', 'workgroup_name'),
('organizational_unit', 'workgroup_name'),
],
supports_check_mode=True
)
self.na_helper = NetAppModule()
# set up state variables
self.parameters = self.na_helper.set_parameters(self.module.params)
# Calling generic rest_api class
self.rest_api = netapp_utils.CloudManagerRestAPI(self.module)
self.rest_api.token_type, self.rest_api.token = self.rest_api.get_token()
self.rest_api.url += self.rest_api.environment_data['CLOUD_MANAGER_HOST']
self.headers = {
'X-Agent-Id': self.rest_api.format_client_id(self.parameters['client_id'])
}
if self.parameters.get('working_environment_id'):
working_environment_detail, error = self.na_helper.get_working_environment_details(self.rest_api, self.headers)
else:
working_environment_detail, error = self.na_helper.get_working_environment_details_by_name(self.rest_api,
self.headers,
self.parameters['working_environment_name'])
if working_environment_detail is not None:
self.parameters['working_environment_id'] = working_environment_detail['publicId']
else:
self.module.fail_json(msg="Error: Cannot find working environment: %s" % str(error))
self.na_helper.set_api_root_path(working_environment_detail, self.rest_api)
def get_cifs_server(self):
response, err, dummy = self.rest_api.send_request("GET", "%s/working-environments/%s/cifs" % (
self.rest_api.api_root_path, self.parameters['working_environment_id']), None, header=self.headers)
if err is not None:
self.module.fail_json(changed=False, msg="Error on get_cifs_server: %s, %s" % (str(err), str(response)))
current_cifs = dict()
if response is None or len(response) == 0:
return None
# only one cifs server exists per working environment.
for server in response:
if server.get('activeDirectoryDomain'):
current_cifs['domain'] = server['activeDirectoryDomain']
if server.get('dnsDomain'):
current_cifs['dns_domain'] = server['dnsDomain']
if server.get('ipAddresses'):
current_cifs['ip_addresses'] = server['ipAddresses']
if server.get('organizationalUnit'):
current_cifs['organizational_unit'] = server['organizationalUnit']
if server.get('netBIOS'):
current_cifs['netbios'] = server['netBIOS']
return current_cifs
def create_cifs_server(self):
exclude_list = ['client_id', 'domain', 'netbios', 'username', 'password']
server = self.na_helper.convert_module_args_to_api(self.parameters, exclude_list)
if self.parameters.get('domain'):
server['activeDirectoryDomain'] = self.parameters['domain']
if self.parameters.get('netbios'):
server['netBIOS'] = self.parameters['netbios']
if self.parameters.get('username'):
server['activeDirectoryUsername'] = self.parameters['username']
if self.parameters.get('password'):
server['activeDirectoryPassword'] = self.parameters['password']
url = "%s/working-environments/%s/cifs" % (self.rest_api.api_root_path,
self.parameters['working_environment_id'])
if self.parameters.get('is_workgroup'):
url = url + "-workgroup"
response, err, dummy = self.rest_api.send_request("POST", url, None, server, header=self.headers)
if err is not None:
self.module.fail_json(changed=False, msg="Error on create_cifs_server failed: %s, %s" % (str(err), str(response)))
def delete_cifs_server(self):
response, err, dummy = self.rest_api.send_request("POST", "%s/working-environments/%s/delete-cifs" % (
self.rest_api.api_root_path, self.parameters['working_environment_id']), None, {}, header=self.headers)
if err is not None:
self.module.fail_json(changed=False, msg="Error on delete_cifs_server: %s, %s" % (str(err), str(response)))
def apply(self):
    """Apply the desired state: create or delete the CIFS server as needed."""
    current = self.get_cifs_server()
    cd_action = self.na_helper.get_cd_action(current, self.parameters)
    if not self.module.check_mode and self.na_helper.changed:
        if cd_action == 'create':
            self.create_cifs_server()
        elif cd_action == 'delete':
            self.delete_cifs_server()
    self.module.exit_json(changed=self.na_helper.changed)
def main():
    """Entry point: build the CIFS server module object and run apply()."""
    cifs_module = NetAppCloudmanagerCifsServer()
    cifs_module.apply()


if __name__ == '__main__':
    main()

View File

@@ -0,0 +1,655 @@
#!/usr/bin/python
# (c) 2021, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
'''
na_cloudmanager_connector_aws
'''
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
module: na_cloudmanager_connector_aws
short_description: NetApp Cloud Manager connector for AWS
extends_documentation_fragment:
- netapp.cloudmanager.netapp.cloudmanager
version_added: '21.3.0'
author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
description:
- Create or delete Cloud Manager connector for AWS.
- This module requires to be authenticated with AWS. This can be done with C(aws configure).
options:
state:
description:
- Whether the specified Cloud Manager connector for AWS should exist or not.
choices: ['present', 'absent']
default: 'present'
type: str
name:
required: true
description:
- The name of the Cloud Manager connector for AWS to manage.
type: str
instance_type:
description:
- The type of instance (for example, t3.xlarge). At least 4 CPU and 16 GB of memory are required.
type: str
default: t3.xlarge
key_name:
description:
- The name of the key pair to use for the Connector instance.
type: str
subnet_id:
description:
- The ID of the subnet for the instance.
type: str
region:
required: true
description:
- The region where the Cloud Manager Connector will be created.
type: str
instance_id:
description:
- The ID of the EC2 instance used for delete.
type: str
client_id:
description:
- The unique client ID of the Connector.
- The connector ID.
type: str
ami:
description:
- The image ID.
type: str
company:
description:
- The name of the company of the user.
type: str
security_group_ids:
description:
- The IDs of the security groups for the instance, multiple security groups can be provided separated by ','.
type: list
elements: str
iam_instance_profile_name:
description:
- The name of the instance profile for the Connector.
type: str
enable_termination_protection:
description:
- Indicates whether to enable termination protection on the instance.
type: bool
default: false
associate_public_ip_address:
description:
- Indicates whether to associate a public IP address to the instance. If not provided, the association will be done based on the subnet's configuration.
type: bool
default: true
account_id:
description:
- The NetApp tenancy account ID.
type: str
proxy_url:
description:
- The proxy URL, if using a proxy to connect to the internet.
type: str
proxy_user_name:
description:
- The proxy user name, if using a proxy to connect to the internet.
type: str
proxy_password:
description:
- The proxy password, if using a proxy to connect to the internet.
type: str
proxy_certificates:
description:
- The proxy certificates, a list of certificate file names.
type: list
elements: str
version_added: 21.5.0
aws_tag:
description:
- Additional tags for the AWS EC2 instance.
type: list
elements: dict
suboptions:
tag_key:
description: The key of the tag.
type: str
tag_value:
description: The tag value.
type: str
notes:
- Support check_mode.
'''
EXAMPLES = """
- name: Create NetApp Cloud Manager connector for AWS
netapp.cloudmanager.na_cloudmanager_connector_aws:
state: present
refresh_token: "{{ xxxxxxxxxxxxxxx }}"
name: bsuhas_ansible_occm
region: us-west-1
key_name: dev_automation
subnet_id: subnet-xxxxx
security_group_ids: [sg-xxxxxxxxxxx]
iam_instance_profile_name: OCCM_AUTOMATION
account_id: "{{ account-xxxxxxx }}"
company: NetApp
proxy_url: abc.com
proxy_user_name: xyz
proxy_password: abcxyz
proxy_certificates: [abc.crt.txt, xyz.crt.txt]
aws_tag: [
{tag_key: abc,
tag_value: a123}]
- name: Delete NetApp Cloud Manager connector for AWS
netapp.cloudmanager.na_cloudmanager_connector_aws:
state: absent
name: ansible
region: us-west-1
account_id: "{{ account-xxxxxxx }}"
instance_id: i-xxxxxxxxxxxxx
client_id: xxxxxxxxxxxxxxxxxxx
"""
RETURN = """
ids:
description: Newly created AWS client ID in cloud manager, instance ID and account ID.
type: dict
returned: success
"""
import traceback
import uuid
import time
import base64
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp as netapp_utils
from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module import NetAppModule
from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp import CloudManagerRestAPI
# Record the import failure (if any) so a helpful error can be raised later,
# once AnsibleModule is available to report it.
IMPORT_EXCEPTION = None

try:
    import boto3
    from botocore.exceptions import ClientError
    HAS_AWS_LIB = True
except ImportError as exc:
    HAS_AWS_LIB = False
    IMPORT_EXCEPTION = exc

# Unique system identifier passed to the connector in its user data.
UUID = str(uuid.uuid4())
class NetAppCloudManagerConnectorAWS(object):
    ''' object initialize and class methods '''

    def __init__(self):
        """Declare the module interface, validate requirements and build helpers."""
        self.use_rest = False
        self.argument_spec = netapp_utils.cloudmanager_host_argument_spec()
        self.argument_spec.update(dict(
            name=dict(required=True, type='str'),
            state=dict(required=False, choices=['present', 'absent'], default='present'),
            instance_type=dict(required=False, type='str', default='t3.xlarge'),
            key_name=dict(required=False, type='str'),
            subnet_id=dict(required=False, type='str'),
            region=dict(required=True, type='str'),
            instance_id=dict(required=False, type='str'),
            client_id=dict(required=False, type='str'),
            ami=dict(required=False, type='str'),
            company=dict(required=False, type='str'),
            security_group_ids=dict(required=False, type='list', elements='str'),
            iam_instance_profile_name=dict(required=False, type='str'),
            enable_termination_protection=dict(required=False, type='bool', default=False),
            associate_public_ip_address=dict(required=False, type='bool', default=True),
            account_id=dict(required=False, type='str'),
            proxy_url=dict(required=False, type='str'),
            proxy_user_name=dict(required=False, type='str'),
            proxy_password=dict(required=False, type='str', no_log=True),
            proxy_certificates=dict(required=False, type='list', elements='str'),
            aws_tag=dict(required=False, type='list', elements='dict', options=dict(
                tag_key=dict(type='str', no_log=False),
                tag_value=dict(type='str')
            )),
        ))
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            required_if=[
                ['state', 'present', ['company', 'iam_instance_profile_name', 'key_name', 'security_group_ids', 'subnet_id']],
            ],
            required_one_of=[['refresh_token', 'sa_client_id']],
            required_together=[['sa_client_id', 'sa_secret_key']],
            supports_check_mode=True
        )
        if HAS_AWS_LIB is False:
            # Bug fix: the two literals were previously concatenated without a
            # separator, producing "... pip install boto3.Import error: ...".
            self.module.fail_json(msg="the python AWS packages boto3 and botocore are required. Command is pip install boto3."
                                      " Import error: %s" % str(IMPORT_EXCEPTION))
        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        self.rest_api = CloudManagerRestAPI(self.module)
def get_instance(self):
    """
    Look up the Cloud Manager connector EC2 instance.

    :return: instance description dict when exactly one non-terminated
             instance matches, None when no match; fail_json on ambiguity.
    """
    ec2 = boto3.client('ec2', region_name=self.parameters['region'])
    if self.parameters.get('instance_id') is None:
        # No instance ID supplied: search by the Name/OCCMInstance tags.
        query = {'Filters': [{'Name': 'tag:Name', 'Values': [self.parameters['name']]},
                             {'Name': 'tag:OCCMInstance', 'Values': ['true']}]}
    else:
        query = {'InstanceIds': [self.parameters['instance_id']]}
    try:
        described = ec2.describe_instances(**query)
    except ClientError as error:
        self.module.fail_json(msg=to_native(error), exception=traceback.format_exc())
    if not described['Reservations']:
        return None
    # Terminated instances keep matching the tag filter for a while; skip them.
    live = []
    for reservation in described['Reservations']:
        for instance in reservation['Instances']:
            if instance['State']['Name'] != 'terminated':
                live.append(instance)
    if not live:
        return None
    if len(live) > 1:
        self.module.fail_json(msg="Error: found multiple instances for name=%s: %s" % (self.parameters['name'], str(live)))
    return live[0]
def get_ami(self):
    """
    Find the most recent Cloud Manager AMI published by NetApp.

    :return: ImageId of the image with the latest CreationDate;
             fail_json when no image matches.
    """
    client = boto3.client('ec2', region_name=self.parameters['region'])
    try:
        described = client.describe_images(
            Filters=[
                {
                    'Name': 'name',
                    'Values': [
                        self.rest_api.environment_data['AMI_FILTER'],
                    ]
                },
            ],
            Owners=[
                self.rest_api.environment_data['AWS_ACCOUNT'],
            ],
        )
    except ClientError as error:
        self.module.fail_json(msg=to_native(error), exception=traceback.format_exc())
    images = described.get('Images') or []
    if not images:
        # Previously an empty result crashed with IndexError on Images[0];
        # report a clear, actionable error instead.
        self.module.fail_json(msg="Error: no AMI found matching filter '%s' owned by account %s in region %s."
                                  % (self.rest_api.environment_data['AMI_FILTER'],
                                     self.rest_api.environment_data['AWS_ACCOUNT'],
                                     self.parameters['region']))
    # CreationDate is an ISO-8601 string, so lexicographic max is chronological max.
    return max(images, key=lambda image: image['CreationDate'])['ImageId']
def create_instance(self):
    """
    Create the Cloud Manager connector EC2 instance for AWS.

    Registers the agent first (to obtain user data and a client ID), then
    launches the instance and waits for the OCCM agent to become active.

    :return: (client_id, instance_id)
    """
    # Resolve the AMI lazily so users may pin a specific image via 'ami'.
    if self.parameters.get('ami') is None:
        self.parameters['ami'] = self.get_ami()
    user_data, client_id = self.register_agent_to_service()
    ec2 = boto3.client('ec2', region_name=self.parameters['region'])
    # The Name/OCCMInstance tags are what get_instance() later filters on.
    tags = [
        {
            'Key': 'Name',
            'Value': self.parameters['name']
        },
        {
            'Key': 'OCCMInstance',
            'Value': 'true'
        },
    ]
    if self.parameters.get('aws_tag') is not None:
        for each_tag in self.parameters['aws_tag']:
            tag = {
                'Key': each_tag['tag_key'],
                'Value': each_tag['tag_value']
            }
            tags.append(tag)
    instance_input = {
        'BlockDeviceMappings': [
            {
                'DeviceName': '/dev/sda1',
                'Ebs': {
                    'Encrypted': True,
                    'VolumeSize': 100,
                    'VolumeType': 'gp2',
                },
            },
        ],
        'ImageId': self.parameters['ami'],
        'MinCount': 1,
        'MaxCount': 1,
        'KeyName': self.parameters['key_name'],
        'InstanceType': self.parameters['instance_type'],
        'DisableApiTermination': self.parameters['enable_termination_protection'],
        'TagSpecifications': [
            {
                'ResourceType': 'instance',
                'Tags': tags
            },
        ],
        'IamInstanceProfile': {
            'Name': self.parameters['iam_instance_profile_name']
        },
        'UserData': user_data
    }
    # Subnet and security groups go either inside NetworkInterfaces (when a
    # public IP is requested) or at the top level — never both.
    if self.parameters.get('associate_public_ip_address') is True:
        instance_input['NetworkInterfaces'] = [
            {
                'AssociatePublicIpAddress': self.parameters['associate_public_ip_address'],
                'DeviceIndex': 0,
                'SubnetId': self.parameters['subnet_id'],
                'Groups': self.parameters['security_group_ids']
            }
        ]
    else:
        instance_input['SubnetId'] = self.parameters['subnet_id']
        instance_input['SecurityGroupIds'] = self.parameters['security_group_ids']
    try:
        result = ec2.run_instances(**instance_input)
    except ClientError as error:
        self.module.fail_json(msg=to_native(error), exception=traceback.format_exc())
    # Sleep for 2 minutes before polling: the instance needs time to boot.
    time.sleep(120)
    # Poll the agent status up to 16 times, 30 seconds apart (~8 minutes).
    retries = 16
    while retries > 0:
        agent, error = self.na_helper.get_occm_agent_by_id(self.rest_api, client_id)
        if error is not None:
            # Include the IDs so a partially created connector can be cleaned up.
            self.module.fail_json(
                msg="Error: not able to get occm status: %s, %s" % (str(error), str(agent)),
                client_id=client_id, instance_id=result['Instances'][0]['InstanceId'])
        if agent['status'] == "active":
            break
        else:
            time.sleep(30)
            retries -= 1
    if retries == 0:
        # Taking too long for status to be active
        return self.module.fail_json(msg="Error: taking too long for OCCM agent to be active or not properly setup")
    return client_id, result['Instances'][0]['InstanceId']
def get_vpc(self):
    """
    Resolve the VPC that the configured subnet belongs to.

    :return: VPC ID string.
    """
    ec2 = boto3.client('ec2', region_name=self.parameters['region'])
    try:
        subnets = ec2.describe_subnets(SubnetIds=[self.parameters['subnet_id']])
    except ClientError as error:
        self.module.fail_json(msg=to_native(error), exception=traceback.format_exc())
    return subnets['Subnets'][0]['VpcId']
def set_account_id(self):
    """Fill in parameters['account_id'] when missing; return an error or None."""
    if self.parameters.get('account_id') is not None:
        return None
    account_id, error = self.na_helper.get_or_create_account(self.rest_api)
    if error is not None:
        return error
    self.parameters['account_id'] = account_id
    return None
def register_agent_to_service(self):
    """
    Register the agent with the Cloud Manager service and build the EC2
    user data needed to set up the connector.

    :return: (user_data string, client_id)
    """
    vpc = self.get_vpc()
    if self.parameters.get('account_id') is None:
        error = self.set_account_id()
        if error is not None:
            self.module.fail_json(msg="Error: failed to get account: %s." % str(error))
    headers = {
        "X-User-Token": self.rest_api.token_type + " " + self.rest_api.token,
        "X-Service-Request-Id": "111"
    }
    body = {
        "accountId": self.parameters['account_id'],
        "name": self.parameters['name'],
        "company": self.parameters['company'],
        "placement": {
            "provider": "AWS",
            "region": self.parameters['region'],
            "network": vpc,
            "subnet": self.parameters['subnet_id'],
        },
        "extra": {
            "proxy": {
                "proxyUrl": self.parameters.get('proxy_url'),
                "proxyUserName": self.parameters.get('proxy_user_name'),
                "proxyPassword": self.parameters.get('proxy_password')
            }
        }
    }
    register_api = '/agents-mgmt/connector-setup'
    response, error, dummy = self.rest_api.post(register_api, body, header=headers)
    if error is not None:
        self.module.fail_json(msg="Error: unexpected response on connector setup: %s, %s" % (str(error), str(response)))
    client_id = response['clientId']
    client_secret = response['clientSecret']
    # Assemble the user data handed to the instance on first boot.
    u_data = {
        'instanceName': self.parameters['name'],
        'company': self.parameters['company'],
        'clientId': client_id,
        'clientSecret': client_secret,
        'systemId': UUID,
        'tenancyAccountId': self.parameters['account_id'],
        'proxySettings': {'proxyPassword': self.parameters.get('proxy_password'),
                          'proxyUserName': self.parameters.get('proxy_user_name'),
                          'proxyUrl': self.parameters.get('proxy_url'),
                          },
        'localAgent': True
    }
    if self.parameters.get('proxy_certificates') is not None:
        # Certificates are read from local files and encoded by the helper
        # (presumably base64 — see na_helper.encode_certificates).
        proxy_certificates = []
        for certificate_file in self.parameters['proxy_certificates']:
            encoded_certificate, error = self.na_helper.encode_certificates(certificate_file)
            if error:
                self.module.fail_json(msg="Error: could not open/read file '%s' of proxy_certificates: %s" % (certificate_file, error))
            proxy_certificates.append(encoded_certificate)
        if proxy_certificates:
            u_data['proxySettings']['proxyCertificates'] = proxy_certificates
    user_data = self.na_helper.convert_data_to_tabbed_jsonstring(u_data)
    return user_data, client_id
def delete_instance(self):
    """
    Terminate the connector EC2 instance and wait for its OCCM agent to
    leave the 'active' state.

    :return: None on success, or an error message string.
    """
    ec2 = boto3.client('ec2', region_name=self.parameters['region'])
    try:
        ec2.terminate_instances(
            InstanceIds=[
                self.parameters['instance_id'],
            ],
        )
    except ClientError as error:
        self.module.fail_json(msg=to_native(error), exception=traceback.format_exc())
    # Without a client_id there is no agent to wait for.
    if 'client_id' not in self.parameters:
        return None
    # Poll up to 30 times, 10 seconds apart (~5 minutes).
    retries = 30
    while retries > 0:
        agent, error = self.na_helper.get_occm_agent_by_id(self.rest_api, self.parameters['client_id'])
        if error is not None:
            return "Error: not able to get occm agent status after deleting instance: %s, %s." % (str(error), str(agent))
        if agent['status'] != "active":
            break
        else:
            time.sleep(10)
            retries -= 1
    if retries == 0:
        # Taking too long for terminating OCCM
        return "Error: taking too long for instance to finish terminating."
    return None
def get_occm_agents(self):
    """
    Collect the OCCM agent records for this connector.

    Looks up by client_id when provided, otherwise by account_id and name.

    :return: list of agent dicts (possibly empty); calls fail_json on error.
    """
    if 'client_id' in self.parameters:
        agent, error = self.na_helper.get_occm_agent_by_id(self.rest_api, self.parameters['client_id'])
        if str(error) == '403' and 'Action not allowed for user' in str(agent):
            # assume the agent does not exist anymore
            agents, error = [], None
            self.module.warn('Client Id %s was not found for this account.' % self.parameters['client_id'])
        else:
            # Any other error is kept and reported by the fail_json below.
            agents = [agent]
    else:
        self.set_account_id()
        if 'account_id' in self.parameters:
            agents, error = self.na_helper.get_occm_agents_by_name(self.rest_api, self.parameters['account_id'],
                                                                   self.parameters['name'], 'AWS')
        else:
            # Best effort only: warn rather than fail when no account is known.
            self.module.warn('Without account_id, some agents may still exist.')
            agents, error = [], None
    if error:
        self.module.fail_json(
            msg="Error: getting OCCM agents: %s, %s" % (str(error), str(agents)))
    return agents
def set_client_id(self):
    """
    Derive the connector client_id when not supplied, from the OCCM agents.

    :return: (client_id or None, list of agent dicts)
    """
    agents = self.get_occm_agents()
    client_id = self.parameters.get('client_id')
    if client_id is None:
        # Only adopt a discovered ID when exactly one active agent matches.
        candidates = [agent['agentId'] for agent in agents if 'agentId' in agent and agent['status'] == 'active']
        if len(candidates) == 1:
            client_id = candidates[0]
            self.parameters['client_id'] = client_id
    return client_id, agents
def delete_occm_agents(self, agents):
    """Remove the given OCCM agents; return an error string or None."""
    failure = self.na_helper.delete_occm_agents(self.rest_api, agents)
    if failure:
        return "Error: deleting OCCM agent(s): %s" % failure
    return None
def apply(self):
    """
    Apply the requested state to the Cloud Manager connector for AWS.

    Creates or deletes the connector; modification is not supported.

    :return: None (exits via exit_json / fail_json)
    """
    results = {
        'account_id': None,
        'client_id': None,
        'instance_id': None
    }
    agents = None
    current = self.get_instance()
    if current or self.parameters['state'] == 'absent':
        if self.parameters.get('instance_id') is None and current:
            self.parameters['instance_id'] = current['InstanceId']
        results['instance_id'] = self.parameters.get('instance_id')
        results['client_id'], agents = self.set_client_id()
        if current is None and agents:
            # it's possible the VM instance does not exist, but the clients are still present.
            current = agents
    cd_action = self.na_helper.get_cd_action(current, self.parameters)
    if cd_action is None and self.parameters['state'] == 'present':
        results['modify'] = 'Note: modifying an existing connector is not supported at this time.'
    if not self.module.check_mode and self.na_helper.changed:
        if cd_action == 'create':
            results['client_id'], results['instance_id'] = self.create_instance()
        elif cd_action == 'delete':
            # Attempt both cleanups and aggregate their failures so one
            # failing step does not hide the other.
            errors = []
            if self.parameters.get('instance_id'):
                errors.append(self.delete_instance())
            if agents:
                errors.append(self.delete_occm_agents(agents))
            errors = [error for error in errors if error]
            if errors:
                self.module.fail_json(msg='Errors deleting instance or client: %s' % ', '.join(errors))
    results['account_id'] = self.parameters.get('account_id')
    results['changed'] = self.na_helper.changed
    self.module.exit_json(**results)
def main():
    """Entry point: build the AWS connector module object and run apply()."""
    connector = NetAppCloudManagerConnectorAWS()
    connector.apply()


if __name__ == '__main__':
    main()

View File

@@ -0,0 +1,591 @@
#!/usr/bin/python
# (c) 2021, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
'''
na_cloudmanager_connector_azure
'''
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
module: na_cloudmanager_connector_azure
short_description: NetApp Cloud Manager connector for Azure.
extends_documentation_fragment:
- netapp.cloudmanager.netapp.cloudmanager
version_added: '21.4.0'
author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
description:
- Create or delete Cloud Manager connector for Azure.
options:
state:
description:
- Whether the specified Cloud Manager connector for Azure should exist or not.
choices: ['present', 'absent']
default: 'present'
type: str
name:
required: true
description:
- The name of the Cloud Manager connector for Azure to manage.
type: str
virtual_machine_size:
description:
- The virtual machine type. (for example, Standard_DS3_v2).
- At least 4 CPU and 16 GB of memory are required.
type: str
default: Standard_DS3_v2
resource_group:
required: true
description:
- The resource group in Azure where the resources will be created.
type: str
subnet_name:
required: true
description:
- The name of the subnet for the virtual machine.
- For example, in /subscriptions/xxx/resourceGroups/xxx/providers/Microsoft.Network/virtualNetworks/xxx/subnets/default,
only default is needed.
aliases:
- subnet_id
type: str
version_added: '21.7.0'
location:
required: true
description:
- The location where the Cloud Manager Connector will be created.
type: str
client_id:
description:
- The unique client ID of the Connector.
- The connector ID.
type: str
subscription_id:
required: true
description:
- The ID of the Azure subscription.
type: str
company:
required: true
description:
- The name of the company of the user.
type: str
vnet_name:
required: true
description:
- The name of the virtual network.
- for example, in /subscriptions/xxx/resourceGroups/xxx/providers/Microsoft.Network/virtualNetworks/default,
only default is needed.
aliases:
- vnet_id
type: str
version_added: '21.7.0'
vnet_resource_group:
description:
- The resource group in Azure associated with the virtual network.
- If not provided, its assumed that the VNet is within the previously specified resource group.
type: str
network_security_resource_group:
description:
- The resource group in Azure associated with the security group.
- If not provided, its assumed that the security group is within the previously specified resource group.
type: str
network_security_group_name:
required: true
description:
- The name of the security group for the deployment.
type: str
proxy_certificates:
description:
- The proxy certificates, a list of certificate file names.
type: list
elements: str
associate_public_ip_address:
description:
- Indicates whether to associate the public IP address to the virtual machine.
type: bool
default: true
account_id:
required: true
description:
- The NetApp tenancy account ID.
type: str
proxy_url:
description:
- The proxy URL, if using a proxy to connect to the internet.
type: str
proxy_user_name:
description:
- The proxy user name, if using a proxy to connect to the internet.
type: str
proxy_password:
description:
- The proxy password, if using a proxy to connect to the internet.
type: str
admin_username:
required: true
description:
- The user name for the Connector.
type: str
admin_password:
required: true
description:
- The password for the Connector.
type: str
storage_account:
description:
- The storage account can be created automatically.
- When C(storage_account) is not set, the name is constructed by appending 'sa' to the connector C(name).
- Storage account name must be between 3 and 24 characters in length and use numbers and lower-case letters only.
type: str
version_added: '21.17.0'
'''
EXAMPLES = """
- name: Create NetApp Cloud Manager connector for Azure.
netapp.cloudmanager.na_cloudmanager_connector_azure:
state: present
refresh_token: "{{ xxxxxxxxxxxxxxx }}"
name: bsuhas_ansible_occm
location: westus
resource_group: occm_group_westus
subnet_name: subnetxxxxx
vnet_name: Vnetxxxxx
subscription_id: "{{ xxxxxxxxxxxxxxxxx }}"
account_id: "{{ account-xxxxxxx }}"
company: NetApp
admin_password: Netapp123456
admin_username: bsuhas
network_security_group_name: OCCM_SG
proxy_url: abc.com
proxy_user_name: xyz
proxy_password: abcxyz
proxy_certificates: [abc.crt.txt, xyz.crt.txt]
- name: Delete NetApp Cloud Manager connector for Azure.
netapp.cloudmanager.na_cloudmanager_connector_azure:
state: absent
name: ansible
location: westus
resource_group: occm_group_westus
network_security_group_name: OCCM_SG
subnet_name: subnetxxxxx
company: NetApp
admin_password: Netapp123456
admin_username: bsuhas
vnet_name: Vnetxxxxx
subscription_id: "{{ xxxxxxxxxxxxxxxxx }}"
account_id: "{{ account-xxxxxxx }}"
refresh_token: "{{ xxxxxxxxxxxxxxx }}"
client_id: xxxxxxxxxxxxxxxxxxx
"""
RETURN = """
msg:
description: Newly created Azure connector id in cloud manager.
type: str
returned: success
sample: 'xxxxxxxxxxxxxxxx'
"""
import traceback
import time
import base64
import json
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp as netapp_utils
from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module import NetAppModule
from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp import CloudManagerRestAPI
# Record the import failure (if any) so a helpful error can be raised later,
# once AnsibleModule is available to report it.
IMPORT_EXCEPTION = None

try:
    from azure.mgmt.resource import ResourceManagementClient
    from azure.mgmt.compute import ComputeManagementClient
    from azure.mgmt.network import NetworkManagementClient
    from azure.mgmt.storage import StorageManagementClient
    from azure.mgmt.resource.resources.models import Deployment
    from azure.common.client_factory import get_client_from_cli_profile
    from msrestazure.azure_exceptions import CloudError
    HAS_AZURE_LIB = True
except ImportError as exc:
    HAS_AZURE_LIB = False
    IMPORT_EXCEPTION = exc
class NetAppCloudManagerConnectorAzure(object):
    ''' object initialize and class methods '''

    def __init__(self):
        """Declare the module interface, validate requirements and build helpers."""
        self.use_rest = False
        self.argument_spec = netapp_utils.cloudmanager_host_argument_spec()
        self.argument_spec.update(dict(
            name=dict(required=True, type='str'),
            state=dict(required=False, choices=['present', 'absent'], default='present'),
            virtual_machine_size=dict(required=False, type='str', default='Standard_DS3_v2'),
            resource_group=dict(required=True, type='str'),
            subscription_id=dict(required=True, type='str'),
            subnet_name=dict(required=True, type='str', aliases=['subnet_id']),
            vnet_name=dict(required=True, type='str', aliases=['vnet_id']),
            vnet_resource_group=dict(required=False, type='str'),
            location=dict(required=True, type='str'),
            network_security_resource_group=dict(required=False, type='str'),
            network_security_group_name=dict(required=True, type='str'),
            client_id=dict(required=False, type='str'),
            company=dict(required=True, type='str'),
            proxy_certificates=dict(required=False, type='list', elements='str'),
            associate_public_ip_address=dict(required=False, type='bool', default=True),
            account_id=dict(required=True, type='str'),
            proxy_url=dict(required=False, type='str'),
            proxy_user_name=dict(required=False, type='str'),
            proxy_password=dict(required=False, type='str', no_log=True),
            admin_username=dict(required=True, type='str'),
            admin_password=dict(required=True, type='str', no_log=True),
            storage_account=dict(required=False, type='str'),
        ))
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            required_if=[
                ['state', 'absent', ['client_id']]
            ],
            required_one_of=[['refresh_token', 'sa_client_id']],
            required_together=[['sa_client_id', 'sa_secret_key']],
            supports_check_mode=True
        )
        if HAS_AZURE_LIB is False:
            self.module.fail_json(msg="the python AZURE library azure.mgmt and azure.common is required. Command is pip install azure-mgmt, azure-common."
                                      " Import error: %s" % str(IMPORT_EXCEPTION))
        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        # Default the storage account name to '<name>sa' in lower case.
        # NOTE(review): per the module docs, storage account names must be
        # 3-24 chars of lower-case letters/digits; a long or punctuated
        # 'name' could still yield an invalid default — not validated here.
        if 'storage_account' not in self.parameters or self.parameters['storage_account'] == "":
            self.parameters['storage_account'] = self.parameters['name'].lower() + 'sa'
        self.rest_api = CloudManagerRestAPI(self.module)
def get_deploy_azure_vm(self):
    """
    Check whether the connector deployment already exists in Azure.

    :return: truthy existence flag when the deployment exists, None otherwise.
    """
    found = False
    client = get_client_from_cli_profile(ResourceManagementClient)
    try:
        found = client.deployments.check_existence(self.parameters['resource_group'], self.parameters['name'])
    except CloudError as error:
        self.module.fail_json(msg=to_native(error), exception=traceback.format_exc())
    return found if found else None
def deploy_azure(self):
    """
    Create the Cloud Manager connector for Azure via an ARM template deployment.

    :return: (client_id, principal_id) — principal_id is the system-assigned
             identity of the deployed virtual machine.
    """
    user_data, client_id = self.register_agent_to_service()
    # Load the ARM template and its parameter skeleton from the helper.
    template = json.loads(self.na_helper.call_template())
    params = json.loads(self.na_helper.call_parameters())
    params['adminUsername']['value'] = self.parameters['admin_username']
    params['adminPassword']['value'] = self.parameters['admin_password']
    params['customData']['value'] = json.dumps(user_data)
    params['location']['value'] = self.parameters['location']
    params['virtualMachineName']['value'] = self.parameters['name']
    params['storageAccount']['value'] = self.parameters['storage_account']
    if self.rest_api.environment == 'stage':
        params['environment']['value'] = self.rest_api.environment
    # Accept either a full resource ID or a bare VNet name; in the latter
    # case build the ID from the (vnet_)resource_group and subscription.
    if '/subscriptions' in self.parameters['vnet_name']:
        network = self.parameters['vnet_name']
    else:
        if self.parameters.get('vnet_resource_group') is not None:
            network = '/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/virtualNetworks/%s' % (
                self.parameters['subscription_id'], self.parameters['vnet_resource_group'], self.parameters['vnet_name'])
        else:
            network = '/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/virtualNetworks/%s' % (
                self.parameters['subscription_id'], self.parameters['resource_group'], self.parameters['vnet_name'])
    if '/subscriptions' in self.parameters['subnet_name']:
        subnet = self.parameters['subnet_name']
    else:
        subnet = '%s/subnets/%s' % (network, self.parameters['subnet_name'])
    if self.parameters.get('network_security_resource_group') is not None:
        network_security_group_name = '/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/networkSecurityGroups/%s' % (
            self.parameters['subscription_id'], self.parameters['network_security_resource_group'], self.parameters['network_security_group_name'])
    else:
        network_security_group_name = '/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/networkSecurityGroups/%s' % (
            self.parameters['subscription_id'], self.parameters['resource_group'], self.parameters['network_security_group_name'])
    params['virtualNetworkId']['value'] = network
    params['networkSecurityGroupName']['value'] = network_security_group_name
    params['virtualMachineSize']['value'] = self.parameters['virtual_machine_size']
    params['subnetId']['value'] = subnet
    try:
        resource_client = get_client_from_cli_profile(ResourceManagementClient)
        resource_client.resource_groups.create_or_update(
            self.parameters['resource_group'],
            {"location": self.parameters['location']})
        deployment_properties = {
            'mode': 'Incremental',
            'template': template,
            'parameters': params
        }
        resource_client.deployments.begin_create_or_update(
            self.parameters['resource_group'],
            self.parameters['name'],
            Deployment(properties=deployment_properties)
        )
    except CloudError as error:
        self.module.fail_json(msg="Error in deploy_azure: %s" % to_native(error), exception=traceback.format_exc())
    # Sleep for 2 minutes before polling: the VM needs time to boot.
    time.sleep(120)
    # Poll the agent status up to 30 times, 30 seconds apart (~15 minutes).
    retries = 30
    while retries > 0:
        occm_resp, error = self.na_helper.check_occm_status(self.rest_api, client_id)
        if error is not None:
            self.module.fail_json(
                msg="Error: Not able to get occm status: %s, %s" % (str(error), str(occm_resp)))
        if occm_resp['agent']['status'] == "active":
            break
        else:
            time.sleep(30)
            retries -= 1
    if retries == 0:
        # Taking too long for status to be active
        return self.module.fail_json(msg="Taking too long for OCCM agent to be active or not properly setup")
    # Fetch the VM to expose its system-assigned identity principal.
    try:
        compute_client = get_client_from_cli_profile(ComputeManagementClient)
        vm = compute_client.virtual_machines.get(self.parameters['resource_group'], self.parameters['name'])
    except CloudError as error:
        return self.module.fail_json(msg="Error in deploy_azure (get identity): %s" % to_native(error), exception=traceback.format_exc())
    principal_id = vm.identity.principal_id
    return client_id, principal_id
def register_agent_to_service(self):
    """
    Register the connector agent with the Cloud Manager service.

    Builds the Azure network/subnet resource ids, ensures an account id is
    available, then posts a connector-setup request.

    :return: tuple (response, client_id) - the connector-setup payload
        (user data, including proxySettings) and the new connector client id.
    """
    # A vnet_name containing '/subscriptions' is already a full resource id.
    if '/subscriptions' in self.parameters['vnet_name']:
        network = self.parameters['vnet_name']
    else:
        # The vnet may live in its own resource group; fall back to the
        # connector's resource group when none was given.
        vnet_resource_group = self.parameters.get('vnet_resource_group')
        if vnet_resource_group is None:
            vnet_resource_group = self.parameters['resource_group']
        network = '/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/virtualNetworks/%s' % (
            self.parameters['subscription_id'], vnet_resource_group, self.parameters['vnet_name'])
    # Same convention for the subnet: accept a full id or derive one.
    if '/subscriptions' in self.parameters['subnet_name']:
        subnet = self.parameters['subnet_name']
    else:
        subnet = '%s/subnets/%s' % (network, self.parameters['subnet_name'])
    if self.parameters.get('account_id') is None:
        response, error = self.na_helper.get_or_create_account(self.rest_api)
        if error is not None:
            self.module.fail_json(
                msg="Error: unexpected response on getting account: %s, %s" % (str(error), str(response)))
        self.parameters['account_id'] = response
    headers = {
        "X-User-Token": self.rest_api.token_type + " " + self.rest_api.token,
    }
    body = {
        "accountId": self.parameters['account_id'],
        "name": self.parameters['name'],
        "company": self.parameters['company'],
        "placement": {
            "provider": "AZURE",
            "region": self.parameters['location'],
            "network": network,
            "subnet": subnet,
        },
        "extra": {
            "proxy": {
                "proxyUrl": self.parameters.get('proxy_url'),
                "proxyUserName": self.parameters.get('proxy_user_name'),
                "proxyPassword": self.parameters.get('proxy_password')
            }
        }
    }
    register_url = "%s/agents-mgmt/connector-setup" % self.rest_api.environment_data['CLOUD_MANAGER_HOST']
    response, error, dummy = self.rest_api.post(register_url, body, header=headers)
    if error is not None:
        self.module.fail_json(msg="Error: unexpected response on getting userdata for connector setup: %s, %s" % (str(error), str(response)))
    client_id = response['clientId']
    proxy_certificates = []
    if self.parameters.get('proxy_certificates') is not None:
        for certificate_path in self.parameters['proxy_certificates']:
            try:
                # 'with' closes the handle deterministically; the original
                # leaked the open file object.
                with open(certificate_path, "r") as certificate_file:
                    data = certificate_file.read()
            except OSError:
                self.module.fail_json(msg="Error: Could not open/read file of proxy_certificates: %s" % str(certificate_path))
            # Bug fix: base64.b64encode() requires bytes, so the original
            # raised TypeError on the str returned by read(). Encode first,
            # and decode the result so it is JSON-serializable.
            proxy_certificates.append(base64.b64encode(data.encode()).decode())
    if proxy_certificates:
        response['proxySettings']['proxyCertificates'] = proxy_certificates
    return response, client_id
def delete_azure_occm(self):
"""
Delete the Azure connector: tear down the VM, NIC, storage account,
public IP and deployment, wait for the OCCM agent to leave 'active',
then remove the agent from Cloud Manager.
:return:
None
"""
# delete vm deploy
try:
compute_client = get_client_from_cli_profile(ComputeManagementClient)
vm_delete = compute_client.virtual_machines.begin_delete(
self.parameters['resource_group'],
self.parameters['name'])
# Poll the long-running operation in 2-second steps until finished.
while not vm_delete.done():
vm_delete.wait(2)
except CloudError as error:
self.module.fail_json(msg=to_native(error), exception=traceback.format_exc())
# delete interfaces deploy
try:
network_client = get_client_from_cli_profile(NetworkManagementClient)
interface_delete = network_client.network_interfaces.begin_delete(
self.parameters['resource_group'],
self.parameters['name'] + '-nic')
while not interface_delete.done():
interface_delete.wait(2)
except CloudError as error:
self.module.fail_json(msg=to_native(error), exception=traceback.format_exc())
# delete storage account deploy
try:
storage_client = get_client_from_cli_profile(StorageManagementClient)
# Storage-account delete is synchronous - no poller to wait on.
storage_client.storage_accounts.delete(
self.parameters['resource_group'],
self.parameters['storage_account'])
except CloudError as error:
self.module.fail_json(msg=to_native(error), exception=traceback.format_exc())
# delete public ip address deploy
try:
network_client = get_client_from_cli_profile(NetworkManagementClient)
public_ip_addresses_delete = network_client.public_ip_addresses.begin_delete(
self.parameters['resource_group'],
self.parameters['name'] + '-ip')
while not public_ip_addresses_delete.done():
public_ip_addresses_delete.wait(2)
except CloudError as error:
self.module.fail_json(msg=to_native(error), exception=traceback.format_exc())
# delete deployment
try:
resource_client = get_client_from_cli_profile(ResourceManagementClient)
# NOTE(review): this deletes a deployment named '<name>-ip' - the same
# suffix as the public IP above; confirm the deployment suffix is
# intentional and not a copy-paste of the previous block.
deployments_delete = resource_client.deployments.begin_delete(
self.parameters['resource_group'],
self.parameters['name'] + '-ip')
while not deployments_delete.done():
deployments_delete.wait(5)
except CloudError as error:
self.module.fail_json(msg=to_native(error), exception=traceback.format_exc())
# Poll OCCM until the agent stops reporting 'active' (up to ~160s).
retries = 16
while retries > 0:
occm_resp, error = self.na_helper.check_occm_status(self.rest_api,
self.parameters['client_id'])
if error is not None:
self.module.fail_json(
msg="Error: Not able to get occm status: %s, %s" % (str(error), str(occm_resp)))
if occm_resp['agent']['status'] != "active":
break
else:
time.sleep(10)
retries -= 1
if retries == 0:
# Taking too long for terminating OCCM
return self.module.fail_json(msg="Taking too long for instance to finish terminating")
# Finally remove the agent record from Cloud Manager itself.
client = self.rest_api.format_client_id(self.parameters['client_id'])
error = self.na_helper.delete_occm_agents(self.rest_api, [{'agentId': client}])
if error:
self.module.fail_json(msg="Error: unexpected response on deleting OCCM: %s" % (str(error)))
def apply(self):
    """
    Apply the requested state to the Cloud Manager connector for AZURE.

    Creates the connector when state is 'present'; deletes it when state is
    'absent' and a deployed VM exists. Always exits the module with the
    changed flag and the (possibly None) client/principal ids.
    :return: None
    """
    client_id, principal_id = None, None
    if not self.module.check_mode:
        requested_state = self.parameters['state']
        if requested_state == 'present':
            client_id, principal_id = self.deploy_azure()
            self.na_helper.changed = True
        elif requested_state == 'absent' and self.get_deploy_azure_vm():
            # Only tear down when the deployment actually exists.
            self.delete_azure_occm()
            self.na_helper.changed = True
    self.module.exit_json(changed=self.na_helper.changed, msg={'client_id': client_id, 'principal_id': principal_id})
def main():
    """
    Instantiate the Cloud Manager connector for AZURE module and run it.
    :return: None
    """
    connector = NetAppCloudManagerConnectorAzure()
    connector.apply()


if __name__ == '__main__':
    main()

View File

@@ -0,0 +1,644 @@
#!/usr/bin/python
# (c) 2021, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
'''
na_cloudmanager_connector_gcp
'''
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
module: na_cloudmanager_connector_gcp
short_description: NetApp Cloud Manager connector for GCP.
extends_documentation_fragment:
- netapp.cloudmanager.netapp.cloudmanager
version_added: '21.4.0'
author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
description:
- Create or delete Cloud Manager connector for GCP.
options:
state:
description:
- Whether the specified Cloud Manager connector for GCP should exist or not.
choices: ['present', 'absent']
default: 'present'
type: str
name:
required: true
description:
- The name of the Cloud Manager connector for GCP to manage.
type: str
project_id:
description:
- The GCP project_id where the connector will be created.
required: true
type: str
zone:
description:
- The GCP zone where the Connector will be created.
required: true
type: str
gcp_service_account_email:
description:
- The email of the service_account for the connector instance. This service account is used to allow the Connector to create Cloud Volume ONTAP.
required: true
type: str
aliases: ['service_account_email']
version_added: 21.7.0
company:
description:
- The name of the company of the user.
required: true
type: str
gcp_service_account_path:
description:
- The local path of the service_account JSON file for GCP authorization purposes. This service account is used to create the Connector in GCP.
type: str
aliases: ['service_account_path']
version_added: 21.7.0
subnet_id:
description:
- The name of the subnet for the virtual machine.
type: str
default: default
network_project_id:
description:
- The project id in GCP associated with the Subnet. If not provided, it is assumed that the Subnet is within the previously specified project id.
type: str
machine_type:
description:
- The machine_type for the Connector VM.
type: str
default: n2-standard-4
firewall_tags:
description:
- Indicates whether to add firewall_tags to the connector VM (HTTP and HTTP).
type: bool
default: true
associate_public_ip:
description:
- Indicates whether to associate a public IP address to the virtual machine.
type: bool
default: true
proxy_url:
description:
- The proxy URL, if using a proxy to connect to the internet.
type: str
proxy_user_name:
description:
- The proxy user name, if using a proxy to connect to the internet.
type: str
proxy_password:
description:
- The proxy password, if using a proxy to connect to the internet.
type: str
proxy_certificates:
description:
- The proxy certificates. A list of certificate file names.
type: list
elements: str
account_id:
description:
- The NetApp account ID that the Connector will be associated with.
- If not provided, Cloud Manager uses the first account. If no account exists, Cloud Manager creates a new account.
- You can find the account ID in the account tab of Cloud Manager at [https://cloudmanager.netapp.com](https://cloudmanager.netapp.com).
type: str
client_id:
description:
- The client ID of the Cloud Manager Connector.
- The connector ID.
- If state is absent, the client id is used to identify the agent and delete it.
- If state is absent and this parameter is not set, all agents associated with C(name) are deleted.
- Ignored when state is present.
type: str
'''
EXAMPLES = """
- name: Create NetApp Cloud Manager connector for GCP
netapp.cloudmanager.na_cloudmanager_connector_gcp:
state: present
name: ansible-occm-gcp
project_id: xxxxxxx-support
zone: us-east4-b
company: NetApp
gcp_service_account_email: xxxxxxxx@xxxxxxx-support.iam.gserviceaccount.com
gcp_service_account_path: gcp_creds.json
proxy_user_name: test
proxy_password: test
proxy_url: http://abcdefg.com
proxy_certificates: ["D-TRUST_Root_Class_3_CA_2_2009.crt", "DigiCertGlobalRootCA.crt", "DigiCertGlobalRootG2.crt"]
account_id: account-xxxxXXXX
refresh_token: "{{ xxxxxxxxxxxxxxx }}"
- name: Delete NetApp Cloud Manager connector for GCP
netapp.cloudmanager.na_cloudmanager_connector_gcp:
state: absent
name: ansible-occm-gcp
refresh_token: "{{ xxxxxxxxxxxxxxx }}"
client_id: "{{ wwwwwwwwww }}"
project_id: xxxxxxx-support
zone: us-east4-b
company: NetApp
gcp_service_account_email: xxxxxxxx@xxxxxxx-support.iam.gserviceaccount.com
gcp_service_account_path: gcp_creds.json
account_id: account-xxxxXXXX
"""
RETURN = """
client_id:
description: Newly created GCP connector id on cloud manager.
type: str
returned: success
sample: 'FDQE8SwrbjVS6mqUgZoOHQmu2DvBNRRW'
client_ids:
description:
- a list of client ids matching the name and provider if the connector already exists.
- ideally the list should be empty, or contain a single element matching client_id.
type: list
elements: str
returned: success
sample: ['FDQE8SwrbjVS6mqUgZoOHQmu2DvBNRRW']
"""
import uuid
import time
import base64
import json
from ansible.module_utils.basic import AnsibleModule
import ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp as netapp_utils
from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module import NetAppModule
from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp import CloudManagerRestAPI
# Optional GCP dependencies: collect import failures instead of crashing so
# the module can report a helpful message via fail_when_import_errors().
IMPORT_ERRORS = []
HAS_GCP_COLLECTION = False
try:
import google.auth
from google.auth.transport import requests
from google.oauth2 import service_account
import yaml
HAS_GCP_COLLECTION = True
except ImportError as exc:
IMPORT_ERRORS.append(str(exc))
# Host for the Deployment Manager REST API (scheme is added by the REST layer).
GCP_DEPLOYMENT_MANAGER = "www.googleapis.com"
# Random system id generated once per module invocation.
UUID = str(uuid.uuid4())
class NetAppCloudManagerConnectorGCP(object):
''' object initialize and class methods '''
def __init__(self):
# Start from the shared cloudmanager argument spec (refresh_token,
# sa_client_id, sa_secret_key, ...) and layer the GCP-specific options.
self.argument_spec = netapp_utils.cloudmanager_host_argument_spec()
self.argument_spec.update(dict(
name=dict(required=True, type='str'),
state=dict(required=False, choices=['present', 'absent'], default='present'),
project_id=dict(required=True, type='str'),
zone=dict(required=True, type='str'),
company=dict(required=True, type='str'),
gcp_service_account_email=dict(required=True, type='str', aliases=['service_account_email']),
gcp_service_account_path=dict(required=False, type='str', aliases=['service_account_path']),
subnet_id=dict(required=False, type='str', default='default'),
network_project_id=dict(required=False, type='str'),
machine_type=dict(required=False, type='str', default='n2-standard-4'),
firewall_tags=dict(required=False, type='bool', default=True),
associate_public_ip=dict(required=False, type='bool', default=True),
proxy_url=dict(required=False, type='str'),
proxy_user_name=dict(required=False, type='str'),
proxy_password=dict(required=False, type='str', no_log=True),
proxy_certificates=dict(required=False, type='list', elements='str'),
account_id=dict(required=False, type='str'),
client_id=dict(required=False, type='str'),
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
required_one_of=[['refresh_token', 'sa_client_id']],
required_together=[['sa_client_id', 'sa_secret_key']],
supports_check_mode=True
)
self.na_helper = NetAppModule()
self.parameters = self.na_helper.set_parameters(self.module.params)
self.rest_api = CloudManagerRestAPI(self.module)
# Suffix appended to the connector name to form the deployment name.
self.gcp_common_suffix_name = "-vm-boot-deployment"
# Abort early with a clear message if google-auth/yaml failed to import.
self.fail_when_import_errors(IMPORT_ERRORS, HAS_GCP_COLLECTION)
super(NetAppCloudManagerConnectorGCP, self).__init__()
# Acquire a GCP OAuth token up front; all GCP REST calls depend on it.
self.rest_api.gcp_token, error = self.get_gcp_token()
if error:
self.module.fail_json(msg='Error getting gcp token: %s' % repr(error))
def get_gcp_token(self):
'''
Obtain a GCP OAuth2 access token.
Uses the service-account JSON file when gcp_service_account_path is set,
otherwise falls back to Application Default Credentials.
:return: tuple (token, error) - error is None on success.
'''
scopes = ["https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/compute.readonly",
"https://www.googleapis.com/auth/ndev.cloudman",
"https://www.googleapis.com/auth/ndev.cloudman.readonly",
"https://www.googleapis.com/auth/devstorage.full_control",
"https://www.googleapis.com/auth/devstorage.read_write"]
if 'gcp_service_account_path' in self.parameters:
try:
fh = open(self.parameters['gcp_service_account_path'])
except (OSError, IOError) as error:
return None, "opening %s: got: %s" % (self.parameters['gcp_service_account_path'], repr(error))
with fh:
# Parse only to validate the file is non-empty JSON; the actual
# credential loading below re-reads the file itself.
key_bytes = json.load(fh)
if key_bytes is None:
return None, "Error: gcp_service_account_path file is empty"
credentials = service_account.Credentials.from_service_account_file(self.parameters['gcp_service_account_path'], scopes=scopes)
else:
credentials, project = google.auth.default(scopes=scopes)
# Tokens start out unset/expired; refresh() fetches a valid one.
credentials.refresh(requests.Request())
return credentials.token, None
def fail_when_import_errors(self, import_errors, has_gcp_collection=True):
    """Fail the module when optional GCP imports failed; no-op otherwise."""
    if import_errors or not has_gcp_collection:
        prefix = '' if has_gcp_collection else 'The python google-auth package is required. '
        self.module.fail_json(msg=prefix + 'Import errors: %s' % str(import_errors))
def get_deploy_vm(self):
'''
Get Cloud Manager connector for GCP
:return:
Dictionary of current details if Cloud Manager connector for GCP
None if Cloud Manager connector for GCP is not found
'''
# Look up the deployment-manager deployment named '<name>-vm-boot-deployment'.
api_url = GCP_DEPLOYMENT_MANAGER + '/deploymentmanager/v2/projects/%s/global/deployments/%s%s' % (
self.parameters['project_id'], self.parameters['name'], self.gcp_common_suffix_name)
headers = {
"X-User-Token": self.rest_api.token_type + " " + self.rest_api.token,
'Authorization': self.rest_api.token_type + " " + self.rest_api.gcp_token,
}
occm_status, error, dummy = self.rest_api.get(api_url, header=headers)
if error is not None:
# A 404 with 'is not found' in the body means no deployment exists.
# NOTE(review): assumes the error body is bytes here - confirm against
# CloudManagerRestAPI.get error handling.
if error == '404' and b'is not found' in occm_status:
return None
self.module.fail_json(
msg="Error: unexpected response on getting occm: %s, %s" % (str(error), str(occm_status)))
return occm_status
def get_custom_data_for_gcp(self, proxy_certificates):
'''
Build the user-data JSON injected into the connector VM.
:param proxy_certificates: list of base64-encoded proxy CA certificates.
:return: tuple (user_data_json, client_id, None).
'''
# get account ID
if 'account_id' not in self.parameters:
# Resolve (or create) the tenancy account when none was supplied.
response, error = self.na_helper.get_or_create_account(self.rest_api)
if error is not None:
self.module.fail_json(
msg="Error: unexpected response on getting account: %s, %s" % (str(error), str(response)))
self.parameters['account_id'] = response
# registerAgentTOServiceForGCP
response, error = self.na_helper.register_agent_to_service(self.rest_api, "GCP", "")
if error is not None:
self.module.fail_json(
msg="Error: register agent to service for gcp failed: %s, %s" % (str(error), str(response)))
# add proxy_certificates as part of json data
client_id = response['clientId']
client_secret = response['clientSecret']
u_data = {
'instanceName': self.parameters['name'],
'company': self.parameters['company'],
'clientId': client_id,
'clientSecret': client_secret,
'systemId': UUID,
'tenancyAccountId': self.parameters['account_id'],
'proxySettings': {'proxyPassword': self.parameters.get('proxy_password'),
'proxyUserName': self.parameters.get('proxy_user_name'),
'proxyUrl': self.parameters.get('proxy_url'),
'proxyCertificates': proxy_certificates,
},
}
# convert response to json format
user_data = json.dumps(u_data)
return user_data, client_id, None
def deploy_gcp_vm(self, proxy_certificates):
'''
Deploy the connector VM via GCP Deployment Manager and wait until the
OCCM agent reports 'active'.
:param proxy_certificates: list of base64-encoded proxy CA certificates.
:return: tuple (response, client_id, error).
'''
# getCustomDataForGCP
response, client_id, error = self.get_custom_data_for_gcp(proxy_certificates)
if error is not None:
self.module.fail_json(
msg="Error: Not able to get user data for GCP: %s, %s" % (str(error), str(response)))
# compose
user_data = response
gcp_custom_data = base64.b64encode(user_data.encode())
gcp_sa_scopes = ["https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/compute.readonly",
"https://www.googleapis.com/auth/ndev.cloudman",
"https://www.googleapis.com/auth/ndev.cloudman.readonly"]
tags = []
if self.parameters['firewall_tags'] is True:
tags = {'items': ['firewall-tag-bvsu', 'http-server', 'https-server']}
# first resource: the VM itself, booting from the disk defined below.
device_name = self.parameters['name'] + '-vm-disk-boot'
t = {
'name': self.parameters['name'] + '-vm',
'properties': {
'disks': [
{'autoDelete': True,
'boot': True,
'deviceName': device_name,
'name': device_name,
'source': "\\\"$(ref.%s.selfLink)\\\"" % device_name,
'type': "PERSISTENT",
},
],
'machineType': "zones/%s/machineTypes/%s" % (self.parameters['zone'], self.parameters['machine_type']),
'metadata': {
'items': [
{'key': 'serial-port-enable',
'value': 1},
{'key': 'customData',
'value': gcp_custom_data}
]
},
'serviceAccounts': [{'email': self.parameters['gcp_service_account_email'],
'scopes': gcp_sa_scopes, }],
'tags': tags,
'zone': self.parameters['zone']
},
'metadata': {'dependsOn': [device_name]},
'type': 'compute.v1.instance',
}
access_configs = []
if self.parameters['associate_public_ip'] is True:
access_configs = [{'kind': 'compute#accessConfig',
'name': 'External NAT',
'type': 'ONE_TO_ONE_NAT',
'networkTier': 'PREMIUM'
}]
# The subnet may live in a shared-VPC host project.
project_id = self.parameters['project_id']
if self.parameters.get('network_project_id'):
project_id = self.parameters['network_project_id']
# NOTE: self.parameters['region'] is derived from the zone by the caller
# (create_occm_gcp) before this method runs.
t['properties']['networkInterfaces'] = [
{'accessConfigs': access_configs,
'kind': 'compute#networkInterface',
'subnetwork': 'projects/%s/regions/%s/subnetworks/%s' % (
project_id, self.parameters['region'], self.parameters['subnet_id'])
}]
# second resource: the 100 GB pd-ssd boot disk.
td = {
'name': device_name,
'properties': {'name': device_name,
'sizeGb': 100,
'sourceImage': 'projects/%s/global/images/family/%s' % (self.rest_api.environment_data['GCP_IMAGE_PROJECT'],
self.rest_api.environment_data['GCP_IMAGE_FAMILY']),
'type': 'zones/%s/diskTypes/pd-ssd' % (self.parameters['zone']),
'zone': self.parameters['zone']
},
'type': 'compute.v1.disks',
}
content = {
'resources': [t, td]
}
my_data = str(yaml.dump(content))
# The template must be in this format:
# {
# "name": "ansible-cycc-vm-boot-deployment",
# "target": {
# "config": {
# "content": "resources:
# - name: xxxx
# properties:
# ...
# "
# }
# }
# }
gcp_deployment_template = '{\n "name": "%s%s",\n "target": {\n "config": {\n "content": "%s"\n }\n}\n}' % (
self.parameters['name'], '-vm-boot-deployment', my_data)
# post
api_url = GCP_DEPLOYMENT_MANAGER + '/deploymentmanager/v2/projects/%s/global/deployments' % (
self.parameters['project_id'])
headers = {
'X-User-Token': self.rest_api.token_type + " " + self.rest_api.gcp_token,
'X-Tenancy-Account-Id': self.parameters['account_id'],
'Authorization': self.rest_api.token_type + " " + self.rest_api.gcp_token,
'Content-type': "application/json",
'Referer': "Ansible_NetApp",
'X-Agent-Id': self.rest_api.format_client_id(client_id)
}
response, error, dummy = self.rest_api.post(api_url, data=gcp_deployment_template, header=headers,
gcp_type=True)
if error is not None:
return response, client_id, error
# check occm status
# Sleep for 1 minutes
time.sleep(60)
# Then poll every 30s, up to 16 times (~8 more minutes).
retries = 16
while retries > 0:
agent, error = self.na_helper.get_occm_agent_by_id(self.rest_api, client_id)
if error is not None:
self.module.fail_json(
msg="Error: Not able to get occm status: %s, %s" % (str(error), str(agent)),
client_id=client_id, changed=True)
if agent['status'] == "active":
break
else:
time.sleep(30)
retries -= 1
if retries == 0:
# Taking too long for status to be active
msg = "Connector VM is created and registered. Taking too long for OCCM agent to be active or not properly setup."
msg += ' Latest status: %s' % agent
self.module.fail_json(msg=msg, client_id=client_id, changed=True)
return response, client_id, error
def create_occm_gcp(self):
'''
Create Cloud Manager connector for GCP
:return: the new connector client id.
'''
# check proxy configuration: user name or password without a URL is invalid.
if 'proxy_user_name' in self.parameters and 'proxy_url' not in self.parameters:
self.module.fail_json(msg="Error: missing proxy_url")
if 'proxy_password' in self.parameters and 'proxy_url' not in self.parameters:
self.module.fail_json(msg="Error: missing proxy_url")
proxy_certificates = []
if 'proxy_certificates' in self.parameters:
for c_file in self.parameters['proxy_certificates']:
proxy_certificate, error = self.na_helper.encode_certificates(c_file)
# add to proxy_certificates list
if error is not None:
self.module.fail_json(msg="Error: not able to read certificate file %s" % c_file)
proxy_certificates.append(proxy_certificate)
# region is the super class of zone. For example, zone us-east4-b is one of the zone in region us-east4
# NOTE(review): zone[:-2] assumes a single-letter zone suffix ('-b');
# this holds for current GCP zone names but is worth confirming.
self.parameters['region'] = self.parameters['zone'][:-2]
# deploy GCP VM
response, client_id, error = self.deploy_gcp_vm(proxy_certificates)
if error is not None:
self.module.fail_json(
msg="Error: create_occm_gcp: %s, %s" % (str(error), str(response)))
return client_id
def delete_occm_gcp(self):
    '''
    Delete the Cloud Manager connector deployment for GCP and wait until
    the OCCM agent leaves the active/pending states.

    :return: an error string on failure, None on success.
    '''
    # Delete the deployment-manager deployment that owns the connector VM.
    api_url = GCP_DEPLOYMENT_MANAGER + '/deploymentmanager/v2/projects/%s/global/deployments/%s%s' % (
        self.parameters['project_id'],
        self.parameters['name'],
        self.gcp_common_suffix_name)
    headers = {
        "X-User-Token": self.rest_api.token_type + " " + self.rest_api.token,
        'Authorization': self.rest_api.token_type + " " + self.rest_api.gcp_token,
        'X-Tenancy-Account-Id': self.parameters['account_id'],
        'Content-type': "application/json",
        'Referer': "Ansible_NetApp",
    }
    response, error, dummy = self.rest_api.delete(api_url, None, header=headers)
    if error is not None:
        return "Error: unexpected response on deleting VM: %s, %s" % (str(error), str(response))
    # Give GCP time to start tearing down before polling the agent status.
    time.sleep(30)
    if 'client_id' not in self.parameters:
        return None
    # Poll the OCCM agent until it is no longer active/pending.
    retries = 30
    while retries > 0:
        agent, error = self.na_helper.get_occm_agent_by_id(self.rest_api, self.parameters['client_id'])
        if error is not None:
            return "Error: Not able to get occm status after deleting VM: %s, %s" % (str(error), str(agent))
        # Bug fix: the original compared the status string to a list with
        # '!=', which is always true, so the loop exited on the first poll
        # regardless of status. Membership test is what was intended.
        if agent['status'] not in ("active", "pending"):
            break
        time.sleep(10)
        # 'pending' consumes retries faster: it should be short-lived.
        retries -= 1 if agent['status'] == "active" else 5
    if retries == 0 and agent['status'] == "active":
        # Taking too long for terminating OCCM
        return "Taking too long for instance to finish terminating. Latest status: %s" % str(agent)
    return None
def delete_occm_agents(self, agents):
    """Delete the given OCCM agent records; return an error string or None."""
    failure = self.na_helper.delete_occm_agents(self.rest_api, agents)
    if failure:
        return "Error: deleting OCCM agent(s): %s" % failure
    return None
def get_occm_agents(self):
# Collect the OCCM agents relevant to this connector.
# When deleting with an explicit client_id, look that agent up directly;
# otherwise search by connector name and provider.
if 'client_id' in self.parameters and self.parameters['state'] == 'absent':
agent, error = self.na_helper.get_occm_agent_by_id(self.rest_api, self.parameters['client_id'])
# NOTE(review): assumes the 403 error body is bytes - confirm against
# the REST layer's error handling.
if error == '403' and b'Action not allowed for user' in agent:
# assume the agent does not exist anymore
agents, error = [], None
self.module.warn('Client Id %s was not found for this account.' % self.parameters['client_id'])
else:
agents = [agent]
else:
agents, error = self.na_helper.get_occm_agents_by_name(self.rest_api, self.parameters['account_id'],
self.parameters['name'], 'GCP')
if error:
self.module.fail_json(
msg="Error: getting OCCM agents: %s, %s" % (str(error), str(agents)))
return agents
def set_client_id(self, agents):
    """
    Pick the connector client id from the discovered agents.

    With exactly one discovered id, adopt it (and record it in
    self.parameters). Otherwise keep a caller-supplied client_id only when
    it appears among the discovered ids.
    :return: tuple (client_id, client_ids) - "" when no id was selected.
    """
    client_ids = [item['agentId'] for item in agents if 'agentId' in item]
    client_id = ""
    if len(client_ids) == 1:
        client_id = client_ids[0]
        self.parameters['client_id'] = client_id
    elif 'client_id' in self.parameters and self.parameters['client_id'] in client_ids:
        client_id = self.parameters['client_id']
    return client_id, client_ids
def apply(self):
"""
Apply action to the Cloud Manager connector for GCP
:return: None
"""
client_id = ""
agents, client_ids = [], []
current_vm = self.get_deploy_vm()
# A terminated deployment counts as absent.
if current_vm and current_vm['operation']['status'] == 'terminated':
current_vm = None
current = current_vm
# Agents matter when deleting, or when a VM exists (to report its ids).
if self.parameters['state'] == 'absent' or current:
agents = self.get_occm_agents()
client_id, client_ids = self.set_client_id(agents)
# Agents without a VM still represent existing state to clean up.
if agents and current is None:
current = {}
if agents:
current['agents'] = agents
cd_action = self.na_helper.get_cd_action(current, self.parameters)
if self.na_helper.changed and not self.module.check_mode:
if cd_action == 'create':
client_id = self.create_occm_gcp()
elif cd_action == 'delete':
# Collect errors so both the VM and the agents get a deletion
# attempt before failing.
errors = []
if current_vm:
error = self.delete_occm_gcp()
if error:
errors.append(error)
if agents:
error = self.delete_occm_agents(agents)
if error:
errors.append(error)
if errors:
self.module.fail_json(msg='. '.join(errors))
self.module.exit_json(changed=self.na_helper.changed, client_id=client_id, client_ids=client_ids)
def main():
    """
    Instantiate the Cloud Manager connector for GCP module and run it.
    :return: None
    """
    connector = NetAppCloudManagerConnectorGCP()
    connector.apply()


if __name__ == '__main__':
    main()

View File

@@ -0,0 +1,855 @@
#!/usr/bin/python
# (c) 2022, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
'''
na_cloudmanager_cvo_aws
'''
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
module: na_cloudmanager_cvo_aws
short_description: NetApp Cloud Manager CVO for AWS
extends_documentation_fragment:
- netapp.cloudmanager.netapp.cloudmanager
version_added: '21.3.0'
author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
description:
- Create, delete, or manage Cloud Manager CVO for AWS.
options:
state:
description:
- Whether the specified Cloud Manager CVO for AWS should exist or not.
choices: ['present', 'absent']
default: 'present'
type: str
name:
required: true
description:
- The name of the Cloud Manager CVO for AWS to manage.
type: str
instance_type:
description:
- The instance type to use, which depends on the license type.
- Explore ['m5.xlarge'].
- Standard ['m5.2xlarge','r5.xlarge'].
- Premium ['m5.4xlarge','r5.2xlarge','c4.8xlarge'].
- For more supported instance types, refer to Cloud Volumes ONTAP Release Notes.
type: str
default: m5.2xlarge
license_type:
description:
- The type of license to use.
- For single node by Capacity ['capacity-paygo']
- For single node by Node paygo ['cot-explore-paygo', 'cot-standard-paygo', 'cot-premium-paygo'].
- For single node by Node byol ['cot-premium-byol'].
- For HA by Capacity ['ha-capacity-paygo']
- For HA by Node paygo ['ha-cot-explore-paygo','ha-cot-standard-paygo','ha-cot-premium-paygo'].
- For HA by Node byol ['ha-cot-premium-byol'].
choices: ['capacity-paygo', 'cot-standard-paygo', 'cot-premium-paygo', 'cot-explore-paygo', 'cot-premium-byol', \
'ha-cot-standard-paygo', 'ha-cot-premium-paygo', 'ha-cot-premium-byol', 'ha-cot-explore-paygo', \
'ha-capacity-paygo']
default: capacity-paygo
type: str
provided_license:
description:
- Using a NLF license file for BYOL deployment.
type: str
capacity_package_name:
description:
- Capacity package name is required when selecting a capacity based license.
- Essential only available with Bring Your Own License Capacity-Based.
- Professional available as an annual contract from AWS marketplace or Bring Your Own License Capacity-Based.
choices: ['Professional', 'Essential', 'Freemium']
default: 'Essential'
type: str
version_added: 21.12.0
workspace_id:
description:
- The ID of the Cloud Manager workspace where you want to deploy Cloud Volumes ONTAP.
- If not provided, Cloud Manager uses the first workspace.
- You can find the ID from the Workspace tab on U(https://cloudmanager.netapp.com).
type: str
subnet_id:
description:
- The subnet id where the working environment will be created. Required when single node only.
type: str
vpc_id:
description:
- The VPC ID where the working environment will be created.
- If this argument is not provided, the VPC will be calculated by using the provided subnet ID.
type: str
region:
required: true
description:
- The region where the working environment will be created.
type: str
data_encryption_type:
description:
- The type of encryption to use for the working environment.
choices: ['AWS', 'NONE']
default: 'AWS'
type: str
client_id:
required: true
description:
- The connector ID of the Cloud Manager Connector.
- You can find the ID from the Connector tab on U(https://cloudmanager.netapp.com).
type: str
ebs_volume_size:
description:
- EBS volume size for the first data aggregate.
- For GB, the value can be [100 or 500].
- For TB, the value can be [1,2,4,8,16].
default: 1
type: int
ebs_volume_size_unit:
description:
- The unit for ebs volume size.
choices: ['GB', 'TB']
default: 'TB'
type: str
ebs_volume_type:
description:
- The EBS volume type for the first data aggregate.
choices: ['gp3', 'gp2', 'io1', 'sc1', 'st1']
default: 'gp2'
type: str
security_group_id:
description:
- The ID of the security group for the working environment. If not provided, Cloud Manager creates the security group.
type: str
instance_profile_name:
description:
- The instance profile name for the working environment. If not provided, Cloud Manager creates the instance profile.
type: str
svm_password:
required: true
description:
- The admin password for Cloud Volumes ONTAP.
- It will be updated on each run.
type: str
svm_name:
description:
- The name of the SVM.
type: str
version_added: 21.22.0
ontap_version:
description:
- The required ONTAP version. Ignored if 'use_latest_version' is set to true.
type: str
default: 'latest'
use_latest_version:
description:
- Indicates whether to use the latest available ONTAP version.
type: bool
default: true
platform_serial_number:
description:
- The serial number for the cluster. This is required when using 'cot-premium-byol'.
type: str
tier_level:
description:
- The tiering level when 'capacity_tier' is set to 'S3'.
choices: ['normal', 'ia', 'ia-single', 'intelligent']
default: 'normal'
type: str
cluster_key_pair_name:
description:
- SSH authentication key pair name
type: str
version_added: 21.20.0
nss_account:
description:
- The NetApp Support Site account ID to use with this Cloud Volumes ONTAP system.
- If the license type is BYOL and an NSS account is not provided, Cloud Manager tries to use the first existing NSS account.
type: str
writing_speed_state:
description:
- The write speed setting for Cloud Volumes ONTAP ['NORMAL','HIGH'].
- This argument is not relevant for HA pairs.
type: str
iops:
description:
- Provisioned IOPS. Required only when provider_volume_type is 'io1' or 'gp3'.
type: int
throughput:
description:
- Unit is Mb/s. Valid range 125-1000.
- Required only when provider_volume_type is 'gp3'.
type: int
capacity_tier:
description:
- Whether to enable data tiering for the first data aggregate.
choices: ['S3', 'NONE']
default: 'S3'
type: str
instance_tenancy:
description:
- The EC2 instance tenancy.
choices: ['default', 'dedicated']
default: 'default'
type: str
cloud_provider_account:
description:
- The cloud provider credentials id to use when deploying the Cloud Volumes ONTAP system.
- You can find the ID in Cloud Manager from the Settings > Credentials page.
- If not specified, Cloud Manager uses the instance profile of the Connector.
type: str
backup_volumes_to_cbs:
description:
- Automatically enable back up of all volumes to S3.
default: false
type: bool
enable_compliance:
description:
- Enable the Cloud Compliance service on the working environment.
default: false
type: bool
enable_monitoring:
description:
- Enable the Monitoring service on the working environment.
default: false
type: bool
optimized_network_utilization:
description:
- Use optimized network utilization.
default: true
type: bool
kms_key_id:
description:
- Aws Encryption parameters. It is required if using aws encryption. Only one of KMS key id or KMS arn should be specified.
type: str
kms_key_arn:
description:
- AWS encryption parameters. It is required if using aws encryption. Only one of KMS key id or KMS arn should be specified.
type: str
version_added: 21.10.0
aws_tag:
description:
- Additional tags for the AWS CVO working environment.
type: list
elements: dict
suboptions:
tag_key:
description: The key of the tag.
type: str
tag_value:
description: The tag value.
type: str
is_ha:
description:
- Indicate whether the working environment is an HA pair or not.
type: bool
default: false
platform_serial_number_node1:
description:
- For HA BYOL, the serial number for the first node. This is required when using 'ha-cot-premium-byol'.
type: str
platform_serial_number_node2:
description:
- For HA BYOL, the serial number for the second node. This is required when using 'ha-cot-premium-byol'.
type: str
node1_subnet_id:
description:
- For HA, the subnet ID of the first node.
type: str
node2_subnet_id:
description:
- For HA, the subnet ID of the second node.
type: str
mediator_subnet_id:
description:
- For HA, the subnet ID of the mediator.
type: str
failover_mode:
description:
- For HA, the failover mode for the HA pair. 'PrivateIP' is for a single availability zone and 'FloatingIP' is for multiple availability zones.
type: str
choices: ['PrivateIP', 'FloatingIP']
mediator_assign_public_ip:
description:
- Boolean option to assign public IP.
type: bool
default: true
mediator_key_pair_name:
description:
- For HA, the key pair name for the mediator instance.
type: str
cluster_floating_ip:
description:
- For HA FloatingIP, the cluster management floating IP address.
type: str
data_floating_ip:
description:
- For HA FloatingIP, the data floating IP address.
type: str
data_floating_ip2:
description:
- For HA FloatingIP, the data floating IP address.
type: str
svm_floating_ip:
description:
- For HA FloatingIP, the SVM management floating IP address.
type: str
route_table_ids:
description:
- For HA FloatingIP, the list of route table IDs that will be updated with the floating IPs.
type: list
elements: str
upgrade_ontap_version:
description:
- Indicates whether to upgrade ONTAP image on the CVO.
- If the current version already matches the desired version, no action is taken.
type: bool
default: false
version_added: 21.13.0
update_svm_password:
description:
- Indicates whether to update svm_password on the CVO.
- When set to true, the module is not idempotent, as we cannot read the current password.
type: bool
default: false
version_added: 21.13.0
notes:
- Support check_mode.
'''
EXAMPLES = """
- name: Create NetApp Cloud Manager CVO for AWS single
netapp.cloudmanager.na_cloudmanager_cvo_aws:
state: present
refresh_token: "{{ xxxxxxxxxxxxxxx }}"
name: AnsibleCVO
region: us-west-1
subnet_id: subnet-xxxxxxx
vpc_id: vpc-xxxxxxxx
svm_password: P@assword!
client_id: "{{ xxxxxxxxxxxxxxx }}"
writing_speed_state: NORMAL
aws_tag: [
{tag_key: abc,
tag_value: a123}]
- name: Create NetApp Cloud Manager CVO for AWS HA
netapp.cloudmanager.na_cloudmanager_cvo_aws:
state: present
refresh_token: "{{ xxxxxxxxxxxxxxx }}"
name: AnsibleCVO
region: us-west-1
subnet_id: subnet-xxxxxxx
vpc_id: vpc-xxxxxxxx
svm_password: P@assword!
client_id: "{{ xxxxxxxxxxxxxxx }}"
writing_speed_state: NORMAL
aws_tag: [
{tag_key: abc,
tag_value: a123}]
is_ha: true
failover_mode: FloatingIP
node1_subnet_id: subnet-1
node2_subnet_id: subnet-1
mediator_subnet_id: subnet-1
mediator_key_pair_name: key1
cluster_floating_ip: 2.1.1.1
data_floating_ip: 2.1.1.2
data_floating_ip2: 2.1.1.3
svm_floating_ip: 2.1.1.4
route_table_ids: [rt-1,rt-2]
- name: Delete NetApp Cloud Manager cvo for AWS
netapp.cloudmanager.na_cloudmanager_cvo_aws:
state: absent
name: ansible
region: us-west-1
refresh_token: "{{ xxxxxxxxxxxxxxx }}"
subnet_id: subnet-xxxxxxx
vpc_id: vpc-xxxxxxxx
svm_password: P@assword!
client_id: "{{ xxxxxxxxxxxxxxx }}"
"""
RETURN = '''
working_environment_id:
description: Newly created AWS CVO working_environment_id.
type: str
returned: success
'''
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp as netapp_utils
from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module import NetAppModule
from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp import CloudManagerRestAPI
# boto3/botocore are optional at import time; a missing library is captured
# here and reported later through module.fail_json instead of crashing the
# module on import.
IMPORT_EXCEPTION = None
try:
    import boto3
    from botocore.exceptions import ClientError
    HAS_AWS_LIB = True
except ImportError as exc:
    HAS_AWS_LIB = False
    IMPORT_EXCEPTION = exc

# License types accepted by the AWS CVO API; entries prefixed with 'ha-'
# apply to HA pairs, the others to single-node deployments.
AWS_License_Types = ['cot-standard-paygo', 'cot-premium-paygo', 'cot-explore-paygo', 'cot-premium-byol', 'ha-cot-standard-paygo',
                     'ha-cot-premium-paygo', 'ha-cot-premium-byol', 'ha-cot-explore-paygo', 'capacity-paygo', 'ha-capacity-paygo']
class NetAppCloudManagerCVOAWS:
    '''Create, delete, or modify a Cloud Volumes ONTAP (CVO) working environment on AWS through Cloud Manager.'''

    def __init__(self):
        """Build the argument spec, run AnsibleModule validation, and prepare the Cloud Manager REST client."""
        self.use_rest = False
        self.argument_spec = netapp_utils.cloudmanager_host_argument_spec()
        self.argument_spec.update(dict(
            name=dict(required=True, type='str'),
            state=dict(required=False, choices=['present', 'absent'], default='present'),
            instance_type=dict(required=False, type='str', default='m5.2xlarge'),
            license_type=dict(required=False, type='str', choices=AWS_License_Types, default='capacity-paygo'),
            workspace_id=dict(required=False, type='str'),
            subnet_id=dict(required=False, type='str'),
            vpc_id=dict(required=False, type='str'),
            region=dict(required=True, type='str'),
            data_encryption_type=dict(required=False, type='str', choices=['AWS', 'NONE'], default='AWS'),
            # type='int' expects an int default; the previous string '1' relied on implicit coercion
            ebs_volume_size=dict(required=False, type='int', default=1),
            ebs_volume_size_unit=dict(required=False, type='str', choices=['GB', 'TB'], default='TB'),
            ebs_volume_type=dict(required=False, type='str', choices=['gp3', 'gp2', 'io1', 'sc1', 'st1'], default='gp2'),
            svm_password=dict(required=True, type='str', no_log=True),
            svm_name=dict(required=False, type='str'),
            ontap_version=dict(required=False, type='str', default='latest'),
            use_latest_version=dict(required=False, type='bool', default=True),
            platform_serial_number=dict(required=False, type='str'),
            capacity_package_name=dict(required=False, type='str', choices=['Professional', 'Essential', 'Freemium'], default='Essential'),
            provided_license=dict(required=False, type='str'),
            tier_level=dict(required=False, type='str', choices=['normal', 'ia', 'ia-single', 'intelligent'], default='normal'),
            cluster_key_pair_name=dict(required=False, type='str'),
            nss_account=dict(required=False, type='str'),
            writing_speed_state=dict(required=False, type='str'),
            iops=dict(required=False, type='int'),
            throughput=dict(required=False, type='int'),
            capacity_tier=dict(required=False, type='str', choices=['S3', 'NONE'], default='S3'),
            instance_tenancy=dict(required=False, type='str', choices=['default', 'dedicated'], default='default'),
            instance_profile_name=dict(required=False, type='str'),
            security_group_id=dict(required=False, type='str'),
            cloud_provider_account=dict(required=False, type='str'),
            backup_volumes_to_cbs=dict(required=False, type='bool', default=False),
            enable_compliance=dict(required=False, type='bool', default=False),
            enable_monitoring=dict(required=False, type='bool', default=False),
            optimized_network_utilization=dict(required=False, type='bool', default=True),
            kms_key_id=dict(required=False, type='str', no_log=True),
            kms_key_arn=dict(required=False, type='str', no_log=True),
            client_id=dict(required=True, type='str'),
            aws_tag=dict(required=False, type='list', elements='dict', options=dict(
                tag_key=dict(type='str', no_log=False),
                tag_value=dict(type='str')
            )),
            is_ha=dict(required=False, type='bool', default=False),
            platform_serial_number_node1=dict(required=False, type='str'),
            platform_serial_number_node2=dict(required=False, type='str'),
            failover_mode=dict(required=False, type='str', choices=['PrivateIP', 'FloatingIP']),
            mediator_assign_public_ip=dict(required=False, type='bool', default=True),
            node1_subnet_id=dict(required=False, type='str'),
            node2_subnet_id=dict(required=False, type='str'),
            mediator_subnet_id=dict(required=False, type='str'),
            mediator_key_pair_name=dict(required=False, type='str'),
            cluster_floating_ip=dict(required=False, type='str'),
            data_floating_ip=dict(required=False, type='str'),
            data_floating_ip2=dict(required=False, type='str'),
            svm_floating_ip=dict(required=False, type='str'),
            route_table_ids=dict(required=False, type='list', elements='str'),
            upgrade_ontap_version=dict(required=False, type='bool', default=False),
            update_svm_password=dict(required=False, type='bool', default=False),
        ))
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            required_if=[
                ['ebs_volume_type', 'gp3', ['iops', 'throughput']],
                ['ebs_volume_type', 'io1', ['iops']],
                ['license_type', 'cot-premium-byol', ['platform_serial_number']],
                ['license_type', 'ha-cot-premium-byol', ['platform_serial_number_node1', 'platform_serial_number_node2']],
                ['license_type', 'capacity-paygo', ['capacity_package_name']],
                ['license_type', 'ha-capacity-paygo', ['capacity_package_name']],
            ],
            required_one_of=[['refresh_token', 'sa_client_id']],
            mutually_exclusive=[['kms_key_id', 'kms_key_arn']],
            required_together=[['sa_client_id', 'sa_secret_key']],
            supports_check_mode=True,
        )
        if HAS_AWS_LIB is False:
            # trailing space added so the message does not render as "...boto3.Import error"
            self.module.fail_json(msg="the python AWS library boto3 and botocore is required. Command is pip install boto3. "
                                      "Import error: %s" % str(IMPORT_EXCEPTION))
        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        # parameters that may be changed on an existing working environment
        self.changeable_params = ['aws_tag', 'svm_password', 'svm_name', 'tier_level', 'ontap_version', 'instance_type', 'license_type', 'writing_speed_state']
        self.rest_api = CloudManagerRestAPI(self.module)
        self.rest_api.url += self.rest_api.environment_data['CLOUD_MANAGER_HOST']
        # HA pairs and single nodes are served under different API roots
        self.rest_api.api_root_path = '/occm/api/%s' % ('aws/ha' if self.parameters['is_ha'] else 'vsa')
        self.headers = {
            'X-Agent-Id': self.rest_api.format_client_id(self.parameters['client_id'])
        }

    def get_vpc(self):
        """
        Look up the VPC that owns the configured subnet.
        :return: vpc ID
        """
        vpc_result = None
        ec2 = boto3.client('ec2', region_name=self.parameters['region'])
        vpc_input = {'SubnetIds': [self.parameters['subnet_id']]}
        try:
            vpc_result = ec2.describe_subnets(**vpc_input)
        except ClientError as error:
            self.module.fail_json(msg=to_native(error), exception=traceback.format_exc())
        return vpc_result['Subnets'][0]['VpcId']

    def create_cvo_aws(self):
        """
        Create the AWS CVO working environment and wait for the creation task to finish.
        :return: the public id of the new working environment
        """
        if self.parameters.get('workspace_id') is None:
            response, msg = self.na_helper.get_tenant(self.rest_api, self.headers)
            if response is None:
                self.module.fail_json(msg)
            self.parameters['workspace_id'] = response
        if self.parameters.get('vpc_id') is None and self.parameters['is_ha'] is False:
            self.parameters['vpc_id'] = self.get_vpc()
        if self.parameters.get('nss_account') is None:
            # a BYOL license with a non-evaluation serial number needs an NSS
            # account; fall back to the first one on record
            if self.parameters.get('platform_serial_number') is not None:
                if not self.parameters['platform_serial_number'].startswith('Eval-') and self.parameters['license_type'] == 'cot-premium-byol':
                    response, msg = self.na_helper.get_nss(self.rest_api, self.headers)
                    if response is None:
                        self.module.fail_json(msg)
                    self.parameters['nss_account'] = response
            elif self.parameters.get('platform_serial_number_node1') is not None and self.parameters.get('platform_serial_number_node2') is not None:
                if not self.parameters['platform_serial_number_node1'].startswith('Eval-')\
                        and not self.parameters['platform_serial_number_node2'].startswith('Eval-')\
                        and self.parameters['license_type'] == 'ha-cot-premium-byol':
                    response, msg = self.na_helper.get_nss(self.rest_api, self.headers)
                    if response is None:
                        self.module.fail_json(msg)
                    self.parameters['nss_account'] = response
        json = {"name": self.parameters['name'],
                "region": self.parameters['region'],
                "tenantId": self.parameters['workspace_id'],
                "vpcId": self.parameters['vpc_id'],
                "dataEncryptionType": self.parameters['data_encryption_type'],
                "ebsVolumeSize": {
                    "size": self.parameters['ebs_volume_size'],
                    "unit": self.parameters['ebs_volume_size_unit']},
                "ebsVolumeType": self.parameters['ebs_volume_type'],
                "svmPassword": self.parameters['svm_password'],
                "backupVolumesToCbs": self.parameters['backup_volumes_to_cbs'],
                "enableCompliance": self.parameters['enable_compliance'],
                "enableMonitoring": self.parameters['enable_monitoring'],
                "optimizedNetworkUtilization": self.parameters['optimized_network_utilization'],
                "vsaMetadata": {
                    "ontapVersion": self.parameters['ontap_version'],
                    "licenseType": self.parameters['license_type'],
                    "useLatestVersion": self.parameters['use_latest_version'],
                    "instanceType": self.parameters['instance_type']},
                }
        if self.parameters['capacity_tier'] == "S3":
            json.update({"capacityTier": self.parameters['capacity_tier'],
                         "tierLevel": self.parameters['tier_level']})
        # clean default value if it is not by Capacity license
        if not self.parameters['license_type'].endswith('capacity-paygo'):
            json['vsaMetadata'].update({"capacityPackageName": ''})
        if self.parameters.get('platform_serial_number') is not None:
            json['vsaMetadata'].update({"platformSerialNumber": self.parameters['platform_serial_number']})
        if self.parameters.get('provided_license') is not None:
            json['vsaMetadata'].update({"providedLicense": self.parameters['provided_license']})
        if self.parameters.get('capacity_package_name') is not None:
            json['vsaMetadata'].update({"capacityPackageName": self.parameters['capacity_package_name']})
        if self.parameters.get('writing_speed_state') is not None:
            json.update({"writingSpeedState": self.parameters['writing_speed_state'].upper()})
        if self.parameters.get('iops') is not None:
            json.update({"iops": self.parameters['iops']})
        if self.parameters.get('throughput') is not None:
            json.update({"throughput": self.parameters['throughput']})
        if self.parameters.get('cluster_key_pair_name') is not None:
            json.update({"clusterKeyPairName": self.parameters['cluster_key_pair_name']})
        if self.parameters.get('instance_tenancy') is not None:
            json.update({"instanceTenancy": self.parameters['instance_tenancy']})
        if self.parameters.get('instance_profile_name') is not None:
            json.update({"instanceProfileName": self.parameters['instance_profile_name']})
        if self.parameters.get('security_group_id') is not None:
            json.update({"securityGroupId": self.parameters['security_group_id']})
        if self.parameters.get('cloud_provider_account') is not None:
            json.update({"cloudProviderAccount": self.parameters['cloud_provider_account']})
        if self.parameters.get('backup_volumes_to_cbs') is not None:
            json.update({"backupVolumesToCbs": self.parameters['backup_volumes_to_cbs']})
        if self.parameters.get('svm_name') is not None:
            json.update({"svmName": self.parameters['svm_name']})
        if self.parameters['data_encryption_type'] == "AWS":
            # kms_key_id and kms_key_arn are mutually exclusive (enforced in __init__)
            if self.parameters.get('kms_key_id') is not None:
                json.update({"awsEncryptionParameters": {"kmsKeyId": self.parameters['kms_key_id']}})
            if self.parameters.get('kms_key_arn') is not None:
                json.update({"awsEncryptionParameters": {"kmsKeyArn": self.parameters['kms_key_arn']}})
        if self.parameters.get('aws_tag') is not None:
            tags = []
            for each_tag in self.parameters['aws_tag']:
                tag = {
                    'tagKey': each_tag['tag_key'],
                    'tagValue': each_tag['tag_value']
                }
                tags.append(tag)
            json.update({"awsTags": tags})
        if self.parameters['is_ha'] is True:
            ha_params = dict({
                "mediatorAssignPublicIP": self.parameters['mediator_assign_public_ip']
            })
            if self.parameters.get('failover_mode'):
                ha_params["failoverMode"] = self.parameters['failover_mode']
            if self.parameters.get('node1_subnet_id'):
                ha_params["node1SubnetId"] = self.parameters['node1_subnet_id']
            if self.parameters.get('node2_subnet_id'):
                ha_params["node2SubnetId"] = self.parameters['node2_subnet_id']
            if self.parameters.get('mediator_subnet_id'):
                ha_params["mediatorSubnetId"] = self.parameters['mediator_subnet_id']
            if self.parameters.get('mediator_key_pair_name'):
                ha_params["mediatorKeyPairName"] = self.parameters['mediator_key_pair_name']
            if self.parameters.get('cluster_floating_ip'):
                ha_params["clusterFloatingIP"] = self.parameters['cluster_floating_ip']
            if self.parameters.get('data_floating_ip'):
                ha_params["dataFloatingIP"] = self.parameters['data_floating_ip']
            if self.parameters.get('data_floating_ip2'):
                ha_params["dataFloatingIP2"] = self.parameters['data_floating_ip2']
            if self.parameters.get('svm_floating_ip'):
                ha_params["svmFloatingIP"] = self.parameters['svm_floating_ip']
            if self.parameters.get('route_table_ids'):
                ha_params["routeTableIds"] = self.parameters['route_table_ids']
            if self.parameters.get('platform_serial_number_node1'):
                ha_params["platformSerialNumberNode1"] = self.parameters['platform_serial_number_node1']
            if self.parameters.get('platform_serial_number_node2'):
                ha_params["platformSerialNumberNode2"] = self.parameters['platform_serial_number_node2']
            json["haParams"] = ha_params
        else:
            json["subnetId"] = self.parameters['subnet_id']
        api_url = '%s/working-environments' % self.rest_api.api_root_path
        response, error, on_cloud_request_id = self.rest_api.post(api_url, json, header=self.headers)
        if error is not None:
            self.module.fail_json(
                msg="Error: unexpected response on creating cvo aws: %s, %s" % (str(error), str(response)))
        working_environment_id = response['publicId']
        wait_on_completion_api_url = '/occm/api/audit/activeTask/%s' % str(on_cloud_request_id)
        err = self.rest_api.wait_on_completion(wait_on_completion_api_url, "CVO", "create", 60, 60)
        if err is not None:
            self.module.fail_json(msg="Error: unexpected response wait_on_completion for creating CVO AWS: %s" % str(err))
        return working_environment_id

    def update_cvo_aws(self, working_environment_id, modify):
        """
        Apply each pending attribute change to an existing working environment.
        :param working_environment_id: public id of the working environment
        :param modify: list of parameter names that need updating
        """
        base_url = '%s/working-environments/%s/' % (self.rest_api.api_root_path, working_environment_id)
        for item in modify:
            if item == 'svm_password':
                response, error = self.na_helper.update_svm_password(base_url, self.rest_api, self.headers, self.parameters['svm_password'])
                if error is not None:
                    self.module.fail_json(changed=False, msg=error)
            if item == 'svm_name':
                response, error = self.na_helper.update_svm_name(base_url, self.rest_api, self.headers, self.parameters['svm_name'])
                if error is not None:
                    self.module.fail_json(changed=False, msg=error)
            if item == 'aws_tag':
                tag_list = None
                if 'aws_tag' in self.parameters:
                    tag_list = self.parameters['aws_tag']
                response, error = self.na_helper.update_cvo_tags(base_url, self.rest_api, self.headers, 'aws_tag', tag_list)
                if error is not None:
                    self.module.fail_json(changed=False, msg=error)
            if item == 'tier_level':
                response, error = self.na_helper.update_tier_level(base_url, self.rest_api, self.headers, self.parameters['tier_level'])
                if error is not None:
                    self.module.fail_json(changed=False, msg=error)
            if item == 'writing_speed_state':
                response, error = self.na_helper.update_writing_speed_state(base_url, self.rest_api, self.headers, self.parameters['writing_speed_state'])
                if error is not None:
                    self.module.fail_json(changed=False, msg=error)
            if item == 'ontap_version':
                response, error = self.na_helper.upgrade_ontap_image(self.rest_api, self.headers, self.parameters['ontap_version'])
                if error is not None:
                    self.module.fail_json(changed=False, msg=error)
            if item == 'instance_type' or item == 'license_type':
                # instance type and license type are updated through a single API call
                response, error = self.na_helper.update_instance_license_type(base_url, self.rest_api, self.headers,
                                                                              self.parameters['instance_type'],
                                                                              self.parameters['license_type'])
                if error is not None:
                    self.module.fail_json(changed=False, msg=error)

    def delete_cvo_aws(self, we_id):
        """
        Delete AWS CVO
        :param we_id: public id of the working environment to delete
        """
        api_url = '%s/working-environments/%s' % (self.rest_api.api_root_path, we_id)
        response, error, on_cloud_request_id = self.rest_api.delete(api_url, None, header=self.headers)
        if error is not None:
            self.module.fail_json(msg="Error: unexpected response on deleting cvo aws: %s, %s" % (str(error), str(response)))
        wait_on_completion_api_url = '/occm/api/audit/activeTask/%s' % str(on_cloud_request_id)
        err = self.rest_api.wait_on_completion(wait_on_completion_api_url, "CVO", "delete", 40, 60)
        if err is not None:
            self.module.fail_json(msg="Error: unexpected response wait_on_completion for deleting CVO AWS: %s" % str(err))

    def validate_cvo_params(self):
        """Check cross-parameter constraints that the argument spec cannot express."""
        if self.parameters['use_latest_version'] is True and self.parameters['ontap_version'] != "latest":
            self.module.fail_json(msg="ontap_version parameter not required when having use_latest_version as true")
        if self.parameters['is_ha'] is True and self.parameters['license_type'] == "ha-cot-premium-byol":
            if self.parameters.get('platform_serial_number_node1') is None or self.parameters.get('platform_serial_number_node2') is None:
                # trailing space added so the message does not render as "requiredwhen"
                self.module.fail_json(msg="both platform_serial_number_node1 and platform_serial_number_node2 parameters are required "
                                          "when having ha type as true and license_type as ha-cot-premium-byol")
        if self.parameters['is_ha'] is True and self.parameters['license_type'] == 'capacity-paygo':
            # normalize to the HA flavor of the capacity-based license
            self.parameters['license_type'] = 'ha-capacity-paygo'

    def apply(self):
        """
        Apply action to the Cloud Manager CVO for AWS
        :return: None
        """
        working_environment_id = None
        modify = None
        current, dummy = self.na_helper.get_working_environment_details_by_name(self.rest_api, self.headers,
                                                                                self.parameters['name'], "aws")
        if current:
            self.parameters['working_environment_id'] = current['publicId']
        # check the action
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        if current and self.parameters['state'] != 'absent':
            # Check mandatory parameters
            self.validate_cvo_params()
            working_environment_id = current['publicId']
            modify, error = self.na_helper.is_cvo_update_needed(self.rest_api, self.headers, self.parameters, self.changeable_params, 'aws')
            if error is not None:
                self.module.fail_json(changed=False, msg=error)
        if self.na_helper.changed and not self.module.check_mode:
            if cd_action == "create":
                self.validate_cvo_params()
                working_environment_id = self.create_cvo_aws()
            elif cd_action == "delete":
                self.delete_cvo_aws(current['publicId'])
            else:
                self.update_cvo_aws(current['publicId'], modify)
        self.module.exit_json(changed=self.na_helper.changed, working_environment_id=working_environment_id)
def main():
    """Instantiate the AWS CVO module object and run its apply() workflow."""
    cvo = NetAppCloudManagerCVOAWS()
    cvo.apply()


if __name__ == '__main__':
    main()

---- file boundary: end of na_cloudmanager_cvo_aws ----
---- next file added by this commit (746 lines): na_cloudmanager_cvo_azure ----
#!/usr/bin/python
# (c) 2022, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
'''
na_cloudmanager_cvo_azure
'''
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
module: na_cloudmanager_cvo_azure
short_description: NetApp Cloud Manager CVO/working environment in single or HA mode for Azure.
extends_documentation_fragment:
- netapp.cloudmanager.netapp.cloudmanager
version_added: '21.4.0'
author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
description:
- Create, delete, or manage Cloud Manager CVO/working environment in single or HA mode for Azure.
options:
state:
description:
- Whether the specified Cloud Manager CVO for AZURE should exist or not.
choices: ['present', 'absent']
default: 'present'
type: str
name:
required: true
description:
- The name of the Cloud Manager CVO for AZURE to manage.
type: str
subscription_id:
required: true
description:
- The ID of the Azure subscription.
type: str
instance_type:
description:
- The type of instance to use, which depends on the license type you chose.
- Explore ['Standard_DS3_v2'].
- Standard ['Standard_DS4_v2, Standard_DS13_v2, Standard_L8s_v2'].
- Premium ['Standard_DS5_v2', 'Standard_DS14_v2'].
- For more supported instance types, refer to Cloud Volumes ONTAP Release Notes.
type: str
default: Standard_DS4_v2
license_type:
description:
- The type of license to use.
- For single node by Capacity ['capacity-paygo'].
- For single node by Node paygo ['azure-cot-explore-paygo', 'azure-cot-standard-paygo', 'azure-cot-premium-paygo'].
- For single node by Node byol ['azure-cot-premium-byol'].
- For HA by Capacity ['ha-capacity-paygo'].
- For HA by Node paygo ['azure-ha-cot-standard-paygo', 'azure-ha-cot-premium-paygo'].
- For HA by Node byol ['azure-ha-cot-premium-byol'].
choices: ['azure-cot-standard-paygo', 'azure-cot-premium-paygo', 'azure-cot-premium-byol', \
'azure-cot-explore-paygo', 'azure-ha-cot-standard-paygo', 'azure-ha-cot-premium-paygo', \
'azure-ha-cot-premium-byol', 'capacity-paygo', 'ha-capacity-paygo']
default: 'capacity-paygo'
type: str
provided_license:
description:
- Using a NLF license file for BYOL deployment.
type: str
capacity_package_name:
description:
- Capacity package name is required when selecting a capacity based license.
- Essential only available with Bring Your Own License Capacity-Based.
- Professional available as an annual contract from a cloud provider or Bring Your Own License Capacity-Based.
choices: ['Professional', 'Essential', 'Freemium']
default: 'Essential'
type: str
version_added: 21.12.0
workspace_id:
description:
- The ID of the Cloud Manager workspace where you want to deploy Cloud Volumes ONTAP.
- If not provided, Cloud Manager uses the first workspace.
- You can find the ID from the Workspace tab on [https://cloudmanager.netapp.com].
type: str
subnet_id:
required: true
description:
- The name of the subnet for the Cloud Volumes ONTAP system.
type: str
vnet_id:
required: true
description:
- The name of the virtual network.
type: str
vnet_resource_group:
description:
- The resource group in Azure associated to the virtual network.
type: str
resource_group:
description:
- The resource_group where Cloud Volumes ONTAP will be created.
- If not provided, Cloud Manager generates the resource group name (name of the working environment/CVO with suffix '-rg').
- If the resource group does not exist, it is created.
type: str
allow_deploy_in_existing_rg:
description:
- Indicates if to allow creation in existing resource group.
type: bool
default: false
cidr:
required: true
description:
- The CIDR of the VNET. If not provided, resource needs az login to authorize and fetch the cidr details from Azure.
type: str
location:
required: true
description:
- The location where the working environment will be created.
type: str
data_encryption_type:
description:
- The type of encryption to use for the working environment.
choices: ['AZURE', 'NONE']
default: 'AZURE'
type: str
azure_encryption_parameters:
description:
- AZURE encryption parameters. It is required if using AZURE encryption.
type: str
version_added: 21.10.0
storage_type:
description:
- The type of storage for the first data aggregate.
choices: ['Premium_LRS', 'Standard_LRS', 'StandardSSD_LRS', 'Premium_ZRS']
default: 'Premium_LRS'
type: str
client_id:
required: true
description:
- The connector ID of the Cloud Manager Connector.
- You can find the ID from the Connector tab on [https://cloudmanager.netapp.com].
type: str
disk_size:
description:
- Azure volume size for the first data aggregate.
- For GB, the value can be [100, 500].
- For TB, the value can be [1,2,4,8,16].
default: 1
type: int
disk_size_unit:
description:
- The unit for disk size.
choices: ['GB', 'TB']
default: 'TB'
type: str
security_group_id:
description:
- The ID of the security group for the working environment. If not provided, Cloud Manager creates the security group.
type: str
svm_password:
required: true
description:
- The admin password for Cloud Volumes ONTAP.
- It will be updated on each run.
type: str
svm_name:
description:
- The name of the SVM.
type: str
version_added: 21.22.0
ontap_version:
description:
- The required ONTAP version. Ignored if 'use_latest_version' is set to true.
type: str
default: 'latest'
use_latest_version:
description:
- Indicates whether to use the latest available ONTAP version.
type: bool
default: true
serial_number:
description:
- The serial number for the cluster.
- Required when using one of these, 'azure-cot-premium-byol' or 'azure-ha-cot-premium-byol'.
type: str
tier_level:
description:
- If capacity_tier is Blob, this argument indicates the tiering level.
choices: ['normal', 'cool']
default: 'normal'
type: str
nss_account:
description:
- The NetApp Support Site account ID to use with this Cloud Volumes ONTAP system.
- If the license type is BYOL and an NSS account isn't provided, Cloud Manager tries to use the first existing NSS account.
type: str
writing_speed_state:
description:
- The write speed setting for Cloud Volumes ONTAP ['NORMAL','HIGH'].
- This argument is not relevant for HA pairs.
type: str
capacity_tier:
description:
- Whether to enable data tiering for the first data aggregate.
choices: ['Blob', 'NONE']
default: 'Blob'
type: str
cloud_provider_account:
description:
- The cloud provider credentials id to use when deploying the Cloud Volumes ONTAP system.
- You can find the ID in Cloud Manager from the Settings > Credentials page.
- If not specified, Cloud Manager uses the instance profile of the Connector.
type: str
backup_volumes_to_cbs:
description:
- Automatically enable back up of all volumes to S3.
default: false
type: bool
enable_compliance:
description:
- Enable the Cloud Compliance service on the working environment.
default: false
type: bool
enable_monitoring:
description:
- Enable the Monitoring service on the working environment.
default: false
type: bool
azure_tag:
description:
- Additional tags for the AZURE CVO working environment.
type: list
elements: dict
suboptions:
tag_key:
description: The key of the tag.
type: str
tag_value:
description: The tag value.
type: str
is_ha:
description:
- Indicate whether the working environment is an HA pair or not.
type: bool
default: false
platform_serial_number_node1:
description:
- For HA BYOL, the serial number for the first node.
type: str
platform_serial_number_node2:
description:
- For HA BYOL, the serial number for the second node.
type: str
ha_enable_https:
description:
- For HA, enable the HTTPS connection from CVO to storage accounts. This can impact write performance. The default is false.
type: bool
version_added: 21.10.0
upgrade_ontap_version:
description:
- Indicates whether to upgrade ONTAP image on the CVO.
- If the current version already matches the desired version, no action is taken.
type: bool
default: false
version_added: 21.13.0
update_svm_password:
description:
- Indicates whether to update svm_password on the CVO.
- When set to true, the module is not idempotent, as we cannot read the current password.
type: bool
default: false
version_added: 21.13.0
availability_zone:
description:
- The availability zone on the location configuration.
type: int
version_added: 21.20.0
availability_zone_node1:
description:
- The node1 availability zone on the location configuration for HA.
type: int
version_added: 21.21.0
availability_zone_node2:
description:
- The node2 availability zone on the location configuration for HA.
type: int
version_added: 21.21.0
'''
EXAMPLES = """
- name: create NetApp Cloud Manager CVO for Azure single
netapp.cloudmanager.na_cloudmanager_cvo_azure:
state: present
refresh_token: "{{ xxxxxxxxxxxxxxx }}"
name: AnsibleCVO
location: westus
subnet_id: subnet-xxxxxxx
vnet_id: vnetxxxxxxxx
svm_password: P@assword!
client_id: "{{ xxxxxxxxxxxxxxx }}"
writing_speed_state: NORMAL
azure_tag: [
{tag_key: abc,
tag_value: a123}]
- name: create NetApp Cloud Manager CVO for Azure HA
netapp.cloudmanager.na_cloudmanager_cvo_azure:
state: present
refresh_token: "{{ xxxxxxxxxxxxxxx }}"
name: AnsibleCVO
location: westus
subnet_id: subnet-xxxxxxx
vnet_id: vnetxxxxxxxx
svm_password: P@assword!
client_id: "{{ xxxxxxxxxxxxxxx }}"
writing_speed_state: NORMAL
azure_tag: [
{tag_key: abc,
tag_value: a123}]
is_ha: true
- name: delete NetApp Cloud Manager cvo for Azure
netapp.cloudmanager.na_cloudmanager_cvo_azure:
state: absent
name: ansible
refresh_token: "{{ xxxxxxxxxxxxxxx }}"
location: westus
subnet_id: subnet-xxxxxxx
vnet_id: vnetxxxxxxxx
svm_password: P@assword!
client_id: "{{ xxxxxxxxxxxxxxx }}"
"""
RETURN = '''
working_environment_id:
description: Newly created AZURE CVO working_environment_id.
type: str
returned: success
'''
from ansible.module_utils.basic import AnsibleModule
import ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp as netapp_utils
from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module import NetAppModule
from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp import CloudManagerRestAPI
AZURE_License_Types = ['azure-cot-standard-paygo', 'azure-cot-premium-paygo', 'azure-cot-premium-byol', 'azure-cot-explore-paygo',
'azure-ha-cot-standard-paygo', 'azure-ha-cot-premium-paygo', 'azure-ha-cot-premium-byol', 'capacity-paygo', 'ha-capacity-paygo']
class NetAppCloudManagerCVOAZURE:
""" object initialize and class methods """
def __init__(self):
    """Build the module argument spec, parse parameters and set up the REST client."""
    self.use_rest = False
    # Start from the shared Cloud Manager host options (refresh_token, sa_client_id, ...).
    self.argument_spec = netapp_utils.cloudmanager_host_argument_spec()
    self.argument_spec.update(dict(
        name=dict(required=True, type='str'),
        state=dict(required=False, choices=['present', 'absent'], default='present'),
        instance_type=dict(required=False, type='str', default='Standard_DS4_v2'),
        license_type=dict(required=False, type='str', choices=AZURE_License_Types, default='capacity-paygo'),
        workspace_id=dict(required=False, type='str'),
        capacity_package_name=dict(required=False, type='str', choices=['Professional', 'Essential', 'Freemium'], default='Essential'),
        provided_license=dict(required=False, type='str'),
        subnet_id=dict(required=True, type='str'),
        vnet_id=dict(required=True, type='str'),
        vnet_resource_group=dict(required=False, type='str'),
        resource_group=dict(required=False, type='str'),
        cidr=dict(required=True, type='str'),
        location=dict(required=True, type='str'),
        subscription_id=dict(required=True, type='str'),
        data_encryption_type=dict(required=False, type='str', choices=['AZURE', 'NONE'], default='AZURE'),
        azure_encryption_parameters=dict(required=False, type='str', no_log=True),
        storage_type=dict(required=False, type='str', choices=['Premium_LRS', 'Standard_LRS', 'StandardSSD_LRS', 'Premium_ZRS'], default='Premium_LRS'),
        disk_size=dict(required=False, type='int', default=1),
        disk_size_unit=dict(required=False, type='str', choices=['GB', 'TB'], default='TB'),
        svm_password=dict(required=True, type='str', no_log=True),
        svm_name=dict(required=False, type='str'),
        ontap_version=dict(required=False, type='str', default='latest'),
        use_latest_version=dict(required=False, type='bool', default=True),
        tier_level=dict(required=False, type='str', choices=['normal', 'cool'], default='normal'),
        nss_account=dict(required=False, type='str'),
        writing_speed_state=dict(required=False, type='str'),
        capacity_tier=dict(required=False, type='str', choices=['Blob', 'NONE'], default='Blob'),
        security_group_id=dict(required=False, type='str'),
        cloud_provider_account=dict(required=False, type='str'),
        backup_volumes_to_cbs=dict(required=False, type='bool', default=False),
        enable_compliance=dict(required=False, type='bool', default=False),
        enable_monitoring=dict(required=False, type='bool', default=False),
        allow_deploy_in_existing_rg=dict(required=False, type='bool', default=False),
        client_id=dict(required=True, type='str'),
        azure_tag=dict(required=False, type='list', elements='dict', options=dict(
            tag_key=dict(type='str', no_log=False),
            tag_value=dict(type='str')
        )),
        serial_number=dict(required=False, type='str'),
        is_ha=dict(required=False, type='bool', default=False),
        platform_serial_number_node1=dict(required=False, type='str'),
        platform_serial_number_node2=dict(required=False, type='str'),
        ha_enable_https=dict(required=False, type='bool'),
        upgrade_ontap_version=dict(required=False, type='bool', default=False),
        update_svm_password=dict(required=False, type='bool', default=False),
        availability_zone=dict(required=False, type='int'),
        availability_zone_node1=dict(required=False, type='int'),
        availability_zone_node2=dict(required=False, type='int'),
    ))
    self.module = AnsibleModule(
        argument_spec=self.argument_spec,
        required_one_of=[['refresh_token', 'sa_client_id']],
        required_together=[['sa_client_id', 'sa_secret_key']],
        # BYOL/capacity license types require their matching identifiers.
        required_if=[
            ['license_type', 'capacity-paygo', ['capacity_package_name']],
            ['license_type', 'ha-capacity-paygo', ['capacity_package_name']],
            ['license_type', 'azure-cot-premium-byol', ['serial_number']],
            ['license_type', 'azure-ha-cot-premium-byol', ['platform_serial_number_node1', 'platform_serial_number_node2']],
        ],
        supports_check_mode=True
    )
    self.na_helper = NetAppModule()
    self.parameters = self.na_helper.set_parameters(self.module.params)
    # Parameters that can be modified on an existing CVO (see update_cvo_azure).
    self.changeable_params = ['svm_password', 'svm_name', 'azure_tag', 'tier_level', 'ontap_version',
                              'instance_type', 'license_type', 'writing_speed_state']
    self.rest_api = CloudManagerRestAPI(self.module)
    self.rest_api.url += self.rest_api.environment_data['CLOUD_MANAGER_HOST']
    # HA pairs and single nodes live under different API roots.
    self.rest_api.api_root_path = '/occm/api/azure/%s' % ('ha' if self.parameters['is_ha'] else 'vsa')
    self.headers = {
        'X-Agent-Id': self.rest_api.format_client_id(self.parameters['client_id'])
    }
def create_cvo_azure(self):
    """
    Create AZURE CVO

    Fills in defaulted parameters (workspace, NSS account), assembles the
    creation request body, POSTs it and waits for the async task to finish.
    Returns the new working environment's public ID.
    """
    # Default the workspace to the tenant's first workspace when not supplied.
    if self.parameters.get('workspace_id') is None:
        response, msg = self.na_helper.get_tenant(self.rest_api, self.headers)
        if response is None:
            self.module.fail_json(msg)
        self.parameters['workspace_id'] = response
    # For BYOL with non-Eval serial number(s) and no explicit NSS account,
    # fall back to the first existing NSS account.
    if self.parameters.get('nss_account') is None:
        if self.parameters.get('serial_number') is not None:
            if not self.parameters['serial_number'].startswith('Eval-') and self.parameters['license_type'] == 'azure-cot-premium-byol':
                response, msg = self.na_helper.get_nss(self.rest_api, self.headers)
                if response is None:
                    self.module.fail_json(msg)
                self.parameters['nss_account'] = response
        elif self.parameters.get('platform_serial_number_node1') is not None and self.parameters.get('platform_serial_number_node2') is not None:
            if not self.parameters['platform_serial_number_node1'].startswith('Eval-')\
                    and not self.parameters['platform_serial_number_node2'].startswith('Eval-')\
                    and self.parameters['license_type'] == 'azure-ha-cot-premium-byol':
                response, msg = self.na_helper.get_nss(self.rest_api, self.headers)
                if response is None:
                    self.module.fail_json(msg)
                self.parameters['nss_account'] = response
    # Base request body. NOTE(review): the local name 'json' shadows the stdlib module.
    json = {"name": self.parameters['name'],
            "region": self.parameters['location'],
            "subscriptionId": self.parameters['subscription_id'],
            "tenantId": self.parameters['workspace_id'],
            "storageType": self.parameters['storage_type'],
            "dataEncryptionType": self.parameters['data_encryption_type'],
            "optimizedNetworkUtilization": True,
            "diskSize": {
                "size": self.parameters['disk_size'],
                "unit": self.parameters['disk_size_unit']},
            "svmPassword": self.parameters['svm_password'],
            "backupVolumesToCbs": self.parameters['backup_volumes_to_cbs'],
            "enableCompliance": self.parameters['enable_compliance'],
            "enableMonitoring": self.parameters['enable_monitoring'],
            "vsaMetadata": {
                "ontapVersion": self.parameters['ontap_version'],
                "licenseType": self.parameters['license_type'],
                "useLatestVersion": self.parameters['use_latest_version'],
                "instanceType": self.parameters['instance_type']}
            }
    # Tiering to Blob requires the tier level alongside the capacity tier.
    if self.parameters['capacity_tier'] == "Blob":
        json.update({"capacityTier": self.parameters['capacity_tier'],
                     "tierLevel": self.parameters['tier_level']})
    if self.parameters.get('provided_license') is not None:
        json['vsaMetadata'].update({"providedLicense": self.parameters['provided_license']})
    # clean default value if it is not by Capacity license
    if not self.parameters['license_type'].endswith('capacity-paygo'):
        json['vsaMetadata'].update({"capacityPackageName": ''})
    if self.parameters.get('capacity_package_name') is not None:
        json['vsaMetadata'].update({"capacityPackageName": self.parameters['capacity_package_name']})
    if self.parameters.get('cidr') is not None:
        json.update({"cidr": self.parameters['cidr']})
    if self.parameters.get('writing_speed_state') is not None:
        json.update({"writingSpeedState": self.parameters['writing_speed_state'].upper()})
    # Use the given resource group, or let Cloud Manager create '<name>-rg'.
    if self.parameters.get('resource_group') is not None:
        json.update({"resourceGroup": self.parameters['resource_group'],
                     "allowDeployInExistingRg": self.parameters['allow_deploy_in_existing_rg']})
    else:
        json.update({"resourceGroup": (self.parameters['name'] + '-rg')})
    if self.parameters.get('serial_number') is not None:
        json.update({"serialNumber": self.parameters['serial_number']})
    if self.parameters.get('security_group_id') is not None:
        json.update({"securityGroupId": self.parameters['security_group_id']})
    if self.parameters.get('cloud_provider_account') is not None:
        json.update({"cloudProviderAccount": self.parameters['cloud_provider_account']})
    if self.parameters.get('backup_volumes_to_cbs') is not None:
        json.update({"backupVolumesToCbs": self.parameters['backup_volumes_to_cbs']})
    if self.parameters.get('nss_account') is not None:
        json.update({"nssAccount": self.parameters['nss_account']})
    if self.parameters.get('availability_zone') is not None:
        json.update({"availabilityZone": self.parameters['availability_zone']})
    if self.parameters['data_encryption_type'] == "AZURE":
        if self.parameters.get('azure_encryption_parameters') is not None:
            json.update({"azureEncryptionParameters": {"key": self.parameters['azure_encryption_parameters']}})
    if self.parameters.get('svm_name') is not None:
        json.update({"svmName": self.parameters['svm_name']})
    # Translate the module's tag_key/tag_value pairs to the API's tagKey/tagValue.
    if self.parameters.get('azure_tag') is not None:
        tags = []
        for each_tag in self.parameters['azure_tag']:
            tag = {
                'tagKey': each_tag['tag_key'],
                'tagValue': each_tag['tag_value']
            }
            tags.append(tag)
        json.update({"azureTags": tags})
    # HA-only settings are grouped under 'haParams'.
    if self.parameters['is_ha']:
        ha_params = dict()
        if self.parameters.get('platform_serial_number_node1'):
            ha_params["platformSerialNumberNode1"] = self.parameters['platform_serial_number_node1']
        if self.parameters.get('platform_serial_number_node2'):
            ha_params["platformSerialNumberNode2"] = self.parameters['platform_serial_number_node2']
        if self.parameters.get('availability_zone_node1'):
            ha_params["availabilityZoneNode1"] = self.parameters['availability_zone_node1']
        if self.parameters.get('availability_zone_node2'):
            ha_params["availabilityZoneNode2"] = self.parameters['availability_zone_node2']
        if self.parameters.get('ha_enable_https') is not None:
            ha_params['enableHttps'] = self.parameters['ha_enable_https']
        json["haParams"] = ha_params
    # Build the Azure resource paths for vnet and subnet; the vnet resource group
    # defaults to the CVO resource group when vnet_resource_group is not given.
    resource_group = self.parameters['vnet_resource_group'] if self.parameters.get(
        'vnet_resource_group') is not None else self.parameters['resource_group']
    resource_group_path = 'subscriptions/%s/resourceGroups/%s' % (self.parameters['subscription_id'], resource_group)
    vnet_format = '%s/%s' if self.rest_api.simulator else '/%s/providers/Microsoft.Network/virtualNetworks/%s'
    vnet = vnet_format % (resource_group_path, self.parameters['vnet_id'])
    json.update({"vnetId": vnet})
    json.update({"subnetId": '%s/subnets/%s' % (vnet, self.parameters['subnet_id'])})
    api_url = '%s/working-environments' % self.rest_api.api_root_path
    response, error, on_cloud_request_id = self.rest_api.post(api_url, json, header=self.headers)
    if error is not None:
        self.module.fail_json(
            msg="Error: unexpected response on creating cvo azure: %s, %s" % (str(error), str(response)))
    working_environment_id = response['publicId']
    # Creation is asynchronous: poll the audit task until completion.
    wait_on_completion_api_url = '/occm/api/audit/activeTask/%s' % str(on_cloud_request_id)
    err = self.rest_api.wait_on_completion(wait_on_completion_api_url, "CVO", "create", 60, 60)
    if err is not None:
        self.module.fail_json(msg="Error: unexpected response wait_on_completion for creating CVO AZURE: %s" % str(err))
    return working_environment_id
def get_extra_azure_tags(self, rest_api, headers):
    """Return the Cloud-Manager-generated 'DeployedByOccm' tag of the current
    working environment as a one-element list, or an empty list when absent.
    This tag is created automatically, not from user input, and must be
    preserved when tags are updated."""
    working_env, error = self.na_helper.get_working_environment_details(rest_api, headers)
    if error is not None:
        self.module.fail_json(msg="Error: unexpected response to get CVO AZURE details: %s" % str(error))
    user_tags = working_env['userTags']
    if 'DeployedByOccm' not in user_tags:
        return []
    return [{'tag_key': 'DeployedByOccm', 'tag_value': user_tags['DeployedByOccm']}]
def update_cvo_azure(self, working_environment_id, modify):
    """Apply each pending modification in *modify* to an existing AZURE CVO.

    Each item maps to one helper call; the module fails on the first error.
    Items not recognized here are silently ignored.
    """
    base_url = '%s/working-environments/%s/' % (self.rest_api.api_root_path, working_environment_id)
    for option in modify:
        response, error = None, None
        if option == 'svm_password':
            response, error = self.na_helper.update_svm_password(base_url, self.rest_api, self.headers, self.parameters['svm_password'])
        elif option == 'svm_name':
            response, error = self.na_helper.update_svm_name(base_url, self.rest_api, self.headers, self.parameters['svm_name'])
        elif option == 'azure_tag':
            # Keep the automatic 'DeployedByOccm' tag, then append user tags.
            tag_list = self.get_extra_azure_tags(self.rest_api, self.headers)
            if 'azure_tag' in self.parameters:
                tag_list.extend(self.parameters['azure_tag'])
            response, error = self.na_helper.update_cvo_tags(base_url, self.rest_api, self.headers, 'azure_tag', tag_list)
        elif option == 'tier_level':
            response, error = self.na_helper.update_tier_level(base_url, self.rest_api, self.headers, self.parameters['tier_level'])
        elif option == 'writing_speed_state':
            response, error = self.na_helper.update_writing_speed_state(base_url, self.rest_api, self.headers, self.parameters['writing_speed_state'])
        elif option == 'ontap_version':
            response, error = self.na_helper.upgrade_ontap_image(self.rest_api, self.headers, self.parameters['ontap_version'])
        elif option in ('instance_type', 'license_type'):
            response, error = self.na_helper.update_instance_license_type(base_url, self.rest_api, self.headers,
                                                                          self.parameters['instance_type'],
                                                                          self.parameters['license_type'])
        if error is not None:
            self.module.fail_json(changed=False, msg=error)
def delete_cvo_azure(self, we_id):
    """Delete the AZURE CVO working environment *we_id* and wait for the
    asynchronous deletion task to complete."""
    delete_url = '%s/working-environments/%s' % (self.rest_api.api_root_path, we_id)
    response, error, on_cloud_request_id = self.rest_api.delete(delete_url, None, header=self.headers)
    if error is not None:
        self.module.fail_json(msg="Error: unexpected response on deleting cvo azure: %s, %s" % (str(error), str(response)))
    # Deletion is asynchronous: poll the audit task until it finishes.
    task_url = '/occm/api/audit/activeTask/%s' % str(on_cloud_request_id)
    task_error = self.rest_api.wait_on_completion(task_url, "CVO", "delete", 40, 60)
    if task_error is not None:
        self.module.fail_json(msg="Error: unexpected response wait_on_completion for deleting CVO AZURE: %s" % str(task_error))
def validate_cvo_params(self):
if self.parameters['use_latest_version'] is True and self.parameters['ontap_version'] != "latest":
self.module.fail_json(msg="ontap_version parameter not required when having use_latest_version as true")
if self.parameters.get('serial_number') is None and self.parameters['license_type'] == "azure-cot-premium-byol":
self.module.fail_json(msg="serial_number parameter required when having license_type as azure-cot-premium-byol")
if self.parameters['is_ha'] and self.parameters['license_type'] == "azure-ha-cot-premium-byol":
if self.parameters.get('platform_serial_number_node1') is None or self.parameters.get('platform_serial_number_node2') is None:
self.module.fail_json(msg="both platform_serial_number_node1 and platform_serial_number_node2 parameters are required"
"when having ha type as true and license_type as azure-ha-cot-premium-byol")
if self.parameters['is_ha'] is True and self.parameters['license_type'] == 'capacity-paygo':
self.parameters['license_type'] == 'ha-capacity-paygo'
def apply(self):
    """
    Apply action to the Cloud Manager CVO for AZURE
    :return: None

    Looks up the working environment by name, decides between create /
    delete / update, performs the action (unless in check mode) and exits
    the module with the resulting changed flag and working_environment_id.
    """
    working_environment_id = None
    modify = None
    # current is None when no working environment with this name exists yet.
    current, dummy = self.na_helper.get_working_environment_details_by_name(self.rest_api, self.headers,
                                                                            self.parameters['name'], "azure")
    if current:
        self.parameters['working_environment_id'] = current['publicId']
    # check the action whether to create, delete, or not
    cd_action = self.na_helper.get_cd_action(current, self.parameters)
    if current and self.parameters['state'] != 'absent':
        # NOTE(review): unlike the AWS counterpart, validate_cvo_params() is not
        # called here on the update path — confirm this omission is intentional.
        working_environment_id = current['publicId']
        modify, error = self.na_helper.is_cvo_update_needed(self.rest_api, self.headers, self.parameters, self.changeable_params, 'azure')
        if error is not None:
            self.module.fail_json(changed=False, msg=error)
    if self.na_helper.changed and not self.module.check_mode:
        if cd_action == "create":
            self.validate_cvo_params()
            working_environment_id = self.create_cvo_azure()
        elif cd_action == "delete":
            self.delete_cvo_azure(current['publicId'])
        else:
            # neither create nor delete: apply the detected modifications
            self.update_cvo_azure(current['publicId'], modify)
    self.module.exit_json(changed=self.na_helper.changed, working_environment_id=working_environment_id)
def main():
    """Instantiate the AZURE CVO module object and run its apply action."""
    cvo_module = NetAppCloudManagerCVOAZURE()
    cvo_module.apply()
# Standard Ansible entry point: run only when executed as a script.
if __name__ == '__main__':
    main()

View File

@@ -0,0 +1,858 @@
#!/usr/bin/python
# (c) 2022, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
'''
na_cloudmanager_cvo_gcp
'''
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
module: na_cloudmanager_cvo_gcp
short_description: NetApp Cloud Manager CVO for GCP
extends_documentation_fragment:
- netapp.cloudmanager.netapp.cloudmanager
version_added: '21.4.0'
author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
description:
- Create, delete, or manage Cloud Manager CVO for GCP.
options:
backup_volumes_to_cbs:
description:
- Automatically backup all volumes to cloud.
default: false
type: bool
capacity_tier:
description:
- Whether to enable data tiering for the first data aggregate.
choices: ['cloudStorage']
type: str
client_id:
required: true
description:
- The connector ID of the Cloud Manager Connector.
- You can find the ID from the Connector tab on U(https://cloudmanager.netapp.com).
type: str
data_encryption_type:
description:
- Type of encryption to use for this working environment.
choices: ['GCP']
type: str
gcp_encryption_parameters:
description:
- The GCP encryption parameters.
type: str
version_added: 21.10.0
enable_compliance:
description:
- Enable the Cloud Compliance service on the working environment.
default: false
type: bool
firewall_rule:
description:
- Firewall name for a single node cluster.
type: str
gcp_labels:
description:
- Optionally provide up to four key-value pairs with which to all GCP entities created by Cloud Manager.
type: list
elements: dict
suboptions:
label_key:
description: The key of the label.
type: str
label_value:
description: The label value.
type: str
gcp_service_account:
description:
- The gcp_service_account email in order to enable tiering of cold data to Google Cloud Storage.
required: true
type: str
gcp_volume_size:
description:
- GCP volume size.
type: int
gcp_volume_size_unit:
description:
- GCP volume size unit.
choices: ['GB', 'TB']
type: str
gcp_volume_type:
description:
- GCP volume type.
choices: ['pd-balanced', 'pd-standard', 'pd-ssd']
type: str
instance_type:
description:
- The type of instance to use, which depends on the license type you choose.
- Explore ['custom-4-16384'].
- Standard ['n1-standard-8'].
- Premium ['n1-standard-32'].
- BYOL all instance types defined for PayGo.
- For more supported instance types, refer to Cloud Volumes ONTAP Release Notes.
default: 'n1-standard-8'
type: str
is_ha:
description:
- Indicate whether the working environment is an HA pair or not.
type: bool
default: false
license_type:
description:
- The type of license to use.
- For single node by Capacity ['capacity-paygo'].
- For single node by Node paygo ['gcp-cot-explore-paygo', 'gcp-cot-standard-paygo', 'gcp-cot-premium-paygo'].
- For single node by Node byol ['gcp-cot-premium-byol'].
- For HA by Capacity ['ha-capacity-paygo'].
- For HA by Node paygo ['gcp-ha-cot-explore-paygo', 'gcp-ha-cot-standard-paygo', 'gcp-ha-cot-premium-paygo'].
- For HA by Node byol ['gcp-ha-cot-premium-byol'].
choices: ['gcp-cot-standard-paygo', 'gcp-cot-explore-paygo', 'gcp-cot-premium-paygo', 'gcp-cot-premium-byol', \
'gcp-ha-cot-standard-paygo', 'gcp-ha-cot-premium-paygo', 'gcp-ha-cot-explore-paygo', 'gcp-ha-cot-premium-byol', \
'capacity-paygo', 'ha-capacity-paygo']
type: str
default: 'capacity-paygo'
provided_license:
description:
- Using a NLF license file for BYOL deployment
type: str
capacity_package_name:
description:
- Capacity package name is required when selecting a capacity based license.
choices: ['Professional', 'Essential', 'Freemium']
default: 'Essential'
type: str
version_added: 21.12.0
mediator_zone:
description:
- The zone for mediator.
- Option for HA pair only.
type: str
name:
description:
- The name of the Cloud Manager CVO for GCP to manage.
required: true
type: str
network_project_id:
description:
- The project id in GCP associated with the Subnet.
- If not provided, it is assumed that the Subnet is within the previously specified project id.
type: str
node1_zone:
description:
- Zone for node 1.
- Option for HA pair only.
type: str
node2_zone:
description:
- Zone for node 2.
- Option for HA pair only.
type: str
nss_account:
description:
- The NetApp Support Site account ID to use with this Cloud Volumes ONTAP system.
- If the license type is BYOL and an NSS account isn't provided, Cloud Manager tries to use the first existing NSS account.
type: str
ontap_version:
description:
- The required ONTAP version. Ignored if 'use_latest_version' is set to true.
type: str
default: 'latest'
platform_serial_number_node1:
description:
- For HA BYOL, the serial number for the first node.
- Option for HA pair only.
type: str
platform_serial_number_node2:
description:
- For HA BYOL, the serial number for the second node.
- Option for HA pair only.
type: str
project_id:
description:
- The ID of the GCP project.
required: true
type: str
platform_serial_number:
description:
- The serial number for the system. Required when using 'gcp-cot-premium-byol'.
type: str
state:
description:
- Whether the specified Cloud Manager CVO for GCP should exist or not.
choices: ['present', 'absent']
default: 'present'
type: str
subnet_id:
description:
- The name of the subnet for Cloud Volumes ONTAP.
type: str
subnet0_node_and_data_connectivity:
description:
- Subnet path for nic1, required for node and data connectivity.
- If using shared VPC, network_project_id must be provided.
- Option for HA pair only.
type: str
subnet1_cluster_connectivity:
description:
- Subnet path for nic2, required for cluster connectivity.
- Option for HA pair only.
type: str
subnet2_ha_connectivity:
description:
- Subnet path for nic3, required for HA connectivity.
- Option for HA pair only.
type: str
subnet3_data_replication:
description:
- Subnet path for nic4, required for data replication.
- Option for HA pair only.
type: str
svm_password:
description:
- The admin password for Cloud Volumes ONTAP.
- It will be updated on each run.
type: str
svm_name:
description:
- The name of the SVM.
type: str
version_added: 21.22.0
tier_level:
description:
- The tiering level when 'capacity_tier' is set to 'cloudStorage'.
choices: ['standard', 'nearline', 'coldline']
default: 'standard'
type: str
use_latest_version:
description:
- Indicates whether to use the latest available ONTAP version.
type: bool
default: true
vpc_id:
required: true
description:
- The name of the VPC.
type: str
vpc0_firewall_rule_name:
description:
- Firewall rule name for vpc1.
- Option for HA pair only.
type: str
vpc0_node_and_data_connectivity:
description:
- VPC path for nic1, required for node and data connectivity.
- If using shared VPC, network_project_id must be provided.
- Option for HA pair only.
type: str
vpc1_cluster_connectivity:
description:
- VPC path for nic2, required for cluster connectivity.
- Option for HA pair only.
type: str
vpc1_firewall_rule_name:
description:
- Firewall rule name for vpc2.
- Option for HA pair only.
type: str
vpc2_ha_connectivity:
description:
- VPC path for nic3, required for HA connectivity.
- Option for HA pair only.
type: str
vpc2_firewall_rule_name:
description:
- Firewall rule name for vpc3.
- Option for HA pair only.
type: str
vpc3_data_replication:
description:
- VPC path for nic4, required for data replication.
- Option for HA pair only.
type: str
vpc3_firewall_rule_name:
description:
- Firewall rule name for vpc4.
- Option for HA pair only.
type: str
workspace_id:
description:
- The ID of the Cloud Manager workspace where you want to deploy Cloud Volumes ONTAP.
- If not provided, Cloud Manager uses the first workspace.
- You can find the ID from the Workspace tab on [https://cloudmanager.netapp.com].
type: str
writing_speed_state:
description:
- The write speed setting for Cloud Volumes ONTAP ['NORMAL','HIGH'].
- The default value is 'NORMAL' for a non-HA GCP CVO.
- This argument is not relevant for HA pairs.
type: str
zone:
description:
- The zone of the region where the working environment will be created.
required: true
type: str
upgrade_ontap_version:
description:
- Indicates whether to upgrade ONTAP image on the CVO.
- If the current version already matches the desired version, no action is taken.
type: bool
default: false
version_added: 21.13.0
update_svm_password:
description:
- Indicates whether to update svm_password on the CVO.
- When set to true, the module is not idempotent, as we cannot read the current password.
type: bool
default: false
version_added: 21.13.0
subnet_path:
description:
- Subnet path for a single node cluster.
type: str
version_added: 21.20.0
notes:
- Support check_mode.
'''
EXAMPLES = """
- name: Create NetApp Cloud Manager cvo for GCP
netapp.cloudmanager.na_cloudmanager_cvo_gcp:
state: present
name: ansiblecvogcp
project_id: default-project
zone: us-east4-b
subnet_path: projects/<project>/regions/<region>/subnetworks/<subnetwork>
subnet_id: projects/<project>/regions/<region>/subnetworks/<subnetwork>
gcp_volume_type: pd-ssd
gcp_volume_size: 500
gcp_volume_size_unit: GB
gcp_service_account: "{{ xxxxxxxxxxxxxxx }}"
data_encryption_type: GCP
svm_password: "{{ xxxxxxxxxxxxxxx }}"
ontap_version: latest
use_latest_version: true
license_type: capacity-paygo
instance_type: n1-standard-8
client_id: "{{ xxxxxxxxxxxxxxx }}"
workspace_id: "{{ xxxxxxxxxxxxxxx }}"
capacity_tier: cloudStorage
writing_speed_state: NORMAL
refresh_token: "{{ xxxxxxxxxxxxxxx }}"
vpc_id: default
gcp_labels:
- label_key: key1
label_value: value1
- label_key: key2
label_value: value2
- name: Create NetApp Cloud Manager cvo ha for GCP
netapp.cloudmanager.na_cloudmanager_cvo_gcp:
state: present
name: ansiblecvogcpha
project_id: "default-project"
zone: us-east1-b
gcp_volume_type: pd-ssd
gcp_volume_size: 500
gcp_volume_size_unit: GB
gcp_service_account: "{{ xxxxxxxxxxxxxxx }}"
data_encryption_type: GCP
svm_password: "{{ xxxxxxxxxxxxxxx }}"
ontap_version: ONTAP-9.9.0.T1.gcpha
use_latest_version: false
license_type: ha-capacity-paygo
instance_type: custom-4-16384
client_id: "{{ xxxxxxxxxxxxxxx }}"
workspace_id: "{{ xxxxxxxxxxxxxxx }}"
capacity_tier: cloudStorage
writing_speed_state: NORMAL
refresh_token: "{{ xxxxxxxxxxxxxxx }}"
is_ha: true
mediator_zone: us-east1-b
node1_zone: us-east1-b
node2_zone: us-east1-b
subnet0_node_and_data_connectivity: default
subnet1_cluster_connectivity: subnet2
subnet2_ha_connectivity: subnet3
subnet3_data_replication: subnet1
vpc0_node_and_data_connectivity: default
vpc1_cluster_connectivity: vpc2
vpc2_ha_connectivity: vpc3
vpc3_data_replication: vpc1
vpc_id: default
subnet_id: default
"""
RETURN = '''
working_environment_id:
description: Newly created GCP CVO working_environment_id.
type: str
returned: success
'''
from ansible.module_utils.basic import AnsibleModule
import ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp as netapp_utils
from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module import NetAppModule
from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp import CloudManagerRestAPI
# License types accepted by the Cloud Manager API for GCP CVO deployments:
# node-based PAYGO/BYOL variants for single-node and HA systems, plus the
# capacity-based licenses ('capacity-paygo' / 'ha-capacity-paygo').
GCP_LICENSE_TYPES = ["gcp-cot-standard-paygo", "gcp-cot-explore-paygo", "gcp-cot-premium-paygo", "gcp-cot-premium-byol",
                     "gcp-ha-cot-standard-paygo", "gcp-ha-cot-premium-paygo", "gcp-ha-cot-explore-paygo",
                     "gcp-ha-cot-premium-byol", "capacity-paygo", "ha-capacity-paygo"]
# Base URL used to build absolute GCP self-links for VPC and subnet resources.
GOOGLE_API_URL = "https://www.googleapis.com/compute/v1/projects"
class NetAppCloudManagerCVOGCP:
    '''Create, update or delete a Cloud Volumes ONTAP (CVO) working environment in GCP via Cloud Manager.'''

    def __init__(self):
        """Build the argument spec, validate module parameters and set up the REST client."""
        self.use_rest = False
        self.argument_spec = netapp_utils.cloudmanager_host_argument_spec()
        self.argument_spec.update(dict(
            backup_volumes_to_cbs=dict(required=False, type='bool', default=False),
            capacity_tier=dict(required=False, type='str', choices=['cloudStorage']),
            client_id=dict(required=True, type='str'),
            data_encryption_type=dict(required=False, choices=['GCP'], type='str'),
            gcp_encryption_parameters=dict(required=False, type='str', no_log=True),
            enable_compliance=dict(required=False, type='bool', default=False),
            firewall_rule=dict(required=False, type='str'),
            gcp_labels=dict(required=False, type='list', elements='dict', options=dict(
                label_key=dict(type='str', no_log=False),
                label_value=dict(type='str')
            )),
            gcp_service_account=dict(required=True, type='str'),
            gcp_volume_size=dict(required=False, type='int'),
            gcp_volume_size_unit=dict(required=False, choices=['GB', 'TB'], type='str'),
            gcp_volume_type=dict(required=False, choices=['pd-balanced', 'pd-standard', 'pd-ssd'], type='str'),
            instance_type=dict(required=False, type='str', default='n1-standard-8'),
            is_ha=dict(required=False, type='bool', default=False),
            license_type=dict(required=False, type='str', choices=GCP_LICENSE_TYPES, default='capacity-paygo'),
            mediator_zone=dict(required=False, type='str'),
            name=dict(required=True, type='str'),
            network_project_id=dict(required=False, type='str'),
            node1_zone=dict(required=False, type='str'),
            node2_zone=dict(required=False, type='str'),
            nss_account=dict(required=False, type='str'),
            ontap_version=dict(required=False, type='str', default='latest'),
            platform_serial_number=dict(required=False, type='str'),
            platform_serial_number_node1=dict(required=False, type='str'),
            platform_serial_number_node2=dict(required=False, type='str'),
            project_id=dict(required=True, type='str'),
            state=dict(required=False, choices=['present', 'absent'], default='present'),
            subnet_id=dict(required=False, type='str'),
            subnet0_node_and_data_connectivity=dict(required=False, type='str'),
            subnet1_cluster_connectivity=dict(required=False, type='str'),
            subnet2_ha_connectivity=dict(required=False, type='str'),
            subnet3_data_replication=dict(required=False, type='str'),
            svm_password=dict(required=False, type='str', no_log=True),
            svm_name=dict(required=False, type='str'),
            tier_level=dict(required=False, type='str', choices=['standard', 'nearline', 'coldline'],
                            default='standard'),
            use_latest_version=dict(required=False, type='bool', default=True),
            capacity_package_name=dict(required=False, type='str', choices=['Professional', 'Essential', 'Freemium'], default='Essential'),
            provided_license=dict(required=False, type='str'),
            vpc_id=dict(required=True, type='str'),
            vpc0_firewall_rule_name=dict(required=False, type='str'),
            vpc0_node_and_data_connectivity=dict(required=False, type='str'),
            vpc1_cluster_connectivity=dict(required=False, type='str'),
            vpc1_firewall_rule_name=dict(required=False, type='str'),
            vpc2_firewall_rule_name=dict(required=False, type='str'),
            vpc2_ha_connectivity=dict(required=False, type='str'),
            vpc3_data_replication=dict(required=False, type='str'),
            vpc3_firewall_rule_name=dict(required=False, type='str'),
            workspace_id=dict(required=False, type='str'),
            writing_speed_state=dict(required=False, type='str'),
            zone=dict(required=True, type='str'),
            upgrade_ontap_version=dict(required=False, type='bool', default=False),
            update_svm_password=dict(required=False, type='bool', default=False),
            subnet_path=dict(required=False, type='str'),
        ))
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            # Authentication needs either a refresh token or a service-account client id/secret pair.
            required_one_of=[['refresh_token', 'sa_client_id']],
            required_together=[['sa_client_id', 'sa_secret_key']],
            required_if=[
                ['license_type', 'capacity-paygo', ['capacity_package_name']],
                ['license_type', 'ha-capacity-paygo', ['capacity_package_name']],
                ['license_type', 'gcp-cot-premium-byol', ['platform_serial_number']],
                ['license_type', 'gcp-ha-cot-premium-byol', ['platform_serial_number_node1', 'platform_serial_number_node2']],
            ],
            supports_check_mode=True
        )
        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        # Parameters that can be modified in place on an existing CVO.
        self.changeable_params = ['svm_password', 'svm_name', 'tier_level', 'gcp_labels', 'ontap_version',
                                  'instance_type', 'license_type', 'writing_speed_state']
        self.rest_api = CloudManagerRestAPI(self.module)
        self.rest_api.url += self.rest_api.environment_data['CLOUD_MANAGER_HOST']
        # HA and single-node CVOs live under different API roots.
        self.rest_api.api_root_path = '/occm/api/gcp/%s' % ('ha' if self.parameters['is_ha'] else 'vsa')
        # Every request must carry the connector (agent) id.
        self.headers = {
            'X-Agent-Id': self.rest_api.format_client_id(self.parameters['client_id'])
        }

    @staticmethod
    def has_self_link(param):
        """Return True if *param* is already a GCP self-link (full URL or 'projects/...' path)."""
        return param.startswith(("https://www.googleapis.com/compute/", "projects/"))

    def create_cvo_gcp(self):
        """Create the GCP CVO working environment and wait for completion.

        Returns the new working environment's public id; calls fail_json on error.
        """
        # Default the workspace to the first tenant if none was given.
        if self.parameters.get('workspace_id') is None:
            response, msg = self.na_helper.get_tenant(self.rest_api, self.headers)
            if response is None:
                self.module.fail_json(msg)
            self.parameters['workspace_id'] = response
        # For BYOL (non-eval) deployments, fall back to the first existing NSS account.
        if self.parameters.get('nss_account') is None \
                and self.parameters.get('platform_serial_number') is not None \
                and not self.parameters['platform_serial_number'].startswith('Eval-') \
                and self.parameters['license_type'] in ('gcp-cot-premium-byol', 'gcp-ha-cot-premium-byol'):
            response, msg = self.na_helper.get_nss(self.rest_api, self.headers)
            if response is None:
                self.module.fail_json(msg)
            self.parameters['nss_account'] = response
        # Bug fix: this was previously a no-op comparison (==), so HA systems kept
        # the single-node capacity license name.  HA requires 'ha-capacity-paygo'.
        if self.parameters['is_ha'] is True and self.parameters['license_type'] == 'capacity-paygo':
            self.parameters['license_type'] = 'ha-capacity-paygo'
        json = {"name": self.parameters['name'],
                "region": self.parameters['zone'],
                "tenantId": self.parameters['workspace_id'],
                "vpcId": self.parameters['vpc_id'],
                "gcpServiceAccount": self.parameters['gcp_service_account'],
                "gcpVolumeSize": {
                    "size": self.parameters['gcp_volume_size'],
                    "unit": self.parameters['gcp_volume_size_unit']},
                "gcpVolumeType": self.parameters['gcp_volume_type'],
                "svmPassword": self.parameters['svm_password'],
                "backupVolumesToCbs": self.parameters['backup_volumes_to_cbs'],
                "enableCompliance": self.parameters['enable_compliance'],
                "vsaMetadata": {
                    "ontapVersion": self.parameters['ontap_version'],
                    "licenseType": self.parameters['license_type'],
                    "useLatestVersion": self.parameters['use_latest_version'],
                    "instanceType": self.parameters['instance_type']}
                }
        # writing_speed_state only applies to single-node systems.
        if self.parameters['is_ha'] is False:
            if self.parameters.get('writing_speed_state') is None:
                self.parameters['writing_speed_state'] = 'NORMAL'
            json.update({'writingSpeedState': self.parameters['writing_speed_state'].upper()})
        if self.parameters.get('data_encryption_type') is not None and self.parameters['data_encryption_type'] == "GCP":
            json.update({'dataEncryptionType': self.parameters['data_encryption_type']})
            if self.parameters.get('gcp_encryption_parameters') is not None:
                json.update({"gcpEncryptionParameters": {"key": self.parameters['gcp_encryption_parameters']}})
        if self.parameters.get('provided_license') is not None:
            json['vsaMetadata'].update({"providedLicense": self.parameters['provided_license']})
        # Clear the default capacity package when a non-capacity license is used.
        if not self.parameters['license_type'].endswith('capacity-paygo'):
            json['vsaMetadata'].update({"capacityPackageName": ''})
        if self.parameters.get('capacity_package_name') is not None:
            json['vsaMetadata'].update({"capacityPackageName": self.parameters['capacity_package_name']})
        if self.parameters.get('project_id'):
            json.update({'project': self.parameters['project_id']})
        if self.parameters.get('nss_account'):
            json.update({'nssAccount': self.parameters['nss_account']})
        if self.parameters.get('subnet_id'):
            json.update({'subnetId': self.parameters['subnet_id']})
        if self.parameters.get('subnet_path'):
            json.update({'subnetPath': self.parameters['subnet_path']})
        if self.parameters.get('platform_serial_number') is not None:
            json.update({"serialNumber": self.parameters['platform_serial_number']})
        if self.parameters.get('capacity_tier') is not None and self.parameters['capacity_tier'] == "cloudStorage":
            json.update({"capacityTier": self.parameters['capacity_tier'],
                         "tierLevel": self.parameters['tier_level']})
        if self.parameters.get('svm_name') is not None:
            json.update({"svmName": self.parameters['svm_name']})
        # Translate snake_case label entries into the API's camelCase form.
        if self.parameters.get('gcp_labels') is not None:
            labels = []
            for each_label in self.parameters['gcp_labels']:
                label = {
                    'labelKey': each_label['label_key'],
                    'labelValue': each_label['label_value']
                }
                labels.append(label)
            json.update({"gcpLabels": labels})
        if self.parameters.get('firewall_rule'):
            json.update({'firewallRule': self.parameters['firewall_rule']})
        if self.parameters['is_ha'] is True:
            ha_params = dict()
            # Shared-VPC deployments reference networks in a different project.
            if self.parameters.get('network_project_id') is not None:
                network_project_id = self.parameters.get('network_project_id')
            else:
                network_project_id = self.parameters['project_id']
            # Expand short subnet names to full resource paths.  zone[:-2] strips
            # the zone suffix (e.g. 'us-east4-b' -> 'us-east4') to get the region.
            if not self.has_self_link(self.parameters['subnet_id']):
                json.update({'subnetId': 'projects/%s/regions/%s/subnetworks/%s' % (network_project_id,
                                                                                    self.parameters['zone'][:-2],
                                                                                    self.parameters['subnet_id'])})
            if self.parameters.get('platform_serial_number_node1'):
                ha_params["platformSerialNumberNode1"] = self.parameters['platform_serial_number_node1']
            if self.parameters.get('platform_serial_number_node2'):
                ha_params["platformSerialNumberNode2"] = self.parameters['platform_serial_number_node2']
            if self.parameters.get('node1_zone'):
                ha_params["node1Zone"] = self.parameters['node1_zone']
            if self.parameters.get('node2_zone'):
                ha_params["node2Zone"] = self.parameters['node2_zone']
            if self.parameters.get('mediator_zone'):
                ha_params["mediatorZone"] = self.parameters['mediator_zone']
            if self.parameters.get('vpc0_node_and_data_connectivity'):
                if self.has_self_link(self.parameters['vpc0_node_and_data_connectivity']):
                    ha_params["vpc0NodeAndDataConnectivity"] = self.parameters['vpc0_node_and_data_connectivity']
                else:
                    ha_params["vpc0NodeAndDataConnectivity"] = GOOGLE_API_URL + "/{0}/global/networks/{1}".format(
                        network_project_id, self.parameters['vpc0_node_and_data_connectivity'])
            if self.parameters.get('vpc1_cluster_connectivity'):
                if self.has_self_link(self.parameters['vpc1_cluster_connectivity']):
                    ha_params["vpc1ClusterConnectivity"] = self.parameters['vpc1_cluster_connectivity']
                else:
                    ha_params["vpc1ClusterConnectivity"] = GOOGLE_API_URL + "/{0}/global/networks/{1}".format(
                        network_project_id, self.parameters['vpc1_cluster_connectivity'])
            if self.parameters.get('vpc2_ha_connectivity'):
                if self.has_self_link(self.parameters['vpc2_ha_connectivity']):
                    ha_params["vpc2HAConnectivity"] = self.parameters['vpc2_ha_connectivity']
                else:
                    ha_params["vpc2HAConnectivity"] = GOOGLE_API_URL + "/{0}/global/networks/{1}".format(
                        network_project_id, self.parameters['vpc2_ha_connectivity'])
            if self.parameters.get('vpc3_data_replication'):
                if self.has_self_link(self.parameters['vpc3_data_replication']):
                    ha_params["vpc3DataReplication"] = self.parameters['vpc3_data_replication']
                else:
                    ha_params["vpc3DataReplication"] = GOOGLE_API_URL + "/{0}/global/networks/{1}".format(
                        network_project_id, self.parameters['vpc3_data_replication'])
            if self.parameters.get('subnet0_node_and_data_connectivity'):
                if self.has_self_link(self.parameters['subnet0_node_and_data_connectivity']):
                    ha_params["subnet0NodeAndDataConnectivity"] = self.parameters['subnet0_node_and_data_connectivity']
                else:
                    ha_params["subnet0NodeAndDataConnectivity"] = GOOGLE_API_URL + "/{0}/regions/{1}/subnetworks/{2}".\
                        format(network_project_id, self.parameters['zone'][:-2], self.parameters['subnet0_node_and_data_connectivity'])
            if self.parameters.get('subnet1_cluster_connectivity'):
                if self.has_self_link(self.parameters['subnet1_cluster_connectivity']):
                    ha_params["subnet1ClusterConnectivity"] = self.parameters['subnet1_cluster_connectivity']
                else:
                    ha_params["subnet1ClusterConnectivity"] = GOOGLE_API_URL + "/{0}/regions/{1}/subnetworks/{2}".format(
                        network_project_id, self.parameters['zone'][:-2],
                        self.parameters['subnet1_cluster_connectivity'])
            if self.parameters.get('subnet2_ha_connectivity'):
                if self.has_self_link(self.parameters['subnet2_ha_connectivity']):
                    ha_params["subnet2HAConnectivity"] = self.parameters['subnet2_ha_connectivity']
                else:
                    ha_params["subnet2HAConnectivity"] = GOOGLE_API_URL + "/{0}/regions/{1}/subnetworks/{2}".format(
                        network_project_id, self.parameters['zone'][:-2],
                        self.parameters['subnet2_ha_connectivity'])
            if self.parameters.get('subnet3_data_replication'):
                if self.has_self_link(self.parameters['subnet3_data_replication']):
                    ha_params["subnet3DataReplication"] = self.parameters['subnet3_data_replication']
                else:
                    ha_params["subnet3DataReplication"] = GOOGLE_API_URL + "/{0}/regions/{1}/subnetworks/{2}". \
                        format(network_project_id, self.parameters['zone'][:-2],
                               self.parameters['subnet3_data_replication'])
            if self.parameters.get('vpc0_firewall_rule_name'):
                # Bug fix: previously read the non-existent key 'vpc0_firewall_ruleName',
                # raising KeyError whenever vpc0_firewall_rule_name was supplied.
                ha_params["vpc0FirewallRuleName"] = self.parameters['vpc0_firewall_rule_name']
            if self.parameters.get('vpc1_firewall_rule_name'):
                ha_params["vpc1FirewallRuleName"] = self.parameters['vpc1_firewall_rule_name']
            if self.parameters.get('vpc2_firewall_rule_name'):
                ha_params["vpc2FirewallRuleName"] = self.parameters['vpc2_firewall_rule_name']
            if self.parameters.get('vpc3_firewall_rule_name'):
                ha_params["vpc3FirewallRuleName"] = self.parameters['vpc3_firewall_rule_name']
            json["haParams"] = ha_params
        api_url = '%s/working-environments' % self.rest_api.api_root_path
        response, error, on_cloud_request_id = self.rest_api.post(api_url, json, header=self.headers)
        if error is not None:
            self.module.fail_json(
                msg="Error: unexpected response on creating cvo gcp: %s, %s" % (str(error), str(response)))
        working_environment_id = response['publicId']
        wait_on_completion_api_url = '/occm/api/audit/activeTask/%s' % str(on_cloud_request_id)
        err = self.rest_api.wait_on_completion(wait_on_completion_api_url, "CVO", "create", 60, 60)
        if err is not None:
            self.module.fail_json(msg="Error: unexpected response wait_on_completion for creating CVO GCP: %s" % str(err))
        return working_environment_id

    def update_cvo_gcp(self, working_environment_id, modify):
        """Apply the in-place modifications listed in *modify* to an existing CVO."""
        base_url = '%s/working-environments/%s/' % (self.rest_api.api_root_path, working_environment_id)
        for item in modify:
            if item == 'svm_password':
                response, error = self.na_helper.update_svm_password(base_url, self.rest_api, self.headers, self.parameters['svm_password'])
                if error is not None:
                    self.module.fail_json(changed=False, msg=error)
            if item == 'svm_name':
                response, error = self.na_helper.update_svm_name(base_url, self.rest_api, self.headers, self.parameters['svm_name'])
                if error is not None:
                    self.module.fail_json(changed=False, msg=error)
            if item == 'gcp_labels':
                tag_list = None
                if 'gcp_labels' in self.parameters:
                    tag_list = self.parameters['gcp_labels']
                response, error = self.na_helper.update_cvo_tags(base_url, self.rest_api, self.headers, 'gcp_labels', tag_list)
                if error is not None:
                    self.module.fail_json(changed=False, msg=error)
            if item == 'tier_level':
                response, error = self.na_helper.update_tier_level(base_url, self.rest_api, self.headers, self.parameters['tier_level'])
                if error is not None:
                    self.module.fail_json(changed=False, msg=error)
            if item == 'writing_speed_state':
                response, error = self.na_helper.update_writing_speed_state(base_url, self.rest_api, self.headers, self.parameters['writing_speed_state'])
                if error is not None:
                    self.module.fail_json(changed=False, msg=error)
            if item == 'ontap_version':
                response, error = self.na_helper.upgrade_ontap_image(self.rest_api, self.headers, self.parameters['ontap_version'])
                if error is not None:
                    self.module.fail_json(changed=False, msg=error)
            # instance_type and license_type are updated through the same endpoint.
            if item in ('instance_type', 'license_type'):
                response, error = self.na_helper.update_instance_license_type(base_url, self.rest_api, self.headers,
                                                                              self.parameters['instance_type'],
                                                                              self.parameters['license_type'])
                if error is not None:
                    self.module.fail_json(changed=False, msg=error)

    def delete_cvo_gcp(self, we_id):
        """Delete the GCP CVO identified by *we_id* and wait for completion."""
        api_url = '%s/working-environments/%s' % (self.rest_api.api_root_path, we_id)
        response, error, on_cloud_request_id = self.rest_api.delete(api_url, None, header=self.headers)
        if error is not None:
            self.module.fail_json(msg="Error: unexpected response on deleting cvo gcp: %s, %s" % (str(error), str(response)))
        wait_on_completion_api_url = '/occm/api/audit/activeTask/%s' % str(on_cloud_request_id)
        err = self.rest_api.wait_on_completion(wait_on_completion_api_url, "CVO", "delete", 40, 60)
        if err is not None:
            self.module.fail_json(msg="Error: unexpected response wait_on_completion for deleting cvo gcp: %s" % str(err))

    def apply(self):
        """Determine the required action (create/delete/update) and execute it."""
        working_environment_id = None
        modify = None
        current, dummy = self.na_helper.get_working_environment_details_by_name(self.rest_api, self.headers,
                                                                                self.parameters['name'], "gcp")
        if current:
            self.parameters['working_environment_id'] = current['publicId']
        # check the action
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        if current and self.parameters['state'] != 'absent':
            working_environment_id = current['publicId']
            modify, error = self.na_helper.is_cvo_update_needed(self.rest_api, self.headers, self.parameters, self.changeable_params, 'gcp')
            if error is not None:
                self.module.fail_json(changed=False, msg=error)
        if self.na_helper.changed and not self.module.check_mode:
            if cd_action == "create":
                working_environment_id = self.create_cvo_gcp()
            elif cd_action == "delete":
                self.delete_cvo_gcp(current['publicId'])
            else:
                self.update_cvo_gcp(current['publicId'], modify)
        self.module.exit_json(changed=self.na_helper.changed, working_environment_id=working_environment_id)
def main():
    """Module entry point: build the GCP CVO handler and apply the requested state."""
    cvo_module = NetAppCloudManagerCVOGCP()
    cvo_module.apply()


if __name__ == '__main__':
    main()

View File

@@ -0,0 +1,235 @@
#!/usr/bin/python
# (c) 2021, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
'''
na_cloudmanager_info
'''
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
module: na_cloudmanager_info
short_description: NetApp Cloud Manager info
extends_documentation_fragment:
- netapp.cloudmanager.netapp.cloudmanager
version_added: '21.4.0'
author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
description:
- This module allows you to gather various information about cloudmanager using REST APIs.
options:
client_id:
required: true
type: str
description:
- The connector ID of the Cloud Manager Connector.
gather_subsets:
type: list
elements: str
description:
- When supplied, this argument will restrict the information collected to a given subset.
- Possible values for this argument include
- 'working_environments_info'
- 'aggregates_info'
- 'accounts_info'
- 'account_info'
- 'agents_info'
- 'active_agents_info'
default: 'all'
notes:
- Support check_mode
'''
EXAMPLES = """
- name: Get all available subsets
netapp.cloudmanager.na_cloudmanager_info:
client_id: "{{ client_id }}"
refresh_token: "{{ refresh_token }}"
gather_subsets:
- all
- name: Collect data for cloud manager with indicated subsets
netapp.cloudmanager.na_cloudmanager_info:
client_id: "{{ client_id }}"
refresh_token: "{{ refresh_token }}"
gather_subsets:
- aggregates_info
- working_environments_info
"""
RETURN = """
info:
description:
- a dictionary of collected subsets
- each subset is in JSON format
returned: success
type: dict
sample: '{
"info": {
"working_environments_info": [
{
"azureVsaWorkingEnvironments": [],
"gcpVsaWorkingEnvironments": [],
"onPremWorkingEnvironments": [],
"vsaWorkingEnvironments": [
{
"actionsRequired": null,
"activeActions": null,
"awsProperties": null,
"capacityFeatures": null,
"cbsProperties": null,
"cloudProviderName": "Amazon",
"cloudSyncProperties": null,
"clusterProperties": null,
"complianceProperties": null,
"creatorUserEmail": "samlp|NetAppSAML|test_user",
"cronJobSchedules": null,
"encryptionProperties": null,
"fpolicyProperties": null,
"haProperties": null,
"interClusterLifs": null,
"isHA": false,
"k8sProperties": null,
"monitoringProperties": null,
"name": "testAWS",
"ontapClusterProperties": null,
"publicId": "VsaWorkingEnvironment-3txYJOsX",
"replicationProperties": null,
"reservedSize": null,
"saasProperties": null,
"schedules": null,
"snapshotPolicies": null,
"status": null,
"supportRegistrationInformation": [],
"supportRegistrationProperties": null,
"supportedFeatures": null,
"svmName": "svm_testAWS",
"svms": null,
"tenantId": "Tenant-2345",
"workingEnvironmentType": "VSA"
}
]
},
null
]
}
}'
"""
from ansible.module_utils.basic import AnsibleModule
import ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp as netapp_utils
from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module import NetAppModule
from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp import CloudManagerRestAPI
class NetAppCloudmanagerInfo(object):
    '''
    Gather Cloud Manager information subsets (working environments,
    aggregates, accounts, agents) through the Cloud Manager REST API.
    '''

    def __init__(self):
        """Build the argument spec, validate parameters and set up the REST client."""
        self.argument_spec = netapp_utils.cloudmanager_host_argument_spec()
        self.argument_spec.update(dict(
            gather_subsets=dict(type='list', elements='str', default='all'),
            client_id=dict(required=True, type='str'),
        ))
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            required_one_of=[['refresh_token', 'sa_client_id']],
            required_together=[['sa_client_id', 'sa_secret_key']],
            supports_check_mode=True
        )
        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        self.rest_api = CloudManagerRestAPI(self.module)
        self.rest_api.url += self.rest_api.environment_data['CLOUD_MANAGER_HOST']
        self.rest_api.api_root_path = None
        # Dispatch table: subset name -> collector callable(rest_api, headers).
        self.methods = {
            'working_environments_info': self.na_helper.get_working_environments_info,
            'aggregates_info': self.get_aggregates_info,
            'accounts_info': self.na_helper.get_accounts_info,
            'account_info': self.na_helper.get_account_info,
            'agents_info': self.na_helper.get_agents_info,
            'active_agents_info': self.na_helper.get_active_agents_info,
        }
        self.headers = {}
        if 'client_id' in self.parameters:
            self.headers['X-Agent-Id'] = self.rest_api.format_client_id(self.parameters['client_id'])

    def get_aggregates_info(self, rest_api, headers):
        '''
        Collect aggregates across all working environments, grouped first by
        working environment type and then by working environment id.
        '''
        aggregates = {}
        working_environments, error = self.na_helper.get_working_environments_info(rest_api, headers)
        if error is not None:
            self.module.fail_json(msg="Error: Failed to get working environments: %s" % str(error))
        # Four working environment types are returned:
        # azureVsaWorkingEnvironments, gcpVsaWorkingEnvironments,
        # onPremWorkingEnvironments, vsaWorkingEnvironments.
        for env_type, env_list in working_environments.items():
            per_environment = {}
            for environment in env_list:
                we_id = environment['publicId']
                self.na_helper.set_api_root_path(environment, rest_api)
                # AWS uses a query parameter; the other providers use a path segment.
                if environment['cloudProviderName'] == "Amazon":
                    api = '%s/aggregates?workingEnvironmentId=%s' % (rest_api.api_root_path, we_id)
                else:
                    api = '%s/aggregates/%s' % (rest_api.api_root_path, we_id)
                response, error, dummy = rest_api.get(api, None, header=headers)
                if error:
                    self.module.fail_json(msg="Error: Failed to get aggregate list: %s" % str(error))
                per_environment[we_id] = response
            aggregates[env_type] = per_environment
        return aggregates

    def get_info(self, func, rest_api):
        '''Dispatch a single subset collection through the methods table.'''
        return self.methods[func](rest_api, self.headers)

    def apply(self):
        '''Collect every requested subset and exit with the results.'''
        if 'all' in self.parameters['gather_subsets']:
            self.parameters['gather_subsets'] = self.methods.keys()
        collected = {}
        for subset in self.parameters['gather_subsets']:
            if subset not in self.methods:
                self.module.fail_json(msg='%s is not a valid gather_subset. Only %s are allowed' % (subset, self.methods.keys()))
            collected[subset] = self.get_info(subset, self.rest_api)
        self.module.exit_json(changed=False, info=collected)
def main():
    '''Module entry point: gather the requested Cloud Manager info subsets.'''
    info_module = NetAppCloudmanagerInfo()
    info_module.apply()


if __name__ == '__main__':
    main()

View File

@@ -0,0 +1,192 @@
#!/usr/bin/python
# (c) 2021, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
'''
na_cloudmanager_nss_account
'''
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
module: na_cloudmanager_nss_account
short_description: NetApp Cloud Manager nss account
extends_documentation_fragment:
- netapp.cloudmanager.netapp.cloudmanager
version_added: '21.3.0'
author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
description:
- Create and Delete nss account.
options:
state:
description:
- Whether the specified nss account should exist or not.
choices: ['present', 'absent']
default: 'present'
type: str
client_id:
description:
- The connector ID of the Cloud Manager Connector.
required: true
type: str
public_id:
description:
- The ID of the NSS account.
type: str
name:
description:
- The name of the NSS account.
type: str
username:
description:
- The NSS username.
required: true
type: str
password:
description:
- The NSS password.
type: str
vsa_list:
description:
- The working environment list.
type: list
elements: str
notes:
- Support check_mode.
'''
EXAMPLES = '''
- name: Create nss account
netapp.cloudmanager.na_cloudmanager_nss_account:
state: present
name: test_cloud
username: test_cloud
password: password
client_id: your_client_id
refresh_token: your_refresh_token
'''
RETURN = r''' # '''
from ansible.module_utils.basic import AnsibleModule
import ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp as netapp_utils
from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module import NetAppModule
class NetAppCloudmanagerNssAccount(object):
    """Create or delete a NetApp Support Site (NSS) account through Cloud Manager."""

    def __init__(self):
        """
        Parse arguments, setup state variables,
        check parameters and ensure request module is installed
        """
        self.argument_spec = netapp_utils.cloudmanager_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, choices=['present', 'absent'], default='present'),
            name=dict(required=False, type='str'),
            client_id=dict(required=True, type='str'),
            username=dict(required=True, type='str'),
            password=dict(required=False, type='str', no_log=True),
            public_id=dict(required=False, type='str'),
            vsa_list=dict(required=False, type='list', elements='str')
        ))
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            # Authenticate either with a refresh token or with service-account credentials.
            required_one_of=[['refresh_token', 'sa_client_id']],
            required_together=[['sa_client_id', 'sa_secret_key']],
            required_if=[
                ('state', 'present', ['password']),
            ],
            supports_check_mode=True
        )
        self.na_helper = NetAppModule()
        # set up state variables
        self.parameters = self.na_helper.set_parameters(self.module.params)
        # Calling generic rest_api class
        self.rest_api = netapp_utils.CloudManagerRestAPI(self.module)
        self.rest_api.token_type, self.rest_api.token = self.rest_api.get_token()
        self.rest_api.url += self.rest_api.environment_data['CLOUD_MANAGER_HOST']
        self.rest_api.api_root_path = '/occm/api/'
        # Every request is routed through the given connector (agent).
        self.headers = {
            'X-Agent-Id': self.rest_api.format_client_id(self.parameters['client_id'])
        }

    def get_nss_account(self):
        """Return the NSS account matching self.parameters['username'] as a dict, or None.

        Side effect: records the account's publicId into self.parameters['public_id'],
        or fails the module if a caller-supplied public_id does not match.
        """
        response, err, dummy = self.rest_api.send_request("GET", "%s/accounts" % (
            self.rest_api.api_root_path), None, header=self.headers)
        if err is not None:
            self.module.fail_json(changed=False, msg="Error: unexpected response on getting nss account: %s, %s" % (str(err), str(response)))
        if response is None:
            return None
        nss_accounts = []
        if response.get('nssAccounts'):
            nss_accounts = response['nssAccounts']
        if len(nss_accounts) == 0:
            return None
        result = dict()
        for account in nss_accounts:
            if account['nssUserName'] == self.parameters['username']:
                if self.parameters.get('public_id') and self.parameters['public_id'] != account['publicId']:
                    self.module.fail_json(changed=False, msg="Error: public_id '%s' does not match username."
                                          % account['publicId'])
                else:
                    self.parameters['public_id'] = account['publicId']
                result['name'] = account['accountName']
                result['user_name'] = account['nssUserName']
                result['vsa_list'] = account['vsaList']
                return result
        return None

    def create_nss_account(self):
        """POST a new NSS account; accountName and vsaList are optional in the payload."""
        account = dict()
        if self.parameters.get('name'):
            account['accountName'] = self.parameters['name']
        account['providerKeys'] = {'nssUserName': self.parameters['username'],
                                   'nssPassword': self.parameters['password']}
        account['vsaList'] = []
        if self.parameters.get('vsa_list'):
            account['vsaList'] = self.parameters['vsa_list']
        response, err, second_dummy = self.rest_api.send_request("POST", "%s/accounts/nss" % (
            self.rest_api.api_root_path), None, account, header=self.headers)
        if err is not None:
            self.module.fail_json(changed=False, msg="Error: unexpected response on creating nss account: %s, %s" % (str(err), str(response)))

    def delete_nss_account(self):
        """DELETE the NSS account identified by self.parameters['public_id']."""
        response, err, second_dummy = self.rest_api.send_request("DELETE", "%s/accounts/%s" % (
            self.rest_api.api_root_path, self.parameters['public_id']), None, None, header=self.headers)
        if err is not None:
            self.module.fail_json(changed=False, msg="Error: unexpected response on deleting nss account: %s, %s" % (str(err), str(response)))
        return None

    def apply(self):
        """Idempotent entry point: compare current vs desired state and create/delete as needed."""
        current = self.get_nss_account()
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        if self.na_helper.changed and not self.module.check_mode:
            if cd_action == 'create':
                self.create_nss_account()
            elif cd_action == 'delete':
                self.delete_nss_account()
        self.module.exit_json(changed=self.na_helper.changed)
def main():
    """Entry point for the na_cloudmanager_nss_account module."""
    nss_account = NetAppCloudmanagerNssAccount()
    nss_account.apply()


if __name__ == '__main__':
    main()

View File

@@ -0,0 +1,471 @@
#!/usr/bin/python
# (c) 2021, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
'''
na_cloudmanager_snapmirror
'''
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
module: na_cloudmanager_snapmirror
short_description: NetApp Cloud Manager SnapMirror
extends_documentation_fragment:
- netapp.cloudmanager.netapp.cloudmanager
version_added: '21.6.0'
author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
description:
- Create or Delete SnapMirror relationship on Cloud Manager.
options:
state:
description:
- Whether the specified snapmirror relationship should exist or not.
choices: ['present', 'absent']
default: 'present'
type: str
source_working_environment_name:
description:
- The working environment name of the source volume.
type: str
destination_working_environment_name:
description:
- The working environment name of the destination volume.
type: str
source_working_environment_id:
description:
- The public ID of the working environment of the source volume.
type: str
destination_working_environment_id:
description:
- The public ID of the working environment of the destination volume.
type: str
destination_aggregate_name:
description:
- The aggregate in which the volume will be created.
- If not provided, Cloud Manager chooses the best aggregate for you.
type: str
policy:
description:
- The SnapMirror policy name.
type: str
default: 'MirrorAllSnapshots'
max_transfer_rate:
description:
- Maximum transfer rate limit KB/s.
- Use 0 for no limit, otherwise use number between 1024 and 2,147,482,624.
type: int
default: 100000
source_svm_name:
description:
- The name of the source SVM.
- The default SVM name is used, if a name is not provided.
type: str
destination_svm_name:
description:
- The name of the destination SVM.
- The default SVM name is used, if a name is not provided.
type: str
source_volume_name:
description:
- The name of the source volume.
required: true
type: str
destination_volume_name:
description:
- The name of the destination volume to be created for snapmirror relationship.
required: true
type: str
schedule:
description:
- The name of the Schedule.
type: str
default: '1hour'
provider_volume_type:
description:
- The underlying cloud provider volume type.
- For AWS ['gp3', 'gp2', 'io1', 'st1', 'sc1'].
- For Azure ['Premium_LRS','Standard_LRS','StandardSSD_LRS'].
- For GCP ['pd-balanced','pd-ssd','pd-standard'].
type: str
capacity_tier:
description:
- The volume capacity tier for tiering cold data to object storage.
- The default values for each cloud provider are as follows, Amazon 'S3', Azure 'Blob', GCP 'cloudStorage'.
- If NONE, the capacity tier will not be set on volume creation.
type: str
choices: ['S3', 'Blob', 'cloudStorage', 'NONE']
tenant_id:
description:
- The NetApp account ID that the Connector will be associated with. To be used only when using FSx.
type: str
version_added: 21.14.0
client_id:
description:
- The connector ID of the Cloud Manager Connector.
required: true
type: str
notes:
- Support check_mode.
'''
EXAMPLES = '''
- name: Create snapmirror with working_environment_name
netapp.cloudmanager.na_cloudmanager_snapmirror:
state: present
source_working_environment_name: source
destination_working_environment_name: dest
source_volume_name: source
destination_volume_name: source_copy
policy: MirrorAllSnapshots
schedule: 5min
max_transfer_rate: 102400
client_id: client_id
refresh_token: refresh_token
- name: Delete snapmirror
netapp.cloudmanager.na_cloudmanager_snapmirror:
state: absent
source_working_environment_name: source
destination_working_environment_name: dest
source_volume_name: source
destination_volume_name: source_copy
client_id: client_id
refresh_token: refresh_token
'''
RETURN = r''' # '''
from ansible.module_utils.basic import AnsibleModule
import ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp as netapp_utils
from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module import NetAppModule
from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp import CloudManagerRestAPI
# Default object-storage capacity tier per cloud provider, used when the user
# does not set capacity_tier explicitly (keys are lower-cased provider names).
PROVIDER_TO_CAPACITY_TIER = {'amazon': 'S3', 'azure': 'Blob', 'gcp': 'cloudStorage'}
class NetAppCloudmanagerSnapmirror:
    """Create or delete a SnapMirror relationship through the Cloud Manager REST API."""

    def __init__(self):
        """
        Parse arguments, setup state variables,
        check parameters and ensure request module is installed
        """
        self.argument_spec = netapp_utils.cloudmanager_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, choices=['present', 'absent'], default='present'),
            source_working_environment_id=dict(required=False, type='str'),
            destination_working_environment_id=dict(required=False, type='str'),
            source_working_environment_name=dict(required=False, type='str'),
            destination_working_environment_name=dict(required=False, type='str'),
            destination_aggregate_name=dict(required=False, type='str'),
            policy=dict(required=False, type='str', default='MirrorAllSnapshots'),
            # Bug fix: default for a type='int' option must be an int (was the string '100000').
            max_transfer_rate=dict(required=False, type='int', default=100000),
            schedule=dict(required=False, type='str', default='1hour'),
            source_svm_name=dict(required=False, type='str'),
            destination_svm_name=dict(required=False, type='str'),
            source_volume_name=dict(required=True, type='str'),
            destination_volume_name=dict(required=True, type='str'),
            capacity_tier=dict(required=False, type='str', choices=['NONE', 'S3', 'Blob', 'cloudStorage']),
            provider_volume_type=dict(required=False, type='str'),
            tenant_id=dict(required=False, type='str'),
            client_id=dict(required=True, type='str'),
        ))
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            required_one_of=[
                ['source_working_environment_id', 'source_working_environment_name'],
                ['refresh_token', 'sa_client_id'],
            ],
            required_together=(['source_working_environment_id', 'destination_working_environment_id'],
                               ['source_working_environment_name', 'destination_working_environment_name'],
                               ['sa_client_id', 'sa_secret_key'],
                               ),
            supports_check_mode=True
        )
        self.na_helper = NetAppModule()
        # set up state variables
        self.parameters = self.na_helper.set_parameters(self.module.params)
        self.rest_api = CloudManagerRestAPI(self.module)
        self.rest_api.url += self.rest_api.environment_data['CLOUD_MANAGER_HOST']
        self.rest_api.api_root_path = None
        # All requests are routed through the selected connector (agent).
        self.headers = {
            'X-Agent-Id': self.rest_api.format_client_id(self.parameters['client_id'])
        }
        if self.rest_api.simulator:
            self.headers.update({'x-simulator': 'true'})

    def get_snapmirror(self):
        """Return a dict describing the existing relationship for the destination volume, or None."""
        source_we_info, dest_we_info, err = self.na_helper.get_working_environment_detail_for_snapmirror(self.rest_api, self.headers)
        if err is not None:
            self.module.fail_json(changed=False, msg=err)
        get_url = '/occm/api/replication/status/%s' % source_we_info['publicId']
        snapmirror_info, err, dummy = self.rest_api.send_request("GET", get_url, None, header=self.headers)
        if err is not None:
            self.module.fail_json(changed=False, msg='Error getting snapmirror relationship %s: %s.' % (err, snapmirror_info))
        sm_found = False
        snapmirror = None
        # Match the relationship by its destination volume name.
        for sm in snapmirror_info:
            if sm['destination']['volumeName'] == self.parameters['destination_volume_name']:
                sm_found = True
                snapmirror = sm
                break
        if not sm_found:
            return None
        result = {
            'source_working_environment_id': source_we_info['publicId'],
            'destination_svm_name': snapmirror['destination']['svmName'],
            'destination_working_environment_id': dest_we_info['publicId'],
        }
        # FSx destinations (publicId 'fs-...') do not expose a cloudProviderName.
        if not dest_we_info['publicId'].startswith('fs-'):
            result['cloud_provider_name'] = dest_we_info['cloudProviderName']
        return result

    def create_snapmirror(self):
        """Create the SnapMirror relationship, quoting the destination volume first when needed.

        Fails the module on any intermediate error; blocks until the create task completes.
        """
        snapmirror_build_data = {}
        replication_request = {}
        replication_volume = {}
        source_we_info, dest_we_info, err = self.na_helper.get_working_environment_detail_for_snapmirror(self.rest_api, self.headers)
        if err is not None:
            self.module.fail_json(changed=False, msg=err)
        if self.parameters.get('capacity_tier') is not None:
            # 'NONE' means: do not set a capacity tier at all.
            if self.parameters['capacity_tier'] == 'NONE':
                self.parameters.pop('capacity_tier')
        else:
            if dest_we_info.get('cloudProviderName'):
                # Default the capacity tier from the destination's cloud provider.
                self.parameters['capacity_tier'] = PROVIDER_TO_CAPACITY_TIER[dest_we_info['cloudProviderName'].lower()]
        interclusterlifs_info = self.get_interclusterlifs(source_we_info['publicId'], dest_we_info['publicId'])
        if source_we_info['workingEnvironmentType'] != 'ON_PREM':
            source_volumes = self.get_volumes(source_we_info, self.parameters['source_volume_name'])
        else:
            source_volumes = self.get_volumes_on_prem(source_we_info, self.parameters['source_volume_name'])
        if len(source_volumes) == 0:
            self.module.fail_json(changed=False, msg='source volume not found')
        vol_found = False
        vol_dest_quote = {}
        source_volume_resp = {}
        for vol in source_volumes:
            if vol['name'] == self.parameters['source_volume_name']:
                vol_found = True
                vol_dest_quote = vol
                source_volume_resp = vol
                # When source_svm_name is given, the volume must also belong to that SVM.
                if self.parameters.get('source_svm_name') is not None and vol['svmName'] != self.parameters['source_svm_name']:
                    vol_found = False
                if vol_found:
                    break
        if not vol_found:
            self.module.fail_json(changed=False, msg='source volume not found')
        if self.parameters.get('source_svm_name') is None:
            self.parameters['source_svm_name'] = source_volume_resp['svmName']
        if self.parameters.get('destination_svm_name') is None:
            if dest_we_info.get('svmName') is not None:
                self.parameters['destination_svm_name'] = dest_we_info['svmName']
            else:
                # Destination details did not include an SVM; look it up by name.
                self.parameters['destination_working_environment_name'] = dest_we_info['name']
                dest_working_env_detail, err = self.na_helper.get_working_environment_details_by_name(self.rest_api,
                                                                                                      self.headers,
                                                                                                      self.parameters['destination_working_environment_name'])
                if err:
                    self.module.fail_json(changed=False, msg='Error getting destination info %s: %s.' % (err, dest_working_env_detail))
                self.parameters['destination_svm_name'] = dest_working_env_detail['svmName']
        # Cloud (non-ON_PREM, non-FSx) destinations need a volume quote before creation.
        if dest_we_info.get('workingEnvironmentType') and dest_we_info['workingEnvironmentType'] != 'ON_PREM'\
                and not dest_we_info['publicId'].startswith('fs-'):
            quote = self.build_quote_request(source_we_info, dest_we_info, vol_dest_quote)
            quote_response = self.quote_volume(quote)
            replication_volume['numOfDisksApprovedToAdd'] = int(quote_response['numOfDisks'])
            if 'iops' in quote:
                replication_volume['iops'] = quote['iops']
            if 'throughput' in quote:
                replication_volume['throughput'] = quote['throughput']
            if self.parameters.get('destination_aggregate_name') is not None:
                replication_volume['advancedMode'] = True
            else:
                replication_volume['advancedMode'] = False
                replication_volume['destinationAggregateName'] = quote_response['aggregateName']
        if self.parameters.get('provider_volume_type') is None:
            replication_volume['destinationProviderVolumeType'] = source_volume_resp['providerVolumeType']
        if self.parameters.get('capacity_tier') is not None:
            replication_volume['destinationCapacityTier'] = self.parameters['capacity_tier']
        replication_request['sourceWorkingEnvironmentId'] = source_we_info['publicId']
        if dest_we_info['publicId'].startswith('fs-'):
            replication_request['destinationFsxId'] = dest_we_info['publicId']
        else:
            replication_request['destinationWorkingEnvironmentId'] = dest_we_info['publicId']
        replication_volume['sourceVolumeName'] = self.parameters['source_volume_name']
        replication_volume['destinationVolumeName'] = self.parameters['destination_volume_name']
        replication_request['policyName'] = self.parameters['policy']
        replication_request['scheduleName'] = self.parameters['schedule']
        replication_request['maxTransferRate'] = self.parameters['max_transfer_rate']
        replication_volume['sourceSvmName'] = source_volume_resp['svmName']
        replication_volume['destinationSvmName'] = self.parameters['destination_svm_name']
        replication_request['sourceInterclusterLifIps'] = [interclusterlifs_info['interClusterLifs'][0]['address']]
        replication_request['destinationInterclusterLifIps'] = [interclusterlifs_info['peerInterClusterLifs'][0]['address']]
        snapmirror_build_data['replicationRequest'] = replication_request
        snapmirror_build_data['replicationVolume'] = replication_volume
        # Endpoint depends on the destination type: FSx, cloud (VSA) or on-prem.
        if dest_we_info['publicId'].startswith('fs-'):
            api = '/occm/api/replication/fsx'
        elif dest_we_info['workingEnvironmentType'] != 'ON_PREM':
            api = '/occm/api/replication/vsa'
        else:
            api = '/occm/api/replication/onprem'
        response, err, on_cloud_request_id = self.rest_api.send_request("POST", api, None, snapmirror_build_data, header=self.headers)
        if err is not None:
            self.module.fail_json(changed=False, msg='Error creating snapmirror relationship %s: %s.' % (err, response))
        wait_on_completion_api_url = '/occm/api/audit/activeTask/%s' % (str(on_cloud_request_id))
        err = self.rest_api.wait_on_completion(wait_on_completion_api_url, "snapmirror", "create", 20, 5)
        if err is not None:
            self.module.fail_json(changed=False, msg=err)

    def get_volumes(self, working_environment_detail, name):
        """Return the list of volumes named *name* in a cloud working environment."""
        self.na_helper.set_api_root_path(working_environment_detail, self.rest_api)
        response, err, dummy = self.rest_api.send_request("GET", "%s/volumes?workingEnvironmentId=%s&name=%s" % (
            self.rest_api.api_root_path, working_environment_detail['publicId'], name), None, header=self.headers)
        if err is not None:
            self.module.fail_json(changed=False, msg='Error getting volume %s: %s.' % (err, response))
        return response

    def quote_volume(self, quote):
        """Submit a destination-volume quote request and wait for the task to finish."""
        response, err, on_cloud_request_id = self.rest_api.send_request("POST", '%s/volumes/quote' %
                                                                        self.rest_api.api_root_path, None, quote, header=self.headers)
        if err is not None:
            self.module.fail_json(changed=False, msg='Error quoting destination volume %s: %s.' % (err, response))
        wait_on_completion_api_url = '/occm/api/audit/activeTask/%s' % (str(on_cloud_request_id))
        err = self.rest_api.wait_on_completion(wait_on_completion_api_url, "volume", "quote", 20, 5)
        if err is not None:
            self.module.fail_json(changed=False, msg=err)
        return response

    def get_volumes_on_prem(self, working_environment_detail, name):
        """Return the list of volumes named *name* in an on-prem working environment."""
        response, err, dummy = self.rest_api.send_request("GET", "/occm/api/onprem/volumes?workingEnvironmentId=%s&name=%s" %
                                                          (working_environment_detail['publicId'], name), None, header=self.headers)
        if err is not None:
            self.module.fail_json(changed=False, msg='Error getting volume on prem %s: %s.' % (err, response))
        return response

    def get_aggregate_detail(self, working_environment_detail, aggregate_name):
        """Return the aggregate dict matching *aggregate_name*, or None if not found."""
        if working_environment_detail['workingEnvironmentType'] == 'ON_PREM':
            api = "/occm/api/onprem/aggregates?workingEnvironmentId=%s" % working_environment_detail['publicId']
        else:
            self.na_helper.set_api_root_path(working_environment_detail, self.rest_api)
            api_root_path = self.rest_api.api_root_path
            # Amazon uses a query parameter; other providers take the id as a path segment.
            if working_environment_detail['cloudProviderName'] != "Amazon":
                api = '%s/aggregates/%s'
            else:
                api = '%s/aggregates?workingEnvironmentId=%s'
            api = api % (api_root_path, working_environment_detail['publicId'])
        response, error, dummy = self.rest_api.get(api, header=self.headers)
        if error:
            self.module.fail_json(msg="Error: Failed to get aggregate list: %s" % str(error))
        for aggr in response:
            if aggr['name'] == aggregate_name:
                return aggr
        return None

    def build_quote_request(self, source_we_info, dest_we_info, vol_dest_quote):
        """Build the quote payload for the destination volume from the source volume's properties."""
        quote = dict()
        quote['size'] = {'size': vol_dest_quote['size']['size'], 'unit': vol_dest_quote['size']['unit']}
        quote['name'] = self.parameters['destination_volume_name']
        quote['snapshotPolicyName'] = vol_dest_quote['snapshotPolicy']
        quote['enableDeduplication'] = vol_dest_quote['deduplication']
        quote['enableThinProvisioning'] = vol_dest_quote['thinProvisioning']
        quote['enableCompression'] = vol_dest_quote['compression']
        quote['verifyNameUniqueness'] = True
        quote['replicationFlow'] = True
        # Use source working environment to get physical properties info of volumes
        aggregate = self.get_aggregate_detail(source_we_info, vol_dest_quote['aggregateName'])
        if aggregate is None:
            self.module.fail_json(changed=False, msg='Error getting aggregate on source volume')
        # All the volumes in one aggregate have the same physical properties
        if source_we_info['workingEnvironmentType'] != 'ON_PREM':
            if aggregate['providerVolumes'][0]['diskType'] == 'gp3' or aggregate['providerVolumes'][0]['diskType'] == 'io1'\
                    or aggregate['providerVolumes'][0]['diskType'] == 'io2':
                quote['iops'] = aggregate['providerVolumes'][0]['iops']
            if aggregate['providerVolumes'][0]['diskType'] == 'gp3':
                quote['throughput'] = aggregate['providerVolumes'][0]['throughput']
        quote['workingEnvironmentId'] = dest_we_info['publicId']
        quote['svmName'] = self.parameters['destination_svm_name']
        if self.parameters.get('capacity_tier') is not None:
            quote['capacityTier'] = self.parameters['capacity_tier']
        if self.parameters.get('provider_volume_type') is None:
            quote['providerVolumeType'] = vol_dest_quote['providerVolumeType']
        else:
            quote['providerVolumeType'] = self.parameters['provider_volume_type']
        return quote

    def delete_snapmirror(self, sm_detail):
        """Delete the relationship identified by the previously fetched sm_detail dict."""
        api_delete = '/occm/api/replication/%s/%s/%s' %\
            (sm_detail['destination_working_environment_id'], sm_detail['destination_svm_name'], self.parameters['destination_volume_name'])
        dummy, err, dummy_second = self.rest_api.send_request("DELETE", api_delete, None, None, header=self.headers)
        if err is not None:
            self.module.fail_json(changed=False, msg='Error deleting snapmirror relationship %s: %s.' % (err, dummy))

    def get_interclusterlifs(self, source_we_id, dest_we_id):
        """Return intercluster LIF info for the source/destination working environment pair."""
        api_get = '/occm/api/replication/intercluster-lifs?peerWorkingEnvironmentId=%s&workingEnvironmentId=%s' % (dest_we_id, source_we_id)
        response, err, dummy_second = self.rest_api.send_request("GET", api_get, None, header=self.headers)
        if err is not None:
            self.module.fail_json(changed=False, msg='Error getting interclusterlifs %s: %s.' % (err, response))
        return response

    def apply(self):
        """Idempotent entry point: create or delete the relationship as required."""
        current = self.get_snapmirror()
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        if self.na_helper.changed and not self.module.check_mode:
            if cd_action == 'create':
                self.create_snapmirror()
            elif cd_action == 'delete':
                self.delete_snapmirror(current)
        self.module.exit_json(changed=self.na_helper.changed)
def main():
    """Entry point for the na_cloudmanager_snapmirror module."""
    snapmirror = NetAppCloudmanagerSnapmirror()
    snapmirror.apply()


if __name__ == '__main__':
    main()

View File

@@ -0,0 +1,660 @@
#!/usr/bin/python
# (c) 2022, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
'''
na_cloudmanager_volume
'''
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
module: na_cloudmanager_volume
short_description: NetApp Cloud Manager volume
extends_documentation_fragment:
- netapp.cloudmanager.netapp.cloudmanager
version_added: '21.3.0'
author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
description:
- Create, Modify or Delete volume on Cloud Manager.
options:
state:
description:
- Whether the specified volume should exist or not.
choices: ['present', 'absent']
default: 'present'
type: str
name:
description:
- The name of the volume.
required: true
type: str
working_environment_name:
description:
- The working environment name where the volume will be created.
type: str
working_environment_id:
description:
- The public ID of the working environment where the volume will be created.
type: str
client_id:
description:
- The connector ID of the Cloud Manager Connector.
required: true
type: str
size:
description:
- The size of the volume.
type: float
size_unit:
description:
- The size unit of volume.
choices: ['GB']
default: 'GB'
type: str
snapshot_policy_name:
description:
- The snapshot policy name.
type: str
provider_volume_type:
description:
- The underlying cloud provider volume type.
- For AWS is ["gp3", "gp2", "io1", "st1", "sc1"].
- For Azure is ['Premium_LRS','Standard_LRS','StandardSSD_LRS'].
- For GCP is ['pd-balanced','pd-ssd','pd-standard'].
type: str
enable_deduplication:
description:
- Enabling deduplication.
- Default to true if not specified.
type: bool
enable_compression:
description:
- Enabling compression.
- Default to true if not specified.
type: bool
enable_thin_provisioning:
description:
- Enabling thin provisioning.
- Default to true if not specified.
type: bool
svm_name:
description:
- The name of the SVM. The default SVM name is used, if a name is not provided.
type: str
aggregate_name:
description:
- The aggregate in which the volume will be created. If not provided, Cloud Manager chooses the best aggregate.
type: str
capacity_tier:
description:
- The volume's capacity tier for tiering cold data to object storage.
- The default values for each cloud provider are as follows. Amazon as 'S3', Azure as 'Blob', GCP as 'cloudStorage'.
- If 'NONE', the capacity tier will not be set on volume creation.
choices: ['NONE', 'S3', 'Blob', 'cloudStorage']
type: str
tiering_policy:
description:
- The tiering policy.
choices: ['none', 'snapshot_only', 'auto', 'all']
type: str
export_policy_type:
description:
- The export policy type (NFS protocol parameters).
type: str
export_policy_ip:
description:
- Custom export policy list of IPs (NFS protocol parameters).
type: list
elements: str
export_policy_nfs_version:
description:
- Export policy protocol (NFS protocol parameters).
type: list
elements: str
iops:
description:
- Provisioned IOPS. Needed only when provider_volume_type is "io1".
type: int
throughput:
description:
- Unit is Mb/s. Valid range 125-1000.
- Required only when provider_volume_type is 'gp3'.
type: int
volume_protocol:
description:
- The protocol for the volume. This affects the provided parameters.
choices: ['nfs', 'cifs', 'iscsi']
type: str
default: 'nfs'
share_name:
description:
- Share name (CIFS protocol parameters).
type: str
permission:
description:
- CIFS share permission type (CIFS protocol parameters).
type: str
users:
description:
- List of users with the permission (CIFS protocol parameters).
type: list
elements: str
igroups:
description:
- List of igroups (iSCSI protocol parameters).
type: list
elements: str
os_name:
description:
- Operating system (iSCSI protocol parameters).
type: str
tenant_id:
description:
- The NetApp account ID that the Connector will be associated with. To be used only when using FSx.
type: str
version_added: 21.20.0
initiators:
description:
- Set of attributes of Initiators (iSCSI protocol parameters).
type: list
elements: dict
suboptions:
iqn:
description: The initiator node name.
required: true
type: str
alias:
description: The alias which associates with the node.
required: true
type: str
notes:
- Support check_mode.
'''
EXAMPLES = '''
- name: Create nfs volume with working_environment_name
netapp.cloudmanager.na_cloudmanager_volume:
state: present
name: test_vol
size: 15
size_unit: GB
working_environment_name: working_environment_1
client_id: client_id
refresh_token: refresh_token
svm_name: svm_1
snapshot_policy_name: default
export_policy_type: custom
export_policy_ip: ["10.0.0.1/16"]
export_policy_nfs_version: ["nfs3","nfs4"]
- name: Delete volume
netapp.cloudmanager.na_cloudmanager_volume:
state: absent
name: test_vol
working_environment_name: working_environment_1
client_id: client_id
refresh_token: refresh_token
svm_name: svm_1
'''
RETURN = r''' # '''
from ansible.module_utils.basic import AnsibleModule
import ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp as netapp_utils
from ansible_collections.netapp.cloudmanager.plugins.module_utils.netapp_module import NetAppModule
class NetAppCloudmanagerVolume(object):
def __init__(self):
"""
Parse arguments, setup state variables,
check parameters and ensure request module is installed
"""
self.argument_spec = netapp_utils.cloudmanager_host_argument_spec()
self.argument_spec.update(dict(
state=dict(required=False, choices=['present', 'absent'], default='present'),
name=dict(required=True, type='str'),
working_environment_id=dict(required=False, type='str'),
working_environment_name=dict(required=False, type='str'),
client_id=dict(required=True, type='str'),
size=dict(required=False, type='float'),
size_unit=dict(required=False, choices=['GB'], default='GB'),
snapshot_policy_name=dict(required=False, type='str'),
provider_volume_type=dict(required=False, type='str'),
enable_deduplication=dict(required=False, type='bool'),
enable_thin_provisioning=dict(required=False, type='bool'),
enable_compression=dict(required=False, type='bool'),
svm_name=dict(required=False, type='str'),
aggregate_name=dict(required=False, type='str'),
capacity_tier=dict(required=False, type='str', choices=['NONE', 'S3', 'Blob', 'cloudStorage']),
tiering_policy=dict(required=False, type='str', choices=['none', 'snapshot_only', 'auto', 'all']),
export_policy_type=dict(required=False, type='str'),
export_policy_ip=dict(required=False, type='list', elements='str'),
export_policy_nfs_version=dict(required=False, type='list', elements='str'),
iops=dict(required=False, type='int'),
throughput=dict(required=False, type='int'),
volume_protocol=dict(required=False, type='str', choices=['nfs', 'cifs', 'iscsi'], default='nfs'),
share_name=dict(required=False, type='str'),
permission=dict(required=False, type='str'),
users=dict(required=False, type='list', elements='str'),
igroups=dict(required=False, type='list', elements='str'),
os_name=dict(required=False, type='str'),
tenant_id=dict(required=False, type='str'),
initiators=dict(required=False, type='list', elements='dict', options=dict(
alias=dict(required=True, type='str'),
iqn=dict(required=True, type='str'),)),
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
required_one_of=[
['refresh_token', 'sa_client_id'],
['working_environment_name', 'working_environment_id'],
],
required_together=[['sa_client_id', 'sa_secret_key']],
required_if=[
['provider_volume_type', 'gp3', ['iops', 'throughput']],
['provider_volume_type', 'io1', ['iops']],
['capacity_tier', 'S3', ['tiering_policy']],
],
# enable_thin_provisioning reflects storage efficiency.
required_by={
'capacity_tier': ('tiering_policy', 'enable_thin_provisioning'),
},
supports_check_mode=True
)
self.na_helper = NetAppModule()
# set up state variables
self.parameters = self.na_helper.set_parameters(self.module.params)
# Calling generic rest_api class
self.rest_api = netapp_utils.CloudManagerRestAPI(self.module)
self.rest_api.token_type, self.rest_api.token = self.rest_api.get_token()
self.rest_api.url += self.rest_api.environment_data['CLOUD_MANAGER_HOST']
self.headers = {
'X-Agent-Id': self.rest_api.format_client_id(self.parameters['client_id'])
}
if self.rest_api.simulator:
self.headers.update({'x-simulator': 'true'})
if self.parameters.get('tenant_id'):
working_environment_detail, error = self.na_helper.get_aws_fsx_details(self.rest_api, self.headers, self.parameters['working_environment_name'])
elif self.parameters.get('working_environment_id'):
working_environment_detail, error = self.na_helper.get_working_environment_details(self.rest_api, self.headers)
else:
working_environment_detail, error = self.na_helper.get_working_environment_details_by_name(self.rest_api,
self.headers,
self.parameters['working_environment_name'])
if working_environment_detail is None:
self.module.fail_json(msg="Error: Cannot find working environment, if it is an AWS FSxN, please provide tenant_id: %s" % str(error))
self.parameters['working_environment_id'] = working_environment_detail['publicId']\
if working_environment_detail.get('publicId') else working_environment_detail['id']
self.na_helper.set_api_root_path(working_environment_detail, self.rest_api)
self.is_fsx = self.parameters['working_environment_id'].startswith('fs-')
if self.parameters.get('svm_name') is None:
fsx_path = ''
if self.is_fsx:
fsx_path = '/svms'
response, err, dummy = self.rest_api.send_request("GET", "%s/working-environments/%s%s" % (
self.rest_api.api_root_path, self.parameters['working_environment_id'], fsx_path), None, None, header=self.headers)
if err is not None:
self.module.fail_json(changed=False, msg="Error: unexpected response on getting svm: %s, %s" % (str(err), str(response)))
if self.is_fsx:
self.parameters['svm_name'] = response[0]['name']
else:
self.parameters['svm_name'] = response['svmName']
if self.parameters['volume_protocol'] == 'nfs':
extra_options = []
for option in ['share_name', 'permission', 'users', 'igroups', 'os_name', 'initiator']:
if self.parameters.get(option) is not None:
extra_options.append(option)
if len(extra_options) > 0:
self.module.fail_json(msg="Error: The following options are not allowed when volume_protocol is nfs: "
" %s" % extra_options)
elif self.parameters['volume_protocol'] == 'cifs':
extra_options = []
for option in ['export_policy_type', 'export_policy_ip', 'export_policy_nfs_version', 'igroups', 'os_name', 'initiator']:
if self.parameters.get(option) is not None:
extra_options.append(option)
if len(extra_options) > 0:
self.module.fail_json(msg="Error: The following options are not allowed when volume_protocol is cifs: "
"%s" % extra_options)
else:
extra_options = []
for option in ['export_policy_type', 'export_policy_ip', 'export_policy_nfs_version', 'share_name', 'permission', 'users']:
if self.parameters.get(option) is not None:
extra_options.append(option)
if len(extra_options) > 0:
self.module.fail_json(msg="Error: The following options are not allowed when volume_protocol is iscsi: "
"%s" % extra_options)
if self.parameters.get('igroups'):
current_igroups = []
for igroup in self.parameters['igroups']:
current = self.get_igroup(igroup)
current_igroups.append(current)
if any(isinstance(x, dict) for x in current_igroups) and None in current_igroups:
self.module.fail_json(changed=False, msg="Error: can not specify existing"
"igroup and new igroup together.")
if len(current_igroups) > 1 and None in current_igroups:
self.module.fail_json(changed=False, msg="Error: can not create more than one igroups.")
if current_igroups[0] is None:
if self.parameters.get('initiators') is None:
self.module.fail_json(changed=False, msg="Error: initiator is required when creating new igroup.")
if self.parameters.get('users'):
# When creating volume, 'Everyone' must have upper case E, 'everyone' will not work.
# When modifying volume, 'everyone' is fine.
new_users = []
for user in self.parameters['users']:
if user.lower() == 'everyone':
new_users.append('Everyone')
else:
new_users.append(user)
self.parameters['users'] = new_users
def get_volume(self):
if self.is_fsx:
query_param = 'fileSystemId'
else:
query_param = 'workingEnvironmentId'
response, err, dummy = self.rest_api.send_request("GET", "%s/volumes?%s=%s" % (
self.rest_api.api_root_path, query_param, self.parameters['working_environment_id']), None, header=self.headers)
if err is not None:
self.module.fail_json(changed=False, msg="Error: unexpected response on getting volume: %s, %s" % (str(err), str(response)))
target_vol = dict()
if response is None:
return None
for volume in response:
if volume['name'] == self.parameters['name']:
target_vol['name'] = volume['name']
target_vol['enable_deduplication'] = volume['deduplication']
target_vol['enable_thin_provisioning'] = volume['thinProvisioning']
target_vol['enable_compression'] = volume['compression']
if self.parameters.get('size'):
target_vol['size'] = volume['size']['size']
if self.parameters.get('size_unit'):
target_vol['size_unit'] = volume['size']['unit']
if self.parameters.get('export_policy_nfs_version') and volume.get('exportPolicyInfo'):
target_vol['export_policy_nfs_version'] = volume['exportPolicyInfo']['nfsVersion']
if self.parameters.get('export_policy_ip') and volume.get('exportPolicyInfo'):
target_vol['export_policy_ip'] = volume['exportPolicyInfo']['ips']
if self.parameters.get('export_policy_type') and volume.get('exportPolicyInfo'):
target_vol['export_policy_type'] = volume['exportPolicyInfo']['policyType']
if self.parameters.get('snapshot_policy'):
target_vol['snapshot_policy'] = volume['snapshotPolicy']
if self.parameters.get('provider_volume_type'):
target_vol['provider_volume_type'] = volume['providerVolumeType']
if self.parameters.get('capacity_tier') and self.parameters.get('capacity_tier') != 'NONE':
target_vol['capacity_tier'] = volume['capacityTier']
if self.parameters.get('tiering_policy'):
target_vol['tiering_policy'] = volume['tieringPolicy']
if self.parameters.get('share_name') and volume.get('shareInfo'):
target_vol['share_name'] = volume['shareInfo'][0]['shareName']
if self.parameters.get('users') and volume.get('shareInfo'):
if len(volume['shareInfo'][0]['accessControlList']) > 0:
target_vol['users'] = volume['shareInfo'][0]['accessControlList'][0]['users']
else:
target_vol['users'] = []
if self.parameters.get('users') and volume.get('shareInfo'):
if len(volume['shareInfo'][0]['accessControlList']) > 0:
target_vol['permission'] = volume['shareInfo'][0]['accessControlList'][0]['permission']
else:
target_vol['permission'] = []
if self.parameters.get('os_name') and volume.get('iscsiInfo'):
target_vol['os_name'] = volume['iscsiInfo']['osName']
if self.parameters.get('igroups') and volume.get('iscsiInfo'):
target_vol['igroups'] = volume['iscsiInfo']['igroups']
return target_vol
return None
def create_volume(self):
exclude_list = ['client_id', 'size_unit', 'export_policy_name', 'export_policy_type', 'export_policy_ip',
'export_policy_nfs_version', 'capacity_tier']
quote = self.na_helper.convert_module_args_to_api(self.parameters, exclude_list)
quote['verifyNameUniqueness'] = True # Always hard coded to true.
quote['unit'] = self.parameters['size_unit']
quote['size'] = {'size': self.parameters['size'], 'unit': self.parameters['size_unit']}
create_aggregate_if_not_exists = True
if self.parameters.get('aggregate_name'):
quote['aggregateName'] = self.parameters['aggregate_name']
create_aggregate_if_not_exists = False
if self.parameters.get('capacity_tier') and self.parameters['capacity_tier'] != "NONE":
quote['capacityTier'] = self.parameters['capacity_tier']
if self.parameters['volume_protocol'] == 'nfs':
quote['exportPolicyInfo'] = dict()
if self.parameters.get('export_policy_type'):
quote['exportPolicyInfo']['policyType'] = self.parameters['export_policy_type']
if self.parameters.get('export_policy_ip'):
quote['exportPolicyInfo']['ips'] = self.parameters['export_policy_ip']
if self.parameters.get('export_policy_nfs_version'):
quote['exportPolicyInfo']['nfsVersion'] = self.parameters['export_policy_nfs_version']
elif self.parameters['volume_protocol'] == 'iscsi':
iscsi_info = self.iscsi_volume_helper()
quote.update(iscsi_info)
else:
quote['shareInfo'] = dict()
quote['shareInfo']['accessControl'] = dict()
quote['shareInfo']['accessControl']['users'] = self.parameters['users']
if self.parameters.get('permission'):
quote['shareInfo']['accessControl']['permission'] = self.parameters['permission']
if self.parameters.get('share_name'):
quote['shareInfo']['shareName'] = self.parameters['share_name']
if not self.is_fsx:
response, err, dummy = self.rest_api.send_request("POST", "%s/volumes/quote" % self.rest_api.api_root_path,
None, quote, header=self.headers)
if err is not None:
self.module.fail_json(changed=False, msg="Error: unexpected response on quoting volume: %s, %s" % (str(err), str(response)))
quote['newAggregate'] = response['newAggregate']
quote['aggregateName'] = response['aggregateName']
quote['maxNumOfDisksApprovedToAdd'] = response['numOfDisks']
else:
quote['fileSystemId'] = self.parameters['working_environment_id']
if self.parameters.get('enable_deduplication'):
quote['deduplication'] = self.parameters.get('enable_deduplication')
if self.parameters.get('enable_thin_provisioning'):
quote['thinProvisioning'] = self.parameters.get('enable_thin_provisioning')
if self.parameters.get('enable_compression'):
quote['compression'] = self.parameters.get('enable_compression')
if self.parameters.get('snapshot_policy_name'):
quote['snapshotPolicy'] = self.parameters['snapshot_policy_name']
if self.parameters.get('capacity_tier') and self.parameters['capacity_tier'] != "NONE":
quote['capacityTier'] = self.parameters['capacity_tier']
if self.parameters.get('tiering_policy'):
quote['tieringPolicy'] = self.parameters['tiering_policy']
if self.parameters.get('provider_volume_type'):
quote['providerVolumeType'] = self.parameters['provider_volume_type']
if self.parameters.get('iops'):
quote['iops'] = self.parameters.get('iops')
if self.parameters.get('throughput'):
quote['throughput'] = self.parameters.get('throughput')
response, err, on_cloud_request_id = self.rest_api.send_request("POST", "%s/volumes?createAggregateIfNotFound=%s" % (
self.rest_api.api_root_path, create_aggregate_if_not_exists), None, quote, header=self.headers)
if err is not None:
self.module.fail_json(changed=False, msg="Error: unexpected on creating volume: %s, %s" % (str(err), str(response)))
wait_on_completion_api_url = '/occm/api/audit/activeTask/%s' % (str(on_cloud_request_id))
err = self.rest_api.wait_on_completion(wait_on_completion_api_url, "volume", "create", 20, 5)
if err is not None:
self.module.fail_json(changed=False, msg="Error: unexpected response wait_on_completion for creating volume: %s, %s" % (str(err), str(response)))
def modify_volume(self, modify):
vol = dict()
if self.parameters['volume_protocol'] == 'nfs':
export_policy_info = dict()
if self.parameters.get('export_policy_type'):
export_policy_info['policyType'] = self.parameters['export_policy_type']
if self.parameters.get('export_policy_ip'):
export_policy_info['ips'] = self.parameters['export_policy_ip']
if self.parameters.get('export_policy_nfs_version'):
export_policy_info['nfsVersion'] = self.parameters['export_policy_nfs_version']
vol['exportPolicyInfo'] = export_policy_info
elif self.parameters['volume_protocol'] == 'cifs':
vol['shareInfo'] = dict()
vol['shareInfo']['accessControlList'] = []
vol['shareInfo']['accessControlList'].append(dict())
if self.parameters.get('users'):
vol['shareInfo']['accessControlList'][0]['users'] = self.parameters['users']
if self.parameters.get('permission'):
vol['shareInfo']['accessControlList'][0]['permission'] = self.parameters['permission']
if self.parameters.get('share_name'):
vol['shareInfo']['shareName'] = self.parameters['share_name']
if modify.get('snapshot_policy_name'):
vol['snapshotPolicyName'] = self.parameters.get('snapshot_policy_name')
if modify.get('tiering_policy'):
vol['tieringPolicy'] = self.parameters.get('tiering_policy')
response, err, dummy = self.rest_api.send_request("PUT", "%s/volumes/%s/%s/%s" % (
self.rest_api.api_root_path, self.parameters['working_environment_id'], self.parameters['svm_name'],
self.parameters['name']), None, vol, header=self.headers)
if err is not None:
self.module.fail_json(changed=False, msg="Error: unexpected response on modifying volume: %s, %s" % (str(err), str(response)))
def delete_volume(self):
response, err, dummy = self.rest_api.send_request("DELETE", "%s/volumes/%s/%s/%s" % (
self.rest_api.api_root_path, self.parameters['working_environment_id'], self.parameters['svm_name'],
self.parameters['name']), None, None, header=self.headers)
if err is not None:
self.module.fail_json(changed=False, msg="Error: unexpected response on deleting volume: %s, %s" % (str(err), str(response)))
def get_initiator(self, alias_name):
response, err, dummy = self.rest_api.send_request("GET", "%s/volumes/initiator" % (
self.rest_api.api_root_path), None, header=self.headers)
if err is not None:
self.module.fail_json(changed=False, msg="Error: unexpected response on getting initiator: %s, %s" % (str(err), str(response)))
result = dict()
if response is None:
return None
for initiator in response:
if initiator.get('aliasName') and initiator.get('aliasName') == alias_name:
result['alias'] = initiator.get('aliasName')
result['iqn'] = initiator.get('iqn')
return result
return None
def create_initiator(self, initiator):
ini = self.na_helper.convert_module_args_to_api(initiator)
response, err, dummy = self.rest_api.send_request("POST", "%s/volumes/initiator" % (
self.rest_api.api_root_path), None, ini, header=self.headers)
if err is not None:
self.module.fail_json(changed=False, msg="Error: unexpected response on creating initiator: %s, %s" % (str(err), str(response)))
def get_igroup(self, igroup_name):
response, err, dummy = self.rest_api.send_request("GET", "%s/volumes/igroups/%s/%s" % (
self.rest_api.api_root_path, self.parameters['working_environment_id'], self.parameters['svm_name']),
None, None, header=self.headers)
if err is not None:
self.module.fail_json(changed=False, msg="Error: unexpected response on getting igroup: %s, %s" % (str(err), str(response)))
result = dict()
if response is None:
return None
for igroup in response:
if igroup['igroupName'] == igroup_name:
result['igroup_name'] = igroup['igroupName']
result['os_type'] = igroup['osType']
result['portset_name'] = igroup['portsetName']
result['igroup_type'] = igroup['igroupType']
result['initiators'] = igroup['initiators']
return result
return None
    def iscsi_volume_helper(self):
        """Build the iscsiInfo section of an iSCSI volume-create payload.

        For each requested igroup name, checks whether it already exists via
        get_igroup(). A missing igroup produces an igroupCreationRequest
        (creating any missing initiators on the fly); an existing one reuses
        the names as-is. Validation elsewhere in the module ensures at most
        one new igroup is requested, which is why the creation branch can
        safely use igroups[0].
        Returns the dict to merge into the create-volume quote.
        """
        quote = dict()
        quote['iscsiInfo'] = dict()
        if self.parameters.get('igroups'):
            # Resolve each requested igroup: existing ones come back as dicts,
            # missing ones as None.
            current_igroups = []
            for igroup in self.parameters['igroups']:
                current = self.get_igroup(igroup)
                current_igroups.append(current)
            for igroup in current_igroups:
                if igroup is None:
                    # Igroup does not exist yet: request its creation. Prior
                    # validation guarantees only one new igroup, hence [0].
                    quote['iscsiInfo']['igroupCreationRequest'] = dict()
                    quote['iscsiInfo']['igroupCreationRequest']['igroupName'] = self.parameters['igroups'][0]
                    iqn_list = []
                    for initiator in self.parameters['initiators']:
                        if initiator.get('iqn'):
                            iqn_list.append(initiator['iqn'])
                            # Create the initiator on the backend if its alias
                            # is not known yet.
                            current_initiator = self.get_initiator(initiator['alias'])
                            if current_initiator is None:
                                initiator_request = dict()
                                if initiator.get('alias'):
                                    initiator_request['aliasName'] = initiator['alias']
                                if initiator.get('iqn'):
                                    initiator_request['iqn'] = initiator['iqn']
                                self.create_initiator(initiator_request)
                        # Refresh the request with the IQNs gathered so far.
                        quote['iscsiInfo']['igroupCreationRequest']['initiators'] = iqn_list
                    quote['iscsiInfo']['osName'] = self.parameters['os_name']
                else:
                    # All igroups exist: just reference them by name.
                    quote['iscsiInfo']['igroups'] = self.parameters['igroups']
                    quote['iscsiInfo']['osName'] = self.parameters['os_name']
        return quote
def apply(self):
current = self.get_volume()
cd_action, modify = None, None
cd_action = self.na_helper.get_cd_action(current, self.parameters)
if cd_action is None:
modify = self.na_helper.get_modified_attributes(current, self.parameters)
unmodifiable = []
for attr in modify:
if attr not in ['export_policy_ip', 'export_policy_nfs_version', 'snapshot_policy_name', 'users',
'permission', 'tiering_policy', 'snapshot_policy_name']:
unmodifiable.append(attr)
if len(unmodifiable) > 0:
self.module.fail_json(changed=False, msg="%s cannot be modified." % str(unmodifiable))
if self.na_helper.changed and not self.module.check_mode:
if cd_action == 'create':
self.create_volume()
elif cd_action == 'delete':
self.delete_volume()
elif modify:
self.modify_volume(modify)
self.module.exit_json(changed=self.na_helper.changed)
def main():
    """Entry point: build the volume module object and apply the desired state."""
    volume_module = NetAppCloudmanagerVolume()
    volume_module.apply()


if __name__ == '__main__':
    main()