Init: mediaserver

2023-02-08 12:13:28 +01:00
parent 848bc9739c
commit f7c23d4ba9
31914 changed files with 6175775 additions and 0 deletions

View File

@@ -0,0 +1,19 @@
Wait tests
----------
Wait tests require at least one node, and don't work on the normal k8s
openshift-origin container as provided by `ansible-test --docker -v k8s`.
minikube, Kubernetes from Docker, or any other Kubernetes service will
suffice.
If kubectl is already using the right config file and context, you can
just do
```
cd tests/integration/targets/okd
./runme.sh -vv
```
Otherwise, set one or both of `K8S_AUTH_KUBECONFIG` and `K8S_AUTH_CONTEXT`
and use the same command.
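For example (the kubeconfig path and context name below are placeholders):
```
export K8S_AUTH_KUBECONFIG=~/.kube/config
export K8S_AUTH_CONTEXT=my-test-context
cd tests/integration/targets/okd
./runme.sh -vv
```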

View File

@@ -0,0 +1,99 @@
---
- name: Converge
hosts: localhost
connection: local
gather_facts: no
vars:
ansible_python_interpreter: '{{ virtualenv_interpreter }}'
vars_files:
- vars/main.yml
tasks:
# OpenShift Resources
- name: Create a project
community.okd.k8s:
name: testing
kind: Project
api_version: project.openshift.io/v1
apply: no
register: output
- name: show output
debug:
var: output
- name: Create deployment config
community.okd.k8s:
state: present
name: hello-world
namespace: testing
definition: '{{ okd_dc_template }}'
wait: yes
wait_condition:
type: Available
status: True
vars:
k8s_pod_name: hello-world
k8s_pod_image: python
k8s_pod_command:
- python
- '-m'
- http.server
k8s_pod_env:
- name: TEST
value: test
okd_dc_triggers:
- type: ConfigChange
register: output
- name: Show output
debug:
var: output
- vars:
image: docker.io/python
image_name: python
image_tag: latest
k8s_pod_image: python
k8s_pod_command:
- python
- '-m'
- http.server
namespace: idempotence-testing
block:
- name: Create a namespace
community.okd.k8s:
name: '{{ namespace }}'
kind: Namespace
api_version: v1
- name: Create imagestream
community.okd.k8s:
namespace: '{{ namespace }}'
definition: '{{ okd_imagestream_template }}'
- name: Create DeploymentConfig to reference ImageStream
community.okd.k8s:
name: '{{ k8s_pod_name }}'
namespace: '{{ namespace }}'
definition: '{{ okd_dc_template }}'
vars:
k8s_pod_name: is-idempotent-dc
- name: Create Deployment to reference ImageStream
community.okd.k8s:
name: '{{ k8s_pod_name }}'
namespace: '{{ namespace }}'
definition: '{{ k8s_deployment_template | combine(metadata) }}'
vars:
k8s_pod_annotations:
"alpha.image.policy.openshift.io/resolve-names": "*"
k8s_pod_name: is-idempotent-deployment
annotation:
- from:
kind: ImageStreamTag
name: "{{ image_name }}:{{ image_tag}}}"
fieldPath: 'spec.template.spec.containers[?(@.name=="{{ k8s_pod_name }}")].image'
metadata:
metadata:
annotations:
image.openshift.io/triggers: '{{ annotation | to_json }}'

View File

@@ -0,0 +1,6 @@
---
- name: Destroy
hosts: localhost
connection: local
gather_facts: no
tasks: []

View File

@@ -0,0 +1,21 @@
---
apiVersion: certmanager.k8s.io/v1alpha1
kind: Certificate
metadata:
name: acme-crt
spec:
secretName: acme-crt-secret
dnsNames:
- foo.example.com
- bar.example.com
acme:
config:
- ingressClass: nginx
domains:
- foo.example.com
- bar.example.com
issuerRef:
name: letsencrypt-prod
# We can reference ClusterIssuers by changing the kind here.
# The default value is Issuer (i.e. a locally namespaced Issuer)
kind: Issuer

View File

@@ -0,0 +1,9 @@
#
NAME=example
# Multiline values shouldn't break things
export CONTENT=This is a long message\
that may take one or more lines to parse\
but should still work without issue
# This shouldn't throw an error
UNUSED=

View File

@@ -0,0 +1,22 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: kuard
name: kuard
namespace: default
spec:
replicas: 3
selector:
matchLabels:
app: kuard
unwanted: value
template:
metadata:
labels:
app: kuard
spec:
containers:
- image: gcr.io/kuar-demo/kuard-amd64:1
name: kuard

View File

@@ -0,0 +1,21 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: kuard
name: kuard
namespace: default
spec:
replicas: hello
selector:
matchLabels:
app: kuard
template:
metadata:
labels:
app: kuard
spec:
containers:
- image: gcr.io/kuar-demo/kuard-amd64:1
name: kuard

View File

@@ -0,0 +1,12 @@
# Want to make sure comments don't break it
export NAME=test123
NAMESPACE=openshift
# Blank lines should be fine too
# Equals in comments shouldn't break things=True
MEMORY_LIMIT=1Gi

View File

@@ -0,0 +1,23 @@
---
kind: Template
apiVersion: template.openshift.io/v1
metadata:
name: pod-template
objects:
- apiVersion: v1
kind: Pod
metadata:
name: "Pod-${{ NAME }}"
spec:
containers:
- args:
- /bin/sh
- -c
- while true; do echo $(date); sleep 15; done
image: python:3.7-alpine
imagePullPolicy: Always
name: python
parameters:
- name: NAME
description: trailing name of the pod
required: true

View File

@@ -0,0 +1,15 @@
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: certificates.certmanager.k8s.io
spec:
group: certmanager.k8s.io
version: v1alpha1
scope: Namespaced
names:
kind: Certificate
plural: certificates
shortNames:
- cert
- certs

View File

@@ -0,0 +1,34 @@
---
apiVersion: template.openshift.io/v1
kind: Template
labels:
template: simple-example-test
message: |-
The following configmaps have been created in your project: ${NAME}.
metadata:
annotations:
description: A super basic template for testing
openshift.io/display-name: Super basic template
openshift.io/provider-display-name: Red Hat, Inc.
tags: quickstart,examples
name: simple-example
objects:
- apiVersion: v1
kind: ConfigMap
metadata:
annotations:
description: Big example
name: ${NAME}
data:
content: "${CONTENT}"
parameters:
- description: The name assigned to the ConfigMap
displayName: Name
name: NAME
required: true
value: example
- description: The value for the content key of the configmap
displayName: Content
name: CONTENT
required: true
value: ''

View File

@@ -0,0 +1,49 @@
---
dependency:
name: galaxy
options:
requirements-file: requirements.yml
driver:
name: delegated
platforms:
- name: cluster
groups:
- k8s
provisioner:
name: ansible
log: true
options:
vvv: True
config_options:
inventory:
enable_plugins: community.okd.openshift
lint: |
set -e
ansible-lint
inventory:
hosts:
plugin: community.okd.openshift
host_vars:
localhost:
virtualenv: ${MOLECULE_EPHEMERAL_DIRECTORY}/virtualenv
virtualenv_command: '{{ ansible_playbook_python }} -m virtualenv'
virtualenv_interpreter: '{{ virtualenv }}/bin/python'
playbook_namespace: molecule-tests
env:
ANSIBLE_FORCE_COLOR: 'true'
ANSIBLE_COLLECTIONS_PATHS: ${OVERRIDE_COLLECTION_PATH:-$MOLECULE_PROJECT_DIRECTORY}
verifier:
name: ansible
lint: |
set -e
ansible-lint
scenario:
name: default
test_sequence:
- dependency
- lint
- syntax
- prepare
- converge
- idempotence
- verify

View File

@@ -0,0 +1,61 @@
---
- name: Prepare
hosts: localhost
connection: local
gather_facts: no
tasks:
- pip:
name: virtualenv
- pip:
name:
- kubernetes>=12.0.0
- coverage
- python-ldap
virtualenv: "{{ virtualenv }}"
virtualenv_command: "{{ virtualenv_command }}"
virtualenv_site_packages: no
- name: 'Configure htpasswd secret (username: test, password: testing123)'
community.okd.k8s:
definition:
apiVersion: v1
kind: Secret
metadata:
name: htpass-secret
namespace: openshift-config
stringData:
htpasswd: "test:$2y$05$zgjczyp96jCIp//CGmnWiefhd7G3l54IdsZoV4IwA1UWtd04L0lE2"
- name: Configure htpasswd identity provider
community.okd.k8s:
definition:
apiVersion: config.openshift.io/v1
kind: OAuth
metadata:
name: cluster
spec:
identityProviders:
- name: htpasswd_provider
mappingMethod: claim
type: HTPasswd
htpasswd:
fileData:
name: htpass-secret
- name: Create ClusterRoleBinding for test user
community.okd.k8s:
definition:
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: test-cluster-reader
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-reader
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: test

View File

@@ -0,0 +1,4 @@
---
ldap_admin_user: "admin"
ldap_admin_password: "testing123!"
ldap_root: "dc=ansible,dc=redhat"

View File

@@ -0,0 +1,186 @@
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
module: openshift_ldap_entry
short_description: Add or remove an entry on an LDAP server.
author:
- Aubin Bikouo (@abikouo)
description:
- This module performs basic operations on an LDAP server (adding and removing entries).
- Similar to `community.general.ldap_entry`, it was created to avoid a dependency on that collection for these tests.
- This module is not supported outside of testing this collection.
options:
attributes:
description:
- If I(state=present), attributes necessary to create an entry. Existing
entries are never modified. To assert specific attribute values on an
existing entry, use M(community.general.ldap_attrs) module instead.
type: dict
objectClass:
description:
- If I(state=present), value or list of values to use when creating
the entry. It can either be a string or an actual list of
strings.
type: list
elements: str
state:
description:
- The target state of the entry.
choices: [present, absent]
default: present
type: str
bind_dn:
description:
- A DN to bind with.
- If this is blank, we'll use an anonymous bind.
type: str
required: true
bind_pw:
description:
- The password to use with I(bind_dn).
type: str
dn:
required: true
description:
- The DN of the entry to add or remove.
type: str
server_uri:
description:
- A URI to the LDAP server.
- The default value lets the underlying LDAP client library look for a UNIX domain socket in its default location.
type: str
default: ldapi:///
requirements:
- python-ldap
'''
EXAMPLES = r'''
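# Illustrative sketch only; the DN, credentials and server URI below are
# placeholder values, not part of the test suite.
- name: Ensure a test organizational unit exists
  openshift_ldap_entry:
    bind_dn: "cn=admin,dc=ansible,dc=redhat"
    bind_pw: "testing123!"
    server_uri: "ldap://ldap-server:1389"
    dn: "ou=users,dc=ansible,dc=redhat"
    objectClass:
      - organizationalUnit
    attributes:
      ou: users
    state: present

- name: Remove the same entry
  openshift_ldap_entry:
    bind_dn: "cn=admin,dc=ansible,dc=redhat"
    bind_pw: "testing123!"
    server_uri: "ldap://ldap-server:1389"
    dn: "ou=users,dc=ansible,dc=redhat"
    state: absent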
'''
RETURN = r'''
# Default return values
'''
import traceback
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.common.text.converters import to_native, to_bytes
LDAP_IMP_ERR = None
try:
import ldap
import ldap.modlist
HAS_LDAP = True
except ImportError:
LDAP_IMP_ERR = traceback.format_exc()
HAS_LDAP = False
def argument_spec():
args = {}
args['attributes'] = dict(default={}, type='dict')
args['objectClass'] = dict(type='list', elements='str')
args['state'] = dict(default='present', choices=['present', 'absent'])
args['bind_dn'] = dict(required=True)
args['bind_pw'] = dict(default='', no_log=True)
args['dn'] = dict(required=True)
args['server_uri'] = dict(default='ldapi:///')
return args
class LdapEntry(AnsibleModule):
def __init__(self):
AnsibleModule.__init__(
self,
argument_spec=argument_spec(),
required_if=[('state', 'present', ['objectClass'])],
)
if not HAS_LDAP:
self.fail_json(msg=missing_required_lib('python-ldap'), exception=LDAP_IMP_ERR)
self.__connection = None
# Add the objectClass into the list of attributes
self.params['attributes']['objectClass'] = (self.params['objectClass'])
# Load attributes
if self.params['state'] == 'present':
self.attrs = {}
for name, value in self.params['attributes'].items():
if isinstance(value, list):
self.attrs[name] = list(map(to_bytes, value))
else:
self.attrs[name] = [to_bytes(value)]
@property
def connection(self):
if not self.__connection:
ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)
self.__connection = ldap.initialize(self.params['server_uri'])
try:
self.__connection.simple_bind_s(self.params['bind_dn'], self.params['bind_pw'])
except ldap.LDAPError as e:
self.fail_json(msg="Cannot bind to the server due to: %s" % e)
return self.__connection
def add(self):
""" If self.dn does not exist, returns a callable that will add it. """
changed = False
msg = "LDAP Entry '%s' already exist." % self.params["dn"]
if not self._is_entry_present():
modlist = ldap.modlist.addModlist(self.attrs)
self.connection.add_s(self.params['dn'], modlist)
changed = True
msg = "LDAP Entry '%s' successfully created." % self.params["dn"]
self.exit_json(changed=changed, msg=msg)
def delete(self):
""" If self.dn exists, returns a callable that will delete it. """
changed = False
msg = "LDAP Entry '%s' does not exist." % self.params["dn"]
if self._is_entry_present():
self.connection.delete_s(self.params['dn'])
changed = True
msg = "LDAP Entry '%s' successfully deleted." % self.params["dn"]
self.exit_json(changed=changed, msg=msg)
def _is_entry_present(self):
try:
self.connection.search_s(self.params['dn'], ldap.SCOPE_BASE)
except ldap.NO_SUCH_OBJECT:
is_present = False
else:
is_present = True
return is_present
def execute(self):
try:
if self.params['state'] == 'present':
self.add()
else:
self.delete()
except Exception as e:
self.fail_json(msg="Entry action failed.", details=to_native(e), exception=traceback.format_exc())
def main():
module = LdapEntry()
module.execute()
if __name__ == '__main__':
main()

View File

@@ -0,0 +1,109 @@
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
module: openshift_ldap_entry_info
short_description: Check whether an entry exists on an LDAP server.
author:
- Aubin Bikouo (@abikouo)
description:
- This module connects to an LDAP server and searches for an entry.
- This module is not supported outside of testing this collection.
options:
bind_dn:
description:
- A DN to bind with.
- If this is blank, we'll use an anonymous bind.
type: str
required: true
bind_pw:
description:
- The password to use with I(bind_dn).
type: str
required: True
dn:
description:
- The DN of the entry to test.
type: str
required: True
server_uri:
description:
- A URI to the LDAP server.
type: str
required: True
requirements:
- python-ldap
'''
EXAMPLES = r'''
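# Illustrative sketch only; the DN, credentials and server URI below are placeholders.
- name: Check whether an LDAP entry exists
  openshift_ldap_entry_info:
    bind_dn: "cn=admin,dc=ansible,dc=redhat"
    bind_pw: "testing123!"
    server_uri: "ldap://ldap-server:1389"
    dn: "ou=users,dc=ansible,dc=redhat"
  register: ldap_entry

# The module returns found=true/false depending on whether the entry exists.
- name: Show whether the entry was found
  debug:
    var: ldap_entry.found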
'''
RETURN = r'''
# Default return values
'''
import traceback
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
LDAP_IMP_ERR = None
try:
import ldap
import ldap.modlist
HAS_LDAP = True
except ImportError:
LDAP_IMP_ERR = traceback.format_exc()
HAS_LDAP = False
def argument_spec():
args = {}
args['bind_dn'] = dict(required=True)
args['bind_pw'] = dict(required=True, no_log=True)
args['dn'] = dict(required=True)
args['server_uri'] = dict(required=True)
return args
def execute():
module = AnsibleModule(
argument_spec=argument_spec(),
supports_check_mode=True
)
if not HAS_LDAP:
module.fail_json(msg=missing_required_lib("python-ldap"), exception=LDAP_IMP_ERR)
ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)
connection = ldap.initialize(module.params['server_uri'])
try:
connection.simple_bind_s(module.params['bind_dn'], module.params['bind_pw'])
except ldap.LDAPError as e:
module.fail_json(msg="Cannot bind to the server due to: %s" % e)
try:
connection.search_s(module.params['dn'], ldap.SCOPE_BASE)
module.exit_json(changed=False, found=True)
except ldap.NO_SUCH_OBJECT:
module.exit_json(changed=False, found=False)
def main():
execute()
if __name__ == '__main__':
main()

View File

@@ -0,0 +1,4 @@
---
collections:
- community.okd
- kubernetes.core

View File

@@ -0,0 +1,235 @@
- block:
- name: Get LDAP definition
set_fact:
ldap_entries: "{{ lookup('template', 'ad/definition.j2') | from_yaml }}"
- name: Delete openshift groups if existing
community.okd.k8s:
state: absent
kind: Group
version: "user.openshift.io/v1"
name: "{{ item }}"
with_items:
- admins
- developers
- name: Delete existing LDAP Entries
openshift_ldap_entry:
bind_dn: "{{ ldap_bind_dn }}"
bind_pw: "{{ ldap_bind_pw }}"
server_uri: "{{ ldap_server_uri }}"
dn: "{{ item.dn }}"
state: absent
with_items: "{{ ldap_entries.users + ldap_entries.units | reverse | list }}"
- name: Create LDAP Entries
openshift_ldap_entry:
bind_dn: "{{ ldap_bind_dn }}"
bind_pw: "{{ ldap_bind_pw }}"
server_uri: "{{ ldap_server_uri }}"
dn: "{{ item.dn }}"
attributes: "{{ item.attr }}"
objectClass: "{{ item.class }}"
with_items: "{{ ldap_entries.units + ldap_entries.users }}"
- name: Load test configurations
set_fact:
sync_config: "{{ lookup('template', 'ad/sync-config.j2') | from_yaml }}"
- name: Synchronize Groups
community.okd.openshift_adm_groups_sync:
config: "{{ sync_config }}"
check_mode: yes
register: result
- name: Validate Group going to be created
assert:
that:
- result is changed
- admins_group
- devs_group
- '"jane.smith@ansible.org" in {{ admins_group.users }}'
- '"jim.adams@ansible.org" in {{ admins_group.users }}'
- '"jordanbulls@ansible.org" in {{ devs_group.users }}'
- admins_group.users | length == 2
- devs_group.users | length == 1
vars:
admins_group: "{{ result.groups | selectattr('metadata.name', 'equalto', 'admins') | first }}"
devs_group: "{{ result.groups | selectattr('metadata.name', 'equalto', 'developers') | first }}"
- name: Synchronize Groups (Remove check_mode)
community.okd.openshift_adm_groups_sync:
config: "{{ sync_config }}"
register: result
- name: Validate groups were created
assert:
that:
- result is changed
- name: Read admins group
kubernetes.core.k8s_info:
kind: Group
version: "user.openshift.io/v1"
name: admins
register: result
- name: Validate group was created
assert:
that:
- result.resources | length == 1
- '"jane.smith@ansible.org" in {{ result.resources.0.users }}'
- '"jim.adams@ansible.org" in {{ result.resources.0.users }}'
- name: Read developers group
kubernetes.core.k8s_info:
kind: Group
version: "user.openshift.io/v1"
name: developers
register: result
- name: Validate group was created
assert:
that:
- result.resources | length == 1
- '"jordanbulls@ansible.org" in {{ result.resources.0.users }}'
- name: Define user dn to delete
set_fact:
user_to_delete: "cn=Jane,ou=engineers,ou=activeD,{{ ldap_root }}"
- name: Delete 1 admin user
openshift_ldap_entry:
bind_dn: "{{ ldap_bind_dn }}"
bind_pw: "{{ ldap_bind_pw }}"
server_uri: "{{ ldap_server_uri }}"
dn: "{{ user_to_delete }}"
state: absent
- name: Synchronize Openshift groups using allow_groups
community.okd.openshift_adm_groups_sync:
config: "{{ sync_config }}"
allow_groups:
- developers
type: openshift
register: openshift_sync
- name: Validate that only the developers group was synced
assert:
that:
- openshift_sync is changed
- openshift_sync.groups | length == 1
- openshift_sync.groups.0.metadata.name == "developers"
- name: Read admins group
kubernetes.core.k8s_info:
kind: Group
version: "user.openshift.io/v1"
name: admins
register: result
- name: Validate admins group content has not changed
assert:
that:
- result.resources | length == 1
- '"jane.smith@ansible.org" in {{ result.resources.0.users }}'
- '"jim.adams@ansible.org" in {{ result.resources.0.users }}'
- name: Synchronize Openshift groups using deny_groups
community.okd.openshift_adm_groups_sync:
config: "{{ sync_config }}"
deny_groups:
- developers
type: openshift
register: openshift_sync
- name: Validate that only the admins group was synced
assert:
that:
- openshift_sync is changed
- openshift_sync.groups | length == 1
- openshift_sync.groups.0.metadata.name == "admins"
- name: Read admins group
kubernetes.core.k8s_info:
kind: Group
version: "user.openshift.io/v1"
name: admins
register: result
- name: Validate admins group contains only 1 user now
assert:
that:
- result.resources | length == 1
- result.resources.0.users == ["jim.adams@ansible.org"]
- name: Set users to delete (delete all developers users)
set_fact:
user_to_delete: "cn=Jordan,ou=engineers,ou=activeD,{{ ldap_root }}"
- name: Delete 1 developer user
openshift_ldap_entry:
bind_dn: "{{ ldap_bind_dn }}"
bind_pw: "{{ ldap_bind_pw }}"
server_uri: "{{ ldap_server_uri }}"
dn: "{{ user_to_delete }}"
state: absent
- name: Prune groups
community.okd.openshift_adm_groups_sync:
config: "{{ sync_config }}"
state: absent
register: result
- name: Validate result is changed (only the developers group should be deleted)
assert:
that:
- result is changed
- result.groups | length == 1
- name: Get developers group info
kubernetes.core.k8s_info:
kind: Group
version: "user.openshift.io/v1"
name: developers
register: result
- name: assert group was deleted
assert:
that:
- result.resources | length == 0
- name: Get admins group info
kubernetes.core.k8s_info:
kind: Group
version: "user.openshift.io/v1"
name: admins
register: result
- name: assert group was not deleted
assert:
that:
- result.resources | length == 1
- name: Prune groups once again (idempotency)
community.okd.openshift_adm_groups_sync:
config: "{{ sync_config }}"
state: absent
register: result
- name: Assert nothing was changed
assert:
that:
- result is not changed
always:
- name: Delete openshift groups if existing
community.okd.k8s:
state: absent
kind: Group
version: "user.openshift.io/v1"
name: "{{ item }}"
with_items:
- admins
- developers

View File

@@ -0,0 +1,174 @@
- block:
- name: Get LDAP definition
set_fact:
ldap_entries: "{{ lookup('template', 'augmented-ad/definition.j2') | from_yaml }}"
- name: Delete openshift groups if existing
community.okd.k8s:
state: absent
kind: Group
version: "user.openshift.io/v1"
name: "{{ item }}"
with_items:
- banking
- insurance
- name: Delete existing LDAP entries
openshift_ldap_entry:
bind_dn: "{{ ldap_bind_dn }}"
bind_pw: "{{ ldap_bind_pw }}"
server_uri: "{{ ldap_server_uri }}"
dn: "{{ item.dn }}"
state: absent
with_items: "{{ ldap_entries.users + ldap_entries.groups + ldap_entries.units | reverse | list }}"
- name: Create LDAP Entries
openshift_ldap_entry:
bind_dn: "{{ ldap_bind_dn }}"
bind_pw: "{{ ldap_bind_pw }}"
server_uri: "{{ ldap_server_uri }}"
dn: "{{ item.dn }}"
attributes: "{{ item.attr }}"
objectClass: "{{ item.class }}"
with_items: "{{ ldap_entries.units + ldap_entries.groups + ldap_entries.users }}"
- name: Load test configurations
set_fact:
sync_config: "{{ lookup('template', 'augmented-ad/sync-config.j2') | from_yaml }}"
- name: Synchronize Groups
community.okd.openshift_adm_groups_sync:
config: "{{ sync_config }}"
check_mode: yes
register: result
- name: Validate that 'banking' and 'insurance' groups were created
assert:
that:
- result is changed
- banking_group
- insurance_group
- '"james-allan@ansible.org" in {{ banking_group.users }}'
- '"gordon-kane@ansible.org" in {{ banking_group.users }}'
- '"alice-courtney@ansible.org" in {{ insurance_group.users }}'
- banking_group.users | length == 2
- insurance_group.users | length == 1
vars:
banking_group: "{{ result.groups | selectattr('metadata.name', 'equalto', 'banking') | first }}"
insurance_group: "{{ result.groups | selectattr('metadata.name', 'equalto', 'insurance') | first }}"
- name: Synchronize Groups (Remove check_mode)
community.okd.openshift_adm_groups_sync:
config: "{{ sync_config }}"
register: result
- name: Validate groups were created
assert:
that:
- result is changed
- name: Define facts for group to create
set_fact:
ldap_groups:
- name: banking
users:
- "james-allan@ansible.org"
- "gordon-kane@ansible.org"
- name: insurance
users:
- "alice-courtney@ansible.org"
- name: Read 'banking' openshift group
kubernetes.core.k8s_info:
kind: Group
version: "user.openshift.io/v1"
name: banking
register: result
- name: Validate group info
assert:
that:
- result.resources | length == 1
- '"james-allan@ansible.org" in {{ result.resources.0.users }}'
- '"gordon-kane@ansible.org" in {{ result.resources.0.users }}'
- name: Read 'insurance' openshift group
kubernetes.core.k8s_info:
kind: Group
version: "user.openshift.io/v1"
name: insurance
register: result
- name: Validate group info
assert:
that:
- result.resources | length == 1
- 'result.resources.0.users == ["alice-courtney@ansible.org"]'
- name: Delete employee from 'insurance' group
openshift_ldap_entry:
bind_dn: "{{ ldap_bind_dn }}"
bind_pw: "{{ ldap_bind_pw }}"
server_uri: "{{ ldap_server_uri }}"
dn: "cn=Alice,ou=employee,ou=augmentedAD,{{ ldap_root }}"
state: absent
- name: Prune groups
community.okd.openshift_adm_groups_sync:
config: "{{ sync_config }}"
state: absent
register: result
- name: Validate result is changed (only the insurance group should be deleted)
assert:
that:
- result is changed
- result.groups | length == 1
- name: Get 'insurance' openshift group info
kubernetes.core.k8s_info:
kind: Group
version: "user.openshift.io/v1"
name: insurance
register: result
- name: assert group was deleted
assert:
that:
- result.resources | length == 0
- name: Get 'banking' openshift group info
kubernetes.core.k8s_info:
kind: Group
version: "user.openshift.io/v1"
name: banking
register: result
- name: assert group was not deleted
assert:
that:
- result.resources | length == 1
- name: Prune groups once again (idempotency)
community.okd.openshift_adm_groups_sync:
config: "{{ sync_config }}"
state: absent
register: result
- name: Assert no change was made
assert:
that:
- result is not changed
always:
- name: Delete openshift groups if existing
community.okd.k8s:
state: absent
kind: Group
version: "user.openshift.io/v1"
name: "{{ item }}"
with_items:
- banking
- insurance

View File

@@ -0,0 +1,61 @@
---
- name: Get cluster information
kubernetes.core.k8s_cluster_info:
register: info
- name: Create LDAP Pod
community.okd.k8s:
namespace: "default"
wait: yes
definition:
kind: Pod
apiVersion: v1
metadata:
name: ldap-pod
labels:
app: ldap
spec:
containers:
- name: ldap
image: bitnami/openldap
env:
- name: LDAP_ADMIN_USERNAME
value: "{{ ldap_admin_user }}"
- name: LDAP_ADMIN_PASSWORD
value: "{{ ldap_admin_password }}"
- name: LDAP_USERS
value: "ansible"
- name: LDAP_PASSWORDS
value: "ansible123"
- name: LDAP_ROOT
value: "{{ ldap_root }}"
ports:
- containerPort: 1389
register: pod_info
- name: Set Pod Internal IP
set_fact:
podIp: "{{ pod_info.result.status.podIP }}"
- name: Set LDAP Common facts
set_fact:
ldap_server_uri: "ldap://{{ podIp }}:1389"
ldap_bind_dn: "cn={{ ldap_admin_user }},{{ ldap_root }}"
ldap_bind_pw: "{{ ldap_admin_password }}"
- name: Display LDAP Server URI
debug:
var: ldap_server_uri
- name: Test existing user from LDAP server
openshift_ldap_entry_info:
bind_dn: "{{ ldap_bind_dn }}"
bind_pw: "{{ ldap_bind_pw }}"
dn: "ou=users,{{ ldap_root }}"
server_uri: "{{ ldap_server_uri }}"
# ignore_errors: true
# register: ping_ldap
- include_tasks: "tasks/rfc2307.yml"
- include_tasks: "tasks/activeDirectory.yml"
- include_tasks: "tasks/augmentedActiveDirectory.yml"

View File

@@ -0,0 +1,468 @@
- block:
- name: Get LDAP definition
set_fact:
ldap_resources: "{{ lookup('template', 'rfc2307/definition.j2') | from_yaml }}"
- name: Delete openshift groups if existing
community.okd.k8s:
state: absent
kind: Group
version: "user.openshift.io/v1"
name: "{{ item }}"
with_items:
- admins
- engineers
- developers
- name: Delete existing LDAP entries
openshift_ldap_entry:
bind_dn: "{{ ldap_bind_dn }}"
bind_pw: "{{ ldap_bind_pw }}"
server_uri: "{{ ldap_server_uri }}"
dn: "{{ item.dn }}"
state: absent
with_items: "{{ ldap_resources.users + ldap_resources.groups + ldap_resources.units | reverse | list }}"
- name: Create LDAP units
openshift_ldap_entry:
bind_dn: "{{ ldap_bind_dn }}"
bind_pw: "{{ ldap_bind_pw }}"
server_uri: "{{ ldap_server_uri }}"
dn: "{{ item.dn }}"
attributes: "{{ item.attr }}"
objectClass: "{{ item.class }}"
with_items: "{{ ldap_resources.units }}"
- name: Create LDAP Groups
openshift_ldap_entry:
bind_dn: "{{ ldap_bind_dn }}"
bind_pw: "{{ ldap_bind_pw }}"
server_uri: "{{ ldap_server_uri }}"
dn: "{{ item.dn }}"
attributes: "{{ item.attr }}"
objectClass: "{{ item.class }}"
with_items: "{{ ldap_resources.groups }}"
- name: Create LDAP users
openshift_ldap_entry:
bind_dn: "{{ ldap_bind_dn }}"
bind_pw: "{{ ldap_bind_pw }}"
server_uri: "{{ ldap_server_uri }}"
dn: "{{ item.dn }}"
attributes: "{{ item.attr }}"
objectClass: "{{ item.class }}"
with_items: "{{ ldap_resources.users }}"
- name: Load test configurations
set_fact:
configs: "{{ lookup('template', 'rfc2307/sync-config.j2') | from_yaml }}"
- name: Synchronize Groups
community.okd.openshift_adm_groups_sync:
config: "{{ configs.simple }}"
check_mode: yes
register: result
- name: Validate Group going to be created
assert:
that:
- result is changed
- admins_group
- devs_group
- '"jane.smith@ansible.org" in {{ admins_group.users }}'
- '"jim.adams@ansible.org" in {{ devs_group.users }}'
- '"jordanbulls@ansible.org" in {{ devs_group.users }}'
- admins_group.users | length == 1
- devs_group.users | length == 2
vars:
admins_group: "{{ result.groups | selectattr('metadata.name', 'equalto', 'admins') | first }}"
devs_group: "{{ result.groups | selectattr('metadata.name', 'equalto', 'developers') | first }}"
- name: Synchronize Groups - User defined mapping
community.okd.openshift_adm_groups_sync:
config: "{{ configs.user_defined }}"
check_mode: yes
register: result
- name: Validate Group going to be created
assert:
that:
- result is changed
- admins_group
- devs_group
- '"jane.smith@ansible.org" in {{ admins_group.users }}'
- '"jim.adams@ansible.org" in {{ devs_group.users }}'
- '"jordanbulls@ansible.org" in {{ devs_group.users }}'
- admins_group.users | length == 1
- devs_group.users | length == 2
vars:
admins_group: "{{ result.groups | selectattr('metadata.name', 'equalto', 'ansible-admins') | first }}"
devs_group: "{{ result.groups | selectattr('metadata.name', 'equalto', 'ansible-devs') | first }}"
- name: Synchronize Groups - Using dn for every query
community.okd.openshift_adm_groups_sync:
config: "{{ configs.dn_everywhere }}"
check_mode: yes
register: result
- name: Validate Group going to be created
assert:
that:
- result is changed
- admins_group
- devs_group
- '"cn=Jane,ou=people,ou=rfc2307,{{ ldap_root }}" in {{ admins_group.users }}'
- '"cn=Jim,ou=people,ou=rfc2307,{{ ldap_root }}" in {{ devs_group.users }}'
- '"cn=Jordan,ou=people,ou=rfc2307,{{ ldap_root }}" in {{ devs_group.users }}'
- admins_group.users | length == 1
- devs_group.users | length == 2
vars:
admins_group: "{{ result.groups | selectattr('metadata.name', 'equalto', 'cn=admins,ou=groups,ou=rfc2307,' + ldap_root ) | first }}"
devs_group: "{{ result.groups | selectattr('metadata.name', 'equalto', 'cn=developers,ou=groups,ou=rfc2307,' + ldap_root ) | first }}"
- name: Synchronize Groups - Partially user defined mapping
community.okd.openshift_adm_groups_sync:
config: "{{ configs.partially_user_defined }}"
check_mode: yes
register: result
- name: Validate Group going to be created
assert:
that:
- result is changed
- admins_group
- devs_group
- '"jane.smith@ansible.org" in {{ admins_group.users }}'
- '"jim.adams@ansible.org" in {{ devs_group.users }}'
- '"jordanbulls@ansible.org" in {{ devs_group.users }}'
- admins_group.users | length == 1
- devs_group.users | length == 2
vars:
admins_group: "{{ result.groups | selectattr('metadata.name', 'equalto', 'ansible-admins') | first }}"
devs_group: "{{ result.groups | selectattr('metadata.name', 'equalto', 'developers') | first }}"
- name: Delete Group 'engineers' if created before
community.okd.k8s:
state: absent
kind: Group
version: "user.openshift.io/v1"
name: 'engineers'
wait: yes
ignore_errors: yes
- name: Synchronize Groups - Out of scope member (should fail)
community.okd.openshift_adm_groups_sync:
config: "{{ configs.out_scope }}"
check_mode: yes
register: result
ignore_errors: yes
- name: Assert group sync failed due to non-existent member
assert:
that:
- result is failed
- result.msg.startswith("Entry not found for base='cn=Matthew,ou=people,ou=outrfc2307,{{ ldap_root }}'")
- name: Define sync configuration with tolerateMemberNotFoundErrors
set_fact:
config_out_of_scope_tolerate_not_found: "{{ configs.out_scope | combine({'rfc2307': merge_rfc2307 })}}"
vars:
merge_rfc2307: "{{ configs.out_scope.rfc2307 | combine({'tolerateMemberNotFoundErrors': 'true'}) }}"
- name: Synchronize Groups - Partially user defined mapping (tolerateMemberNotFoundErrors=true)
community.okd.openshift_adm_groups_sync:
config: "{{ config_out_of_scope_tolerate_not_found }}"
check_mode: yes
register: result
- name: Assert group sync did not fail (tolerateMemberNotFoundErrors=true)
assert:
that:
- result is changed
- result.groups | length == 1
- result.groups.0.metadata.name == 'engineers'
- result.groups.0.users == ['Abraham']
- name: Create Group 'engineers'
community.okd.k8s:
state: present
wait: yes
definition:
kind: Group
apiVersion: "user.openshift.io/v1"
metadata:
name: engineers
users: []
- name: Try to sync LDAP group with an existing OpenShift group not created by sync (should fail)
community.okd.openshift_adm_groups_sync:
config: "{{ config_out_of_scope_tolerate_not_found }}"
check_mode: yes
register: result
ignore_errors: yes
- name: Validate group sync failed
assert:
that:
- result is failed
- '"openshift.io/ldap.host label did not match sync host" in result.msg'
- name: Define allow_groups and deny_groups groups
set_fact:
allow_groups:
- "cn=developers,ou=groups,ou=rfc2307,{{ ldap_root }}"
deny_groups:
- "cn=admins,ou=groups,ou=rfc2307,{{ ldap_root }}"
- name: Synchronize Groups using allow_groups
community.okd.openshift_adm_groups_sync:
config: "{{ configs.simple }}"
allow_groups: "{{ allow_groups }}"
register: result
check_mode: yes
- name: Validate Group going to be created
assert:
that:
- result is changed
- result.groups | length == 1
- result.groups.0.metadata.name == "developers"
- name: Synchronize Groups using deny_groups
community.okd.openshift_adm_groups_sync:
config: "{{ configs.simple }}"
deny_groups: "{{ deny_groups }}"
register: result
check_mode: yes
- name: Validate Group going to be created
assert:
that:
- result is changed
- result.groups | length == 1
- result.groups.0.metadata.name == "developers"
- name: Synchronize groups, remove check_mode
community.okd.openshift_adm_groups_sync:
config: "{{ configs.simple }}"
register: result
- name: Validate result is changed
assert:
that:
- result is changed
- name: Read Groups
kubernetes.core.k8s_info:
kind: Group
version: "user.openshift.io/v1"
name: admins
register: result
- name: Validate group was created
assert:
that:
- result.resources | length == 1
- '"jane.smith@ansible.org" in {{ result.resources.0.users }}'
- name: Read Groups
kubernetes.core.k8s_info:
kind: Group
version: "user.openshift.io/v1"
name: developers
register: result
- name: Validate group was created
assert:
that:
- result.resources | length == 1
- '"jim.adams@ansible.org" in {{ result.resources.0.users }}'
- '"jordanbulls@ansible.org" in {{ result.resources.0.users }}'
- name: Set users to delete (no admins users anymore and only 1 developer kept)
set_fact:
users_to_delete:
- "cn=Jane,ou=people,ou=rfc2307,{{ ldap_root }}"
- "cn=Jim,ou=people,ou=rfc2307,{{ ldap_root }}"
- name: Delete users from LDAP servers
openshift_ldap_entry:
bind_dn: "{{ ldap_bind_dn }}"
bind_pw: "{{ ldap_bind_pw }}"
server_uri: "{{ ldap_server_uri }}"
dn: "{{ item }}"
state: absent
with_items: "{{ users_to_delete }}"
- name: Define sync configuration with tolerateMemberNotFoundErrors
set_fact:
config_simple_tolerate_not_found: "{{ configs.simple | combine({'rfc2307': merge_rfc2307 })}}"
vars:
merge_rfc2307: "{{ configs.simple.rfc2307 | combine({'tolerateMemberNotFoundErrors': 'true'}) }}"
- name: Synchronize groups once again after users deletion
community.okd.openshift_adm_groups_sync:
config: "{{ config_simple_tolerate_not_found }}"
register: result
- name: Validate result is changed
assert:
that:
- result is changed
- name: Read Groups
kubernetes.core.k8s_info:
kind: Group
version: "user.openshift.io/v1"
name: admins
register: result
- name: Validate admins group no longer contains any users
assert:
that:
- result.resources | length == 1
- result.resources.0.users == []
- name: Read Groups
kubernetes.core.k8s_info:
kind: Group
version: "user.openshift.io/v1"
name: developers
register: result
- name: Validate group was created
assert:
that:
- result.resources | length == 1
- '"jordanbulls@ansible.org" in {{ result.resources.0.users }}'
- name: Set group to delete
set_fact:
groups_to_delete:
- "cn=developers,ou=groups,ou=rfc2307,{{ ldap_root }}"
- name: Delete Group from LDAP servers
openshift_ldap_entry:
bind_dn: "{{ ldap_bind_dn }}"
bind_pw: "{{ ldap_bind_pw }}"
server_uri: "{{ ldap_server_uri }}"
dn: "{{ item }}"
state: absent
with_items: "{{ groups_to_delete }}"
- name: Prune groups
community.okd.openshift_adm_groups_sync:
config: "{{ config_simple_tolerate_not_found }}"
state: absent
register: result
check_mode: yes
- name: Validate that only the developers group is a candidate for prune
assert:
that:
- result is changed
- result.groups | length == 1
- result.groups.0.metadata.name == "developers"
- name: Read Group (validate that check_mode did not perform updates in the cluster)
kubernetes.core.k8s_info:
kind: Group
version: "user.openshift.io/v1"
name: developers
register: result
- name: Assert group was found
assert:
that:
- result.resources | length == 1
- name: Prune using allow_groups
community.okd.openshift_adm_groups_sync:
config: "{{ config_simple_tolerate_not_found }}"
allow_groups:
- developers
state: absent
register: result
check_mode: yes
- name: assert developers group was candidate for prune
assert:
that:
- result is changed
- result.groups | length == 1
- result.groups.0.metadata.name == "developers"
- name: Prune using deny_groups
community.okd.openshift_adm_groups_sync:
config: "{{ config_simple_tolerate_not_found }}"
deny_groups:
- developers
state: absent
register: result
check_mode: yes
- name: assert no group is a candidate for prune
assert:
that:
- result is not changed
- result.groups | length == 0
- name: Prune groups
community.okd.openshift_adm_groups_sync:
config: "{{ config_simple_tolerate_not_found }}"
state: absent
register: result
- name: Validate result is changed
assert:
that:
- result is changed
- result.groups | length == 1
- name: Get developers group info
kubernetes.core.k8s_info:
kind: Group
version: "user.openshift.io/v1"
name: developers
register: result
- name: assert group was deleted
assert:
that:
- result.resources | length == 0
- name: Get admins group info
kubernetes.core.k8s_info:
kind: Group
version: "user.openshift.io/v1"
name: admins
register: result
- name: assert group was not deleted
assert:
that:
- result.resources | length == 1
- name: Prune groups once again (idempotency)
community.okd.openshift_adm_groups_sync:
config: "{{ config_simple_tolerate_not_found }}"
state: absent
register: result
- name: Assert nothing changed
assert:
that:
- result is not changed
- result.groups | length == 0
always:
- name: Delete openshift groups if existing
community.okd.k8s:
state: absent
kind: Group
version: "user.openshift.io/v1"
name: "{{ item }}"
with_items:
- admins
- engineers
- developers

View File

@@ -0,0 +1,39 @@
units:
- dn: "ou=activeD,{{ ldap_root }}"
class:
- organizationalUnit
attr:
ou: activeD
- dn: "ou=engineers,ou=activeD,{{ ldap_root }}"
class:
- organizationalUnit
attr:
ou: engineers
users:
- dn: cn=Jane,ou=engineers,ou=activeD,{{ ldap_root }}
class:
- inetOrgPerson
attr:
cn: Jane
sn: Smith
displayName: Jane Smith
mail: jane.smith@ansible.org
employeeType: admins
- dn: cn=Jim,ou=engineers,ou=activeD,{{ ldap_root }}
class:
- inetOrgPerson
attr:
cn: Jim
sn: Adams
displayName: Jim Adams
mail: jim.adams@ansible.org
employeeType: admins
- dn: cn=Jordan,ou=engineers,ou=activeD,{{ ldap_root }}
class:
- inetOrgPerson
attr:
cn: Jordan
sn: Bulls
displayName: Jordan Bulls
mail: jordanbulls@ansible.org
employeeType: developers

View File

@@ -0,0 +1,12 @@
kind: LDAPSyncConfig
apiVersion: v1
url: "{{ ldap_server_uri }}"
insecure: true
activeDirectory:
usersQuery:
baseDN: "ou=engineers,ou=activeD,{{ ldap_root }}"
scope: sub
derefAliases: never
filter: (objectclass=inetOrgPerson)
userNameAttributes: [ mail ]
groupMembershipAttributes: [ employeeType ]

View File

@@ -0,0 +1,59 @@
units:
- dn: "ou=augmentedAD,{{ ldap_root }}"
class:
- organizationalUnit
attr:
ou: augmentedAD
- dn: "ou=employee,ou=augmentedAD,{{ ldap_root }}"
class:
- organizationalUnit
attr:
ou: employee
- dn: "ou=category,ou=augmentedAD,{{ ldap_root }}"
class:
- organizationalUnit
attr:
ou: category
groups:
- dn: "cn=banking,ou=category,ou=augmentedAD,{{ ldap_root }}"
class:
- groupOfNames
attr:
cn: banking
description: Banking employees
member:
- cn=James,ou=employee,ou=augmentedAD,{{ ldap_root }}
- cn=Gordon,ou=employee,ou=augmentedAD,{{ ldap_root }}
- dn: "cn=insurance,ou=category,ou=augmentedAD,{{ ldap_root }}"
class:
- groupOfNames
attr:
cn: insurance
description: Insurance employees
member:
- cn=Alice,ou=employee,ou=augmentedAD,{{ ldap_root }}
users:
- dn: cn=James,ou=employee,ou=augmentedAD,{{ ldap_root }}
class:
- inetOrgPerson
attr:
cn: James
sn: Allan
mail: james-allan@ansible.org
businessCategory: cn=banking,ou=category,ou=augmentedAD,{{ ldap_root }}
- dn: cn=Gordon,ou=employee,ou=augmentedAD,{{ ldap_root }}
class:
- inetOrgPerson
attr:
cn: Gordon
sn: Kane
mail: gordon-kane@ansible.org
businessCategory: cn=banking,ou=category,ou=augmentedAD,{{ ldap_root }}
- dn: cn=Alice,ou=employee,ou=augmentedAD,{{ ldap_root }}
class:
- inetOrgPerson
attr:
cn: Alice
sn: Courtney
mail: alice-courtney@ansible.org
businessCategory: cn=insurance,ou=category,ou=augmentedAD,{{ ldap_root }}

View File

@@ -0,0 +1,20 @@
kind: LDAPSyncConfig
apiVersion: v1
url: "{{ ldap_server_uri }}"
insecure: true
augmentedActiveDirectory:
groupsQuery:
baseDN: "ou=category,ou=augmentedAD,{{ ldap_root }}"
scope: sub
derefAliases: never
pageSize: 0
groupUIDAttribute: dn
groupNameAttributes: [ cn ]
usersQuery:
baseDN: "ou=employee,ou=augmentedAD,{{ ldap_root }}"
scope: sub
derefAliases: never
filter: (objectclass=inetOrgPerson)
pageSize: 0
userNameAttributes: [ mail ]
groupMembershipAttributes: [ businessCategory ]

View File

@@ -0,0 +1,102 @@
units:
- dn: "ou=rfc2307,{{ ldap_root }}"
class:
- organizationalUnit
attr:
ou: rfc2307
- dn: "ou=groups,ou=rfc2307,{{ ldap_root }}"
class:
- organizationalUnit
attr:
ou: groups
- dn: "ou=people,ou=rfc2307,{{ ldap_root }}"
class:
- organizationalUnit
attr:
ou: people
- dn: "ou=outrfc2307,{{ ldap_root }}"
class:
- organizationalUnit
attr:
ou: outrfc2307
- dn: "ou=groups,ou=outrfc2307,{{ ldap_root }}"
class:
- organizationalUnit
attr:
ou: groups
- dn: "ou=people,ou=outrfc2307,{{ ldap_root }}"
class:
- organizationalUnit
attr:
ou: people
groups:
- dn: "cn=admins,ou=groups,ou=rfc2307,{{ ldap_root }}"
class:
- groupOfNames
attr:
cn: admins
description: System Administrators
member:
- cn=Jane,ou=people,ou=rfc2307,{{ ldap_root }}
- dn: "cn=developers,ou=groups,ou=rfc2307,{{ ldap_root }}"
class:
- groupOfNames
attr:
cn: developers
description: Developers
member:
- cn=Jim,ou=people,ou=rfc2307,{{ ldap_root }}
- cn=Jordan,ou=people,ou=rfc2307,{{ ldap_root }}
- dn: "cn=engineers,ou=groups,ou=outrfc2307,{{ ldap_root }}"
class:
- groupOfNames
attr:
cn: engineers
description: Engineers
member:
- cn=Jim,ou=people,ou=rfc2307,{{ ldap_root }}
- cn=Jordan,ou=people,ou=rfc2307,{{ ldap_root }}
- cn=Julia,ou=people,ou=outrfc2307,{{ ldap_root }}
- cn=Matthew,ou=people,ou=outrfc2307,{{ ldap_root }}
users:
- dn: cn=Jane,ou=people,ou=rfc2307,{{ ldap_root }}
class:
- person
- organizationalPerson
- inetOrgPerson
attr:
cn: Jane
sn: Smith
displayName: Jane Smith
mail: jane.smith@ansible.org
admin: yes
- dn: cn=Jim,ou=people,ou=rfc2307,{{ ldap_root }}
class:
- person
- organizationalPerson
- inetOrgPerson
attr:
cn: Jim
sn: Adams
displayName: Jim Adams
mail: jim.adams@ansible.org
- dn: cn=Jordan,ou=people,ou=rfc2307,{{ ldap_root }}
class:
- person
- organizationalPerson
- inetOrgPerson
attr:
cn: Jordan
sn: Bulls
displayName: Jordan Bulls
mail: jordanbulls@ansible.org
- dn: cn=Julia,ou=people,ou=outrfc2307,{{ ldap_root }}
class:
- person
- organizationalPerson
- inetOrgPerson
attr:
cn: Julia
sn: Abraham
displayName: Julia Abraham
mail: juliaabraham@ansible.org

View File

@@ -0,0 +1,105 @@
simple:
kind: LDAPSyncConfig
apiVersion: v1
url: "{{ ldap_server_uri }}"
insecure: true
rfc2307:
groupsQuery:
baseDN: "ou=groups,ou=rfc2307,{{ ldap_root }}"
scope: sub
derefAliases: never
filter: (objectclass=groupOfNames)
groupUIDAttribute: dn
groupNameAttributes: [ cn ]
groupMembershipAttributes: [ member ]
usersQuery:
baseDN: "ou=people,ou=rfc2307,{{ ldap_root }}"
scope: sub
derefAliases: never
userUIDAttribute: dn
userNameAttributes: [ mail ]
user_defined:
kind: LDAPSyncConfig
apiVersion: v1
url: "{{ ldap_server_uri }}"
insecure: true
groupUIDNameMapping:
"cn=admins,ou=groups,ou=rfc2307,{{ ldap_root }}": ansible-admins
"cn=developers,ou=groups,ou=rfc2307,{{ ldap_root }}": ansible-devs
rfc2307:
groupsQuery:
baseDN: "ou=groups,ou=rfc2307,{{ ldap_root }}"
scope: sub
derefAliases: never
filter: (objectclass=groupOfNames)
groupUIDAttribute: dn
groupNameAttributes: [ cn ]
groupMembershipAttributes: [ member ]
usersQuery:
baseDN: "ou=people,ou=rfc2307,{{ ldap_root }}"
scope: sub
derefAliases: never
userUIDAttribute: dn
userNameAttributes: [ mail ]
partially_user_defined:
kind: LDAPSyncConfig
apiVersion: v1
url: "{{ ldap_server_uri }}"
insecure: true
groupUIDNameMapping:
"cn=admins,ou=groups,ou=rfc2307,{{ ldap_root }}": ansible-admins
rfc2307:
groupsQuery:
baseDN: "ou=groups,ou=rfc2307,{{ ldap_root }}"
scope: sub
derefAliases: never
filter: (objectclass=groupOfNames)
groupUIDAttribute: dn
groupNameAttributes: [ cn ]
groupMembershipAttributes: [ member ]
usersQuery:
baseDN: "ou=people,ou=rfc2307,{{ ldap_root }}"
scope: sub
derefAliases: never
userUIDAttribute: dn
userNameAttributes: [ mail ]
dn_everywhere:
kind: LDAPSyncConfig
apiVersion: v1
url: "{{ ldap_server_uri }}"
insecure: true
rfc2307:
groupsQuery:
baseDN: "ou=groups,ou=rfc2307,{{ ldap_root }}"
scope: sub
derefAliases: never
filter: (objectclass=groupOfNames)
groupUIDAttribute: dn
groupNameAttributes: [ dn ]
groupMembershipAttributes: [ member ]
usersQuery:
baseDN: "ou=people,ou=rfc2307,{{ ldap_root }}"
scope: sub
derefAliases: never
userUIDAttribute: dn
userNameAttributes: [ dn ]
out_scope:
kind: LDAPSyncConfig
apiVersion: v1
url: "{{ ldap_server_uri }}"
insecure: true
rfc2307:
groupsQuery:
baseDN: "ou=groups,ou=outrfc2307,{{ ldap_root }}"
scope: sub
derefAliases: never
filter: (objectclass=groupOfNames)
groupUIDAttribute: dn
groupNameAttributes: [ cn ]
groupMembershipAttributes: [ member ]
usersQuery:
baseDN: "ou=people,ou=outrfc2307,{{ ldap_root }}"
scope: sub
derefAliases: never
userUIDAttribute: dn
userNameAttributes: [ sn ]

View File

@@ -0,0 +1,319 @@
- block:
- set_fact:
test_sa: "clusterrole-sa"
test_ns: "clusterrole-ns"
- name: Ensure namespace
kubernetes.core.k8s:
kind: Namespace
name: "{{ test_ns }}"
- name: Get cluster information
kubernetes.core.k8s_cluster_info:
register: cluster_info
no_log: true
- set_fact:
cluster_host: "{{ cluster_info['connection']['host'] }}"
- name: Create Service account
kubernetes.core.k8s:
definition:
apiVersion: v1
kind: ServiceAccount
metadata:
name: "{{ test_sa }}"
namespace: "{{ test_ns }}"
- name: Read Service Account
kubernetes.core.k8s_info:
kind: ServiceAccount
namespace: "{{ test_ns }}"
name: "{{ test_sa }}"
register: result
- set_fact:
secret_token: "{{ result.resources[0]['secrets'][0]['name'] }}"
- name: Get secret details
kubernetes.core.k8s_info:
kind: Secret
namespace: '{{ test_ns }}'
name: '{{ secret_token }}'
register: _secret
retries: 10
delay: 10
until:
- ("'openshift.io/token-secret.value' in _secret.resources[0]['metadata']['annotations']") or ("'token' in _secret.resources[0]['data']")
- set_fact:
api_token: "{{ _secret.resources[0]['metadata']['annotations']['openshift.io/token-secret.value'] }}"
when: "'openshift.io/token-secret.value' in _secret.resources[0]['metadata']['annotations']"
- set_fact:
api_token: "{{ _secret.resources[0]['data']['token'] | b64decode }}"
when: "'token' in _secret.resources[0]['data']"
- name: list Nodes should fail (forbidden user)
kubernetes.core.k8s_info:
api_key: "{{ api_token }}"
host: "{{ cluster_host }}"
validate_certs: no
kind: Node
register: error
ignore_errors: true
- assert:
that:
- '"nodes is forbidden: User" in error.msg'
- name: list Pods for all namespaces should fail
kubernetes.core.k8s_info:
api_key: "{{ api_token }}"
host: "{{ cluster_host }}"
validate_certs: no
kind: Pod
register: error
ignore_errors: true
- assert:
that:
- '"pods is forbidden: User" in error.msg'
- name: list Pods for test namespace should fail
kubernetes.core.k8s_info:
api_key: "{{ api_token }}"
host: "{{ cluster_host }}"
validate_certs: no
kind: Pod
namespace: "{{ test_ns }}"
register: error
ignore_errors: true
- assert:
that:
- '"pods is forbidden: User" in error.msg'
- set_fact:
test_labels:
phase: dev
cluster_roles:
- name: pod-manager
resources:
- pods
verbs:
- list
api_version_binding: "authorization.openshift.io/v1"
- name: node-manager
resources:
- nodes
verbs:
- list
api_version_binding: "rbac.authorization.k8s.io/v1"
- name: Create cluster roles
kubernetes.core.k8s:
definition:
kind: ClusterRole
apiVersion: "rbac.authorization.k8s.io/v1"
metadata:
name: "{{ item.name }}"
labels: "{{ test_labels }}"
rules:
- apiGroups: [""]
resources: "{{ item.resources }}"
verbs: "{{ item.verbs }}"
with_items: '{{ cluster_roles }}'
- name: Create Role Binding (namespaced)
kubernetes.core.k8s:
definition:
kind: RoleBinding
apiVersion: "rbac.authorization.k8s.io/v1"
metadata:
name: "{{ cluster_roles[0].name }}-binding"
namespace: "{{ test_ns }}"
labels: "{{ test_labels }}"
subjects:
- kind: ServiceAccount
name: "{{ test_sa }}"
namespace: "{{ test_ns }}"
apiGroup: ""
roleRef:
kind: ClusterRole
name: "{{ cluster_roles[0].name }}"
apiGroup: ""
- name: list Pods for all namespaces should fail
kubernetes.core.k8s_info:
api_key: "{{ api_token }}"
host: "{{ cluster_host }}"
validate_certs: no
kind: Pod
register: error
ignore_errors: true
- assert:
that:
- '"pods is forbidden: User" in error.msg'
- name: list Pods for test namespace should succeed
kubernetes.core.k8s_info:
api_key: "{{ api_token }}"
host: "{{ cluster_host }}"
validate_certs: no
kind: Pod
namespace: "{{ test_ns }}"
no_log: true
- name: Create Cluster role Binding
kubernetes.core.k8s:
definition:
kind: ClusterRoleBinding
apiVersion: "{{ item.api_version_binding }}"
metadata:
name: "{{ item.name }}-binding"
labels: "{{ test_labels }}"
subjects:
- kind: ServiceAccount
name: "{{ test_sa }}"
namespace: "{{ test_ns }}"
apiGroup: ""
roleRef:
kind: ClusterRole
name: "{{ item.name }}"
apiGroup: ""
with_items: "{{ cluster_roles }}"
- name: list Pods for all namespaces should succeed
kubernetes.core.k8s_info:
api_key: "{{ api_token }}"
host: "{{ cluster_host }}"
validate_certs: no
kind: Pod
no_log: true
- name: list Pods for test namespace should succeed
kubernetes.core.k8s_info:
api_key: "{{ api_token }}"
host: "{{ cluster_host }}"
validate_certs: no
kind: Pod
namespace: "{{ test_ns }}"
no_log: true
- name: list Node using ServiceAccount
kubernetes.core.k8s_info:
api_key: "{{ api_token }}"
host: "{{ cluster_host }}"
validate_certs: no
kind: Node
namespace: "{{ test_ns }}"
no_log: true
- name: Prune clusterroles (check mode)
community.okd.openshift_adm_prune_auth:
resource: clusterroles
label_selectors:
- phase=dev
register: check
check_mode: true
- name: validate clusterrole binding candidates for prune
assert:
that:
- '"{{ item.name }}-binding" in check.cluster_role_binding'
- '"{{ test_ns }}/{{ cluster_roles[0].name }}-binding" in check.role_binding'
with_items: "{{ cluster_roles }}"
- name: Prune Cluster Role for managing Pod
community.okd.openshift_adm_prune_auth:
resource: clusterroles
name: "{{ cluster_roles[0].name }}"
- name: list Pods for all namespaces should fail
kubernetes.core.k8s_info:
api_key: "{{ api_token }}"
host: "{{ cluster_host }}"
validate_certs: no
kind: Pod
register: error
no_log: true
ignore_errors: true
- assert:
that:
- '"pods is forbidden: User" in error.msg'
- name: list Pods for test namespace should fail
kubernetes.core.k8s_info:
api_key: "{{ api_token }}"
host: "{{ cluster_host }}"
validate_certs: no
kind: Pod
namespace: "{{ test_ns }}"
register: error
no_log: true
ignore_errors: true
- assert:
that:
- '"pods is forbidden: User" in error.msg'
- name: list Node using ServiceAccount
kubernetes.core.k8s_info:
api_key: "{{ api_token }}"
host: "{{ cluster_host }}"
validate_certs: no
kind: Node
namespace: "{{ test_ns }}"
no_log: true
- name: Prune clusterroles (remaining)
community.okd.openshift_adm_prune_auth:
resource: clusterroles
label_selectors:
- phase=dev
- name: list Node using ServiceAccount should fail
kubernetes.core.k8s_info:
api_key: "{{ api_token }}"
host: "{{ cluster_host }}"
validate_certs: no
kind: Node
namespace: "{{ test_ns }}"
register: error
ignore_errors: true
- assert:
that:
- '"nodes is forbidden: User" in error.msg'
always:
- name: Ensure namespace is deleted
kubernetes.core.k8s:
state: absent
kind: Namespace
name: "{{ test_ns }}"
wait: yes
ignore_errors: true
- name: Delete ClusterRoleBinding
kubernetes.core.k8s:
kind: ClusterRoleBinding
api_version: "rbac.authorization.k8s.io/v1"
name: "{{ item.name }}-binding"
state: absent
ignore_errors: true
with_items: "{{ cluster_roles }}"
when: cluster_roles is defined
- name: Delete ClusterRole
kubernetes.core.k8s:
kind: ClusterRole
api_version: "rbac.authorization.k8s.io/v1"
name: "{{ item.name }}"
state: absent
ignore_errors: true
with_items: "{{ cluster_roles }}"
when: cluster_roles is defined

View File

@@ -0,0 +1,340 @@
- block:
- set_fact:
test_ns: "prune-roles"
sa_name: "roles-sa"
pod_name: "pod-prune"
role_definition:
- name: pod-list
labels:
action: list
verbs:
- list
role_binding:
api_version: rbac.authorization.k8s.io/v1
- name: pod-create
labels:
action: create
verbs:
- create
- get
role_binding:
api_version: authorization.openshift.io/v1
- name: pod-delete
labels:
action: delete
verbs:
- delete
role_binding:
api_version: rbac.authorization.k8s.io/v1
- name: Ensure namespace
kubernetes.core.k8s:
kind: Namespace
name: '{{ test_ns }}'
- name: Get cluster information
kubernetes.core.k8s_cluster_info:
register: cluster_info
no_log: true
- set_fact:
cluster_host: "{{ cluster_info['connection']['host'] }}"
- name: Create Service account
kubernetes.core.k8s:
definition:
apiVersion: v1
kind: ServiceAccount
metadata:
name: '{{ sa_name }}'
namespace: '{{ test_ns }}'
- name: Read Service Account
kubernetes.core.k8s_info:
kind: ServiceAccount
namespace: '{{ test_ns }}'
name: '{{ sa_name }}'
register: sa_out
- set_fact:
secret_token: "{{ sa_out.resources[0]['secrets'][0]['name'] }}"
- name: Get secret details
kubernetes.core.k8s_info:
kind: Secret
namespace: '{{ test_ns }}'
name: '{{ secret_token }}'
register: r_secret
retries: 10
delay: 10
until:
- ("'openshift.io/token-secret.value' in r_secret.resources[0]['metadata']['annotations']") or ("'token' in r_secret.resources[0]['data']")
- set_fact:
api_token: "{{ r_secret.resources[0]['metadata']['annotations']['openshift.io/token-secret.value'] }}"
when: "'openshift.io/token-secret.value' in r_secret.resources[0]['metadata']['annotations']"
- set_fact:
api_token: "{{ r_secret.resources[0]['data']['token'] | b64decode }}"
when: "'token' in r_secret.resources[0]['data']"
- name: list resources using service account
kubernetes.core.k8s_info:
api_key: '{{ api_token }}'
host: '{{ cluster_host }}'
validate_certs: no
kind: Pod
namespace: '{{ test_ns }}'
register: error
ignore_errors: true
- assert:
that:
- '"pods is forbidden: User" in error.msg'
- name: Create roles to manage Pods in namespace "{{ test_ns }}"
kubernetes.core.k8s:
definition:
kind: Role
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
namespace: "{{ test_ns }}"
name: "{{ item.name }}"
labels: "{{ item.labels }}"
rules:
- apiGroups: [""]
resources: ["pods"]
verbs: "{{ item.verbs }}"
with_items: "{{ role_definition }}"
- name: Create Role Binding
kubernetes.core.k8s:
definition:
kind: RoleBinding
apiVersion: "{{ item.role_binding.api_version }}"
metadata:
name: "{{ item.name }}-bind"
namespace: "{{ test_ns }}"
subjects:
- kind: ServiceAccount
name: "{{ sa_name }}"
namespace: "{{ test_ns }}"
apiGroup: ""
roleRef:
kind: Role
name: "{{ item.name }}"
namespace: "{{ test_ns }}"
apiGroup: ""
with_items: "{{ role_definition }}"
- name: Create Pod should succeed
kubernetes.core.k8s:
api_key: "{{ api_token }}"
host: "{{ cluster_host }}"
validate_certs: no
namespace: "{{ test_ns }}"
definition:
kind: Pod
metadata:
name: "{{ pod_name }}"
spec:
containers:
- name: python
image: python:3.7-alpine
command:
- /bin/sh
- -c
- while true; do echo $(date); sleep 15; done
imagePullPolicy: IfNotPresent
register: result
- name: assert pod creation succeed
assert:
that:
- result is successful
- name: List Pod
kubernetes.core.k8s_info:
api_key: "{{ api_token }}"
host: "{{ cluster_host }}"
validate_certs: no
namespace: "{{ test_ns }}"
kind: Pod
register: result
- name: assert user is still authorized to list pods
assert:
that:
- result is successful
- name: Prune auth roles (check mode)
community.okd.openshift_adm_prune_auth:
resource: roles
namespace: "{{ test_ns }}"
register: check
check_mode: true
- name: validate that the role bindings are candidates for prune
assert:
that: '"{{ test_ns }}/{{ item.name }}-bind" in check.role_binding'
with_items: "{{ role_definition }}"
- name: Prune resource using label_selectors option
community.okd.openshift_adm_prune_auth:
resource: roles
namespace: "{{ test_ns }}"
label_selectors:
- action=delete
register: prune
- name: assert that role binding 'delete' was pruned
assert:
that:
- prune is changed
- '"{{ test_ns }}/{{ role_definition[2].name }}-bind" in check.role_binding'
  - name: Try to delete Pod (user should no longer be allowed)
kubernetes.core.k8s:
api_key: "{{ api_token }}"
host: "{{ cluster_host }}"
validate_certs: no
state: absent
namespace: "{{ test_ns }}"
kind: Pod
name: "{{ pod_name }}"
register: result
ignore_errors: true
  - name: assert pod deletion failed because the user is forbidden
    assert:
      that:
        - '"forbidden: User" in result.msg'
- name: List Pod
kubernetes.core.k8s_info:
api_key: "{{ api_token }}"
host: "{{ cluster_host }}"
validate_certs: no
namespace: "{{ test_ns }}"
kind: Pod
register: result
- name: assert user is still able to list pods
assert:
that:
- result is successful
- name: Create Pod should succeed
kubernetes.core.k8s:
api_key: "{{ api_token }}"
host: "{{ cluster_host }}"
validate_certs: no
namespace: "{{ test_ns }}"
definition:
kind: Pod
metadata:
name: "{{ pod_name }}-1"
spec:
containers:
- name: python
image: python:3.7-alpine
command:
- /bin/sh
- -c
- while true; do echo $(date); sleep 15; done
imagePullPolicy: IfNotPresent
register: result
  - name: assert user is still authorized to create pods
assert:
that:
- result is successful
- name: Prune role using name
community.okd.openshift_adm_prune_auth:
resource: roles
namespace: "{{ test_ns }}"
name: "{{ role_definition[1].name }}"
register: prune
- name: assert that role binding 'create' was pruned
assert:
that:
- prune is changed
- '"{{ test_ns }}/{{ role_definition[1].name }}-bind" in check.role_binding'
  - name: Create Pod (should fail)
kubernetes.core.k8s:
api_key: "{{ api_token }}"
host: "{{ cluster_host }}"
validate_certs: no
namespace: "{{ test_ns }}"
definition:
kind: Pod
metadata:
name: "{{ pod_name }}-2"
spec:
containers:
- name: python
image: python:3.7-alpine
command:
- /bin/sh
- -c
- while true; do echo $(date); sleep 15; done
imagePullPolicy: IfNotPresent
register: result
ignore_errors: true
  - name: assert user is no longer authorized to create pods
    assert:
      that:
        - '"forbidden: User" in result.msg'
- name: List Pod
kubernetes.core.k8s_info:
api_key: "{{ api_token }}"
host: "{{ cluster_host }}"
validate_certs: no
namespace: "{{ test_ns }}"
kind: Pod
register: result
- name: assert user is still able to list pods
assert:
that:
- result is successful
  - name: Prune all roles for the namespace (neither name nor label_selectors specified)
community.okd.openshift_adm_prune_auth:
resource: roles
namespace: "{{ test_ns }}"
register: prune
- name: assert that role binding 'list' was pruned
assert:
that:
- prune is changed
- '"{{ test_ns }}/{{ role_definition[0].name }}-bind" in check.role_binding'
- name: List Pod
kubernetes.core.k8s_info:
api_key: "{{ api_token }}"
host: "{{ cluster_host }}"
validate_certs: no
namespace: "{{ test_ns }}"
kind: Pod
register: result
ignore_errors: true
  - name: assert user is no longer authorized to list pods
    assert:
      that:
        - '"forbidden: User" in result.msg'
always:
- name: Ensure namespace is deleted
kubernetes.core.k8s:
state: absent
kind: Namespace
name: "{{ test_ns }}"
ignore_errors: true

View File

@@ -0,0 +1,269 @@
- name: Prune deployments
block:
- set_fact:
dc_name: "hello"
deployment_ns: "prune-deployments"
deployment_ns_2: "prune-deployments-2"
- name: Ensure namespace
community.okd.k8s:
kind: Namespace
name: '{{ deployment_ns }}'
- name: Create deployment config
community.okd.k8s:
namespace: '{{ deployment_ns }}'
definition:
kind: DeploymentConfig
apiVersion: apps.openshift.io/v1
metadata:
name: '{{ dc_name }}'
spec:
replicas: 1
selector:
name: '{{ dc_name }}'
template:
metadata:
labels:
name: '{{ dc_name }}'
spec:
containers:
- name: hello-openshift
imagePullPolicy: IfNotPresent
image: python:3.7-alpine
command: [ "/bin/sh", "-c", "while true;do date;sleep 2s; done"]
wait: yes
- name: prune deployments (no candidate DeploymentConfig)
community.okd.openshift_adm_prune_deployments:
namespace: "{{ deployment_ns }}"
register: test_prune
- assert:
that:
- test_prune is not changed
- test_prune.replication_controllers | length == 0
- name: Update DeploymentConfig - set replicas to 0
community.okd.k8s:
namespace: "{{ deployment_ns }}"
definition:
kind: DeploymentConfig
apiVersion: "apps.openshift.io/v1"
metadata:
name: "{{ dc_name }}"
spec:
replicas: 0
selector:
name: "{{ dc_name }}"
template:
metadata:
labels:
name: "{{ dc_name }}"
spec:
containers:
- name: hello-openshift
imagePullPolicy: IfNotPresent
image: python:3.7-alpine
command: [ "/bin/sh", "-c", "while true;do date;sleep 2s; done"]
wait: yes
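    # The DeploymentConfig controller records the rollout state on the
    # ReplicationController via the openshift.io/deployment.phase annotation;
    # only replication controllers in Failed or Complete phase are candidates for pruning.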
- name: Wait for ReplicationController candidate for pruning
kubernetes.core.k8s_info:
kind: ReplicationController
namespace: "{{ deployment_ns }}"
register: result
retries: 10
delay: 30
until:
- result.resources.0.metadata.annotations["openshift.io/deployment.phase"] in ("Failed", "Complete")
- name: Prune deployments - should delete 1 ReplicationController
community.okd.openshift_adm_prune_deployments:
namespace: "{{ deployment_ns }}"
check_mode: yes
register: test_prune
- name: Read ReplicationController
kubernetes.core.k8s_info:
kind: ReplicationController
namespace: "{{ deployment_ns }}"
register: replications
    - name: Assert that ReplicationController was not deleted
assert:
that:
- replications.resources | length == 1
- 'replications.resources.0.metadata.name is match("{{ dc_name }}-*")'
    - name: Assert that candidate ReplicationController was found for pruning
assert:
that:
- test_prune is changed
- test_prune.replication_controllers | length == 1
- test_prune.replication_controllers.0.metadata.name == replications.resources.0.metadata.name
- test_prune.replication_controllers.0.metadata.namespace == replications.resources.0.metadata.namespace
- name: Prune deployments - keep younger than 45min (check_mode)
community.okd.openshift_adm_prune_deployments:
keep_younger_than: 45
namespace: "{{ deployment_ns }}"
check_mode: true
register: keep_younger
- name: assert no candidate was found
assert:
that:
- keep_younger is not changed
- keep_younger.replication_controllers == []
- name: Ensure second namespace is created
community.okd.k8s:
kind: Namespace
name: '{{ deployment_ns_2 }}'
- name: Create deployment config from 2nd namespace
community.okd.k8s:
namespace: '{{ deployment_ns_2 }}'
definition:
kind: DeploymentConfig
apiVersion: apps.openshift.io/v1
metadata:
name: '{{ dc_name }}2'
spec:
replicas: 1
selector:
name: '{{ dc_name }}2'
template:
metadata:
labels:
name: '{{ dc_name }}2'
spec:
containers:
- name: hello-openshift
imagePullPolicy: IfNotPresent
image: python:3.7-alpine
command: [ "/bin/sh", "-c", "while true;do date;sleep 2s; done"]
wait: yes
- name: Stop deployment config - replicas = 0
community.okd.k8s:
namespace: '{{ deployment_ns_2 }}'
definition:
kind: DeploymentConfig
apiVersion: apps.openshift.io/v1
metadata:
name: '{{ dc_name }}2'
spec:
replicas: 0
selector:
name: '{{ dc_name }}2'
template:
metadata:
labels:
name: '{{ dc_name }}2'
spec:
containers:
- name: hello-openshift
imagePullPolicy: IfNotPresent
image: python:3.7-alpine
command: [ "/bin/sh", "-c", "while true;do date;sleep 2s; done"]
wait: yes
- name: Wait for ReplicationController candidate for pruning
kubernetes.core.k8s_info:
kind: ReplicationController
namespace: "{{ deployment_ns_2 }}"
register: result
retries: 10
delay: 30
until:
- result.resources.0.metadata.annotations["openshift.io/deployment.phase"] in ("Failed", "Complete")
    # Pruning one namespace should not have any effect on other namespaces
- name: Prune deployments from 2nd namespace
community.okd.openshift_adm_prune_deployments:
namespace: "{{ deployment_ns_2 }}"
check_mode: yes
register: test_prune
    - name: Assert that candidate ReplicationController was found for pruning
assert:
that:
- test_prune is changed
- test_prune.replication_controllers | length == 1
- "test_prune.replication_controllers.0.metadata.namespace == deployment_ns_2"
# Prune without namespace option
    - name: Prune from all namespaces should find more candidates (check_mode)
community.okd.openshift_adm_prune_deployments:
check_mode: yes
register: no_namespace_prune
    - name: Assert multiple ReplicationControllers were found for pruning
assert:
that:
- no_namespace_prune is changed
- no_namespace_prune.replication_controllers | length == 2
# Execute Prune from 2nd namespace
- name: Read ReplicationController before Prune operation
kubernetes.core.k8s_info:
kind: ReplicationController
namespace: "{{ deployment_ns_2 }}"
register: replications
- assert:
that:
- replications.resources | length == 1
- name: Prune DeploymentConfig from 2nd namespace
community.okd.openshift_adm_prune_deployments:
namespace: "{{ deployment_ns_2 }}"
register: _prune
    - name: Assert ReplicationController was pruned
assert:
that:
- _prune is changed
- _prune.replication_controllers | length == 1
- _prune.replication_controllers.0.details.name == replications.resources.0.metadata.name
# Execute Prune without namespace option
- name: Read ReplicationController before Prune operation
kubernetes.core.k8s_info:
kind: ReplicationController
namespace: "{{ deployment_ns }}"
register: replications
- assert:
that:
- replications.resources | length == 1
    - name: Prune from all namespaces should prune the remaining deployments
community.okd.openshift_adm_prune_deployments:
register: _prune
    - name: Assert remaining ReplicationControllers were pruned
assert:
that:
- _prune is changed
- _prune.replication_controllers | length > 0
always:
- name: Delete 1st namespace
community.okd.k8s:
state: absent
kind: Namespace
name: "{{ deployment_ns }}"
ignore_errors: yes
when: deployment_ns is defined
- name: Delete 2nd namespace
community.okd.k8s:
state: absent
kind: Namespace
name: "{{ deployment_ns_2 }}"
ignore_errors: yes
when: deployment_ns_2 is defined

View File

@@ -0,0 +1,56 @@
---
- block:
- name: Retrieve cluster info
kubernetes.core.k8s_cluster_info:
register: k8s_cluster
- name: set openshift host value
set_fact:
openshift_host: "{{ k8s_cluster.connection.host }}"
- name: Log in (obtain access token)
community.okd.openshift_auth:
username: test
password: testing123
host: '{{ openshift_host }}'
verify_ssl: false
register: openshift_auth_results
- name: Get the test User
kubernetes.core.k8s_info:
api_key: "{{ openshift_auth_results.openshift_auth.api_key }}"
host: '{{ openshift_host }}'
verify_ssl: false
kind: User
api_version: user.openshift.io/v1
name: test
register: user_result
- name: assert that the user was found
assert:
that: (user_result.resources | length) == 1
always:
- name: If login succeeded, try to log out (revoke access token)
when: openshift_auth_results.openshift_auth.api_key is defined
community.okd.openshift_auth:
state: absent
api_key: "{{ openshift_auth_results.openshift_auth.api_key }}"
host: '{{ openshift_host }}'
verify_ssl: false
- name: Get the test user
kubernetes.core.k8s_info:
api_key: "{{ openshift_auth_results.openshift_auth.api_key }}"
host: '{{ openshift_host }}'
verify_ssl: false
kind: User
name: test
api_version: user.openshift.io/v1
register: failed_user_result
ignore_errors: yes
# TODO(fabianvf) determine why token is not being rejected, maybe add more info to return
# - name: assert that the user was not found
# assert:
# that: (failed_user_result.resources | length) == 0

View File

@@ -0,0 +1,179 @@
- name: OpenShift import image testing
block:
- set_fact:
test_ns: "import-images"
- name: Ensure namespace
community.okd.k8s:
kind: Namespace
name: '{{ test_ns }}'
- name: Import image using tag (should import latest tag only)
community.okd.openshift_import_image:
namespace: "{{ test_ns }}"
name: "ansible/awx"
check_mode: yes
register: import_tag
- name: Assert only latest was imported
assert:
that:
- import_tag is changed
- import_tag.result | length == 1
- import_tag.result.0.spec.import
- import_tag.result.0.spec.images.0.from.kind == "DockerImage"
- import_tag.result.0.spec.images.0.from.name == "ansible/awx"
- name: check image stream
kubernetes.core.k8s_info:
kind: ImageStream
namespace: "{{ test_ns }}"
name: awx
register: resource
- name: assert that image stream is not created when using check_mode=yes
assert:
that:
- resource.resources == []
- name: Import image using tag (should import latest tag only)
community.okd.openshift_import_image:
namespace: "{{ test_ns }}"
name: "ansible/awx"
register: import_tag
- name: Assert only latest was imported
assert:
that:
- import_tag is changed
- name: check image stream
kubernetes.core.k8s_info:
kind: ImageStream
namespace: "{{ test_ns }}"
name: awx
register: resource
- name: assert that image stream contains only tag latest
assert:
that:
- resource.resources | length == 1
- resource.resources.0.status.tags.0.tag == 'latest'
    - name: Import the latest tag once again
community.okd.openshift_import_image:
namespace: "{{ test_ns }}"
name: "ansible/awx"
register: import_tag
- name: assert change was performed
assert:
that:
- import_tag is changed
- name: check image stream
kubernetes.core.k8s_info:
kind: ImageStream
version: image.openshift.io/v1
namespace: "{{ test_ns }}"
name: awx
register: resource
    - name: assert that image stream still contains a single tag
assert:
that:
- resource.resources | length == 1
- resource.resources.0.status.tags.0.tag == 'latest'
    - name: Import another tag
community.okd.openshift_import_image:
namespace: "{{ test_ns }}"
name: "ansible/awx:17.1.0"
register: import_another_tag
ignore_errors: yes
    - name: assert that importing another tag failed
assert:
that:
- import_another_tag is failed
- '"the tag 17.1.0 does not exist on the image stream" in import_another_tag.msg'
    - name: Create simple ImageStream (not pointing to an external container image)
community.okd.k8s:
namespace: "{{ test_ns }}"
name: "local-is"
definition:
apiVersion: image.openshift.io/v1
kind: ImageStream
spec:
lookupPolicy:
local: false
tags: []
    - name: Import all tags for an ImageStream not pointing to external container images (should fail)
community.okd.openshift_import_image:
namespace: "{{ test_ns }}"
name: "local-is"
all: true
register: error_tag
ignore_errors: true
check_mode: yes
    - name: Assert module cannot import an ImageStream with no tags pointing to external container images
assert:
that:
- error_tag is failed
- 'error_tag.msg == "image stream {{ test_ns }}/local-is does not have tags pointing to external container images"'
    - name: Import all tags for container image ibmcom/pause and a specific tag for redhat/ubi8-micro
community.okd.openshift_import_image:
namespace: "{{ test_ns }}"
name:
- "ibmcom/pause"
- "redhat/ubi8-micro:8.5-437"
all: true
register: multiple_import
    - name: Assert that import succeeded
assert:
that:
- multiple_import is changed
- multiple_import.result | length == 2
- name: Read ibmcom/pause ImageStream
kubernetes.core.k8s_info:
version: image.openshift.io/v1
kind: ImageStream
namespace: "{{ test_ns }}"
name: pause
register: pause
- name: assert that ibmcom/pause has multiple tags
assert:
that:
- pause.resources | length == 1
- pause.resources.0.status.tags | length > 1
- name: Read redhat/ubi8-micro ImageStream
kubernetes.core.k8s_info:
version: image.openshift.io/v1
kind: ImageStream
namespace: "{{ test_ns }}"
name: ubi8-micro
register: resource
- name: assert that redhat/ubi8-micro has only one tag
assert:
that:
- resource.resources | length == 1
- resource.resources.0.status.tags | length == 1
- 'resource.resources.0.status.tags.0.tag == "8.5-437"'
always:
- name: Delete testing namespace
community.okd.k8s:
state: absent
kind: Namespace
name: "{{ test_ns }}"
ignore_errors: yes

View File

@@ -0,0 +1,183 @@
---
- name: Process a template in the cluster
community.okd.openshift_process:
name: nginx-example
namespace: openshift # only needed if using a template already on the server
parameters:
NAMESPACE: openshift
NAME: test123
register: result
- name: Create the rendered resources
community.okd.k8s:
namespace: process-test
definition: '{{ item }}'
wait: yes
apply: yes
loop: '{{ result.resources }}'
- name: Delete the rendered resources
community.okd.k8s:
namespace: process-test
definition: '{{ item }}'
wait: yes
state: absent
loop: '{{ result.resources }}'
- name: Process a template and create the resources in the cluster
community.okd.openshift_process:
name: nginx-example
namespace: openshift # only needed if using a template already on the server
parameters:
NAMESPACE: openshift
NAME: test123
state: present
namespace_target: process-test
register: result
- name: Process a template and update the resources in the cluster
community.okd.openshift_process:
name: nginx-example
namespace: openshift # only needed if using a template already on the server
parameters:
NAMESPACE: openshift
NAME: test123
MEMORY_LIMIT: 1Gi
state: present
namespace_target: process-test
register: result
- name: Process a template and delete the resources in the cluster
community.okd.openshift_process:
name: nginx-example
namespace: openshift # only needed if using a template already on the server
parameters:
NAMESPACE: openshift
NAME: test123
state: absent
namespace_target: process-test
register: result
- name: Process a template with parameters from an env file and create the resources
community.okd.openshift_process:
name: nginx-example
namespace: openshift
namespace_target: process-test
parameter_file: '{{ files_dir }}/nginx.env'
state: present
wait: yes
- name: Process a template with parameters from an env file and delete the resources
community.okd.openshift_process:
name: nginx-example
namespace: openshift
namespace_target: process-test
parameter_file: '{{ files_dir }}/nginx.env'
state: absent
wait: yes
- name: Process a template with duplicate values
community.okd.openshift_process:
name: nginx-example
namespace: openshift # only needed if using a template already on the server
parameters:
NAME: test123
parameter_file: '{{ files_dir }}/nginx.env'
ignore_errors: yes
register: result
- name: Assert the expected failure occurred
assert:
that:
- result.msg is defined
- result.msg == "Duplicate value for 'NAME' detected in parameter file"
- name: Process a local template
community.okd.openshift_process:
src: '{{ files_dir }}/simple-template.yaml'
parameter_file: '{{ files_dir }}/example.env'
register: rendered
- name: Process a local template and create the resources
community.okd.openshift_process:
src: '{{ files_dir }}/simple-template.yaml'
parameter_file: '{{ files_dir }}/example.env'
namespace_target: process-test
state: present
register: result
- assert:
that: result is changed
- name: Create the processed resources
community.okd.k8s:
namespace: process-test
definition: '{{ item }}'
loop: '{{ rendered.resources }}'
register: result
- assert:
that: result is not changed
- name: Process a local template and create the resources
community.okd.openshift_process:
definition: "{{ lookup('template', files_dir + '/simple-template.yaml') | from_yaml }}"
parameter_file: '{{ files_dir }}/example.env'
namespace_target: process-test
state: present
register: result
- assert:
that: result is not changed
- name: Get the created configmap
kubernetes.core.k8s_info:
api_version: v1
kind: ConfigMap
name: example
namespace: process-test
register: templated_cm
- assert:
that:
- (templated_cm.resources | length) == 1
- templated_cm.resources.0.data.content is defined
- templated_cm.resources.0.data.content == "This is a long message that may take one or more lines to parse but should still work without issue"
- name: Create the Template resource
community.okd.k8s:
src: '{{ files_dir }}/simple-template.yaml'
namespace: process-test
- name: Process the template and create the resources
community.okd.openshift_process:
name: simple-example
namespace: process-test # only needed if using a template already on the server
namespace_target: process-test
parameter_file: '{{ files_dir }}/example.env'
state: present
register: result
- assert:
that: result is not changed
# Processing template without message
- name: Create template from file {{ files_dir }}/pod-template.yaml
kubernetes.core.k8s:
namespace: process-test
src: "{{ files_dir }}/pod-template.yaml"
state: present
- name: Process pod template
community.okd.openshift_process:
name: pod-template
namespace: process-test
state: rendered
parameters:
NAME: ansible
register: rendered_template
- assert:
that: rendered_template.message == ""

View File

@@ -0,0 +1,230 @@
---
- name: Read registry information
community.okd.openshift_registry_info:
check: yes
register: registry
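# check: yes asks the module to verify that the exposed registry is reachable;
# registry.check.reached and registry.public_hostname are reused in the conditions below.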
- name: Display registry information
debug: var=registry
- block:
- set_fact:
prune_ns: "prune-images"
prune_registry: "{{ registry.public_hostname }}"
container:
name: "httpd"
from: "centos/python-38-centos7:20210629-304c7c8"
pod_name: "test-pod"
- name: Ensure namespace is created
community.okd.k8s:
kind: Namespace
name: "{{ prune_ns }}"
- name: Import image into internal registry
community.okd.openshift_import_image:
namespace: "{{ prune_ns }}"
name: "{{ container.name }}"
source: "{{ container.from }}"
- name: Create simple Pod
community.okd.k8s:
namespace: "{{ prune_ns }}"
wait: yes
definition:
apiVersion: v1
kind: Pod
metadata:
name: "{{ pod_name }}"
spec:
containers:
- name: test-container
image: "{{ prune_registry }}/{{ prune_ns }}/{{ container.name }}:latest"
command:
- /bin/sh
- -c
- while true;do date;sleep 5; done
  - name: Create limit range for image size
community.okd.k8s:
namespace: "{{ prune_ns }}"
definition:
kind: "LimitRange"
metadata:
name: "image-resource-limits"
spec:
limits:
- type: openshift.io/Image
max:
storage: 1Gi
- name: Prune images from namespace
community.okd.openshift_adm_prune_images:
registry_url: "{{ prune_registry }}"
namespace: "{{ prune_ns }}"
check_mode: yes
register: prune
  - name: Assert that there is nothing to prune as the image is in use
assert:
that:
- prune is not changed
- prune is successful
- prune.deleted_images == []
- prune.updated_image_streams == []
- name: Delete Pod created before
community.okd.k8s:
state: absent
name: "{{ pod_name }}"
kind: Pod
namespace: "{{ prune_ns }}"
wait: yes
- name: Prune images from namespace
community.okd.openshift_adm_prune_images:
registry_url: "{{ prune_registry }}"
namespace: "{{ prune_ns }}"
check_mode: yes
register: prune
- name: Read ImageStream
kubernetes.core.k8s_info:
version: image.openshift.io/v1
kind: ImageStream
namespace: "{{ prune_ns }}"
name: "{{ container.name }}"
register: isinfo
- set_fact:
is_image_name: "{{ isinfo.resources.0.status.tags[0]['items'].0.image }}"
  - name: Assert that corresponding Image and ImageStream were candidates for pruning
assert:
that:
- prune is changed
- prune.deleted_images | length == 1
- prune.deleted_images.0.metadata.name == is_image_name
- prune.updated_image_streams | length == 1
- prune.updated_image_streams.0.metadata.name == container.name
- prune.updated_image_streams.0.metadata.namespace == prune_ns
- prune.updated_image_streams.0.status.tags == []
  - name: Prune images from namespace, keeping images and referrers younger than 60 minutes
community.okd.openshift_adm_prune_images:
registry_url: "{{ prune_registry }}"
namespace: "{{ prune_ns }}"
keep_younger_than: 60
check_mode: yes
register: younger
- assert:
that:
- younger is not changed
- younger is successful
- younger.deleted_images == []
- younger.updated_image_streams == []
- name: Prune images over size limit
community.okd.openshift_adm_prune_images:
registry_url: "{{ prune_registry }}"
namespace: "{{ prune_ns }}"
prune_over_size_limit: yes
check_mode: yes
register: prune_over_size
- assert:
that:
- prune_over_size is not changed
- prune_over_size is successful
- prune_over_size.deleted_images == []
- prune_over_size.updated_image_streams == []
- name: Update limit range for images size
community.okd.k8s:
namespace: "{{ prune_ns }}"
definition:
kind: "LimitRange"
metadata:
name: "image-resource-limits"
spec:
limits:
- type: openshift.io/Image
max:
storage: 1Ki
- name: Prune images over size limit (check_mode=yes)
community.okd.openshift_adm_prune_images:
registry_url: "{{ prune_registry }}"
namespace: "{{ prune_ns }}"
prune_over_size_limit: yes
check_mode: yes
register: prune
  - name: Assert Images and ImageStream were candidates for pruning
assert:
that:
- prune is changed
- prune.deleted_images | length == 1
- prune.deleted_images.0.metadata.name == is_image_name
- prune.updated_image_streams | length == 1
- prune.updated_image_streams.0.metadata.name == container.name
- prune.updated_image_streams.0.metadata.namespace == prune_ns
- prune.updated_image_streams.0.status.tags == []
- name: Prune images over size limit
community.okd.openshift_adm_prune_images:
registry_url: "{{ prune_registry }}"
namespace: "{{ prune_ns }}"
prune_over_size_limit: yes
register: prune
  - name: Assert that Images and ImageStream were candidates for pruning
assert:
that:
- prune is changed
- prune.deleted_images | length == 1
- prune.deleted_images.0.details.name == is_image_name
- prune.updated_image_streams | length == 1
- prune.updated_image_streams.0.metadata.name == container.name
- prune.updated_image_streams.0.metadata.namespace == prune_ns
- '"tags" not in prune.updated_image_streams.0.status'
- name: Validate that ImageStream was updated
kubernetes.core.k8s_info:
version: image.openshift.io/v1
kind: ImageStream
namespace: "{{ prune_ns }}"
name: "{{ container.name }}"
register: stream
- name: Assert that ImageStream was updated
assert:
that:
- stream.resources | length == 1
- '"tags" not in stream.resources.0.status'
- name: Validate that Image was deleted
kubernetes.core.k8s_info:
version: image.openshift.io/v1
kind: Image
name: "{{ is_image_name }}"
register: image
- name: Assert that image was deleted
assert:
that:
- image.resources | length == 0
always:
- name: Delete namespace
community.okd.k8s:
name: "{{ prune_ns }}"
kind: Namespace
state: absent
wait: yes
ignore_errors: true
when:
- registry.public_hostname
- registry.check.reached

View File

@@ -0,0 +1,275 @@
---
- name: Create Deployment
community.okd.k8s:
wait: yes
definition:
apiVersion: apps/v1
kind: Deployment
metadata:
name: hello-kubernetes
namespace: default
spec:
replicas: 3
selector:
matchLabels:
app: hello-kubernetes
template:
metadata:
labels:
app: hello-kubernetes
spec:
containers:
- name: hello-kubernetes
image: docker.io/openshift/hello-openshift
ports:
- containerPort: 8080
- name: Create Service
community.okd.k8s:
wait: yes
definition:
apiVersion: v1
kind: Service
metadata:
name: hello-kubernetes
namespace: default
spec:
ports:
- port: 80
targetPort: 8080
selector:
app: hello-kubernetes
- name: Create Route with fewest possible arguments
community.okd.openshift_route:
service: hello-kubernetes
namespace: default
register: route
- name: Attempt to hit http URL
uri:
url: 'http://{{ route.result.spec.host }}'
return_content: yes
until: result is successful
retries: 20
register: result
- name: Assert the page content is as expected
assert:
that:
- not result.redirected
- result.status == 200
- result.content == 'Hello OpenShift!\n'
- name: Delete route
community.okd.openshift_route:
name: '{{ route.result.metadata.name }}'
namespace: default
state: absent
wait: yes
- name: Create Route with custom name and wait
community.okd.openshift_route:
service: hello-kubernetes
namespace: default
name: test1
wait: yes
register: route
- name: Assert that the condition is properly set
assert:
that:
- route.duration is defined
- route.result.status.ingress.0.conditions.0.type == 'Admitted'
- route.result.status.ingress.0.conditions.0.status == 'True'
- name: Attempt to hit http URL
uri:
url: 'http://{{ route.result.spec.host }}'
return_content: yes
until: result is successful
retries: 20
register: result
- name: Assert the page content is as expected
assert:
that:
- not result.redirected
- result.status == 200
- result.content == 'Hello OpenShift!\n'
- name: Delete route
community.okd.openshift_route:
name: '{{ route.result.metadata.name }}'
namespace: default
state: absent
wait: yes
- name: Create edge-terminated route that allows insecure traffic
community.okd.openshift_route:
service: hello-kubernetes
namespace: default
name: hello-kubernetes-https
tls:
insecure_policy: allow
termination: edge
register: route
- name: Attempt to hit http URL
uri:
url: 'http://{{ route.result.spec.host }}'
return_content: yes
until: result is successful
retries: 20
register: result
- name: Assert the page content is as expected
assert:
that:
- not result.redirected
- result.status == 200
- result.content == 'Hello OpenShift!\n'
- name: Attempt to hit https URL
uri:
url: 'https://{{ route.result.spec.host }}'
validate_certs: no
return_content: yes
until: result is successful
retries: 10
register: result
- name: Assert the page content is as expected
assert:
that:
- not result.redirected
- result.status == 200
- result.content == 'Hello OpenShift!\n'
- name: Alter edge-terminated route to redirect insecure traffic
community.okd.openshift_route:
service: hello-kubernetes
namespace: default
name: hello-kubernetes-https
tls:
insecure_policy: redirect
termination: edge
register: route
- name: Attempt to hit http URL
uri:
url: 'http://{{ route.result.spec.host }}'
return_content: yes
validate_certs: no
until:
- result is successful
- result.redirected
retries: 10
register: result
- name: Assert the page content is as expected
assert:
that:
- result.redirected
- result.status == 200
- result.content == 'Hello OpenShift!\n'
- name: Attempt to hit https URL
uri:
url: 'https://{{ route.result.spec.host }}'
validate_certs: no
return_content: yes
until: result is successful
retries: 20
register: result
- name: Assert the page content is as expected
assert:
that:
- not result.redirected
- result.status == 200
- result.content == 'Hello OpenShift!\n'
- name: Alter edge-terminated route with insecure traffic disabled
community.okd.openshift_route:
service: hello-kubernetes
namespace: default
name: hello-kubernetes-https
tls:
insecure_policy: disallow
termination: edge
register: route
- debug: var=route
- name: Attempt to hit https URL
uri:
url: 'https://{{ route.result.spec.host }}'
validate_certs: no
return_content: yes
until: result is successful
retries: 20
register: result
- name: Assert the page content is as expected
assert:
that:
- not result.redirected
- result.status == 200
- result.content == 'Hello OpenShift!\n'
- name: Attempt to hit http URL
uri:
url: 'http://{{ route.result.spec.host }}'
status_code: 503
until: result is successful
retries: 20
register: result
- debug: var=result
- name: Assert the page content is as expected
assert:
that:
- not result.redirected
- result.status == 503
- name: Delete route
community.okd.openshift_route:
name: '{{ route.result.metadata.name }}'
namespace: default
state: absent
wait: yes
# Route with labels and annotations
- name: Create route with labels and annotations
community.okd.openshift_route:
service: hello-kubernetes
namespace: default
name: route-label-annotation
labels:
ansible: test
annotations:
haproxy.router.openshift.io/balance: roundrobin
- name: Get route information
kubernetes.core.k8s_info:
api_version: route.openshift.io/v1
kind: Route
name: route-label-annotation
namespace: default
register: route
- assert:
that:
- route.resources[0].metadata.annotations is defined
- '"haproxy.router.openshift.io/balance" in route.resources[0].metadata.annotations'
- route.resources[0].metadata.labels is defined
- '"ansible" in route.resources[0].metadata.labels'
- name: Delete route
community.okd.openshift_route:
name: route-label-annotation
namespace: default
state: absent
wait: yes

View File

@@ -0,0 +1,123 @@
---
- block:
- name: Create a project
community.okd.k8s:
name: "{{ playbook_namespace }}"
kind: Project
api_version: project.openshift.io/v1
- name: incredibly simple ConfigMap
community.okd.k8s:
definition:
apiVersion: v1
kind: ConfigMap
metadata:
name: hello
namespace: "{{ playbook_namespace }}"
validate:
fail_on_error: yes
register: k8s_with_validate
- name: assert that k8s_with_validate succeeds
assert:
that:
- k8s_with_validate is successful
- name: extra property does not fail without strict
community.okd.k8s:
src: "files/kuard-extra-property.yml"
namespace: "{{ playbook_namespace }}"
validate:
fail_on_error: yes
strict: no
- name: extra property fails with strict
community.okd.k8s:
src: "files/kuard-extra-property.yml"
namespace: "{{ playbook_namespace }}"
validate:
fail_on_error: yes
strict: yes
ignore_errors: yes
register: extra_property
- name: check that extra property fails with strict
assert:
that:
- extra_property is failed
- name: invalid type fails at validation stage
community.okd.k8s:
src: "files/kuard-invalid-type.yml"
namespace: "{{ playbook_namespace }}"
validate:
fail_on_error: yes
strict: no
ignore_errors: yes
register: invalid_type
- name: check that invalid type fails
assert:
that:
- invalid_type is failed
- name: invalid type fails with warnings when fail_on_error is False
community.okd.k8s:
src: "files/kuard-invalid-type.yml"
namespace: "{{ playbook_namespace }}"
validate:
fail_on_error: no
strict: no
ignore_errors: yes
register: invalid_type_no_fail
- name: check that invalid type fails
assert:
that:
- invalid_type_no_fail is failed
- name: setup custom resource definition
community.okd.k8s:
src: "files/setup-crd.yml"
- name: wait a few seconds
pause:
seconds: 5
- name: add custom resource definition
community.okd.k8s:
src: "files/crd-resource.yml"
namespace: "{{ playbook_namespace }}"
validate:
fail_on_error: yes
strict: yes
register: unknown_kind
- name: check that unknown kind warns
assert:
that:
- unknown_kind is successful
- "'warnings' in unknown_kind"
always:
- name: remove custom resource
community.okd.k8s:
definition: "{{ lookup('file', 'files/crd-resource.yml') }}"
namespace: "{{ playbook_namespace }}"
state: absent
ignore_errors: yes
- name: remove custom resource definitions
community.okd.k8s:
definition: "{{ lookup('file', 'files/setup-crd.yml') }}"
state: absent
- name: Delete namespace
community.okd.k8s:
state: absent
definition:
- kind: Project
apiVersion: project.openshift.io/v1
metadata:
name: "{{ playbook_namespace }}"
ignore_errors: yes

View File

@@ -0,0 +1,25 @@
---
# TODO: Not available in ansible-base
# - python_requirements_info:
# dependencies:
# - openshift
# - kubernetes
# - kubernetes-validate
- community.okd.k8s:
definition:
apiVersion: v1
kind: ConfigMap
metadata:
name: hello
namespace: default
validate:
fail_on_error: yes
ignore_errors: yes
register: k8s_no_validate
- name: assert that k8s_no_validate fails gracefully
assert:
that:
- k8s_no_validate is failed
- "k8s_no_validate.msg == 'kubernetes-validate python library is required to validate resources'"

View File

@@ -0,0 +1,94 @@
---
k8s_pod_annotations: {}
k8s_pod_metadata:
labels:
app: '{{ k8s_pod_name }}'
annotations: '{{ k8s_pod_annotations }}'
k8s_pod_spec:
serviceAccount: "{{ k8s_pod_service_account }}"
containers:
- image: "{{ k8s_pod_image }}"
imagePullPolicy: Always
name: "{{ k8s_pod_name }}"
command: "{{ k8s_pod_command }}"
readinessProbe:
initialDelaySeconds: 15
exec:
command:
- /bin/true
resources: "{{ k8s_pod_resources }}"
ports: "{{ k8s_pod_ports }}"
env: "{{ k8s_pod_env }}"
k8s_pod_service_account: default
k8s_pod_resources:
limits:
cpu: "100m"
memory: "100Mi"
k8s_pod_command: []
k8s_pod_ports: []
k8s_pod_env: []
k8s_pod_template:
metadata: "{{ k8s_pod_metadata }}"
spec: "{{ k8s_pod_spec }}"
k8s_deployment_spec:
template: '{{ k8s_pod_template }}'
selector:
matchLabels:
app: '{{ k8s_pod_name }}'
replicas: 1
k8s_deployment_template:
apiVersion: apps/v1
kind: Deployment
spec: '{{ k8s_deployment_spec }}'
okd_dc_triggers:
- type: ConfigChange
- type: ImageChange
imageChangeParams:
automatic: true
containerNames:
- '{{ k8s_pod_name }}'
from:
kind: ImageStreamTag
name: '{{ image_name }}:{{ image_tag }}'
okd_dc_spec:
template: '{{ k8s_pod_template }}'
triggers: '{{ okd_dc_triggers }}'
replicas: 1
strategy:
type: Recreate
okd_dc_template:
apiVersion: v1
kind: DeploymentConfig
spec: '{{ okd_dc_spec }}'
okd_imagestream_template:
apiVersion: image.openshift.io/v1
kind: ImageStream
metadata:
name: '{{ image_name }}'
spec:
lookupPolicy:
local: true
tags:
- annotations: null
from:
kind: DockerImage
name: '{{ image }}'
name: '{{ image_tag }}'
referencePolicy:
type: Source
image_tag: latest

View File

@@ -0,0 +1,88 @@
---
- name: Verify inventory and connection plugins
# This group is created by the openshift_inventory plugin
# It is automatically configured to use the `oc` connection plugin
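  # For reference, a minimal sketch (assumed file name and namespace, not part
  # of this playbook) of an inventory configuration that would produce such a group:
  #
  #   # inventory/openshift.yml
  #   plugin: community.okd.openshift
  #   connections:
  #     - namespaces:
  #         - testing
  #
  # Pods found in the listed namespaces are grouped as namespace_<name>_pods
  # and connect through the community.okd.oc connection plugin.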
hosts: namespace_testing_pods
gather_facts: no
vars:
file_content: |
Hello world
tasks:
- name: End play if host not running (TODO should we not add these to the inventory?)
meta: end_host
when: pod_phase != "Running"
- setup:
- debug: var=ansible_facts
- name: Assert the TEST environment variable was retrieved
assert:
that: ansible_facts.env.TEST == 'test'
- name: Copy a file into the host
copy:
content: '{{ file_content }}'
dest: /tmp/test_file
- name: Retrieve the file from the host
slurp:
src: /tmp/test_file
register: slurped_file
- name: Assert the file content matches expectations
assert:
that: (slurped_file.content|b64decode) == file_content
- name: Verify
hosts: localhost
connection: local
gather_facts: no
vars:
ansible_python_interpreter: '{{ virtualenv_interpreter }}'
tasks:
- pip:
name: kubernetes-validate==1.12.0
virtualenv: "{{ virtualenv }}"
virtualenv_command: "{{ virtualenv_command }}"
virtualenv_site_packages: no
- import_tasks: tasks/validate_installed.yml
- pip:
name: kubernetes-validate
state: absent
virtualenv: "{{ virtualenv }}"
virtualenv_command: "{{ virtualenv_command }}"
virtualenv_site_packages: no
- import_tasks: tasks/validate_not_installed.yml
- import_tasks: tasks/openshift_auth.yml
- import_tasks: tasks/openshift_adm_prune_auth_clusterroles.yml
- import_tasks: tasks/openshift_adm_prune_auth_roles.yml
- import_tasks: tasks/openshift_adm_prune_deployments.yml
- import_tasks: tasks/openshift_route.yml
- import_tasks: tasks/openshift_import_images.yml
- import_tasks: tasks/openshift_prune_images.yml
- block:
- name: Create namespace
community.okd.k8s:
api_version: v1
kind: Namespace
name: process-test
- import_tasks: tasks/openshift_process.yml
vars:
files_dir: '{{ playbook_dir }}/files'
always:
- name: Delete namespace
community.okd.k8s:
api_version: v1
kind: Namespace
name: process-test
state: absent
roles:
- role: openshift_adm_groups