Init: mediaserver

This commit is contained in:
2023-02-08 12:13:28 +01:00
parent 848bc9739c
commit f7c23d4ba9
31914 changed files with 6175775 additions and 0 deletions

View File

@@ -0,0 +1,422 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = r'''
---
module: postgresql_copy
short_description: Copy data between a file/program and a PostgreSQL table
description:
- Copy data between a file/program and a PostgreSQL table.
options:
copy_to:
description:
- Copy the contents of a table to a file.
- Can also copy the results of a SELECT query.
- Mutually exclusive with I(copy_from) and I(dst).
type: path
aliases: [ to ]
copy_from:
description:
- Copy data from a file to a table (appending the data to whatever is in the table already).
- Mutually exclusive with I(copy_to) and I(src).
type: path
aliases: [ from ]
src:
description:
- Copy data from I(copy_from) to I(src=tablename).
- Used with I(copy_to) only.
type: str
aliases: [ source ]
dst:
description:
- Copy data to I(dst=tablename) from I(copy_from=/path/to/data.file).
- Used with I(copy_from) only.
type: str
aliases: [ destination ]
columns:
description:
- List of column names for the src/dst table to COPY FROM/TO.
type: list
elements: str
aliases: [ column ]
program:
description:
- Mark I(src)/I(dst) as a program. Data will be copied to/from a program.
- See block Examples and PROGRAM arg description U(https://www.postgresql.org/docs/current/sql-copy.html).
type: bool
default: false
options:
description:
- Options of COPY command.
- See the full list of available options U(https://www.postgresql.org/docs/current/sql-copy.html).
type: dict
db:
description:
- Name of database to connect to.
type: str
aliases: [ login_db ]
session_role:
description:
- Switch to session_role after connecting.
The specified session_role must be a role that the current login_user is a member of.
- Permissions checking for SQL commands is carried out as though
the session_role were the one that had logged in originally.
type: str
trust_input:
description:
- If C(false), check whether values of parameters are potentially dangerous.
- It makes sense to use C(false) only when SQL injections are possible.
type: bool
default: true
version_added: '0.2.0'
notes:
- Supports PostgreSQL version 9.4+.
- COPY command is only allowed to database superusers.
- If I(check_mode=true), we just check the src/dst table availability
and return the COPY query that actually has not been executed.
- If I(check_mode=true) and the source has been passed as SQL, the module
  will execute it and roll the transaction back, but pay attention:
  it can affect database performance (e.g., if SQL collects a lot of data).
seealso:
- name: COPY command reference
description: Complete reference of the COPY command documentation.
link: https://www.postgresql.org/docs/current/sql-copy.html
author:
- Andrew Klychkov (@Andersson007)
extends_documentation_fragment:
- community.postgresql.postgres
'''
EXAMPLES = r'''
- name: Copy text TAB-separated data from file /tmp/data.txt to acme table
community.postgresql.postgresql_copy:
copy_from: /tmp/data.txt
dst: acme
- name: Copy CSV (comma-separated) data from file /tmp/data.csv to columns id, name of table acme
community.postgresql.postgresql_copy:
copy_from: /tmp/data.csv
dst: acme
columns: id,name
options:
format: csv
- name: >
Copy text vertical-bar-separated data from file /tmp/data.txt to bar table.
The NULL values are specified as N
community.postgresql.postgresql_copy:
copy_from: /tmp/data.csv
dst: bar
options:
delimiter: '|'
null: 'N'
- name: Copy data from acme table to file /tmp/data.txt in text format, TAB-separated
community.postgresql.postgresql_copy:
src: acme
copy_to: /tmp/data.txt
- name: Copy data from SELECT query to/tmp/data.csv in CSV format
community.postgresql.postgresql_copy:
src: 'SELECT * FROM acme'
copy_to: /tmp/data.csv
options:
format: csv
- name: Copy CSV data from my_table to gzip
community.postgresql.postgresql_copy:
src: my_table
copy_to: 'gzip > /tmp/data.csv.gz'
program: true
options:
format: csv
- name: >
Copy data from columns id, name of table bar to /tmp/data.txt.
Output format is text, vertical-bar-separated, NULL as N
community.postgresql.postgresql_copy:
src: bar
columns:
- id
- name
copy_to: /tmp/data.csv
options:
delimiter: '|'
null: 'N'
'''
RETURN = r'''
queries:
description: List of executed queries.
returned: always
type: str
sample: [ "COPY test_table FROM '/tmp/data_file.txt' (FORMAT csv, DELIMITER ',', NULL 'NULL')" ]
src:
description: Data source.
returned: always
type: str
sample: "mytable"
dst:
description: Data destination.
returned: always
type: str
sample: "/tmp/data.csv"
'''
try:
from psycopg2.extras import DictCursor
except ImportError:
# psycopg2 is checked by connect_to_db()
# from ansible.module_utils.postgres
pass
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.postgresql.plugins.module_utils.database import (
check_input,
pg_quote_identifier,
)
from ansible_collections.community.postgresql.plugins.module_utils.postgres import (
connect_to_db,
exec_sql,
ensure_required_libs,
get_conn_params,
postgres_common_argument_spec,
)
from ansible.module_utils.six import iteritems
class PgCopyData(object):
    """Implements behavior of COPY FROM, COPY TO PostgreSQL command.

    Arguments:
        module (AnsibleModule) -- object of AnsibleModule class
        cursor (cursor) -- cursor object of psycopg2 library

    Attributes:
        module (AnsibleModule) -- object of AnsibleModule class
        cursor (cursor) -- cursor object of psycopg2 library
        changed (bool) -- something was changed after execution or not
        executed_queries (list) -- executed queries
        dst (str) -- data destination table (when copy_from)
        src (str) -- data source table (when copy_to)
        opt_need_quotes (tuple) -- values of these options must be passed
            to SQL in quotes
    """

    def __init__(self, module, cursor):
        self.module = module
        self.cursor = cursor
        self.executed_queries = []
        self.changed = False
        self.dst = ''
        self.src = ''
        # COPY options whose values must be single-quoted in the statement:
        self.opt_need_quotes = (
            'DELIMITER',
            'NULL',
            'QUOTE',
            'ESCAPE',
            'ENCODING',
        )

    def copy_from(self):
        """Implements COPY FROM command behavior.

        Builds and (unless in check mode) executes
        ``COPY <dst> [(columns)] FROM [PROGRAM] '<src>' [(options)]``.
        """
        self.src = self.module.params['copy_from']
        self.dst = self.module.params['dst']

        query_fragments = ['COPY %s' % pg_quote_identifier(self.dst, 'table')]

        if self.module.params.get('columns'):
            query_fragments.append('(%s)' % ','.join(self.module.params['columns']))

        query_fragments.append('FROM')

        if self.module.params.get('program'):
            query_fragments.append('PROGRAM')

        query_fragments.append("'%s'" % self.src)

        if self.module.params.get('options'):
            query_fragments.append(self.__transform_options())

        # Note: check mode is implemented here:
        if self.module.check_mode:
            self.changed = self.__check_table(self.dst)

            if self.changed:
                self.executed_queries.append(' '.join(query_fragments))
        else:
            if exec_sql(self, ' '.join(query_fragments), return_bool=True):
                self.changed = True

    def copy_to(self):
        """Implements COPY TO command behavior.

        Builds and (unless in check mode) executes
        ``COPY <src|(SELECT ...)> [(columns)] TO [PROGRAM] '<dst>' [(options)]``.
        """
        self.src = self.module.params['src']
        self.dst = self.module.params['copy_to']

        if 'SELECT ' in self.src.upper():
            # If src is SQL SELECT statement:
            query_fragments = ['COPY (%s)' % self.src]
        else:
            # If src is a table:
            query_fragments = ['COPY %s' % pg_quote_identifier(self.src, 'table')]

        if self.module.params.get('columns'):
            query_fragments.append('(%s)' % ','.join(self.module.params['columns']))

        query_fragments.append('TO')

        if self.module.params.get('program'):
            query_fragments.append('PROGRAM')

        query_fragments.append("'%s'" % self.dst)

        if self.module.params.get('options'):
            query_fragments.append(self.__transform_options())

        # Note: check mode is implemented here:
        if self.module.check_mode:
            self.changed = self.__check_table(self.src)

            if self.changed:
                self.executed_queries.append(' '.join(query_fragments))
        else:
            if exec_sql(self, ' '.join(query_fragments), return_bool=True):
                self.changed = True

    def __transform_options(self):
        """Transform the options dict into a suitable SQL string.

        Returns a string like ``(format csv, delimiter '|')``; values of
        options listed in self.opt_need_quotes are wrapped in single quotes.

        BUGFIX: the previous implementation quoted the values by mutating
        self.module.params['options'] in place, so calling this method twice
        double-quoted the values and the mutation leaked back into module
        params. Build the fragment locally instead.
        """
        opt_list = []
        for key, val in self.module.params['options'].items():
            if key.upper() in self.opt_need_quotes:
                val = "'%s'" % val
            opt_list.append('%s %s' % (key, val))

        return '(%s)' % ', '.join(opt_list)

    def __check_table(self, table):
        """Check table or SQL in transaction mode for check_mode.

        Return True if it is OK.

        Arguments:
            table (str) - Table name that needs to be checked.
                It can be SQL SELECT statement that was passed
                instead of the table name.
        """
        if 'SELECT ' in table.upper():
            # In this case table is actually SQL SELECT statement.
            # If SQL fails, it's handled by exec_sql():
            exec_sql(self, table, add_to_executed=False)
            # If exec_sql was passed, it means all is OK:
            return True

        exec_sql(self, 'SELECT 1 FROM %s' % pg_quote_identifier(table, 'table'),
                 add_to_executed=False)
        # If SQL was executed successfully:
        return True
# ===========================================
# Module execution.
#
def main():
    """Entry point: parse params, connect, run COPY, and report results."""
    argument_spec = postgres_common_argument_spec()
    argument_spec.update(
        copy_to=dict(type='path', aliases=['to']),
        copy_from=dict(type='path', aliases=['from']),
        src=dict(type='str', aliases=['source']),
        dst=dict(type='str', aliases=['destination']),
        columns=dict(type='list', elements='str', aliases=['column']),
        options=dict(type='dict'),
        program=dict(type='bool', default=False),
        db=dict(type='str', aliases=['login_db']),
        session_role=dict(type='str'),
        trust_input=dict(type='bool', default=True),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        mutually_exclusive=[
            ['copy_from', 'copy_to'],
            ['copy_from', 'src'],
            ['copy_to', 'dst'],
        ]
    )

    params = module.params

    if not params['trust_input']:
        # Check input for potentially dangerous elements:
        if params['options']:
            opt_list = ['%s %s' % (key, val) for (key, val) in iteritems(params['options'])]
        else:
            opt_list = None

        check_input(module,
                    params['copy_to'],
                    params['copy_from'],
                    params['src'],
                    params['dst'],
                    opt_list,
                    params['columns'],
                    params['session_role'])

    # Mutually exclusive parameters are already validated by AnsibleModule
    # (mutually_exclusive list above); only the required pairings
    # copy_from/dst and copy_to/src need a manual check here:
    if params.get('copy_from') and not params.get('dst'):
        module.fail_json(msg='dst param is necessary with copy_from')

    elif params.get('copy_to') and not params.get('src'):
        module.fail_json(msg='src param is necessary with copy_to')

    # Make sure the psycopg2 libraries are present before connecting:
    ensure_required_libs(module)
    conn_params = get_conn_params(module, params)
    db_connection, dummy = connect_to_db(module, conn_params, autocommit=False)
    cursor = db_connection.cursor(cursor_factory=DictCursor)

    # The PgCopyData object reads dst, src, etc. straight from the module,
    # so the methods below take no arguments. Check mode is handled inside
    # those methods via module.check_mode.
    pg_copy = PgCopyData(module, cursor)

    if params.get('copy_to'):
        pg_copy.copy_to()
    elif params.get('copy_from'):
        pg_copy.copy_from()

    # Roll the transaction back in check mode, otherwise persist it:
    if module.check_mode:
        db_connection.rollback()
    else:
        db_connection.commit()

    cursor.close()
    db_connection.close()

    # Report the outcome:
    module.exit_json(
        changed=pg_copy.changed,
        queries=pg_copy.executed_queries,
        src=pg_copy.src,
        dst=pg_copy.dst,
    )


if __name__ == '__main__':
    main()

View File

@@ -0,0 +1,797 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: postgresql_db
short_description: Add or remove PostgreSQL databases from a remote host
description:
- Add or remove PostgreSQL databases from a remote host.
options:
name:
description:
- Name of the database to add or remove.
type: str
required: true
aliases: [ db ]
port:
description:
- Database port to connect (if needed).
type: int
default: 5432
aliases:
- login_port
owner:
description:
- Name of the role to set as owner of the database.
type: str
default: ''
template:
description:
- Template used to create the database.
type: str
default: ''
encoding:
description:
- Encoding of the database.
type: str
default: ''
lc_collate:
description:
- Collation order (LC_COLLATE) to use in the database
must match collation order of template database unless C(template0) is used as template.
type: str
default: ''
lc_ctype:
description:
- Character classification (LC_CTYPE) to use in the database (e.g. lower, upper, ...).
- Must match LC_CTYPE of template database unless C(template0) is used as template.
type: str
default: ''
session_role:
description:
- Switch to session_role after connecting.
- The specified session_role must be a role that the current login_user is a member of.
- Permissions checking for SQL commands is carried out as though the session_role
were the one that had logged in originally.
type: str
state:
description:
- The database state.
- C(present) implies that the database should be created if necessary.
- C(absent) implies that the database should be removed if present.
- C(dump) requires a target definition to which the database will be backed up. (Added in Ansible 2.4)
Note that in some PostgreSQL versions, pg_dump (an embedded PostgreSQL utility used by the module)
returns rc 0 even when errors occur (e.g. the connection is forbidden by pg_hba.conf, etc.),
so the module returns changed=True but the dump has not actually been done. Please make sure that your version of
pg_dump returns rc 1 in this case.
- C(restore) also requires a target definition from which the database will be restored. (Added in Ansible 2.4).
- The format of the backup will be detected based on the target name.
- Supported compression formats for dump and restore include C(.pgc), C(.bz2), C(.gz) and C(.xz).
- Supported formats for dump and restore include C(.sql), C(.tar), and C(.dir) (for the directory format which is supported since collection version 1.4.0).
- "Restore program is selected by target file format: C(.tar), C(.pgc), and C(.dir) are handled by pg_restore, other with pgsql."
- "."
- C(rename) is used to rename the database C(name) to C(target).
- If the database C(name) exists, it will be renamed to C(target).
- If the database C(name) does not exist and the C(target) database exists,
the module will report that nothing has changed.
- If both the databases exist as well as when they have the same value, an error will be raised.
- When I(state=rename), in addition to the C(name) option, the module requires the C(target) option. Other options are ignored.
Supported since collection version 1.4.0.
type: str
choices: [ absent, dump, present, rename, restore ]
default: present
force:
description:
- Used to forcefully drop a database when the I(state) is C(absent), ignored otherwise.
type: bool
default: False
target:
description:
- File to back up or restore from.
- Used when I(state) is C(dump) or C(restore).
type: path
default: ''
target_opts:
description:
- Additional arguments for pg_dump or restore program (pg_restore or psql, depending on target's format).
- Used when I(state) is C(dump) or C(restore).
type: str
default: ''
maintenance_db:
description:
- The value specifies the initial database (which is also called as maintenance DB) that Ansible connects to.
type: str
default: postgres
conn_limit:
description:
- Specifies the database connection limit.
type: str
default: ''
tablespace:
description:
- The tablespace to set for the database
U(https://www.postgresql.org/docs/current/sql-alterdatabase.html).
- If you want to move the database back to the default tablespace,
explicitly set this to pg_default.
type: path
default: ''
dump_extra_args:
description:
- Provides additional arguments when I(state) is C(dump).
- Cannot be used with dump-file-format-related arguments like ``--format=d``.
type: str
version_added: '0.2.0'
trust_input:
description:
- If C(false), check whether values of parameters I(owner), I(conn_limit), I(encoding),
I(db), I(template), I(tablespace), I(session_role) are potentially dangerous.
- It makes sense to use C(false) only when SQL injections via the parameters are possible.
type: bool
default: true
version_added: '0.2.0'
seealso:
- name: CREATE DATABASE reference
description: Complete reference of the CREATE DATABASE command documentation.
link: https://www.postgresql.org/docs/current/sql-createdatabase.html
- name: DROP DATABASE reference
description: Complete reference of the DROP DATABASE command documentation.
link: https://www.postgresql.org/docs/current/sql-dropdatabase.html
- name: pg_dump reference
description: Complete reference of pg_dump documentation.
link: https://www.postgresql.org/docs/current/app-pgdump.html
- name: pg_restore reference
description: Complete reference of pg_restore documentation.
link: https://www.postgresql.org/docs/current/app-pgrestore.html
- module: community.postgresql.postgresql_tablespace
- module: community.postgresql.postgresql_info
- module: community.postgresql.postgresql_ping
notes:
- State C(dump) and C(restore) don't require I(psycopg2) since version 2.8.
- Supports C(check_mode).
author: "Ansible Core Team"
extends_documentation_fragment:
- community.postgresql.postgres
'''
EXAMPLES = r'''
- name: Create a new database with name "acme"
community.postgresql.postgresql_db:
name: acme
# Note: If a template different from "template0" is specified,
# encoding and locale settings must match those of the template.
- name: Create a new database with name "acme" and specific encoding and locale # settings
community.postgresql.postgresql_db:
name: acme
encoding: UTF-8
lc_collate: de_DE.UTF-8
lc_ctype: de_DE.UTF-8
template: template0
# Note: Default limit for the number of concurrent connections to
# a specific database is "-1", which means "unlimited"
- name: Create a new database with name "acme" which has a limit of 100 concurrent connections
community.postgresql.postgresql_db:
name: acme
conn_limit: "100"
- name: Dump an existing database to a file
community.postgresql.postgresql_db:
name: acme
state: dump
target: /tmp/acme.sql
- name: Dump an existing database to a file excluding the test table
community.postgresql.postgresql_db:
name: acme
state: dump
target: /tmp/acme.sql
dump_extra_args: --exclude-table=test
- name: Dump an existing database to a file (with compression)
community.postgresql.postgresql_db:
name: acme
state: dump
target: /tmp/acme.sql.gz
- name: Dump a single schema for an existing database
community.postgresql.postgresql_db:
name: acme
state: dump
target: /tmp/acme.sql
target_opts: "-n public"
- name: Dump only table1 and table2 from the acme database
community.postgresql.postgresql_db:
name: acme
state: dump
target: /tmp/table1_table2.sql
target_opts: "-t table1 -t table2"
- name: Dump an existing database using the directory format
community.postgresql.postgresql_db:
name: acme
state: dump
target: /tmp/acme.dir
# Note: In the example below, if database foo exists and has another tablespace
# the tablespace will be changed to foo. Access to the database will be locked
# until the copying of database files is finished.
- name: Create a new database called foo in tablespace bar
community.postgresql.postgresql_db:
name: foo
tablespace: bar
# Rename the database foo to bar.
# If the database foo exists, it will be renamed to bar.
# If the database foo does not exist and the bar database exists,
# the module will report that nothing has changed.
# If both the databases exist, an error will be raised.
- name: Rename the database foo to bar
community.postgresql.postgresql_db:
name: foo
state: rename
target: bar
'''
RETURN = r'''
executed_commands:
description: List of commands which tried to run.
returned: always
type: list
sample: ["CREATE DATABASE acme"]
version_added: '0.2.0'
'''
import os
import subprocess
import traceback
try:
import psycopg2
import psycopg2.extras
except ImportError:
HAS_PSYCOPG2 = False
else:
HAS_PSYCOPG2 = True
import ansible_collections.community.postgresql.plugins.module_utils.postgres as pgutils
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.postgresql.plugins.module_utils.database import (
check_input,
SQLParseError,
)
from ansible.module_utils.six import iteritems
from ansible.module_utils.six.moves import shlex_quote
from ansible.module_utils._text import to_native
from ansible_collections.community.postgresql.plugins.module_utils.version import LooseVersion
executed_commands = []
class NotSupportedError(Exception):
    # Raised by db_create() when a requested change (encoding, LC_COLLATE,
    # LC_CTYPE) cannot be applied to an existing database.
    pass
# ===========================================
# PostgreSQL module specific support methods.
#
def set_owner(cursor, db, owner):
    """Make *owner* the owner of database *db*.

    The executed statement is recorded in executed_commands.
    Always returns True.
    """
    statement = 'ALTER DATABASE "{0}" OWNER TO "{1}"'.format(db, owner)
    executed_commands.append(statement)
    cursor.execute(statement)
    return True
def set_conn_limit(cursor, db, conn_limit):
    """Set the connection limit of database *db*.

    The executed statement is recorded in executed_commands.
    Always returns True.
    """
    statement = 'ALTER DATABASE "{0}" CONNECTION LIMIT {1}'.format(db, conn_limit)
    executed_commands.append(statement)
    cursor.execute(statement)
    return True
def get_encoding_id(cursor, encoding):
    """Resolve an encoding name to its numeric PostgreSQL encoding id."""
    cursor.execute(
        "SELECT pg_char_to_encoding(%(encoding)s) AS encoding_id;",
        {'encoding': encoding},
    )
    return cursor.fetchone()['encoding_id']
def get_db_info(cursor, db):
    """Return a row (dict-like) describing database *db*.

    Keys: owner, encoding, encoding_id, lc_collate, lc_ctype,
    conn_limit, tablespace. Returns None if the database does not exist
    (cursor.fetchone() on an empty result).
    """
    query = """
    SELECT rolname AS owner,
    pg_encoding_to_char(encoding) AS encoding, encoding AS encoding_id,
    datcollate AS lc_collate, datctype AS lc_ctype, pg_database.datconnlimit AS conn_limit,
    spcname AS tablespace
    FROM pg_database
    JOIN pg_roles ON pg_roles.oid = pg_database.datdba
    JOIN pg_tablespace ON pg_tablespace.oid = pg_database.dattablespace
    WHERE datname = %(db)s
    """
    cursor.execute(query, {'db': db})
    return cursor.fetchone()
def db_exists(cursor, db):
    """Return True when exactly one pg_database row matches *db*."""
    cursor.execute("SELECT * FROM pg_database WHERE datname=%(db)s", {'db': db})
    return cursor.rowcount == 1
def db_dropconns(cursor, db):
    """Forbid new connections to *db* and terminate the existing ones."""
    if cursor.connection.server_version >= 90200:
        # Postgres 9.2+ exposes the backend pid as pg_stat_activity.pid:
        query_terminate = ("SELECT pg_terminate_backend(pg_stat_activity.pid) FROM pg_stat_activity "
                           "WHERE pg_stat_activity.datname=%(db)s AND pid <> pg_backend_pid()")
    else:
        # Up to 9.1 the same column was called procpid:
        query_terminate = ("SELECT pg_terminate_backend(pg_stat_activity.procpid) FROM pg_stat_activity "
                           "WHERE pg_stat_activity.datname=%(db)s AND procpid <> pg_backend_pid()")
    query_block = ("UPDATE pg_database SET datallowconn = false WHERE datname=%(db)s")
    combined = query_block + '; ' + query_terminate
    cursor.execute(combined, {'db': db})
def db_delete(cursor, db, force=False):
    """Drop database *db* if it exists.

    With force=True, either use DROP DATABASE ... WITH (FORCE) on
    Postgres 13+, or manually terminate connections first on older
    servers. Returns True when a DROP statement was issued.
    """
    if not db_exists(cursor, db):
        return False

    if force and cursor.connection.server_version >= 130000:
        # Postgres 13+ can evict other sessions by itself:
        query = 'DROP DATABASE "%s" WITH (FORCE)' % db
    else:
        if force:
            # Older servers: kill existing connections before dropping.
            db_dropconns(cursor, db)
        query = 'DROP DATABASE "%s"' % db

    executed_commands.append(query)
    cursor.execute(query)
    return True
def db_create(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace):
    """Create database *db*, or align an existing one with the given attributes.

    Returns True when the database was created or changed, False when it
    already matches. Raises NotSupportedError when the encoding,
    LC_COLLATE, or LC_CTYPE of an existing database differ from the
    requested values (these cannot be altered in place).
    """
    # Values passed as server-side parameters so the driver quotes them:
    params = dict(enc=encoding, collate=lc_collate, ctype=lc_ctype, conn_limit=conn_limit, tablespace=tablespace)
    if not db_exists(cursor, db):
        query_fragments = ['CREATE DATABASE "%s"' % db]
        if owner:
            query_fragments.append('OWNER "%s"' % owner)
        if template:
            query_fragments.append('TEMPLATE "%s"' % template)
        if encoding:
            query_fragments.append('ENCODING %(enc)s')
        if lc_collate:
            query_fragments.append('LC_COLLATE %(collate)s')
        if lc_ctype:
            query_fragments.append('LC_CTYPE %(ctype)s')
        if tablespace:
            query_fragments.append('TABLESPACE "%s"' % tablespace)
        if conn_limit:
            query_fragments.append("CONNECTION LIMIT %(conn_limit)s" % {"conn_limit": conn_limit})
        query = ' '.join(query_fragments)
        # mogrify() renders the statement with parameters for reporting:
        executed_commands.append(cursor.mogrify(query, params))
        cursor.execute(query, params)
        return True
    else:
        db_info = get_db_info(cursor, db)
        if (encoding and get_encoding_id(cursor, encoding) != db_info['encoding_id']):
            raise NotSupportedError(
                'Changing database encoding is not supported. '
                'Current encoding: %s' % db_info['encoding']
            )
        elif lc_collate and lc_collate != db_info['lc_collate']:
            raise NotSupportedError(
                'Changing LC_COLLATE is not supported. '
                'Current LC_COLLATE: %s' % db_info['lc_collate']
            )
        elif lc_ctype and lc_ctype != db_info['lc_ctype']:
            raise NotSupportedError(
                'Changing LC_CTYPE is not supported.'
                'Current LC_CTYPE: %s' % db_info['lc_ctype']
            )
        else:
            # The database exists and is compatible; apply only the
            # attributes that PostgreSQL allows changing in place:
            changed = False
            if owner and owner != db_info['owner']:
                changed = set_owner(cursor, db, owner)
            if conn_limit and conn_limit != str(db_info['conn_limit']):
                changed = set_conn_limit(cursor, db, conn_limit)
            if tablespace and tablespace != db_info['tablespace']:
                changed = set_tablespace(cursor, db, tablespace)
            return changed
def db_matches(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace):
    """Return True when *db* exists and every non-empty requested
    attribute matches the database's current state."""
    if not db_exists(cursor, db):
        return False

    db_info = get_db_info(cursor, db)

    # Guard-clause style: bail out on the first mismatch.
    if encoding and get_encoding_id(cursor, encoding) != db_info['encoding_id']:
        return False
    if lc_collate and lc_collate != db_info['lc_collate']:
        return False
    if lc_ctype and lc_ctype != db_info['lc_ctype']:
        return False
    if owner and owner != db_info['owner']:
        return False
    if conn_limit and conn_limit != str(db_info['conn_limit']):
        return False
    if tablespace and tablespace != db_info['tablespace']:
        return False
    return True
def db_dump(module, target, target_opts="",
            db=None,
            dump_extra_args=None,
            user=None,
            password=None,
            host=None,
            port=None,
            **kw):
    """Dump database *db* to *target* with pg_dump.

    The pg_dump output format and an optional compression program are
    chosen from the target's file extension. Returns the result tuple of
    do_with_password().
    """
    flags = login_flags(db, host, port, user, db_prefix=False)
    cmd = module.get_bin_path('pg_dump', True)
    comp_prog_path = None
    # Select pg_dump output format by target extension:
    if os.path.splitext(target)[-1] == '.tar':
        flags.append(' --format=t')
    elif os.path.splitext(target)[-1] == '.pgc':
        flags.append(' --format=c')
    elif os.path.splitext(target)[-1] == '.dir':
        flags.append(' --format=d')
    # Select a compression program by target extension:
    if os.path.splitext(target)[-1] == '.gz':
        if module.get_bin_path('pigz'):
            # Prefer the parallel pigz when it is installed:
            comp_prog_path = module.get_bin_path('pigz', True)
        else:
            comp_prog_path = module.get_bin_path('gzip', True)
    elif os.path.splitext(target)[-1] == '.bz2':
        comp_prog_path = module.get_bin_path('bzip2', True)
    elif os.path.splitext(target)[-1] == '.xz':
        comp_prog_path = module.get_bin_path('xz', True)
    cmd += "".join(flags)
    if dump_extra_args:
        cmd += " {0} ".format(dump_extra_args)
    if target_opts:
        cmd += " {0} ".format(target_opts)
    if comp_prog_path:
        # Use a fifo to be notified of an error in pg_dump
        # Using shell pipe has no way to return the code of the first command
        # in a portable way.
        fifo = os.path.join(module.tmpdir, 'pg_fifo')
        os.mkfifo(fifo)
        cmd = '{1} <{3} > {2} & {0} >{3}'.format(cmd, comp_prog_path, shlex_quote(target), fifo)
    else:
        if ' --format=d' in cmd:
            # Directory format writes to a path itself, not to stdout:
            cmd = '{0} -f {1}'.format(cmd, shlex_quote(target))
        else:
            cmd = '{0} > {1}'.format(cmd, shlex_quote(target))
    return do_with_password(module, cmd, password)
def db_restore(module, target, target_opts="",
               db=None,
               user=None,
               password=None,
               host=None,
               port=None,
               **kw):
    """Restore database *db* from *target* with psql or pg_restore.

    The restore program and an optional decompression program are chosen
    from the target's file extension. Returns a (rc, stdout, stderr, cmd)
    tuple; when a decompression pipe is used, the reported cmd is masked.
    """
    flags = login_flags(db, host, port, user)
    comp_prog_path = None
    cmd = module.get_bin_path('psql', True)

    # The extension is invariant across the branches; compute it once:
    ext = os.path.splitext(target)[-1]

    if ext == '.sql':
        flags.append(' --file={0}'.format(target))
    elif ext == '.tar':
        flags.append(' --format=Tar')
        cmd = module.get_bin_path('pg_restore', True)
    elif ext == '.pgc':
        flags.append(' --format=Custom')
        cmd = module.get_bin_path('pg_restore', True)
    elif ext == '.dir':
        flags.append(' --format=Directory')
        cmd = module.get_bin_path('pg_restore', True)
    elif ext == '.gz':
        comp_prog_path = module.get_bin_path('zcat', True)
    elif ext == '.bz2':
        comp_prog_path = module.get_bin_path('bzcat', True)
    elif ext == '.xz':
        comp_prog_path = module.get_bin_path('xzcat', True)

    cmd += "".join(flags)

    if target_opts:
        cmd += " {0} ".format(target_opts)

    if comp_prog_path:
        env = os.environ.copy()
        if password:
            # BUGFIX: the previous code replaced the whole environment with
            # {"PGPASSWORD": password}, dropping PATH, locale, etc. for the
            # shell-launched restore command. Add the variable to the copy
            # instead.
            env["PGPASSWORD"] = password
        p1 = subprocess.Popen([comp_prog_path, target], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        p2 = subprocess.Popen(cmd, stdin=p1.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, env=env)
        (stdout2, stderr2) = p2.communicate()
        p1.stdout.close()
        p1.wait()

        if p1.returncode != 0:
            # Report the decompressor's failure; the command line is masked:
            stderr1 = p1.stderr.read()
            return p1.returncode, '', stderr1, 'cmd: ****'
        else:
            return p2.returncode, '', stderr2, 'cmd: ****'
    else:
        if '--format=Directory' in cmd:
            # pg_restore reads the directory itself; no stdin redirection:
            cmd = '{0} {1}'.format(cmd, shlex_quote(target))
        else:
            cmd = '{0} < {1}'.format(cmd, shlex_quote(target))
        return do_with_password(module, cmd, password)
def login_flags(db, host, port, user, db_prefix=True):
    """
    Return a list of connection argument strings, each prefixed with a
    space and quoted where necessary, to be combined later into a single
    shell string with `"".join(rv)`.

    db_prefix selects whether the db argument is passed as "--dbname"
    (the option was introduced in 9.3) or as a bare positional value.
    """
    flags = []
    if db:
        template = ' --dbname={0}' if db_prefix else ' {0}'
        flags.append(template.format(shlex_quote(db)))
    for option, value in (('host', host), ('port', port), ('username', user)):
        if value:
            flags.append(' --{0}={1}'.format(option, value))
    return flags
def do_with_password(module, cmd, password):
    """Run *cmd* via the module, exporting PGPASSWORD when given.

    Records cmd in executed_commands and returns (rc, out, err, cmd)
    in the order produced by module.run_command.
    """
    env = {"PGPASSWORD": password} if password else {}
    executed_commands.append(cmd)
    # run_command yields (rc, stdout, stderr); the original code bound these
    # to misleadingly swapped names but returned them in the same positions.
    rc, out, err = module.run_command(cmd, use_unsafe_shell=True, environ_update=env)
    return rc, out, err, cmd
def set_tablespace(cursor, db, tablespace):
    """Move database *db* to *tablespace*.

    The executed statement is recorded in executed_commands.
    Always returns True.
    """
    statement = 'ALTER DATABASE "{0}" SET TABLESPACE "{1}"'.format(db, tablespace)
    executed_commands.append(statement)
    cursor.execute(statement)
    return True
def rename_db(module, cursor, db, target, check_mode=False):
    """Rename database *db* to *target*.

    Fails when both or neither database exists. Returns False when only
    the target exists (desired state already reached), True when a rename
    was (or, in check mode, would be) performed.
    """
    source_exists = db_exists(cursor, db)
    target_exists = db_exists(cursor, target)

    if source_exists and target_exists:
        module.fail_json(msg='Both the source and the target databases exist.')

    if not source_exists and not target_exists:
        module.fail_json(msg='The source and the target databases do not exist.')

    if target_exists:
        # Only the target exists: assume the rename already happened and
        # nothing needs to be changed.
        return False

    # Only the source exists from here on.
    if check_mode:
        return True

    query = 'ALTER DATABASE "%s" RENAME TO "%s"' % (db, target)
    executed_commands.append(query)
    cursor.execute(query)
    return True
# ===========================================
# Module execution.
#
def main():
    """Entry point of the postgresql_db module.

    Parses module arguments, connects to the maintenance database (unless
    the state is dump/restore, which shell out instead of using psycopg2),
    and dispatches on ``state``: present/absent create or drop the
    database, rename renames it, dump/restore run pg_dump/pg_restore.
    """
    argument_spec = pgutils.postgres_common_argument_spec()
    argument_spec.update(
        db=dict(type='str', required=True, aliases=['name']),
        owner=dict(type='str', default=''),
        template=dict(type='str', default=''),
        encoding=dict(type='str', default=''),
        lc_collate=dict(type='str', default=''),
        lc_ctype=dict(type='str', default=''),
        state=dict(type='str', default='present',
                   choices=['absent', 'dump', 'present', 'rename', 'restore']),
        target=dict(type='path', default=''),
        target_opts=dict(type='str', default=''),
        maintenance_db=dict(type='str', default="postgres"),
        session_role=dict(type='str'),
        conn_limit=dict(type='str', default=''),
        tablespace=dict(type='path', default=''),
        dump_extra_args=dict(type='str', default=None),
        trust_input=dict(type='bool', default=True),
        force=dict(type='bool', default=False),
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True
    )

    db = module.params["db"]
    owner = module.params["owner"]
    template = module.params["template"]
    encoding = module.params["encoding"]
    lc_collate = module.params["lc_collate"]
    lc_ctype = module.params["lc_ctype"]
    target = module.params["target"]
    target_opts = module.params["target_opts"]
    state = module.params["state"]
    changed = False
    maintenance_db = module.params['maintenance_db']
    session_role = module.params["session_role"]
    conn_limit = module.params['conn_limit']
    tablespace = module.params['tablespace']
    dump_extra_args = module.params['dump_extra_args']
    trust_input = module.params['trust_input']
    force = module.params['force']

    # Validate the parameter combination required by state=rename early,
    # before any connection is attempted.
    if state == 'rename':
        if not target:
            module.fail_json(msg='The "target" option must be defined when the "rename" option is used.')

        if db == target:
            module.fail_json(msg='The "name/db" option and the "target" option cannot be the same.')

        if maintenance_db == db:
            module.fail_json(msg='The "maintenance_db" option and the "name/db" option cannot be the same.')

    # Check input
    if not trust_input:
        # Check input for potentially dangerous elements:
        check_input(module, owner, conn_limit, encoding, db, template, tablespace, session_role)

    # dump/restore shell out to pg_dump/pg_restore and do not need psycopg2.
    raw_connection = state in ("dump", "restore")

    if not raw_connection:
        pgutils.ensure_required_libs(module)

    # To use defaults values, keyword arguments must be absent, so
    # check which values are empty and don't include in the **kw
    # dictionary
    params_map = {
        "login_host": "host",
        "login_user": "user",
        "login_password": "password",
        "port": "port",
        "ssl_mode": "sslmode",
        "ca_cert": "sslrootcert"
    }
    kw = dict((params_map[k], v) for (k, v) in iteritems(module.params)
              if k in params_map and v != '' and v is not None)

    # If a login_unix_socket is specified, incorporate it here.
    is_localhost = "host" not in kw or kw["host"] == "" or kw["host"] == "localhost"
    if is_localhost and module.params["login_unix_socket"] != "":
        kw["host"] = module.params["login_unix_socket"]

    # Default dump/restore target is <cwd>/<db>.sql.
    if target == "":
        target = "{0}/{1}.sql".format(os.getcwd(), db)
        target = os.path.expanduser(target)

    if not raw_connection:
        try:
            # psycopg2 renamed the 'database' kwarg to 'dbname' in 2.7.
            if LooseVersion(psycopg2.__version__) >= LooseVersion('2.7.0'):
                db_connection = psycopg2.connect(dbname=maintenance_db, **kw)
            else:
                db_connection = psycopg2.connect(database=maintenance_db, **kw)

            # Enable autocommit so we can create databases
            if LooseVersion(psycopg2.__version__) >= LooseVersion('2.4.2'):
                db_connection.autocommit = True
            else:
                db_connection.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)

            cursor = db_connection.cursor(cursor_factory=psycopg2.extras.DictCursor)

        except TypeError as e:
            # Old psycopg2 raises TypeError on the unknown sslrootcert kwarg.
            if 'sslrootcert' in e.args[0]:
                module.fail_json(msg='Postgresql server must be at least version 8.4 to support sslrootcert. Exception: {0}'.format(to_native(e)),
                                 exception=traceback.format_exc())
            module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())

        except Exception as e:
            module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())

        if session_role:
            try:
                cursor.execute('SET ROLE "%s"' % session_role)
            except Exception as e:
                module.fail_json(msg="Could not switch role: %s" % to_native(e), exception=traceback.format_exc())

    try:
        # In check mode only predict whether a change would happen,
        # then exit without touching anything.
        if module.check_mode:
            if state == "absent":
                changed = db_exists(cursor, db)
            elif state == "present":
                changed = not db_matches(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace)
            elif state == "rename":
                changed = rename_db(module, cursor, db, target, check_mode=True)
            module.exit_json(changed=changed, db=db, executed_commands=executed_commands)

        if state == "absent":
            try:
                changed = db_delete(cursor, db, force)
            except SQLParseError as e:
                module.fail_json(msg=to_native(e), exception=traceback.format_exc())

        elif state == "present":
            try:
                changed = db_create(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace)
            except SQLParseError as e:
                module.fail_json(msg=to_native(e), exception=traceback.format_exc())

        elif state in ("dump", "restore"):
            # and/or idiom: picks db_dump for state=dump, db_restore otherwise.
            method = state == "dump" and db_dump or db_restore

            try:
                if state == 'dump':
                    rc, stdout, stderr, cmd = method(module, target, target_opts, db, dump_extra_args, **kw)
                else:
                    rc, stdout, stderr, cmd = method(module, target, target_opts, db, **kw)

                if rc != 0:
                    module.fail_json(msg=stderr, stdout=stdout, rc=rc, cmd=cmd)
                else:
                    # dump/restore exit here directly; changed is always True.
                    module.exit_json(changed=True, msg=stdout, stderr=stderr, rc=rc, cmd=cmd,
                                     executed_commands=executed_commands)
            except SQLParseError as e:
                module.fail_json(msg=to_native(e), exception=traceback.format_exc())

        elif state == 'rename':
            changed = rename_db(module, cursor, db, target)

    except NotSupportedError as e:
        module.fail_json(msg=to_native(e), exception=traceback.format_exc())
    except SystemExit:
        # Avoid catching this on Python 2.4
        raise
    except Exception as e:
        module.fail_json(msg="Database query failed: %s" % to_native(e), exception=traceback.format_exc())

    module.exit_json(changed=changed, db=db, executed_commands=executed_commands)
# Run the module entry point when executed directly.
if __name__ == '__main__':
    main()

View File

@@ -0,0 +1,479 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: postgresql_ext
short_description: Add or remove PostgreSQL extensions from a database
description:
- Add or remove PostgreSQL extensions from a database.
options:
name:
description:
- Name of the extension to add or remove.
required: true
type: str
aliases:
- ext
db:
description:
- Name of the database to add or remove the extension to/from.
required: true
type: str
aliases:
- login_db
schema:
description:
- Name of the schema to add the extension to.
type: str
session_role:
description:
- Switch to session_role after connecting.
- The specified session_role must be a role that the current login_user is a member of.
- Permissions checking for SQL commands is carried out as though the session_role were the one that had logged in originally.
type: str
state:
description:
- The database extension state.
default: present
choices: [ absent, present ]
type: str
cascade:
description:
- Automatically install/remove any extensions that this extension depends on
that are not already installed/removed (supported since PostgreSQL 9.6).
type: bool
default: false
login_unix_socket:
description:
- Path to a Unix domain socket for local connections.
type: str
ssl_mode:
description:
- Determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the server.
- See U(https://www.postgresql.org/docs/current/static/libpq-ssl.html) for more information on the modes.
- Default of C(prefer) matches libpq default.
type: str
default: prefer
choices: [ allow, disable, prefer, require, verify-ca, verify-full ]
ca_cert:
description:
- Specifies the name of a file containing SSL certificate authority (CA) certificate(s).
- If the file exists, the server's certificate will be verified to be signed by one of these authorities.
type: str
aliases: [ ssl_rootcert ]
version:
description:
- Extension version to add or update to. Has effect with I(state=present) only.
- If not specified and extension is not installed in the database,
the latest version available will be created.
- If extension is already installed, will update to the given version if a valid update
path exists.
- Downgrading is only supported if the extension provides a downgrade path otherwise
the extension must be removed and a lower version of the extension must be made available.
- Set I(version=latest) to always update the extension to the latest available version.
type: str
trust_input:
description:
- If C(false), check whether values of parameters I(ext), I(schema),
I(version), I(session_role) are potentially dangerous.
- It makes sense to use C(false) only when SQL injections via the parameters are possible.
type: bool
default: true
version_added: '0.2.0'
seealso:
- name: PostgreSQL extensions
description: General information about PostgreSQL extensions.
link: https://www.postgresql.org/docs/current/external-extensions.html
- name: CREATE EXTENSION reference
description: Complete reference of the CREATE EXTENSION command documentation.
link: https://www.postgresql.org/docs/current/sql-createextension.html
- name: ALTER EXTENSION reference
description: Complete reference of the ALTER EXTENSION command documentation.
link: https://www.postgresql.org/docs/current/sql-alterextension.html
- name: DROP EXTENSION reference
description: Complete reference of the DROP EXTENSION command documentation.
link: https://www.postgresql.org/docs/current/sql-dropextension.html
notes:
- Supports C(check_mode).
- The default authentication assumes that you are either logging in as
or sudo'ing to the C(postgres) account on the host.
- This module uses I(psycopg2), a Python PostgreSQL database adapter.
- You must ensure that C(psycopg2) is installed on the host before using this module.
- If the remote host is the PostgreSQL server (which is the default case),
then PostgreSQL must also be installed on the remote host.
- For Ubuntu-based systems, install the C(postgresql), C(libpq-dev),
and C(python-psycopg2) packages on the remote host before using this module.
- Incomparable versions, for example PostGIS ``unpackaged``, cannot be installed.
requirements: [ psycopg2 ]
author:
- Daniel Schep (@dschep)
- Thomas O'Donnell (@andytom)
- Sandro Santilli (@strk)
- Andrew Klychkov (@Andersson007)
- Keith Fiske (@keithf4)
extends_documentation_fragment:
- community.postgresql.postgres
'''
EXAMPLES = r'''
- name: Adds postgis extension to the database acme in the schema foo
community.postgresql.postgresql_ext:
name: postgis
db: acme
schema: foo
- name: Removes postgis extension to the database acme
community.postgresql.postgresql_ext:
name: postgis
db: acme
state: absent
- name: Adds earthdistance extension to the database template1 cascade
community.postgresql.postgresql_ext:
name: earthdistance
db: template1
cascade: true
# In the example below, if earthdistance extension is installed,
# it will be removed too because it depends on cube:
- name: Removes cube extension from the database acme cascade
community.postgresql.postgresql_ext:
name: cube
db: acme
cascade: true
state: absent
- name: Create extension foo of version 1.2 or update it to that version if it's already created and a valid update path exists
community.postgresql.postgresql_ext:
db: acme
name: foo
version: 1.2
- name: Create the latest available version of extension foo. If already installed, update it to the latest version
community.postgresql.postgresql_ext:
db: acme
name: foo
version: latest
'''
RETURN = r'''
query:
description: List of executed queries.
returned: always
type: list
sample: ["DROP EXTENSION \"acme\""]
'''
import traceback
try:
from psycopg2.extras import DictCursor
except ImportError:
# psycopg2 is checked by connect_to_db()
# from ansible.module_utils.postgres
pass
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.postgresql.plugins.module_utils.database import (
check_input,
)
from ansible_collections.community.postgresql.plugins.module_utils.postgres import (
connect_to_db,
ensure_required_libs,
get_conn_params,
postgres_common_argument_spec,
)
from ansible.module_utils._text import to_native
executed_queries = []
# ===========================================
# PostgreSQL module specific support methods.
#
def ext_delete(cursor, ext, current_version, cascade):
    """Drop the extension from the database.

    Return True when the DROP was issued, False when the extension is
    not installed (``current_version`` is falsy).

    Args:
        cursor (cursor) -- cursor object of psycopg2 library
        ext (str) -- extension name
        current_version (str) -- installed version of the extension.
            Value obtained from ext_get_versions and used to
            determine if the extension was installed.
        cascade (boolean) -- Pass the CASCADE flag to the DROP command
    """
    if not current_version:
        # Nothing to drop.
        return False

    query = "DROP EXTENSION \"%s\"" % ext
    if cascade:
        query = query + " CASCADE"
    cursor.execute(query)
    executed_queries.append(cursor.mogrify(query))
    return True
def ext_update_version(cursor, ext, version):
    """Update an installed extension to the given version.

    Always executes ALTER EXTENSION ... UPDATE and returns True.
    When ``version`` is 'latest' no TO clause is added, so the server
    picks the default version from the extension control file.

    Args:
        cursor (cursor) -- cursor object of psycopg2 library
        ext (str) -- extension name
        version (str) -- extension version
    """
    params = {}
    clause = ''
    if version != 'latest':
        clause = ' TO %(ver)s'
        params['ver'] = version
    query = 'ALTER EXTENSION "%s" UPDATE%s' % (ext, clause)

    cursor.execute(query, params)
    executed_queries.append(cursor.mogrify(query, params))
    return True
def ext_create(cursor, ext, schema, cascade, version):
    """Create the extension objects inside the database.

    Return True if success.

    Args:
        cursor (cursor) -- cursor object of psycopg2 library
        ext (str) -- extension name
        schema (str) -- target schema for extension objects
        cascade (bool) -- append CASCADE to install dependencies too
        version (str) -- extension version ('latest' omits the
            VERSION clause so the default version is installed)
    """
    params = {}
    # Assemble the optional clauses, then join with single spaces.
    parts = ['CREATE EXTENSION "%s"' % ext]
    if schema:
        parts.append('WITH SCHEMA "%s"' % schema)
    if version != 'latest':
        parts.append('VERSION %(ver)s')
        params['ver'] = version
    if cascade:
        parts.append('CASCADE')
    query = ' '.join(parts)

    cursor.execute(query, params)
    executed_queries.append(cursor.mogrify(query, params))
    return True
def ext_get_versions(cursor, ext):
    """Fetch the installed version and all available versions of an extension.

    Return a tuple (current_version, available_versions):
    ``current_version`` is the version string currently created in the
    database, or False when the extension is not created;
    ``available_versions`` is a set of every version string the system
    makes available for the extension.

    Args:
        cursor (cursor) -- cursor object of psycopg2 library
        ext (str) -- extension name
    """
    params = {'ext': ext}

    # 1. Version currently created in this database (if any):
    cursor.execute("SELECT extversion FROM pg_catalog.pg_extension "
                   "WHERE extname = %(ext)s", params)
    row = cursor.fetchone()
    current_version = row[0] if row else None

    # 2. Every version the system has available:
    cursor.execute("SELECT version FROM pg_available_extension_versions "
                   "WHERE name = %(ext)s", params)
    available_versions = {r[0] for r in cursor.fetchall()}

    if current_version is None:
        current_version = False

    return (current_version, available_versions)
def ext_valid_update_path(cursor, ext, current_version, version):
    """Check whether the installed extension can be updated to ``version``.

    Queries pg_extension_update_paths for a path from
    ``current_version`` to ``version``. A target of 'latest' is always
    considered a valid path (the update command is then run
    unconditionally).

    Return True if a valid path exists, otherwise False.

    Args:
        cursor (cursor) -- cursor object of psycopg2 library
        ext (str) -- extension name
        current_version (str) -- installed version of the extension
        version (str) -- target extension version to update to
    """
    if version == 'latest':
        # 'latest' always results in the update command being run.
        return True

    query = ("SELECT path FROM pg_extension_update_paths(%(ext)s) "
             "WHERE source = %(cv)s "
             "AND target = %(ver)s")
    params = {'ext': ext, 'cv': current_version, 'ver': version}
    cursor.execute(query, params)
    return cursor.fetchone() is not None
# ===========================================
# Module execution.
#
def main():
    """Entry point of the postgresql_ext module.

    Parses arguments, connects to the database, then ensures the
    extension is present (optionally at a specific version, updating
    along a valid path when already installed) or absent.
    """
    argument_spec = postgres_common_argument_spec()
    argument_spec.update(
        db=dict(type="str", required=True, aliases=["login_db"]),
        ext=dict(type="str", required=True, aliases=["name"]),
        schema=dict(type="str"),
        state=dict(type="str", default="present", choices=["absent", "present"]),
        cascade=dict(type="bool", default=False),
        session_role=dict(type="str"),
        version=dict(type="str"),
        trust_input=dict(type="bool", default=True),
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    ext = module.params["ext"]
    schema = module.params["schema"]
    state = module.params["state"]
    cascade = module.params["cascade"]
    version = module.params["version"]
    session_role = module.params["session_role"]
    trust_input = module.params["trust_input"]
    changed = False

    if not trust_input:
        # Check input for potentially dangerous elements (SQL injection):
        check_input(module, ext, schema, version, session_role)

    if version and state == 'absent':
        module.warn("Parameter version is ignored when state=absent")

    # Ensure psycopg2 libraries are available before connecting to DB:
    ensure_required_libs(module)
    conn_params = get_conn_params(module, module.params)
    db_connection, dummy = connect_to_db(module, conn_params, autocommit=True)
    cursor = db_connection.cursor(cursor_factory=DictCursor)

    try:
        # Get extension info and available versions:
        curr_version, available_versions = ext_get_versions(cursor, ext)

        if state == "present":
            # If version passed
            if version:
                # If extension is installed, update to passed version if a valid path exists
                if curr_version:
                    # Given version already installed
                    if curr_version == version:
                        changed = False

                    # Attempt to update to given version or latest version defined in extension control file
                    # ALTER EXTENSION is actually run if valid, so 'changed' will be true even if nothing updated
                    else:
                        valid_update_path = ext_valid_update_path(cursor, ext, curr_version, version)
                        if valid_update_path:
                            if module.check_mode:
                                changed = True
                            else:
                                changed = ext_update_version(cursor, ext, version)
                        else:
                            module.fail_json(msg="Passed version '%s' has no valid update path from "
                                                 "the currently installed version '%s' or "
                                                 "the passed version is not available" % (version, curr_version))
                else:
                    # If not requesting latest version and passed version not available
                    if version != 'latest' and version not in available_versions:
                        module.fail_json(msg="Passed version '%s' is not available" % version)

                    # Else install the passed version when available
                    else:
                        if module.check_mode:
                            changed = True
                        else:
                            changed = ext_create(cursor, ext, schema, cascade, version)

            # If version is not passed:
            else:
                # Extension exists, no request to update so no change
                if curr_version:
                    changed = False
                else:
                    # If the ext doesn't exist and is available:
                    if available_versions:
                        if module.check_mode:
                            changed = True
                        else:
                            changed = ext_create(cursor, ext, schema, cascade, 'latest')

                    # If the ext doesn't exist and is not available:
                    else:
                        module.fail_json(msg="Extension %s is not available" % ext)

        elif state == "absent":
            if curr_version:
                if module.check_mode:
                    changed = True
                else:
                    changed = ext_delete(cursor, ext, curr_version, cascade)
            else:
                changed = False

    except Exception as e:
        # Close the connection before failing so it is not leaked.
        db_connection.close()
        module.fail_json(msg="Management of PostgreSQL extension failed: %s" % to_native(e), exception=traceback.format_exc())

    db_connection.close()
    module.exit_json(changed=changed, db=module.params["db"], ext=ext, queries=executed_queries)
# Run the module entry point when executed directly.
if __name__ == '__main__':
    main()

View File

@@ -0,0 +1,592 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018-2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: postgresql_idx
short_description: Create or drop indexes from a PostgreSQL database
description:
- Create or drop indexes from a PostgreSQL database.
options:
idxname:
description:
- Name of the index to create or drop.
type: str
required: true
aliases:
- name
db:
description:
- Name of database to connect to and where the index will be created/dropped.
type: str
aliases:
- login_db
session_role:
description:
- Switch to session_role after connecting.
The specified session_role must be a role that the current login_user is a member of.
- Permissions checking for SQL commands is carried out as though
the session_role were the one that had logged in originally.
type: str
schema:
description:
- Name of a database schema where the index will be created.
type: str
state:
description:
- Index state.
- C(present) implies the index will be created if it does not exist.
- C(absent) implies the index will be dropped if it exists.
type: str
default: present
choices: [ absent, present ]
table:
description:
- Table to create index on it.
- Mutually exclusive with I(state=absent).
type: str
columns:
description:
- List of index columns that need to be covered by index.
- Mutually exclusive with I(state=absent).
type: list
elements: str
aliases:
- column
cond:
description:
- Index conditions.
- Mutually exclusive with I(state=absent).
type: str
idxtype:
description:
- Index type (like btree, gist, gin, etc.).
- Mutually exclusive with I(state=absent).
type: str
aliases:
- type
concurrent:
description:
- Enable or disable concurrent mode (CREATE / DROP INDEX CONCURRENTLY).
- Pay attention, if I(concurrent=false), the table will be locked (ACCESS EXCLUSIVE) during the building process.
For more information about the lock levels see U(https://www.postgresql.org/docs/current/explicit-locking.html).
- If the building process was interrupted for any reason when I(concurrent=true), the index becomes invalid.
In this case it should be dropped and created again.
- Mutually exclusive with I(cascade=true).
type: bool
default: true
unique:
description:
- Enable unique index.
- Only btree currently supports unique indexes.
type: bool
default: false
version_added: '0.2.0'
tablespace:
description:
- Set a tablespace for the index.
- Mutually exclusive with I(state=absent).
type: str
storage_params:
description:
- Storage parameters like fillfactor, vacuum_cleanup_index_scale_factor, etc.
- Mutually exclusive with I(state=absent).
type: list
elements: str
cascade:
description:
- Automatically drop objects that depend on the index,
and in turn all objects that depend on those objects.
- It used only with I(state=absent).
- Mutually exclusive with I(concurrent=true).
type: bool
default: false
trust_input:
description:
- If C(false), check whether values of parameters I(idxname), I(session_role),
I(schema), I(table), I(columns), I(tablespace), I(storage_params),
I(cond) are potentially dangerous.
- It makes sense to use C(false) only when SQL injections via the parameters are possible.
type: bool
default: true
version_added: '0.2.0'
seealso:
- module: community.postgresql.postgresql_table
- module: community.postgresql.postgresql_tablespace
- name: PostgreSQL indexes reference
description: General information about PostgreSQL indexes.
link: https://www.postgresql.org/docs/current/indexes.html
- name: CREATE INDEX reference
description: Complete reference of the CREATE INDEX command documentation.
link: https://www.postgresql.org/docs/current/sql-createindex.html
- name: ALTER INDEX reference
description: Complete reference of the ALTER INDEX command documentation.
link: https://www.postgresql.org/docs/current/sql-alterindex.html
- name: DROP INDEX reference
description: Complete reference of the DROP INDEX command documentation.
link: https://www.postgresql.org/docs/current/sql-dropindex.html
notes:
- Supports C(check_mode).
- The index building process can affect database performance.
- To avoid table locks on production databases, use I(concurrent=true) (default behavior).
author:
- Andrew Klychkov (@Andersson007)
- Thomas O'Donnell (@andytom)
extends_documentation_fragment:
- community.postgresql.postgres
'''
EXAMPLES = r'''
- name: Create btree index if not exists test_idx concurrently covering columns id and name of table products
community.postgresql.postgresql_idx:
db: acme
table: products
columns: id,name
name: test_idx
- name: Create btree index test_idx concurrently with tablespace called ssd and storage parameter
community.postgresql.postgresql_idx:
db: acme
table: products
columns:
- id
- name
idxname: test_idx
tablespace: ssd
storage_params:
- fillfactor=90
- name: Create gist index test_gist_idx concurrently on column geo_data of table map
community.postgresql.postgresql_idx:
db: somedb
table: map
idxtype: gist
columns: geo_data
idxname: test_gist_idx
# Note: for the example below pg_trgm extension must be installed for gin_trgm_ops
- name: Create gin index gin0_idx not concurrently on column comment of table test
community.postgresql.postgresql_idx:
idxname: gin0_idx
table: test
columns: comment gin_trgm_ops
concurrent: false
idxtype: gin
- name: Drop btree test_idx concurrently
community.postgresql.postgresql_idx:
db: mydb
idxname: test_idx
state: absent
- name: Drop test_idx cascade
community.postgresql.postgresql_idx:
db: mydb
idxname: test_idx
state: absent
cascade: true
concurrent: false
- name: Create btree index test_idx concurrently on columns id,comment where column id > 1
community.postgresql.postgresql_idx:
db: mydb
table: test
columns: id,comment
idxname: test_idx
cond: id > 1
- name: Create unique btree index if not exists test_unique_idx on column name of table products
community.postgresql.postgresql_idx:
db: acme
table: products
columns: name
name: test_unique_idx
unique: true
concurrent: false
'''
RETURN = r'''
name:
description: Index name.
returned: always
type: str
sample: 'foo_idx'
state:
description: Index state.
returned: always
type: str
sample: 'present'
schema:
description: Schema where index exists.
returned: always
type: str
sample: 'public'
tablespace:
description: Tablespace where index exists.
returned: always
type: str
sample: 'ssd'
query:
description: Query that was tried to be executed.
returned: always
type: str
sample: 'CREATE INDEX CONCURRENTLY foo_idx ON test_table USING BTREE (id)'
storage_params:
description: Index storage parameters.
returned: always
type: list
sample: [ "fillfactor=90" ]
valid:
description: Index validity.
returned: always
type: bool
sample: true
'''
try:
from psycopg2.extras import DictCursor
except ImportError:
# psycopg2 is checked by connect_to_db()
# from ansible.module_utils.postgres
pass
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.postgresql.plugins.module_utils.database import check_input
from ansible_collections.community.postgresql.plugins.module_utils.postgres import (
connect_to_db,
exec_sql,
ensure_required_libs,
get_conn_params,
postgres_common_argument_spec,
)
VALID_IDX_TYPES = ('BTREE', 'HASH', 'GIST', 'SPGIST', 'GIN', 'BRIN')
# ===========================================
# PostgreSQL module specific support methods.
#
class Index(object):
    """Class for working with PostgreSQL indexes.

    On construction the index's existence and metadata are fetched from
    the database into ``self.info`` / ``self.exists``; ``create()`` and
    ``drop()`` then build and run the corresponding SQL.

    TODO:
    1. Add possibility to change ownership
    2. Add possibility to change tablespace
    3. Add list called executed_queries (executed_query should be left too)
    4. Use self.module instead of passing arguments to the methods whenever possible

    Args:
        module (AnsibleModule) -- object of AnsibleModule class
        cursor (cursor) -- cursor object of psycopg2 library
        schema (str) -- name of the index schema (falls back to 'public')
        name (str) -- name of the index

    Attrs:
        module (AnsibleModule) -- object of AnsibleModule class
        cursor (cursor) -- cursor object of psycopg2 library
        schema (str) -- name of the index schema
        name (str) -- name of the index
        exists (bool) -- flag the index exists in the DB or not
        info (dict) -- dict that contents information about the index
        executed_query (str) -- executed query
    """

    def __init__(self, module, cursor, schema, name):
        self.name = name
        # Default to the 'public' schema when none is given.
        if schema:
            self.schema = schema
        else:
            self.schema = 'public'
        self.module = module
        self.cursor = cursor
        # Baseline info for a non-existent index; overwritten by
        # __exists_in_db() when the index is found.
        self.info = {
            'name': self.name,
            'state': 'absent',
            'schema': '',
            'tblname': '',
            'tblspace': '',
            'valid': True,
            'storage_params': [],
        }
        self.exists = False
        self.__exists_in_db()
        self.executed_query = ''

    def get_info(self):
        """Refresh index info.

        Return self.info dict.
        """
        self.__exists_in_db()
        return self.info

    def __exists_in_db(self):
        """Check index existence, collect info, add it to self.info dict.

        Return True if the index exists, otherwise, return False.
        """
        # NOTE(review): the lookup filters on index name only, not on
        # self.schema — presumably index names are unique enough per
        # database here; confirm against callers.
        query = ("SELECT i.schemaname, i.tablename, i.tablespace, "
                 "pi.indisvalid, c.reloptions "
                 "FROM pg_catalog.pg_indexes AS i "
                 "JOIN pg_catalog.pg_class AS c "
                 "ON i.indexname = c.relname "
                 "JOIN pg_catalog.pg_index AS pi "
                 "ON c.oid = pi.indexrelid "
                 "WHERE i.indexname = %(name)s")

        res = exec_sql(self, query, query_params={'name': self.name}, add_to_executed=False)
        if res:
            self.exists = True
            self.info = dict(
                name=self.name,
                state='present',
                schema=res[0][0],
                tblname=res[0][1],
                tblspace=res[0][2] if res[0][2] else '',
                valid=res[0][3],
                storage_params=res[0][4] if res[0][4] else [],
            )
            return True

        else:
            self.exists = False
            return False

    def create(self, tblname, idxtype, columns, cond, tblspace,
               storage_params, concurrent=True, unique=False):
        """Create PostgreSQL index.

        Return True if success, otherwise, return False
        (False is also returned when the index already exists).

        Args:
            tblname (str) -- name of a table for the index
            idxtype (str) -- type of the index like BTREE, BRIN, etc
                (defaults to BTREE when None)
            columns (str) -- string of comma-separated columns that need to be covered by index
            cond (str) -- optional WHERE condition for a partial index
            tblspace (str) -- tablespace for storing the index
            storage_params (str) -- string of comma-separated storage parameters

        Kwargs:
            concurrent (bool) -- build index in concurrent mode, default True
            unique (bool) -- create a UNIQUE index, default False
        """
        if self.exists:
            return False

        if idxtype is None:
            idxtype = "BTREE"

        query = 'CREATE'

        if unique:
            query += ' UNIQUE'

        query += ' INDEX'

        if concurrent:
            query += ' CONCURRENTLY'

        query += ' "%s"' % self.name

        query += ' ON "%s"."%s" ' % (self.schema, tblname)

        query += 'USING %s (%s)' % (idxtype, columns)

        if storage_params:
            query += ' WITH (%s)' % storage_params

        if tblspace:
            query += ' TABLESPACE "%s"' % tblspace

        if cond:
            query += ' WHERE %s' % cond

        self.executed_query = query

        return exec_sql(self, query, return_bool=True, add_to_executed=False)

    def drop(self, cascade=False, concurrent=True):
        """Drop PostgreSQL index.

        Return True if success, otherwise, return False
        (False is also returned when the index does not exist).

        Kwargs:
            cascade (bool) -- automatically drop objects that depend on the index,
                default False
            concurrent (bool) -- drop index in concurrent mode, default True
        """
        if not self.exists:
            return False

        query = 'DROP INDEX'

        if concurrent:
            query += ' CONCURRENTLY'

        query += ' "%s"."%s"' % (self.schema, self.name)

        if cascade:
            query += ' CASCADE'

        self.executed_query = query

        return exec_sql(self, query, return_bool=True, add_to_executed=False)
# ===========================================
# Module execution.
#
def main():
    """Module entry point: validate parameters and create or drop the index."""
    argument_spec = postgres_common_argument_spec()
    argument_spec.update(
        idxname=dict(type='str', required=True, aliases=['name']),
        db=dict(type='str', aliases=['login_db']),
        state=dict(type='str', default='present', choices=['absent', 'present']),
        concurrent=dict(type='bool', default=True),
        unique=dict(type='bool', default=False),
        table=dict(type='str'),
        idxtype=dict(type='str', aliases=['type']),
        columns=dict(type='list', elements='str', aliases=['column']),
        cond=dict(type='str'),
        session_role=dict(type='str'),
        tablespace=dict(type='str'),
        storage_params=dict(type='list', elements='str'),
        cascade=dict(type='bool', default=False),
        schema=dict(type='str'),
        trust_input=dict(type='bool', default=True),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    idxname = module.params["idxname"]
    state = module.params["state"]
    concurrent = module.params["concurrent"]
    unique = module.params["unique"]
    table = module.params["table"]
    idxtype = module.params["idxtype"]
    columns = module.params["columns"]
    cond = module.params["cond"]
    tablespace = module.params["tablespace"]
    storage_params = module.params["storage_params"]
    cascade = module.params["cascade"]
    schema = module.params["schema"]
    session_role = module.params["session_role"]
    trust_input = module.params["trust_input"]

    if not trust_input:
        # Check input for potentially dangerous elements:
        check_input(module, idxname, session_role, schema, table, columns,
                    tablespace, storage_params, cond)

    # PostgreSQL does not allow DROP INDEX CONCURRENTLY ... CASCADE,
    # so reject the combination up front:
    if concurrent and cascade:
        module.fail_json(msg="Concurrent mode and cascade parameters are mutually exclusive")

    if unique and (idxtype and idxtype != 'btree'):
        module.fail_json(msg="Only btree currently supports unique indexes")

    # Cross-validate parameters against the requested state:
    if state == 'present':
        if not table:
            module.fail_json(msg="Table must be specified")
        if not columns:
            module.fail_json(msg="At least one column must be specified")
    else:
        if table or columns or cond or idxtype or tablespace:
            module.fail_json(msg="Index %s is going to be removed, so it does not "
                                 "make sense to pass a table name, columns, conditions, "
                                 "index type, or tablespace" % idxname)

    if cascade and state != 'absent':
        module.fail_json(msg="cascade parameter used only with state=absent")

    # Ensure psycopg2 libraries are available before connecting to DB:
    ensure_required_libs(module)
    conn_params = get_conn_params(module, module.params)
    # autocommit=True is required for CREATE/DROP INDEX CONCURRENTLY,
    # which cannot run inside a transaction block:
    db_connection, dummy = connect_to_db(module, conn_params, autocommit=True)
    cursor = db_connection.cursor(cursor_factory=DictCursor)

    # Set defaults:
    changed = False

    # Do job:
    index = Index(module, cursor, schema, idxname)
    kw = index.get_info()
    kw['query'] = ''

    #
    # check_mode start
    # In check mode only report whether a change would happen and exit:
    if module.check_mode:
        if state == 'present' and index.exists:
            kw['changed'] = False
            module.exit_json(**kw)

        elif state == 'present' and not index.exists:
            kw['changed'] = True
            module.exit_json(**kw)

        elif state == 'absent' and not index.exists:
            kw['changed'] = False
            module.exit_json(**kw)

        elif state == 'absent' and index.exists:
            kw['changed'] = True
            module.exit_json(**kw)
    # check_mode end
    #

    if state == "present":
        if idxtype and idxtype.upper() not in VALID_IDX_TYPES:
            module.fail_json(msg="Index type '%s' of %s is not in valid types" % (idxtype, idxname))

        # Flatten the lists into the comma-separated form SQL expects:
        columns = ','.join(columns)

        if storage_params:
            storage_params = ','.join(storage_params)

        changed = index.create(table, idxtype, columns, cond, tablespace, storage_params, concurrent, unique)

        if changed:
            # Refresh the facts so the returned info reflects the new index:
            kw = index.get_info()
            kw['state'] = 'present'
            kw['query'] = index.executed_query

    else:
        changed = index.drop(cascade, concurrent)

        if changed:
            kw['state'] = 'absent'
            kw['query'] = index.executed_query

    # A concurrently-built index that failed validation is left invalid;
    # roll back and warn instead of reporting success:
    if not kw['valid']:
        db_connection.rollback()
        module.warn("Index %s is invalid! ROLLBACK" % idxname)

    # With autocommit the commit is a no-op; only commit explicitly
    # when the non-concurrent path was requested:
    if not concurrent:
        db_connection.commit()

    kw['changed'] = changed
    db_connection.close()
    module.exit_json(**kw)


if __name__ == '__main__':
    main()

View File

@@ -0,0 +1,368 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2014, Jens Depuydt <http://www.jensd.be>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: postgresql_lang
short_description: Adds, removes or changes procedural languages with a PostgreSQL database
description:
- Adds, removes or changes procedural languages with a PostgreSQL database.
- This module allows you to add a language, remove a language or change the trust
relationship with a PostgreSQL database.
- The module can be used on the machine where executed or on a remote host.
- When removing a language from a database, it is possible that dependencies prevent
the database from being removed. In that case, you can specify I(cascade=true) to
automatically drop objects that depend on the language (such as functions in the
language).
- In case the language can't be deleted because it is required by the
database system, you can specify I(fail_on_drop=false) to ignore the error.
- Be careful when marking a language as trusted since this could be a potential
security breach. Untrusted languages allow only users with the PostgreSQL superuser
privilege to use this language to create new functions.
options:
lang:
description:
- Name of the procedural language to add, remove or change.
required: true
type: str
aliases:
- name
trust:
description:
- Make this language trusted for the selected db.
type: bool
default: 'false'
db:
description:
- Name of database to connect to and where the language will be added, removed or changed.
type: str
aliases:
- login_db
required: true
force_trust:
description:
- Marks the language as trusted, even if it's marked as untrusted in pg_pltemplate.
- Use with care!
type: bool
default: 'false'
fail_on_drop:
description:
- If C(true), fail when removing a language. Otherwise just log and continue.
- In some cases, it is not possible to remove a language (used by the db-system).
- When dependencies block the removal, consider using I(cascade).
type: bool
default: 'true'
cascade:
description:
- When dropping a language, also delete objects that depend on this language.
- Only used when I(state=absent).
type: bool
default: 'false'
session_role:
description:
- Switch to session_role after connecting.
- The specified I(session_role) must be a role that the current I(login_user) is a member of.
- Permissions checking for SQL commands is carried out as though the
I(session_role) were the one that had logged in originally.
type: str
state:
description:
- The state of the language for the selected database.
type: str
default: present
choices: [ absent, present ]
login_unix_socket:
description:
- Path to a Unix domain socket for local connections.
type: str
ssl_mode:
description:
- Determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the server.
- See U(https://www.postgresql.org/docs/current/static/libpq-ssl.html) for more information on the modes.
- Default of C(prefer) matches libpq default.
type: str
default: prefer
choices: [ allow, disable, prefer, require, verify-ca, verify-full ]
ca_cert:
description:
- Specifies the name of a file containing SSL certificate authority (CA) certificate(s).
- If the file exists, the server's certificate will be verified to be signed by one of these authorities.
type: str
aliases: [ ssl_rootcert ]
owner:
description:
- Set an owner for the language.
- Ignored when I(state=absent).
type: str
version_added: '0.2.0'
trust_input:
description:
- If C(false), check whether values of parameters I(lang), I(session_role),
I(owner) are potentially dangerous.
- It makes sense to use C(false) only when SQL injections via the parameters are possible.
type: bool
default: true
version_added: '0.2.0'
seealso:
- name: PostgreSQL languages
description: General information about PostgreSQL languages.
link: https://www.postgresql.org/docs/current/xplang.html
- name: CREATE LANGUAGE reference
description: Complete reference of the CREATE LANGUAGE command documentation.
link: https://www.postgresql.org/docs/current/sql-createlanguage.html
- name: ALTER LANGUAGE reference
description: Complete reference of the ALTER LANGUAGE command documentation.
link: https://www.postgresql.org/docs/current/sql-alterlanguage.html
- name: DROP LANGUAGE reference
description: Complete reference of the DROP LANGUAGE command documentation.
link: https://www.postgresql.org/docs/current/sql-droplanguage.html
author:
- Jens Depuydt (@jensdepuydt)
- Thomas O'Donnell (@andytom)
extends_documentation_fragment:
- community.postgresql.postgres
notes:
- Supports C(check_mode).
'''
EXAMPLES = r'''
- name: Add language pltclu to database testdb if it doesn't exist
community.postgresql.postgresql_lang: db=testdb lang=pltclu state=present
# Add language pltclu to database testdb if it doesn't exist and mark it as trusted.
# Marks the language as trusted if it exists but isn't trusted yet.
# force_trust makes sure that the language will be marked as trusted
- name: Add language pltclu to database testdb if it doesn't exist and mark it as trusted
community.postgresql.postgresql_lang:
db: testdb
lang: pltclu
state: present
trust: true
force_trust: true
- name: Remove language pltclu from database testdb
community.postgresql.postgresql_lang:
db: testdb
lang: pltclu
state: absent
- name: Remove language pltclu from database testdb and remove all dependencies
community.postgresql.postgresql_lang:
db: testdb
lang: pltclu
state: absent
cascade: true
- name: Remove language c from database testdb but ignore errors if something prevents the removal
community.postgresql.postgresql_lang:
db: testdb
lang: pltclu
state: absent
fail_on_drop: false
- name: In testdb change owner of mylang to alice
community.postgresql.postgresql_lang:
db: testdb
lang: mylang
owner: alice
'''
RETURN = r'''
queries:
description: List of executed queries.
returned: always
type: list
sample: ['CREATE LANGUAGE "acme"']
'''
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.postgresql.plugins.module_utils.database import check_input
from ansible_collections.community.postgresql.plugins.module_utils.postgres import (
connect_to_db,
ensure_required_libs,
get_conn_params,
postgres_common_argument_spec,
)
executed_queries = []


def lang_exists(cursor, lang):
    """Return True if the procedural language is installed in the database."""
    params = {'lang': lang}
    cursor.execute("SELECT lanname FROM pg_language WHERE lanname = %(lang)s", params)
    # rowcount reflects the number of rows the SELECT matched:
    return cursor.rowcount > 0
def lang_istrusted(cursor, lang):
    """Return the 'lanpltrusted' flag of the language in the connected db."""
    params = {'lang': lang}
    cursor.execute("SELECT lanpltrusted FROM pg_language WHERE lanname = %(lang)s", params)
    row = cursor.fetchone()
    return row[0]
def lang_altertrust(cursor, lang, trust):
    """Set the trust flag of a language directly in pg_language.

    Records the interpolated statement in the module-level query log
    and always reports a change.
    """
    params = {'trust': trust, 'lang': lang}
    query = "UPDATE pg_language SET lanpltrusted = %(trust)s WHERE lanname = %(lang)s"
    cursor.execute(query, params)
    executed_queries.append(cursor.mogrify(query, params))
    return True
def lang_add(cursor, lang, trust):
    """Install a (possibly trusted) procedural language in the database."""
    keyword = 'CREATE TRUSTED LANGUAGE' if trust else 'CREATE LANGUAGE'
    query = '%s "%s"' % (keyword, lang)
    # Log first, then execute, matching the module's query-log convention:
    executed_queries.append(query)
    cursor.execute(query)
    return True
def lang_drop(cursor, lang, cascade):
    """Drop a procedural language from the database.

    Runs the DROP inside a savepoint so a failure (e.g. dependent
    objects without cascade) can be rolled back without aborting the
    surrounding transaction. Return True on success, False on failure.
    """
    cursor.execute("SAVEPOINT ansible_pgsql_lang_drop")
    try:
        query = 'DROP LANGUAGE "%s"' % lang
        if cascade:
            query += ' CASCADE'
        executed_queries.append(query)
        cursor.execute(query)
    except Exception:
        # Undo only the failed DROP, keep the transaction usable:
        cursor.execute("ROLLBACK TO SAVEPOINT ansible_pgsql_lang_drop")
        cursor.execute("RELEASE SAVEPOINT ansible_pgsql_lang_drop")
        return False
    cursor.execute("RELEASE SAVEPOINT ansible_pgsql_lang_drop")
    return True
def get_lang_owner(cursor, lang):
    """Return the name of the role that owns the language.

    Args:
        cursor (cursor): psycopg2 cursor object.
        lang (str): language name.
    """
    cursor.execute(
        "SELECT r.rolname FROM pg_language l "
        "JOIN pg_roles r ON l.lanowner = r.oid "
        "WHERE l.lanname = %(lang)s",
        {'lang': lang},
    )
    return cursor.fetchone()[0]
def set_lang_owner(cursor, lang, owner):
    """Make *owner* the owner of the language and log the statement.

    Args:
        cursor (cursor): psycopg2 cursor object.
        lang (str): language name.
        owner (str): name of the new owner.
    """
    query = 'ALTER LANGUAGE "%s" OWNER TO "%s"' % (lang, owner)
    executed_queries.append(query)
    cursor.execute(query)
    return True
def main():
    """Module entry point: add, remove, or change a procedural language.

    Connects to the target database, reconciles the language's presence,
    trust flag, and owner with the requested parameters, and exits with
    the change status and the list of executed queries.
    """
    argument_spec = postgres_common_argument_spec()
    argument_spec.update(
        db=dict(type="str", required=True, aliases=["login_db"]),
        lang=dict(type="str", required=True, aliases=["name"]),
        state=dict(type="str", default="present", choices=["absent", "present"]),
        # Defaults are native booleans; the previous string values
        # ("false"/"true") relied on Ansible's bool coercion and coerced
        # to the same values, but native booleans are unambiguous.
        trust=dict(type="bool", default=False),
        force_trust=dict(type="bool", default=False),
        cascade=dict(type="bool", default=False),
        fail_on_drop=dict(type="bool", default=True),
        session_role=dict(type="str"),
        owner=dict(type="str"),
        trust_input=dict(type="bool", default=True)
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    db = module.params["db"]
    lang = module.params["lang"]
    state = module.params["state"]
    trust = module.params["trust"]
    force_trust = module.params["force_trust"]
    cascade = module.params["cascade"]
    fail_on_drop = module.params["fail_on_drop"]
    owner = module.params["owner"]
    session_role = module.params["session_role"]
    trust_input = module.params["trust_input"]

    if not trust_input:
        # Check input for potentially dangerous elements:
        check_input(module, lang, session_role, owner)

    # Ensure psycopg2 libraries are available before connecting to DB:
    ensure_required_libs(module)
    conn_params = get_conn_params(module, module.params)
    # autocommit=False: all changes are committed (or rolled back in
    # check mode) in one transaction at the end.
    db_connection, dummy = connect_to_db(module, conn_params, autocommit=False)
    cursor = db_connection.cursor()

    changed = False
    kw = {'db': db, 'lang': lang, 'trust': trust}

    if state == "present":
        if lang_exists(cursor, lang):
            # Language present: only the trust flag may need flipping.
            lang_trusted = lang_istrusted(cursor, lang)
            if (lang_trusted and not trust) or (not lang_trusted and trust):
                if module.check_mode:
                    changed = True
                else:
                    changed = lang_altertrust(cursor, lang, trust)
        else:
            if module.check_mode:
                changed = True
            else:
                changed = lang_add(cursor, lang, trust)
                # force_trust overrides the trust flag recorded in the
                # template the language was created from:
                if force_trust:
                    changed = lang_altertrust(cursor, lang, trust)

    else:
        if lang_exists(cursor, lang):
            if module.check_mode:
                changed = True
                kw['lang_dropped'] = True
            else:
                changed = lang_drop(cursor, lang, cascade)
                if fail_on_drop and not changed:
                    msg = ("unable to drop language, use cascade "
                           "to delete dependencies or fail_on_drop=false to ignore")
                    module.fail_json(msg=msg)
                kw['lang_dropped'] = changed

    # Ownership is reconciled only for an existing language:
    if owner and state == 'present':
        if lang_exists(cursor, lang):
            if owner != get_lang_owner(cursor, lang):
                changed = set_lang_owner(cursor, lang, owner)

    if changed:
        if module.check_mode:
            db_connection.rollback()
        else:
            db_connection.commit()

    kw['changed'] = changed
    kw['queries'] = executed_queries
    db_connection.close()
    module.exit_json(**kw)


if __name__ == '__main__':
    main()

View File

@@ -0,0 +1,261 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = r'''
---
module: postgresql_membership
short_description: Add or remove PostgreSQL roles from groups
description:
- Adds or removes PostgreSQL roles from groups (other roles).
- Users are roles with login privilege.
- Groups are PostgreSQL roles usually without LOGIN privilege.
- "Common use case:"
- 1) add a new group (groups) by M(community.postgresql.postgresql_user) module with I(role_attr_flags=NOLOGIN)
- 2) grant them desired privileges by M(community.postgresql.postgresql_privs) module
- 3) add desired PostgreSQL users to the new group (groups) by this module
options:
groups:
description:
- The list of groups (roles) that need to be granted to or revoked from I(target_roles).
required: true
type: list
elements: str
aliases:
- group
- source_role
- source_roles
target_roles:
description:
- The list of target roles (groups will be granted to them).
required: true
type: list
elements: str
aliases:
- target_role
- users
- user
fail_on_role:
description:
- If C(true), fail when group or target_role doesn't exist. If C(false), just warn and continue.
default: true
type: bool
state:
description:
- Membership state.
- I(state=present) implies the I(groups) must be granted to I(target_roles).
- I(state=absent) implies the I(groups) must be revoked from I(target_roles).
- I(state=exact) implies that I(target_roles) will be members of only the I(groups)
(available since community.postgresql 2.2.0).
Any other groups will be revoked from I(target_roles).
type: str
default: present
choices: [ absent, exact, present ]
db:
description:
- Name of database to connect to.
type: str
aliases:
- login_db
session_role:
description:
- Switch to session_role after connecting.
The specified session_role must be a role that the current login_user is a member of.
- Permissions checking for SQL commands is carried out as though
the session_role were the one that had logged in originally.
type: str
trust_input:
description:
- If C(false), check whether values of parameters I(groups),
I(target_roles), I(session_role) are potentially dangerous.
- It makes sense to use C(false) only when SQL injections via the parameters are possible.
type: bool
default: true
version_added: '0.2.0'
seealso:
- module: community.postgresql.postgresql_user
- module: community.postgresql.postgresql_privs
- module: community.postgresql.postgresql_owner
- name: PostgreSQL role membership reference
description: Complete reference of the PostgreSQL role membership documentation.
link: https://www.postgresql.org/docs/current/role-membership.html
- name: PostgreSQL role attributes reference
description: Complete reference of the PostgreSQL role attributes documentation.
link: https://www.postgresql.org/docs/current/role-attributes.html
author:
- Andrew Klychkov (@Andersson007)
extends_documentation_fragment:
- community.postgresql.postgres
notes:
- Supports C(check_mode).
'''
EXAMPLES = r'''
- name: Grant role read_only to alice and bob
community.postgresql.postgresql_membership:
group: read_only
target_roles:
- alice
- bob
state: present
# you can also use target_roles: alice,bob,etc to pass the role list
- name: Revoke role read_only and exec_func from bob. Ignore if roles don't exist
community.postgresql.postgresql_membership:
groups:
- read_only
- exec_func
target_role: bob
fail_on_role: false
state: absent
- name: >
Make sure alice and bob are members only of marketing and sales.
If they are members of other groups, they will be removed from those groups
community.postgresql.postgresql_membership:
group:
- marketing
- sales
target_roles:
- alice
- bob
state: exact
- name: Make sure alice and bob do not belong to any groups
community.postgresql.postgresql_membership:
group: []
target_roles:
- alice
- bob
state: exact
'''
RETURN = r'''
queries:
description: List of executed queries.
returned: always
type: str
sample: [ "GRANT \"user_ro\" TO \"alice\"" ]
granted:
description: Dict of granted groups and roles.
returned: if I(state=present)
type: dict
sample: { "ro_group": [ "alice", "bob" ] }
revoked:
description: Dict of revoked groups and roles.
returned: if I(state=absent)
type: dict
sample: { "ro_group": [ "alice", "bob" ] }
state:
description: Membership state that tried to be set.
returned: always
type: str
sample: "present"
'''
try:
from psycopg2.extras import DictCursor
except ImportError:
# psycopg2 is checked by connect_to_db()
# from ansible.module_utils.postgres
pass
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.postgresql.plugins.module_utils.database import check_input
from ansible_collections.community.postgresql.plugins.module_utils.postgres import (
connect_to_db,
ensure_required_libs,
get_conn_params,
PgMembership,
postgres_common_argument_spec,
)
# ===========================================
# Module execution.
#
def main():
    """Module entry point: grant, revoke, or exactly match group memberships."""
    argument_spec = postgres_common_argument_spec()
    argument_spec.update(
        groups=dict(type='list', elements='str', required=True, aliases=['group', 'source_role', 'source_roles']),
        target_roles=dict(type='list', elements='str', required=True, aliases=['target_role', 'user', 'users']),
        fail_on_role=dict(type='bool', default=True),
        state=dict(type='str', default='present', choices=['absent', 'exact', 'present']),
        db=dict(type='str', aliases=['login_db']),
        session_role=dict(type='str'),
        trust_input=dict(type='bool', default=True),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    groups = module.params['groups']
    target_roles = module.params['target_roles']
    fail_on_role = module.params['fail_on_role']
    state = module.params['state']
    session_role = module.params['session_role']
    trust_input = module.params['trust_input']

    if not trust_input:
        # Check input for potentially dangerous elements:
        check_input(module, groups, target_roles, session_role)

    # Ensure psycopg2 libraries are available before connecting to DB:
    ensure_required_libs(module)
    conn_params = get_conn_params(module, module.params, warn_db_default=False)
    # autocommit=False so check mode can roll the whole run back below:
    db_connection, dummy = connect_to_db(module, conn_params, autocommit=False)
    cursor = db_connection.cursor(cursor_factory=DictCursor)

    ##############
    # Create the object and do main job:
    pg_membership = PgMembership(module, cursor, groups, target_roles, fail_on_role)

    if state == 'present':
        pg_membership.grant()

    elif state == 'exact':
        pg_membership.match()

    elif state == 'absent':
        pg_membership.revoke()

    # Rollback if it's possible and check_mode:
    if module.check_mode:
        db_connection.rollback()
    else:
        db_connection.commit()

    cursor.close()
    db_connection.close()

    # Make return values:
    return_dict = dict(
        changed=pg_membership.changed,
        state=state,
        groups=pg_membership.groups,
        target_roles=pg_membership.target_roles,
        queries=pg_membership.executed_queries,
    )

    # 'exact' both grants and revokes, so it reports both dicts:
    if state == 'present':
        return_dict['granted'] = pg_membership.granted
    elif state == 'absent':
        return_dict['revoked'] = pg_membership.revoked
    elif state == 'exact':
        return_dict['granted'] = pg_membership.granted
        return_dict['revoked'] = pg_membership.revoked

    module.exit_json(**return_dict)


if __name__ == '__main__':
    main()

View File

@@ -0,0 +1,459 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = r'''
---
module: postgresql_owner
short_description: Change an owner of PostgreSQL database object
description:
- Change an owner of PostgreSQL database object.
- Also allows to reassign the ownership of database objects owned by a database role to another role.
options:
new_owner:
description:
- Role (user/group) to set as an I(obj_name) owner.
type: str
required: true
obj_name:
description:
- Name of a database object to change ownership.
- Mutually exclusive with I(reassign_owned_by).
type: str
obj_type:
description:
- Type of a database object.
- Mutually exclusive with I(reassign_owned_by).
type: str
choices: [ database, function, matview, sequence, schema, table, tablespace, view ]
aliases:
- type
reassign_owned_by:
description:
- Caution - the ownership of all the objects within the specified I(db),
owned by this role(s) will be reassigned to I(new_owner).
- REASSIGN OWNED is often used to prepare for the removal of one or more roles.
- REASSIGN OWNED does not affect objects within other databases.
- Execute this command in each database that contains objects owned by a role that is to be removed.
- If role(s) exists, always returns changed True.
- Cannot reassign ownership of objects that are required by the database system.
- Mutually exclusive with C(obj_type).
type: list
elements: str
fail_on_role:
description:
- If C(true), fail when I(reassign_owned_by) role does not exist.
Otherwise just warn and continue.
- Mutually exclusive with I(obj_name) and I(obj_type).
default: true
type: bool
db:
description:
- Name of database to connect to.
type: str
aliases:
- login_db
session_role:
description:
- Switch to session_role after connecting.
The specified session_role must be a role that the current login_user is a member of.
- Permissions checking for SQL commands is carried out as though
the session_role were the one that had logged in originally.
type: str
trust_input:
description:
- If C(false), check whether values of parameters I(new_owner), I(obj_name),
I(reassign_owned_by), I(session_role) are potentially dangerous.
- It makes sense to use C(false) only when SQL injections via the parameters are possible.
type: bool
default: true
version_added: '0.2.0'
seealso:
- module: community.postgresql.postgresql_user
- module: community.postgresql.postgresql_privs
- module: community.postgresql.postgresql_membership
- name: PostgreSQL REASSIGN OWNED command reference
description: Complete reference of the PostgreSQL REASSIGN OWNED command documentation.
link: https://www.postgresql.org/docs/current/sql-reassign-owned.html
author:
- Andrew Klychkov (@Andersson007)
extends_documentation_fragment:
- community.postgresql.postgres
notes:
- Supports C(check_mode).
'''
EXAMPLES = r'''
# Set owner as alice for function myfunc in database bar by ansible ad-hoc command:
# ansible -m postgresql_owner -a "db=bar new_owner=alice obj_name=myfunc obj_type=function"
- name: The same as above by playbook
community.postgresql.postgresql_owner:
db: bar
new_owner: alice
obj_name: myfunc
obj_type: function
- name: Set owner as bob for table acme in database bar
community.postgresql.postgresql_owner:
db: bar
new_owner: bob
obj_name: acme
obj_type: table
- name: Set owner as alice for view test_view in database bar
community.postgresql.postgresql_owner:
db: bar
new_owner: alice
obj_name: test_view
obj_type: view
- name: Set owner as bob for tablespace ssd in database foo
community.postgresql.postgresql_owner:
db: foo
new_owner: bob
obj_name: ssd
obj_type: tablespace
- name: Reassign all databases owned by bob to alice and all objects in database bar owned by bob to alice
community.postgresql.postgresql_owner:
db: bar
new_owner: alice
reassign_owned_by: bob
- name: Reassign all databases owned by bob or bill to alice and all objects in database bar owned by bob or bill to alice
community.postgresql.postgresql_owner:
db: bar
new_owner: alice
reassign_owned_by:
- bob
- bill
'''
RETURN = r'''
queries:
description: List of executed queries.
returned: always
type: str
sample: [ 'REASSIGN OWNED BY "bob" TO "alice"' ]
'''
try:
from psycopg2.extras import DictCursor
except ImportError:
# psycopg2 is checked by connect_to_db()
# from ansible.module_utils.postgres
pass
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.postgresql.plugins.module_utils.database import (
check_input,
pg_quote_identifier,
)
from ansible_collections.community.postgresql.plugins.module_utils.postgres import (
connect_to_db,
exec_sql,
ensure_required_libs,
get_conn_params,
postgres_common_argument_spec,
)
class PgOwnership(object):
    """Class for changing ownership of PostgreSQL objects.

    Arguments:
        module (AnsibleModule): Object of Ansible module class.
        cursor (psycopg2.connect.cursor): Cursor object for interaction with the database.
        role (str): Role name to set as a new owner of objects.

    Important:
        If you want to add handling of a new type of database objects:
        1. Add a specific method for this like self.__set_db_owner(), etc.
        2. Add a condition with a check of ownership for new type objects to self.__is_owner()
        3. Add a condition with invocation of the specific method to self.set_owner()
        4. Add the information to the module documentation
        That's all.
    """

    def __init__(self, module, cursor, role):
        self.module = module
        self.cursor = cursor
        # Fails the whole module early when the new owner role is missing:
        self.check_role_exists(role)
        self.role = role
        self.changed = False
        self.executed_queries = []
        self.obj_name = ''
        self.obj_type = ''

    def check_role_exists(self, role, fail_on_role=True):
        """Check the role exists or not.

        Arguments:
            role (str): Role name.
            fail_on_role (bool): If True, fail when the role does not exist.
                Otherwise just warn and continue.
        """
        if not self.__role_exists(role):
            if fail_on_role:
                self.module.fail_json(msg="Role '%s' does not exist" % role)
            else:
                self.module.warn("Role '%s' does not exist, pass" % role)

            return False

        else:
            return True

    def reassign(self, old_owners, fail_on_role):
        """Implements REASSIGN OWNED BY command.

        If success, set self.changed as True.

        Arguments:
            old_owners (list): The ownership of all the objects within
                the current database, and of all shared objects (databases, tablespaces),
                owned by these roles will be reassigned to self.role.
            fail_on_role (bool): If True, fail when a role from old_owners does not exist.
                Otherwise just warn and continue.
        """
        roles = []
        for r in old_owners:
            if self.check_role_exists(r, fail_on_role):
                # Quote each existing role for the SQL statement:
                roles.append('"%s"' % r)

        # Roles do not exist, nothing to do, exit:
        if not roles:
            return False

        old_owners = ','.join(roles)

        query = ['REASSIGN OWNED BY']
        query.append(old_owners)
        query.append('TO "%s"' % self.role)
        query = ' '.join(query)

        self.changed = exec_sql(self, query, return_bool=True)

    def set_owner(self, obj_type, obj_name):
        """Change owner of a database object.

        Arguments:
            obj_type (str): Type of object (like database, table, view, etc.).
            obj_name (str): Object name.
        """
        self.obj_name = obj_name
        self.obj_type = obj_type

        # if a new_owner is the object owner now,
        # nothing to do:
        if self.__is_owner():
            return False

        # Dispatch to the type-specific ALTER ... OWNER TO method:
        if obj_type == 'database':
            self.__set_db_owner()

        elif obj_type == 'function':
            self.__set_func_owner()

        elif obj_type == 'sequence':
            self.__set_seq_owner()

        elif obj_type == 'schema':
            self.__set_schema_owner()

        elif obj_type == 'table':
            self.__set_table_owner()

        elif obj_type == 'tablespace':
            self.__set_tablespace_owner()

        elif obj_type == 'view':
            self.__set_view_owner()

        elif obj_type == 'matview':
            self.__set_mat_view_owner()

    def __is_owner(self):
        """Return True if self.role is the current object owner."""
        # One ownership query per supported object type; all take the
        # same parameters dict built below.
        if self.obj_type == 'table':
            query = ("SELECT 1 FROM pg_tables "
                     "WHERE tablename = %(obj_name)s "
                     "AND tableowner = %(role)s")

        elif self.obj_type == 'database':
            query = ("SELECT 1 FROM pg_database AS d "
                     "JOIN pg_roles AS r ON d.datdba = r.oid "
                     "WHERE d.datname = %(obj_name)s "
                     "AND r.rolname = %(role)s")

        elif self.obj_type == 'function':
            query = ("SELECT 1 FROM pg_proc AS f "
                     "JOIN pg_roles AS r ON f.proowner = r.oid "
                     "WHERE f.proname = %(obj_name)s "
                     "AND r.rolname = %(role)s")

        elif self.obj_type == 'sequence':
            query = ("SELECT 1 FROM pg_class AS c "
                     "JOIN pg_roles AS r ON c.relowner = r.oid "
                     "WHERE c.relkind = 'S' AND c.relname = %(obj_name)s "
                     "AND r.rolname = %(role)s")

        elif self.obj_type == 'schema':
            query = ("SELECT 1 FROM information_schema.schemata "
                     "WHERE schema_name = %(obj_name)s "
                     "AND schema_owner = %(role)s")

        elif self.obj_type == 'tablespace':
            query = ("SELECT 1 FROM pg_tablespace AS t "
                     "JOIN pg_roles AS r ON t.spcowner = r.oid "
                     "WHERE t.spcname = %(obj_name)s "
                     "AND r.rolname = %(role)s")

        elif self.obj_type == 'view':
            query = ("SELECT 1 FROM pg_views "
                     "WHERE viewname = %(obj_name)s "
                     "AND viewowner = %(role)s")

        elif self.obj_type == 'matview':
            query = ("SELECT 1 FROM pg_matviews "
                     "WHERE matviewname = %(obj_name)s "
                     "AND matviewowner = %(role)s")

        query_params = {'obj_name': self.obj_name, 'role': self.role}
        return exec_sql(self, query, query_params, add_to_executed=False)

    def __set_db_owner(self):
        """Set the database owner."""
        query = 'ALTER DATABASE "%s" OWNER TO "%s"' % (self.obj_name, self.role)
        self.changed = exec_sql(self, query, return_bool=True)

    def __set_func_owner(self):
        """Set the function owner."""
        # NOTE(review): obj_name is interpolated unquoted here — presumably
        # to allow a full signature like func(int); confirm against callers.
        query = 'ALTER FUNCTION %s OWNER TO "%s"' % (self.obj_name, self.role)
        self.changed = exec_sql(self, query, return_bool=True)

    def __set_seq_owner(self):
        """Set the sequence owner."""
        query = 'ALTER SEQUENCE %s OWNER TO "%s"' % (pg_quote_identifier(self.obj_name, 'table'),
                                                     self.role)
        self.changed = exec_sql(self, query, return_bool=True)

    def __set_schema_owner(self):
        """Set the schema owner."""
        query = 'ALTER SCHEMA %s OWNER TO "%s"' % (pg_quote_identifier(self.obj_name, 'schema'),
                                                   self.role)
        self.changed = exec_sql(self, query, return_bool=True)

    def __set_table_owner(self):
        """Set the table owner."""
        query = 'ALTER TABLE %s OWNER TO "%s"' % (pg_quote_identifier(self.obj_name, 'table'),
                                                  self.role)
        self.changed = exec_sql(self, query, return_bool=True)

    def __set_tablespace_owner(self):
        """Set the tablespace owner."""
        query = 'ALTER TABLESPACE "%s" OWNER TO "%s"' % (self.obj_name, self.role)
        self.changed = exec_sql(self, query, return_bool=True)

    def __set_view_owner(self):
        """Set the view owner."""
        query = 'ALTER VIEW %s OWNER TO "%s"' % (pg_quote_identifier(self.obj_name, 'table'),
                                                 self.role)
        self.changed = exec_sql(self, query, return_bool=True)

    def __set_mat_view_owner(self):
        """Set the materialized view owner."""
        query = 'ALTER MATERIALIZED VIEW %s OWNER TO "%s"' % (pg_quote_identifier(self.obj_name, 'table'),
                                                              self.role)
        self.changed = exec_sql(self, query, return_bool=True)

    def __role_exists(self, role):
        """Return True if role exists, otherwise return False."""
        query_params = {'role': role}
        query = "SELECT 1 FROM pg_roles WHERE rolname = %(role)s"
        return exec_sql(self, query, query_params, add_to_executed=False)
# ===========================================
# Module execution.
#
def main():
    """Module entry point: change the owner of one object or reassign ownership in bulk."""
    argument_spec = postgres_common_argument_spec()
    argument_spec.update(
        new_owner=dict(type='str', required=True),
        obj_name=dict(type='str'),
        obj_type=dict(type='str', aliases=['type'], choices=[
            'database', 'function', 'matview', 'sequence', 'schema', 'table', 'tablespace', 'view']),
        reassign_owned_by=dict(type='list', elements='str'),
        fail_on_role=dict(type='bool', default=True),
        db=dict(type='str', aliases=['login_db']),
        session_role=dict(type='str'),
        trust_input=dict(type='bool', default=True),
    )
    # obj_name/obj_type (change a single object) and reassign_owned_by/fail_on_role
    # (bulk reassignment) are two distinct modes that cannot be combined.
    module = AnsibleModule(
        argument_spec=argument_spec,
        mutually_exclusive=[
            ['obj_name', 'reassign_owned_by'],
            ['obj_type', 'reassign_owned_by'],
            ['obj_name', 'fail_on_role'],
            ['obj_type', 'fail_on_role'],
        ],
        supports_check_mode=True,
    )
    new_owner = module.params['new_owner']
    obj_name = module.params['obj_name']
    obj_type = module.params['obj_type']
    reassign_owned_by = module.params['reassign_owned_by']
    fail_on_role = module.params['fail_on_role']
    session_role = module.params['session_role']
    trust_input = module.params['trust_input']
    if not trust_input:
        # Check input for potentially dangerous elements:
        check_input(module, new_owner, obj_name, reassign_owned_by, session_role)
    # Ensure psycopg2 libraries are available before connecting to DB:
    ensure_required_libs(module)
    conn_params = get_conn_params(module, module.params)
    # autocommit=False so the transaction can be rolled back in check mode below.
    db_connection, dummy = connect_to_db(module, conn_params, autocommit=False)
    cursor = db_connection.cursor(cursor_factory=DictCursor)
    ##############
    # Create the object and do main job:
    pg_ownership = PgOwnership(module, cursor, new_owner)
    # if we want to change ownership:
    if obj_name:
        pg_ownership.set_owner(obj_type, obj_name)
    # if we want to reassign objects owned by roles:
    elif reassign_owned_by:
        pg_ownership.reassign(reassign_owned_by, fail_on_role)
    # Rollback if it's possible and check_mode:
    if module.check_mode:
        db_connection.rollback()
    else:
        db_connection.commit()
    cursor.close()
    db_connection.close()
    module.exit_json(
        changed=pg_ownership.changed,
        queries=pg_ownership.executed_queries,
    )
if __name__ == '__main__':
    main()

View File

@@ -0,0 +1,901 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Sebastiaan Mannem (@sebasmannem) <sebastiaan.mannem@enterprisedb.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
'''
This module is used to manage postgres pg_hba files with Ansible.
'''
__metaclass__ = type
DOCUMENTATION = r'''
---
module: postgresql_pg_hba
short_description: Add, remove or modify a rule in a pg_hba file
description:
- The fundamental function of the module is to create, or delete lines in pg_hba files.
- The lines in the file should be in a typical pg_hba form and lines should be unique per key (type, databases, users, source).
If they are not unique and the SID is 'the one to change', only one for I(state=present) or
none for I(state=absent) of the SID's will remain.
extends_documentation_fragment: files
options:
address:
description:
- The source address/net where the connections could come from.
- Will not be used for entries of I(type)=C(local).
- You can also use keywords C(all), C(samehost), and C(samenet).
default: samehost
type: str
aliases: [ source, src ]
backup:
description:
- If set, create a backup of the C(pg_hba) file before it is modified.
The location of the backup is returned in the (backup) variable by this module.
default: false
type: bool
backup_file:
description:
- Write backup to a specific backupfile rather than a temp file.
type: str
create:
description:
- Create an C(pg_hba) file if none exists.
- When set to false, an error is raised when the C(pg_hba) file doesn't exist.
default: false
type: bool
contype:
description:
- Type of the rule. If not set, C(postgresql_pg_hba) will only return contents.
type: str
choices: [ local, host, hostnossl, hostssl, hostgssenc, hostnogssenc ]
comment:
description:
- A comment that will be placed in the same line behind the rule. See also the I(keep_comments_at_rules) parameter.
type: str
version_added: '1.5.0'
databases:
description:
- Databases this line applies to.
default: all
type: str
dest:
description:
- Path to C(pg_hba) file to modify.
type: path
required: true
method:
description:
- Authentication method to be used.
type: str
choices: [ cert, gss, ident, krb5, ldap, md5, pam, password, peer, radius, reject, scram-sha-256 , sspi, trust ]
default: md5
netmask:
description:
- The netmask of the source address.
type: str
options:
description:
- Additional options for the authentication I(method).
type: str
order:
description:
- The entries will be written out in a specific order.
With this option you can control by which field they are ordered first, second and last.
s=source, d=databases, u=users.
This option is deprecated since 2.9 and will be removed in community.postgresql 3.0.0.
Sortorder is now hardcoded to sdu.
type: str
default: sdu
choices: [ sdu, sud, dsu, dus, usd, uds ]
overwrite:
description:
- Remove all existing rules before adding rules. (Like I(state=absent) for all pre-existing rules.)
type: bool
default: false
keep_comments_at_rules:
description:
- If C(true), comments that stand together with a rule in one line are kept behind that line.
- If C(false), such comments are moved to the beginning of the file, like all other comments.
type: bool
default: false
version_added: '1.5.0'
rules:
description:
- A list of objects, specifying rules for the pg_hba.conf. Use this to manage multiple rules at once.
- "Each object can have the following keys (the 'rule-specific arguments'), which are treated the same as if they were arguments of this module:"
- C(address), C(comment), C(contype), C(databases), C(method), C(netmask), C(options), C(state), C(users)
- See also C(rules_behavior).
type: list
elements: dict
rules_behavior:
description:
- "Configure how the I(rules) argument works together with the rule-specific arguments outside the I(rules) argument."
- See I(rules) for the complete list of rule-specific arguments.
- When set to C(conflict), fail if I(rules) and, for example, I(address) are set.
- If C(combine), the normal rule-specific arguments are not defining a rule, but are used as defaults for the arguments in the I(rules) argument.
- Is used only when I(rules) is specified, ignored otherwise.
type: str
choices: [ conflict, combine ]
default: conflict
state:
description:
- The lines will be added/modified when C(state=present) and removed when C(state=absent).
type: str
default: present
choices: [ absent, present ]
users:
description:
- Users this line applies to.
type: str
default: all
notes:
- The default authentication assumes that on the host, you are either logging in as or
sudo'ing to an account with appropriate permissions to read and modify the file.
- This module also returns the pg_hba info. You can use this module to only retrieve it by only specifying I(dest).
The info can be found in the returned data under key pg_hba, being a list, containing a dict per rule.
- This module will sort resulting C(pg_hba) files if a rule change is required.
  This could give unexpected results with manually created hba files, if they were improperly sorted.
For example a rule was created for a net first and for a ip in that net range next.
In that situation, the 'ip specific rule' will never hit, it is in the C(pg_hba) file obsolete.
After the C(pg_hba) file is rewritten by the M(community.postgresql.postgresql_pg_hba) module, the ip specific rule will be sorted above the range rule.
And then it will hit, which will give unexpected results.
- With the 'order' parameter you can control which field is used to sort first, next and last.
- The module supports a check mode and a diff mode.
seealso:
- name: PostgreSQL pg_hba.conf file reference
description: Complete reference of the PostgreSQL pg_hba.conf file documentation.
link: https://www.postgresql.org/docs/current/auth-pg-hba-conf.html
requirements:
- ipaddress
author:
- Sebastiaan Mannem (@sebasmannem)
- Felix Hamme (@betanummeric)
'''
EXAMPLES = '''
- name: Grant users joe and simon access to databases sales and logistics from ipv6 localhost ::1/128 using peer authentication
community.postgresql.postgresql_pg_hba:
dest: /var/lib/postgres/data/pg_hba.conf
contype: host
users: joe,simon
source: ::1
databases: sales,logistics
method: peer
create: true
- name: Grant user replication from network 192.168.0.100/24 access for replication with client cert authentication
community.postgresql.postgresql_pg_hba:
dest: /var/lib/postgres/data/pg_hba.conf
contype: host
users: replication
source: 192.168.0.100/24
databases: replication
method: cert
- name: Revoke access from local user mary on database mydb
community.postgresql.postgresql_pg_hba:
dest: /var/lib/postgres/data/pg_hba.conf
contype: local
users: mary
databases: mydb
state: absent
- name: Grant some_user access to some_db, comment that and keep other rule-specific comments attached to their rules
community.postgresql.postgresql_pg_hba:
dest: /var/lib/postgres/data/pg_hba.conf
contype: host
users: some_user
databases: some_db
method: md5
source: ::/0
keep_comments_at_rules: true
comment: "this rule is an example"
- name: Replace everything with a new set of rules
community.postgresql.postgresql_pg_hba:
dest: /var/lib/postgres/data/pg_hba.conf
overwrite: true # remove preexisting rules
# custom defaults
rules_behavior: combine
contype: hostssl
address: 2001:db8::/64
comment: added in bulk
rules:
- users: user1
databases: db1
# contype, address and comment come from custom default
- users: user2
databases: db2
comment: added with love # overwrite custom default for this rule
# contype and address come from custom default
- users: user3
databases: db3
# contype, address and comment come from custom default
'''
RETURN = r'''
msgs:
description: List of textual messages what was done.
returned: always
type: list
sample:
"msgs": [
"Removing",
"Changed",
"Writing"
]
backup_file:
description: File that the original pg_hba file was backed up to.
returned: changed
type: str
sample: /tmp/pg_hba_jxobj_p
pg_hba:
description: List of the pg_hba rules as they are configured in the specified hba file.
returned: always
type: list
sample:
"pg_hba": [
{
"db": "all",
"method": "md5",
"src": "samehost",
"type": "host",
"usr": "all"
}
]
'''
import os
import re
import traceback
IPADDRESS_IMP_ERR = None
try:
import ipaddress
except ImportError:
IPADDRESS_IMP_ERR = traceback.format_exc()
import tempfile
import shutil
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
# from ansible.module_utils.postgres import postgres_common_argument_spec
# Authentication methods accepted in the 'method' column of a pg_hba rule.
PG_HBA_METHODS = ["trust", "reject", "md5", "password", "gss", "sspi", "krb5", "ident", "peer",
                  "ldap", "radius", "cert", "pam", "scram-sha-256"]
# Connection types accepted in the first column of a pg_hba rule.
PG_HBA_TYPES = ["local", "host", "hostssl", "hostnossl", "hostgssenc", "hostnogssenc"]
# Accepted values for the (deprecated) 'order' option: s=source, d=databases, u=users.
PG_HBA_ORDERS = ["sdu", "sud", "dsu", "dus", "usd", "uds"]
# Canonical column order of a pg_hba rule; also used as the dict keys of PgHbaRule.
PG_HBA_HDR = ['type', 'db', 'usr', 'src', 'mask', 'method', 'options']
# Matches any run of whitespace; used to split rule lines into columns.
WHITESPACES_RE = re.compile(r'\s+')
class PgHbaError(Exception):
    '''
    Base exception, raised when reading, parsing or writing a pg_hba file fails.
    '''
class PgHbaRuleError(PgHbaError):
    '''
    Raised when a single pg_hba rule is invalid or cannot be parsed.
    '''
class PgHbaRuleChanged(PgHbaRuleError):
    '''
    Raised (and handled internally by PgHba.add_rule) when a newly parsed rule
    is a changed version of an existing rule.
    '''
class PgHbaValueError(PgHbaError):
    '''
    Raised when a value in a pg_hba rule (address, netmask, method, ...) is invalid.
    '''
class PgHbaRuleValueError(PgHbaRuleError):
    '''
    Raised when a rule contains a value outside the allowed set
    (unknown authentication method or connection type).
    '''
class PgHba(object):
    """
    Represent a pg_hba file: read, hold, modify and write back its rules.

    pg_hba_file - path to the pg_hba file, almost always .../pg_hba.conf
    """

    def __init__(self, pg_hba_file=None, order="sdu", backup=False, create=False, keep_comments_at_rules=False):
        if order not in PG_HBA_ORDERS:
            msg = "invalid order setting {0} (should be one of '{1}')."
            raise PgHbaError(msg.format(order, "', '".join(PG_HBA_ORDERS)))
        self.pg_hba_file = pg_hba_file
        self.rules = None
        self.comment = None
        self.order = order
        self.backup = backup
        self.last_backup = None
        self.create = create
        self.keep_comments_at_rules = keep_comments_at_rules
        self.unchanged()
        # self.databases will be updated by add_rule and gives some idea of the number of
        # databases (at least that are handled by this pg_hba)
        self.databases = set(['postgres', 'template0', 'template1'])
        # self.users will be updated by add_rule and gives some idea of the number of users
        # (at least that are handled by this pg_hba); since entries might also be groups with
        # multiple users, this might be totally off, but at least it is some info...
        self.users = set(['postgres'])
        self.preexisting_rules = None
        self.read()

    def clear_rules(self):
        """Drop all rules currently held (used by the 'overwrite' option)."""
        self.rules = {}

    def unchanged(self):
        '''
        This method resets self.diff to an empty default
        '''
        self.diff = {'before': {'file': self.pg_hba_file, 'pg_hba': []},
                     'after': {'file': self.pg_hba_file, 'pg_hba': []}}

    def read(self):
        '''
        Read in the pg_hba from the system
        '''
        self.rules = {}
        self.comment = []
        # read the pg_hba file; a missing/unreadable file is deliberately tolerated here
        # (write() raises later unless the 'create' option is set)
        try:
            with open(self.pg_hba_file, 'r') as file:
                for line in file:
                    # split into line and comment
                    line = line.strip()
                    comment = None
                    if '#' in line:
                        line, comment = line.split('#', 1)
                        if comment == '':
                            comment = None
                    line = line.rstrip()
                    # if there is just a comment, save it
                    if line == '':
                        if comment is not None:
                            self.comment.append('#' + comment)
                    else:
                        if comment is not None and not self.keep_comments_at_rules:
                            # save the comment independent of the line
                            self.comment.append('#' + comment)
                            comment = None
                        try:
                            self.add_rule(PgHbaRule(line=line, comment=comment))
                        except PgHbaRuleError:
                            pass
            self.unchanged()
            self.preexisting_rules = dict(self.rules)
        except IOError:
            pass

    def write(self, backup_file=''):
        '''
        This method writes the PgHba rules (back) to a file.

        Returns True when the file was (re)written, False when nothing changed.
        '''
        if not self.changed():
            return False
        contents = self.render()
        if self.pg_hba_file:
            if not (os.path.isfile(self.pg_hba_file) or self.create):
                raise PgHbaError("pg_hba file '{0}' doesn't exist. "
                                 "Use create option to autocreate.".format(self.pg_hba_file))
            if self.backup and os.path.isfile(self.pg_hba_file):
                if backup_file:
                    self.last_backup = backup_file
                else:
                    # mkstemp returns an already-open file descriptor; close it to
                    # avoid leaking it (shutil.copy reopens the file by path anyway).
                    _backup_file_h, self.last_backup = tempfile.mkstemp(prefix='pg_hba')
                    os.close(_backup_file_h)
                shutil.copy(self.pg_hba_file, self.last_backup)
            fileh = open(self.pg_hba_file, 'w')
        else:
            # No destination path configured: write to a fresh temporary file.
            filed, _path = tempfile.mkstemp(prefix='pg_hba')
            fileh = os.fdopen(filed, 'w')
        fileh.write(contents)
        self.unchanged()
        fileh.close()
        return True

    def add_rule(self, rule):
        '''
        This method can be used to add a rule to the list of rules in this PgHba object
        '''
        key = rule.key()
        try:
            try:
                oldrule = self.rules[key]
            except KeyError:
                # No rule with this key yet: treat as "changed" to trigger the insert below.
                raise PgHbaRuleChanged
            # Compare all fields except the raw line to detect a real change.
            ekeys = set(list(oldrule.keys()) + list(rule.keys()))
            ekeys.remove('line')
            for k in ekeys:
                if oldrule.get(k) != rule.get(k):
                    raise PgHbaRuleChanged('{0} changes {1}'.format(rule, oldrule))
        except PgHbaRuleChanged:
            self.rules[key] = rule
            self.diff['after']['pg_hba'].append(rule.line())
            # Track databases/users touched by this file (rough statistics only).
            if rule['db'] not in ['all', 'samerole', 'samegroup', 'replication']:
                databases = set(rule['db'].split(','))
                self.databases.update(databases)
            if rule['usr'] != 'all':
                user = rule['usr']
                if user[0] == '+':
                    user = user[1:]
                self.users.add(user)

    def remove_rule(self, rule):
        '''
        This method can be used to find and remove a rule. It doesn't look for the exact rule, only
        the rule with the same key.
        '''
        keys = rule.key()
        try:
            del self.rules[keys]
            self.diff['before']['pg_hba'].append(rule.line())
        except KeyError:
            pass

    def get_rules(self, with_lines=False):
        '''
        This method returns all the rules of the PgHba object

        It is a generator yielding one plain dict per rule, sorted; the raw
        'line' entry is included only when with_lines is True.
        '''
        rules = sorted(self.rules.values())
        for rule in rules:
            ret = {}
            for key, value in rule.items():
                ret[key] = value
            if not with_lines:
                if 'line' in ret:
                    del ret['line']
            else:
                ret['line'] = rule.line()
            yield ret

    def render(self):
        '''
        This method renders the content of the PgHba rules and comments.
        The returning value can be used directly to write to a new file.
        '''
        comment = '\n'.join(self.comment)
        rule_lines = []
        for rule in self.get_rules(with_lines=True):
            if 'comment' in rule:
                rule_lines.append(rule['line'] + '\t#' + rule['comment'])
            else:
                rule_lines.append(rule['line'])
        result = comment + '\n' + '\n'.join(rule_lines)
        # End it properly with a linefeed (if not already).
        if result and result[-1] not in ['\n', '\r']:
            result += '\n'
        return result

    def changed(self):
        '''
        This method can be called to detect if the PgHba file has been changed.
        '''
        if not self.preexisting_rules and not self.rules:
            return False
        return self.preexisting_rules != self.rules
class PgHbaRule(dict):
    '''
    This class represents one rule as defined in a line in a PgHbaFile.
    '''

    def __init__(self, contype=None, databases=None, users=None, source=None, netmask=None,
                 method=None, options=None, line=None, comment=None):
        '''
        Build a rule either from a raw pg_hba line (I(line)) or from separate
        column values; explicit column arguments override values parsed from
        I(line). Raises PgHbaRuleError/PgHbaRuleValueError on invalid input.
        '''
        super(PgHbaRule, self).__init__()
        if line:
            # Read values from line if parsed
            self.fromline(line)
        if comment:
            self['comment'] = comment
        # read rule cols from parsed items
        rule = dict(zip(PG_HBA_HDR, [contype, databases, users, source, netmask, method, options]))
        for key, value in rule.items():
            if value:
                self[key] = value
        # Some sanity checks
        for key in ['method', 'type']:
            if key not in self:
                raise PgHbaRuleError('Missing {0} in rule {1}'.format(key, self))
        if self['method'] not in PG_HBA_METHODS:
            msg = "invalid method {0} (should be one of '{1}')."
            raise PgHbaRuleValueError(msg.format(self['method'], "', '".join(PG_HBA_METHODS)))
        if self['type'] not in PG_HBA_TYPES:
            msg = "invalid connection type {0} (should be one of '{1}')."
            raise PgHbaRuleValueError(msg.format(self['type'], "', '".join(PG_HBA_TYPES)))
        if self['type'] == 'local':
            # Unix-socket rules carry no address/netmask columns.
            self.unset('src')
            self.unset('mask')
        elif 'src' not in self:
            # Fixed: the placeholder was '{1}' with a single format argument,
            # which raised IndexError instead of the intended error message.
            raise PgHbaRuleError('Missing src in rule {0}'.format(self))
        elif '/' in self['src']:
            # CIDR notation already encodes the mask.
            self.unset('mask')
        else:
            # Normalize src (+ optional separate mask) to its canonical form.
            self['src'] = str(self.source())
            self.unset('mask')

    def unset(self, key):
        '''
        This method is used to unset certain columns if they exist
        '''
        if key in self:
            del self[key]

    def line(self):
        '''
        This method can be used to return (or generate) the line
        '''
        try:
            return self['line']
        except KeyError:
            self['line'] = "\t".join([self[k] for k in PG_HBA_HDR if k in self.keys()])
            return self['line']

    def fromline(self, line):
        '''
        split into 'type', 'db', 'usr', 'src', 'mask', 'method', 'options' cols
        '''
        if WHITESPACES_RE.sub('', line) == '':
            # empty line. skip this one...
            return
        cols = WHITESPACES_RE.split(line)
        if len(cols) < 4:
            msg = "Rule {0} has too few columns."
            raise PgHbaValueError(msg.format(line))
        if cols[0] not in PG_HBA_TYPES:
            msg = "Rule {0} has unknown type: {1}."
            raise PgHbaValueError(msg.format(line, cols[0]))
        if cols[0] == 'local':
            cols.insert(3, None)  # No address
            cols.insert(3, None)  # No IP-mask
        if len(cols) < 6:
            cols.insert(4, None)  # No IP-mask
        elif cols[5] not in PG_HBA_METHODS:
            cols.insert(4, None)  # No IP-mask
        if cols[5] not in PG_HBA_METHODS:
            raise PgHbaValueError("Rule {0} of '{1}' type has invalid auth-method '{2}'".format(line, cols[0], cols[5]))
        if len(cols) < 7:
            cols.insert(6, None)  # No auth-options
        else:
            cols[6] = " ".join(cols[6:])  # combine all auth-options
        rule = dict(zip(PG_HBA_HDR, cols[:7]))
        for key, value in rule.items():
            if value:
                self[key] = value

    def key(self):
        '''
        This method can be used to get the key from a rule.
        '''
        if self['type'] == 'local':
            source = 'local'
        else:
            source = str(self.source())
        return (source, self['db'], self['usr'])

    def source(self):
        '''
        This method is used to get the source of a rule as an ipaddress object if possible.
        '''
        if 'mask' in self.keys():
            try:
                ipaddress.ip_address(u'{0}'.format(self['src']))
            except ValueError:
                raise PgHbaValueError('Mask was specified, but source "{0}" '
                                      'is not valid ip'.format(self['src']))
            # ipaddress module cannot work with ipv6 netmask, so lets convert it to prefixlen
            # furthermore ipv4 with bad netmask throws 'Rule {} doesn't seem to be an ip, but has a
            # mask error that doesn't seem to describe what is going on.
            try:
                mask_as_ip = ipaddress.ip_address(u'{0}'.format(self['mask']))
            except ValueError:
                raise PgHbaValueError('Mask {0} seems to be invalid'.format(self['mask']))
            binvalue = "{0:b}".format(int(mask_as_ip))
            if '01' in binvalue:
                raise PgHbaValueError('IP mask {0} seems invalid '
                                      '(binary value has 1 after 0)'.format(self['mask']))
            prefixlen = binvalue.count('1')
            sourcenw = '{0}/{1}'.format(self['src'], prefixlen)
            try:
                return ipaddress.ip_network(u'{0}'.format(sourcenw), strict=False)
            except ValueError:
                raise PgHbaValueError('{0} is not valid address range'.format(sourcenw))
        try:
            return ipaddress.ip_network(u'{0}'.format(self['src']), strict=False)
        except ValueError:
            # Not an address at all: keyword (all/samehost/samenet) or hostname.
            return self['src']

    def __lt__(self, other):
        """This function helps sorted to decide how to sort.
        It just checks itself against the other and decides on some key values
        if it should be sorted higher or lower in the list.
        The way it works:
        For networks, every 1 in 'netmask in binary' makes the subnet more specific.
        Therefore I chose to use prefix as the weight.
        So a single IP (/32) should have twice the weight of a /16 network.
        To keep everything in the same weight scale,
        - for ipv6, we use a weight scale of 0 (all possible ipv6 addresses) to 128 (single ip)
        - for ipv4, we use a weight scale of 0 (all possible ipv4 addresses) to 128 (single ip)
        Therefore for ipv4, we use prefixlen (0-32) * 4 for weight,
        which corresponds to ipv6 (0-128).
        """
        myweight = self.source_weight()
        hisweight = other.source_weight()
        if myweight != hisweight:
            return myweight > hisweight
        myweight = self.db_weight()
        hisweight = other.db_weight()
        if myweight != hisweight:
            return myweight < hisweight
        myweight = self.user_weight()
        hisweight = other.user_weight()
        if myweight != hisweight:
            return myweight < hisweight
        try:
            return self['src'] < other['src']
        except TypeError:
            return self.source_type_weight() < other.source_type_weight()
        except Exception:
            # When all else fails, just compare the exact line.
            return self.line() < other.line()

    def source_weight(self):
        """Report the weight of this source net.
        Basically this is the netmask, where IPv4 is normalized to IPv6
        (IPv4/32 has the same weight as IPv6/128).
        """
        if self['type'] == 'local':
            return 130
        sourceobj = self.source()
        if isinstance(sourceobj, ipaddress.IPv4Network):
            return sourceobj.prefixlen * 4
        if isinstance(sourceobj, ipaddress.IPv6Network):
            return sourceobj.prefixlen
        if isinstance(sourceobj, str):
            # You can also write all to match any IP address,
            # samehost to match any of the server's own IP addresses,
            # or samenet to match any address in any subnet that the server is connected to.
            if sourceobj == 'all':
                # (all is considered the full range of all ips, which has a weight of 0)
                return 0
            if sourceobj == 'samehost':
                # (sort samehost second after local)
                return 129
            if sourceobj == 'samenet':
                # Might write some fancy code to determine all prefix's
                # from all interfaces and find a sane value for this one.
                # For now, let's assume IPv4/24 or IPv6/96 (both have weight 96).
                return 96
            if sourceobj[0] == '.':
                # suffix matching (domain name), let's assume a very large scale
                # and therefore a very low weight IPv4/16 or IPv6/64 (both have weight 64).
                return 64
            # hostname, let's assume only one host matches, which is
            # IPv4/32 or IPv6/128 (both have weight 128)
            return 128
        # Fixed: the placeholder was '{1}' with a single format argument,
        # which raised IndexError instead of the intended error message.
        raise PgHbaValueError('Cannot deduct the source weight of this source {0}'.format(sourceobj))

    def source_type_weight(self):
        """Give a weight on the type of this source.
        Basically make sure that IPv6Networks are sorted higher than IPv4Networks.
        This is a 'when all else fails' solution in __lt__.
        """
        if self['type'] == 'local':
            return 3
        sourceobj = self.source()
        if isinstance(sourceobj, ipaddress.IPv4Network):
            return 2
        if isinstance(sourceobj, ipaddress.IPv6Network):
            return 1
        if isinstance(sourceobj, str):
            return 0
        raise PgHbaValueError('This source {0} is of an unknown type...'.format(sourceobj))

    def db_weight(self):
        """Report the weight of the database.
        Normally, just 1, but for replication this is 0, and for 'all', this is more than 2.
        """
        if self['db'] == 'all':
            return 100000
        if self['db'] == 'replication':
            return 0
        if self['db'] in ['samerole', 'samegroup']:
            return 1
        return 1 + self['db'].count(',')

    def user_weight(self):
        """Report weight when comparing users."""
        if self['usr'] == 'all':
            return 1000000
        return 1
def main():
    '''
    This function is the main function of this module

    It normalizes the single-rule arguments and the ``rules`` list into one
    list of rule dicts, applies each rule to the pg_hba file and writes the
    file back, honoring check mode and the file-permission arguments.
    '''
    # argument_spec = postgres_common_argument_spec()
    argument_spec = dict()
    argument_spec.update(
        address=dict(type='str', default='samehost', aliases=['source', 'src']),
        backup=dict(type='bool', default=False),
        backup_file=dict(type='str'),
        contype=dict(type='str', default=None, choices=PG_HBA_TYPES),
        comment=dict(type='str', default=None),
        create=dict(type='bool', default=False),
        databases=dict(type='str', default='all'),
        dest=dict(type='path', required=True),
        method=dict(type='str', default='md5', choices=PG_HBA_METHODS),
        netmask=dict(type='str'),
        options=dict(type='str'),
        order=dict(type='str', default="sdu", choices=PG_HBA_ORDERS,
                   removed_in_version='3.0.0', removed_from_collection='community.postgresql'),
        keep_comments_at_rules=dict(type='bool', default=False),
        state=dict(type='str', default="present", choices=["absent", "present"]),
        users=dict(type='str', default='all'),
        rules=dict(type='list', elements='dict'),
        rules_behavior=dict(type='str', default='conflict', choices=['combine', 'conflict']),
        overwrite=dict(type='bool', default=False),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        add_file_common_args=True,
        supports_check_mode=True
    )
    if IPADDRESS_IMP_ERR is not None:
        module.fail_json(msg=missing_required_lib('ipaddress'), exception=IPADDRESS_IMP_ERR)
    # In check mode always pretend 'create' so a missing file does not fail the dry run,
    # and never take a backup since nothing is written.
    create = bool(module.params["create"] or module.check_mode)
    if module.check_mode:
        backup = False
    else:
        backup = module.params['backup']
    backup_file = module.params['backup_file']
    dest = module.params["dest"]
    order = module.params["order"]
    keep_comments_at_rules = module.params["keep_comments_at_rules"]
    rules = module.params["rules"]
    rules_behavior = module.params["rules_behavior"]
    overwrite = module.params["overwrite"]
    ret = {'msgs': []}
    try:
        pg_hba = PgHba(dest, order, backup=backup, create=create, keep_comments_at_rules=keep_comments_at_rules)
    except PgHbaError as error:
        module.fail_json(msg='Error reading file:\n{0}'.format(error))
    if overwrite:
        pg_hba.clear_rules()
    # The rule-specific arguments that may appear either at module level or
    # inside each entry of the 'rules' list.
    rule_keys = [
        'address',
        'comment',
        'contype',
        'databases',
        'method',
        'netmask',
        'options',
        'state',
        'users'
    ]
    if rules is None:
        # No 'rules' list: build a single rule from the module-level arguments.
        single_rule = dict()
        for key in rule_keys:
            single_rule[key] = module.params[key]
        rules = [single_rule]
    else:
        if rules_behavior == 'conflict':
            # it's ok if the module default is set
            used_rule_keys = [key for key in rule_keys if module.params[key] != argument_spec[key].get('default', None)]
            if len(used_rule_keys) > 0:
                module.fail_json(msg='conflict: either argument "rules_behavior" needs to be changed or "rules" must'
                                     ' not be set or {0} must not be set'.format(used_rule_keys))
        new_rules = []
        for index, rule in enumerate(rules):
            # alias handling
            address_keys = [key for key in rule.keys() if key in ('address', 'source', 'src')]
            if len(address_keys) > 1:
                module.fail_json(msg='rule number {0} of the "rules" argument ({1}) uses ambiguous settings: '
                                     '{2} are aliases, only one is allowed'.format(index, address_keys, rule))
            if len(address_keys) == 1:
                address = rule[address_keys[0]]
                del rule[address_keys[0]]
                rule['address'] = address
            # Fill the keys each rule entry did not set itself.
            for key in rule_keys:
                if key not in rule:
                    if rules_behavior == 'combine':
                        # use user-supplied defaults or module defaults
                        rule[key] = module.params[key]
                    else:
                        # use module defaults
                        rule[key] = argument_spec[key].get('default', None)
            new_rules.append(rule)
        rules = new_rules
    for rule in rules:
        # Without a contype the module only reports the file contents.
        if rule.get('contype', None) is None:
            continue
        try:
            # Expand comma-separated databases/users into one PgHbaRule each.
            for database in rule['databases'].split(','):
                for user in rule['users'].split(','):
                    pg_hba_rule = PgHbaRule(rule['contype'], database, user, rule['address'], rule['netmask'],
                                            rule['method'], rule['options'], comment=rule['comment'])
                    if rule['state'] == "present":
                        ret['msgs'].append('Adding rule {0}'.format(pg_hba_rule))
                        pg_hba.add_rule(pg_hba_rule)
                    else:
                        ret['msgs'].append('Removing rule {0}'.format(pg_hba_rule))
                        pg_hba.remove_rule(pg_hba_rule)
        except PgHbaError as error:
            module.fail_json(msg='Error modifying rules:\n{0}'.format(error))
    file_args = module.load_file_common_arguments(module.params)
    ret['changed'] = changed = pg_hba.changed()
    if changed:
        ret['msgs'].append('Changed')
        ret['diff'] = pg_hba.diff
        if not module.check_mode:
            ret['msgs'].append('Writing')
            try:
                if pg_hba.write(backup_file):
                    module.set_fs_attributes_if_different(file_args, True, pg_hba.diff,
                                                          expand=False)
            except PgHbaError as error:
                module.fail_json(msg='Error writing file:\n{0}'.format(error))
            if pg_hba.last_backup:
                ret['backup_file'] = pg_hba.last_backup
    ret['pg_hba'] = list(pg_hba.get_rules())
    module.exit_json(**ret)
if __name__ == '__main__':
    main()

View File

@@ -0,0 +1,213 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018-2020 Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: postgresql_ping
short_description: Check remote PostgreSQL server availability
description:
- Simple module to check remote PostgreSQL server availability.
options:
db:
description:
- Name of a database to connect to.
type: str
aliases:
- login_db
session_role:
description:
- Switch to session_role after connecting. The specified session_role must
be a role that the current login_user is a member of.
- Permissions checking for SQL commands is carried out as though
the session_role were the one that had logged in originally.
type: str
version_added: '0.2.0'
trust_input:
description:
- If C(false), check whether a value of I(session_role) is potentially dangerous.
- It makes sense to use C(false) only when SQL injections via I(session_role) are possible.
type: bool
default: true
version_added: '0.2.0'
seealso:
- module: community.postgresql.postgresql_info
author:
- Andrew Klychkov (@Andersson007)
extends_documentation_fragment:
- community.postgresql.postgres
notes:
- Supports C(check_mode).
'''
EXAMPLES = r'''
# PostgreSQL ping dbsrv server from the shell:
# ansible dbsrv -m postgresql_ping
# In the example below you need to generate certificates previously.
# See https://www.postgresql.org/docs/current/libpq-ssl.html for more information.
- name: >
Ping PostgreSQL server using non-default credentials and SSL
registering the return values into the result variable for future use
community.postgresql.postgresql_ping:
db: protected_db
login_host: dbsrv
login_user: secret
login_password: secret_pass
ca_cert: /root/root.crt
ssl_mode: verify-full
register: result
# If you need to fail when the server is not available,
# uncomment the following line:
#failed_when: not result.is_available
# You can use the registered result with another task
- name: This task should be executed only if the server is available
# ...
when: result.is_available == true
'''
RETURN = r'''
is_available:
description: PostgreSQL server availability.
returned: always
type: bool
sample: true
server_version:
description: PostgreSQL server version.
returned: always
type: dict
sample: { major: 13, minor: 2, full: '13.2', raw: 'PostgreSQL 13.2 on x86_64-pc-linux-gnu' }
conn_err_msg:
description: Connection error message.
returned: always
type: str
sample: ''
version_added: 1.7.0
'''
import re
try:
from psycopg2.extras import DictCursor
except ImportError:
# psycopg2 is checked by connect_to_db()
# from ansible.module_utils.postgres
pass
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.postgresql.plugins.module_utils.database import (
check_input,
)
from ansible_collections.community.postgresql.plugins.module_utils.postgres import (
connect_to_db,
exec_sql,
ensure_required_libs,
get_conn_params,
postgres_common_argument_spec,
)
# ===========================================
# PostgreSQL module specific support methods.
#
class PgPing(object):
    """Check availability and version of a PostgreSQL server.

    Args:
        module (AnsibleModule): Object of AnsibleModule class.
        cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL.

    Attributes:
        is_available (bool): True if the server answered the version query.
        version (dict): Version parts of the server (major, minor, full, raw,
            and patch when the version string contains one).
    """
    def __init__(self, module, cursor):
        self.module = module
        self.cursor = cursor
        self.is_available = False
        self.version = {}

    def do(self):
        """Fetch the server version and return (is_available, version)."""
        self.get_pg_version()
        return (self.is_available, self.version)

    def get_pg_version(self):
        """Query the server version and populate ``self.version``.

        Leaves ``is_available`` False and ``version`` empty when the query
        returns no result (e.g. the server is unreachable).
        """
        query = "SELECT version()"
        # exec_sql() can return an empty/False result on failure; check it
        # BEFORE indexing to avoid raising TypeError/IndexError here.
        result = exec_sql(self, query, add_to_executed=False)
        if not result:
            return
        raw = result[0][0]
        if not raw:
            return

        self.is_available = True

        # The version string looks like "PostgreSQL 13.2 on x86_64-...",
        # so the second whitespace-separated word is the version itself.
        full = raw.split()[1]
        m = re.match(r"(\d+)\.(\d+)(?:\.(\d+))?", full)
        if m is None:
            # Unparsable version (e.g. development/beta builds like "14beta1"):
            # report the raw strings instead of crashing on m.group().
            self.version = dict(full=full, raw=raw)
            return
        major = int(m.group(1))
        minor = int(m.group(2))
        patch = None
        if m.group(3) is not None:
            patch = int(m.group(3))

        self.version = dict(
            major=major,
            minor=minor,
            full=full,
            raw=raw,
        )
        if patch is not None:
            self.version['patch'] = patch
# ===========================================
# Module execution.
#
def main():
    """Entry point of the postgresql_ping module.

    Connects to the target server (without failing the task on a connection
    error), probes availability and version via PgPing, and exits with the
    ``is_available``, ``server_version`` and ``conn_err_msg`` return values.
    """
    argument_spec = postgres_common_argument_spec()
    argument_spec.update(
        db=dict(type='str', aliases=['login_db']),
        session_role=dict(type='str'),
        trust_input=dict(type='bool', default=True),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )
    if not module.params['trust_input']:
        # Check input for potentially dangerous elements:
        check_input(module, module.params['session_role'])
    # Set some default values:
    cursor = False
    db_connection = False
    result = dict(
        changed=False,
        is_available=False,
        server_version=dict(),
        conn_err_msg='',
    )
    # Ensure psycopg2 libraries are available before connecting to DB:
    ensure_required_libs(module)
    conn_params = get_conn_params(module, module.params, warn_db_default=False)
    # fail_on_conn=False: a connection failure must not fail the task;
    # it is reported through the conn_err_msg / is_available return values.
    db_connection, err = connect_to_db(module, conn_params, fail_on_conn=False)
    if err:
        result['conn_err_msg'] = err
    if db_connection is not None:
        cursor = db_connection.cursor(cursor_factory=DictCursor)
    # Do job:
    pg_ping = PgPing(module, cursor)
    if cursor:
        # If connection established:
        result["is_available"], result["server_version"] = pg_ping.do()
        # Only a read-only SELECT was issued; roll back to leave no open tx.
        db_connection.rollback()
    module.exit_json(**result)
if __name__ == '__main__':
    main()

View File

@@ -0,0 +1,686 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Loic Blot (@nerzhul) <loic.blot@unix-experience.fr>
# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: postgresql_publication
short_description: Add, update, or remove PostgreSQL publication
description:
- Add, update, or remove PostgreSQL publication.
options:
name:
description:
- Name of the publication to add, update, or remove.
required: true
type: str
db:
description:
- Name of the database to connect to and where
the publication state will be changed.
aliases: [ login_db ]
type: str
tables:
description:
- List of tables to add to the publication.
- If no value is set all tables are targeted.
- If the publication already exists for specific tables and I(tables) is not passed,
nothing will be changed.
- If you need to add all tables to the publication with the same name,
drop existent and create new without passing I(tables).
type: list
elements: str
state:
description:
- The publication state.
default: present
choices: [ absent, present ]
type: str
parameters:
description:
- Dictionary with optional publication parameters.
- Available parameters depend on PostgreSQL version.
type: dict
owner:
description:
- Publication owner.
- If I(owner) is not defined, the owner will be set as I(login_user) or I(session_role).
type: str
cascade:
description:
- Drop publication dependencies. Has effect with I(state=absent) only.
type: bool
default: false
session_role:
description:
- Switch to session_role after connecting. The specified session_role must
be a role that the current login_user is a member of.
- Permissions checking for SQL commands is carried out as though
the session_role were the one that had logged in originally.
type: str
version_added: '0.2.0'
trust_input:
description:
- If C(false), check whether values of parameters I(name), I(tables), I(owner),
I(session_role), I(params) are potentially dangerous.
- It makes sense to use C(false) only when SQL injections via the parameters are possible.
type: bool
default: true
version_added: '0.2.0'
notes:
- PostgreSQL version must be 10 or greater.
- Supports C(check_mode).
seealso:
- name: CREATE PUBLICATION reference
description: Complete reference of the CREATE PUBLICATION command documentation.
link: https://www.postgresql.org/docs/current/sql-createpublication.html
- name: ALTER PUBLICATION reference
description: Complete reference of the ALTER PUBLICATION command documentation.
link: https://www.postgresql.org/docs/current/sql-alterpublication.html
- name: DROP PUBLICATION reference
description: Complete reference of the DROP PUBLICATION command documentation.
link: https://www.postgresql.org/docs/current/sql-droppublication.html
author:
- Loic Blot (@nerzhul) <loic.blot@unix-experience.fr>
- Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
extends_documentation_fragment:
- community.postgresql.postgres
'''
EXAMPLES = r'''
- name: Create a new publication with name "acme" targeting all tables in database "test"
community.postgresql.postgresql_publication:
db: test
name: acme
- name: Create publication "acme" publishing only prices and vehicles tables
community.postgresql.postgresql_publication:
name: acme
tables:
- prices
- vehicles
- name: >
Create publication "acme", set user alice as an owner, targeting all tables
Allowable DML operations are INSERT and UPDATE only
community.postgresql.postgresql_publication:
name: acme
owner: alice
parameters:
publish: 'insert,update'
- name: >
Assuming publication "acme" exists and there are targeted
tables "prices" and "vehicles", add table "stores" to the publication
community.postgresql.postgresql_publication:
name: acme
tables:
- prices
- vehicles
- stores
- name: Remove publication "acme" if exists in database "test"
community.postgresql.postgresql_publication:
db: test
name: acme
state: absent
'''
RETURN = r'''
exists:
description:
- Flag indicates the publication exists or not at the end of runtime.
returned: always
type: bool
sample: true
queries:
description: List of executed queries.
returned: always
type: str
sample: [ 'DROP PUBLICATION "acme" CASCADE' ]
owner:
description: Owner of the publication at the end of runtime.
returned: if publication exists
type: str
sample: "alice"
tables:
description:
- List of tables in the publication at the end of runtime.
- If all tables are published, returns empty list.
returned: if publication exists
type: list
sample: ["\"public\".\"prices\"", "\"public\".\"vehicles\""]
alltables:
description:
- Flag indicates that all tables are published.
returned: if publication exists
type: bool
sample: false
parameters:
description: Publication parameters at the end of runtime.
returned: if publication exists
type: dict
sample: {'publish': {'insert': false, 'delete': false, 'update': true}}
'''
try:
from psycopg2.extras import DictCursor
except ImportError:
# psycopg2 is checked by connect_to_db()
# from ansible.module_utils.postgres
pass
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.postgresql.plugins.module_utils.database import (
check_input,
pg_quote_identifier,
)
from ansible_collections.community.postgresql.plugins.module_utils.postgres import (
connect_to_db,
exec_sql,
ensure_required_libs,
get_conn_params,
postgres_common_argument_spec,
)
from ansible.module_utils.six import iteritems
SUPPORTED_PG_VERSION = 10000
################################
# Module functions and classes #
################################
def transform_tables_representation(tbl_list):
    """Normalize table names for use in publication queries.

    Prefixes schema-less names with 'public.' and quotes every
    element in place with pg_quote_identifier.

    Args:
        tbl_list (list): List of table names.

    Returns:
        tbl_list (list): The same list, transformed in place.
    """
    for idx in range(len(tbl_list)):
        name = tbl_list[idx].strip()
        if '.' not in name:
            name = 'public.%s' % name
        tbl_list[idx] = pg_quote_identifier(name, 'table')

    return tbl_list
class PgPublication():
    """Class to work with PostgreSQL publication.

    Args:
        module (AnsibleModule): Object of AnsibleModule class.
        cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL.
        name (str): The name of the publication.

    Attributes:
        module (AnsibleModule): Object of AnsibleModule class.
        cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL.
        name (str): Name of the publication.
        executed_queries (list): List of executed queries.
        attrs (dict): Dict with publication attributes.
        exists (bool): Flag indicates the publication exists or not.
    """

    def __init__(self, module, cursor, name):
        self.module = module
        self.cursor = cursor
        self.name = name
        self.executed_queries = []
        self.attrs = {
            'alltables': False,
            'tables': [],
            'parameters': {},
            'owner': '',
        }
        self.exists = self.check_pub()

    def get_info(self):
        """Refresh the publication information.

        Returns:
            ``self.attrs``.
        """
        self.exists = self.check_pub()
        return self.attrs

    def check_pub(self):
        """Check the publication and refresh ``self.attrs`` publication attribute.

        Returns:
            True if the publication with ``self.name`` exists, False otherwise.
        """
        pub_info = self.__get_general_pub_info()

        if not pub_info:
            # Publication does not exist:
            return False

        self.attrs['owner'] = pub_info.get('pubowner')

        # Publication DML operations:
        self.attrs['parameters']['publish'] = {}
        self.attrs['parameters']['publish']['insert'] = pub_info.get('pubinsert', False)
        self.attrs['parameters']['publish']['update'] = pub_info.get('pubupdate', False)
        self.attrs['parameters']['publish']['delete'] = pub_info.get('pubdelete', False)
        # pubtruncate exists from PostgreSQL 11 only:
        if pub_info.get('pubtruncate'):
            self.attrs['parameters']['publish']['truncate'] = pub_info.get('pubtruncate')

        # If alltables flag is False, get the list of targeted tables:
        if not pub_info.get('puballtables'):
            table_info = self.__get_tables_pub_info()
            # Join sublists [['schema', 'table'], ...] to ['schema.table', ...]
            # for better representation:
            for i, schema_and_table in enumerate(table_info):
                table_info[i] = pg_quote_identifier('.'.join(schema_and_table), 'table')

            self.attrs['tables'] = table_info
        else:
            self.attrs['alltables'] = True

        # Publication exists:
        return True

    def create(self, tables, params, owner, check_mode=True):
        """Create the publication.

        Args:
            tables (list): List with names of the tables that need to be added to the publication.
            params (dict): Dict contains optional publication parameters and their values.
            owner (str): Name of the publication owner.

        Kwargs:
            check_mode (bool): If True, don't actually change anything,
                just make SQL, add it to ``self.executed_queries`` and return True.

        Returns:
            changed (bool): True if publication has been created, otherwise False.
        """
        changed = True

        query_fragments = ["CREATE PUBLICATION %s" % pg_quote_identifier(self.name, 'publication')]

        if tables:
            query_fragments.append("FOR TABLE %s" % ', '.join(tables))
        else:
            query_fragments.append("FOR ALL TABLES")

        if params:
            params_list = []
            # Make list ["param = 'value'", ...] from params dict:
            for (key, val) in iteritems(params):
                params_list.append("%s = '%s'" % (key, val))

            # Add the list to query_fragments:
            query_fragments.append("WITH (%s)" % ', '.join(params_list))

        changed = self.__exec_sql(' '.join(query_fragments), check_mode=check_mode)

        if owner:
            # If check_mode, just add possible SQL to
            # executed_queries and return:
            self.__pub_set_owner(owner, check_mode=check_mode)

        return changed

    def update(self, tables, params, owner, check_mode=True):
        """Update the publication.

        Args:
            tables (list): List with names of the tables that need to be presented in the publication.
            params (dict): Dict contains optional publication parameters and their values.
            owner (str): Name of the publication owner.

        Kwargs:
            check_mode (bool): If True, don't actually change anything,
                just make SQL, add it to ``self.executed_queries`` and return True.

        Returns:
            changed (bool): True if publication has been updated, otherwise False.
        """
        changed = False

        # Add or drop tables from published tables suit:
        if tables and not self.attrs['alltables']:

            # 1. If needs to add table to the publication:
            for tbl in tables:
                if tbl not in self.attrs['tables']:
                    # If needs to add table to the publication:
                    changed = self.__pub_add_table(tbl, check_mode=check_mode)

            # 2. if there is a table in targeted tables
            # that's not presented in the passed tables:
            for tbl in self.attrs['tables']:
                if tbl not in tables:
                    changed = self.__pub_drop_table(tbl, check_mode=check_mode)

        elif tables and self.attrs['alltables']:
            changed = self.__pub_set_tables(tables, check_mode=check_mode)

        # Update pub parameters:
        if params:
            for key, val in iteritems(params):
                if self.attrs['parameters'].get(key):

                    # In PostgreSQL 10/11 only 'publish' optional parameter is presented.
                    if key == 'publish':
                        # 'publish' value can be only a string with comma-separated items
                        # of allowed DML operations like 'insert,update' or
                        # 'insert,update,delete', etc.
                        # Make dictionary to compare with current attrs later:
                        val_dict = self.attrs['parameters']['publish'].copy()
                        val_list = val.split(',')
                        for v in val_dict:
                            if v in val_list:
                                val_dict[v] = True
                            else:
                                val_dict[v] = False

                        # Compare val_dict and the dict with current 'publish' parameters,
                        # if they're different, set new values:
                        if val_dict != self.attrs['parameters']['publish']:
                            changed = self.__pub_set_param(key, val, check_mode=check_mode)

                    # Default behavior for other cases:
                    elif self.attrs['parameters'][key] != val:
                        changed = self.__pub_set_param(key, val, check_mode=check_mode)

                else:
                    # If the parameter was not set before:
                    changed = self.__pub_set_param(key, val, check_mode=check_mode)

        # Update pub owner:
        if owner:
            if owner != self.attrs['owner']:
                changed = self.__pub_set_owner(owner, check_mode=check_mode)

        return changed

    def drop(self, cascade=False, check_mode=True):
        """Drop the publication.

        Kwargs:
            cascade (bool): Flag indicates that publication needs to be deleted
                with its dependencies.
            check_mode (bool): If True, don't actually change anything,
                just make SQL, add it to ``self.executed_queries`` and return True.

        Returns:
            changed (bool): True if publication has been dropped, otherwise False.
        """
        if self.exists:
            query_fragments = []
            query_fragments.append("DROP PUBLICATION %s" % pg_quote_identifier(self.name, 'publication'))
            if cascade:
                query_fragments.append("CASCADE")

            return self.__exec_sql(' '.join(query_fragments), check_mode=check_mode)

        # Nothing to drop: return False explicitly instead of implicitly
        # falling through with None, so callers always get a boolean
        # "changed" value as documented.
        return False

    def __get_general_pub_info(self):
        """Get and return general publication information.

        Returns:
            Dict with publication information if successful, False otherwise.
        """
        # Check pg_publication.pubtruncate exists (supported from PostgreSQL 11):
        pgtrunc_sup = exec_sql(self, ("SELECT 1 FROM information_schema.columns "
                                      "WHERE table_name = 'pg_publication' "
                                      "AND column_name = 'pubtruncate'"), add_to_executed=False)

        if pgtrunc_sup:
            query = ("SELECT r.rolname AS pubowner, p.puballtables, p.pubinsert, "
                     "p.pubupdate , p.pubdelete, p.pubtruncate FROM pg_publication AS p "
                     "JOIN pg_catalog.pg_roles AS r "
                     "ON p.pubowner = r.oid "
                     "WHERE p.pubname = %(pname)s")
        else:
            query = ("SELECT r.rolname AS pubowner, p.puballtables, p.pubinsert, "
                     "p.pubupdate , p.pubdelete FROM pg_publication AS p "
                     "JOIN pg_catalog.pg_roles AS r "
                     "ON p.pubowner = r.oid "
                     "WHERE p.pubname = %(pname)s")

        result = exec_sql(self, query, query_params={'pname': self.name}, add_to_executed=False)
        if result:
            return result[0]
        else:
            return False

    def __get_tables_pub_info(self):
        """Get and return tables that are published by the publication.

        Returns:
            List of dicts with published tables.
        """
        query = ("SELECT schemaname, tablename "
                 "FROM pg_publication_tables WHERE pubname = %(pname)s")
        return exec_sql(self, query, query_params={'pname': self.name}, add_to_executed=False)

    def __pub_add_table(self, table, check_mode=False):
        """Add a table to the publication.

        Args:
            table (str): Table name.

        Kwargs:
            check_mode (bool): If True, don't actually change anything,
                just make SQL, add it to ``self.executed_queries`` and return True.

        Returns:
            True if successful, False otherwise.
        """
        query = ("ALTER PUBLICATION %s ADD TABLE %s" % (pg_quote_identifier(self.name, 'publication'),
                                                        pg_quote_identifier(table, 'table')))
        return self.__exec_sql(query, check_mode=check_mode)

    def __pub_drop_table(self, table, check_mode=False):
        """Drop a table from the publication.

        Args:
            table (str): Table name.

        Kwargs:
            check_mode (bool): If True, don't actually change anything,
                just make SQL, add it to ``self.executed_queries`` and return True.

        Returns:
            True if successful, False otherwise.
        """
        query = ("ALTER PUBLICATION %s DROP TABLE %s" % (pg_quote_identifier(self.name, 'publication'),
                                                         pg_quote_identifier(table, 'table')))
        return self.__exec_sql(query, check_mode=check_mode)

    def __pub_set_tables(self, tables, check_mode=False):
        """Set a table suit that need to be published by the publication.

        Args:
            tables (list): List of tables.

        Kwargs:
            check_mode (bool): If True, don't actually change anything,
                just make SQL, add it to ``self.executed_queries`` and return True.

        Returns:
            True if successful, False otherwise.
        """
        quoted_tables = [pg_quote_identifier(t, 'table') for t in tables]
        query = ("ALTER PUBLICATION %s SET TABLE %s" % (pg_quote_identifier(self.name, 'publication'),
                                                        ', '.join(quoted_tables)))
        return self.__exec_sql(query, check_mode=check_mode)

    def __pub_set_param(self, param, value, check_mode=False):
        """Set an optional publication parameter.

        Args:
            param (str): Name of the parameter.
            value (str): Parameter value.

        Kwargs:
            check_mode (bool): If True, don't actually change anything,
                just make SQL, add it to ``self.executed_queries`` and return True.

        Returns:
            True if successful, False otherwise.
        """
        query = ("ALTER PUBLICATION %s SET (%s = '%s')" % (pg_quote_identifier(self.name, 'publication'),
                                                           param, value))
        return self.__exec_sql(query, check_mode=check_mode)

    def __pub_set_owner(self, role, check_mode=False):
        """Set a publication owner.

        Args:
            role (str): Role (user) name that needs to be set as a publication owner.

        Kwargs:
            check_mode (bool): If True, don't actually change anything,
                just make SQL, add it to ``self.executed_queries`` and return True.

        Returns:
            True if successful, False otherwise.
        """
        query = ('ALTER PUBLICATION %s '
                 'OWNER TO "%s"' % (pg_quote_identifier(self.name, 'publication'), role))
        return self.__exec_sql(query, check_mode=check_mode)

    def __exec_sql(self, query, check_mode=False):
        """Execute SQL query.

        Note: If we need just to get information from the database,
            we use ``exec_sql`` function directly.

        Args:
            query (str): Query that needs to be executed.

        Kwargs:
            check_mode (bool): If True, don't actually change anything,
                just add ``query`` to ``self.executed_queries`` and return True.

        Returns:
            True if successful, False otherwise.
        """
        if check_mode:
            self.executed_queries.append(query)
            return True
        else:
            return exec_sql(self, query, return_bool=True)
# ===========================================
# Module execution.
#
def main():
    """Entry point of the postgresql_publication module.

    Parses module parameters, connects to the database, and creates,
    updates, or drops the publication depending on ``state``, returning
    the resulting publication attributes and executed queries.
    """
    argument_spec = postgres_common_argument_spec()
    argument_spec.update(
        name=dict(required=True),
        db=dict(type='str', aliases=['login_db']),
        state=dict(type='str', default='present', choices=['absent', 'present']),
        tables=dict(type='list', elements='str'),
        parameters=dict(type='dict'),
        owner=dict(type='str'),
        cascade=dict(type='bool', default=False),
        session_role=dict(type='str'),
        trust_input=dict(type='bool', default=True),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )
    # Parameters handling:
    name = module.params['name']
    state = module.params['state']
    tables = module.params['tables']
    params = module.params['parameters']
    owner = module.params['owner']
    cascade = module.params['cascade']
    session_role = module.params['session_role']
    trust_input = module.params['trust_input']
    if not trust_input:
        # Check input for potentially dangerous elements:
        if not params:
            params_list = None
        else:
            # Flatten the params dict into "key = value" strings so each
            # key and value can be screened for SQL injection:
            params_list = ['%s = %s' % (k, v) for k, v in iteritems(params)]
        check_input(module, name, tables, owner, session_role, params_list)
    # Warn about parameters that have no effect with the chosen state:
    if state == 'absent':
        if tables:
            module.warn('parameter "tables" is ignored when "state=absent"')
        if params:
            module.warn('parameter "parameters" is ignored when "state=absent"')
        if owner:
            module.warn('parameter "owner" is ignored when "state=absent"')
    if state == 'present' and cascade:
        module.warn('parameter "cascade" is ignored when "state=present"')
    # Ensure psycopg2 libraries are available before connecting to DB:
    ensure_required_libs(module)
    # Connect to DB and make cursor object:
    conn_params = get_conn_params(module, module.params)
    # We check publication state without DML queries execution, so set autocommit:
    db_connection, dummy = connect_to_db(module, conn_params, autocommit=True)
    cursor = db_connection.cursor(cursor_factory=DictCursor)
    # Check version:
    if cursor.connection.server_version < SUPPORTED_PG_VERSION:
        module.fail_json(msg="PostgreSQL server version should be 10.0 or greater")
    # Nothing was changed by default:
    changed = False
    ###################################
    # Create object and do rock'n'roll:
    publication = PgPublication(module, cursor, name)
    if tables:
        # Normalize to quoted "schema"."table" form for comparison with
        # the publication's current table list:
        tables = transform_tables_representation(tables)
    # If module.check_mode=True, nothing will be changed:
    if state == 'present':
        if not publication.exists:
            changed = publication.create(tables, params, owner, check_mode=module.check_mode)
        else:
            changed = publication.update(tables, params, owner, check_mode=module.check_mode)
    elif state == 'absent':
        changed = publication.drop(cascade=cascade, check_mode=module.check_mode)
    # Get final publication info:
    pub_fin_info = {}
    if state == 'present' or (state == 'absent' and module.check_mode):
        pub_fin_info = publication.get_info()
    elif state == 'absent' and not module.check_mode:
        publication.exists = False
    # Connection is not needed any more:
    cursor.close()
    db_connection.close()
    # Update publication info and return ret values:
    module.exit_json(changed=changed, queries=publication.executed_queries, exists=publication.exists, **pub_fin_info)
if __name__ == '__main__':
    main()

View File

@@ -0,0 +1,524 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Felix Archambault
# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = r'''
---
module: postgresql_query
short_description: Run PostgreSQL queries
description:
- Runs arbitrary PostgreSQL queries.
- B(WARNING) The C(path_to_script) and C(as_single_query) options as well as
the C(query_list) and C(query_all_results) return values have been B(deprecated) and
will be removed in community.postgresql 3.0.0, please use the
M(community.postgresql.postgresql_script) module to execute statements from scripts.
- Does not run against backup files. Use M(community.postgresql.postgresql_db) with I(state=restore)
to run queries on files made by pg_dump/pg_dumpall utilities.
options:
query:
description:
- SQL query to run. Variables can be escaped with psycopg2 syntax
U(http://initd.org/psycopg/docs/usage.html).
type: str
positional_args:
description:
- List of values to be passed as positional arguments to the query.
When the value is a list, it will be converted to PostgreSQL array.
- Mutually exclusive with I(named_args).
type: list
elements: raw
named_args:
description:
- Dictionary of key-value arguments to pass to the query.
When the value is a list, it will be converted to PostgreSQL array.
- Mutually exclusive with I(positional_args).
type: dict
path_to_script:
description:
- This option has been B(deprecated) and will be removed in community.postgresql 3.0.0,
please use the M(community.postgresql.postgresql_script) module to execute
statements from scripts.
- Path to a SQL script on the target machine.
- If the script contains several queries, they must be semicolon-separated.
- To run scripts containing objects with semicolons
(for example, function and procedure definitions), use I(as_single_query=true).
- To upload dumps or to execute other complex scripts, the preferable way
is to use the M(community.postgresql.postgresql_db) module with I(state=restore).
- Mutually exclusive with I(query).
type: path
session_role:
description:
- Switch to session_role after connecting. The specified session_role must
be a role that the current login_user is a member of.
- Permissions checking for SQL commands is carried out as though
the session_role were the one that had logged in originally.
type: str
db:
description:
- Name of database to connect to and run queries against.
type: str
aliases:
- login_db
autocommit:
description:
- Execute in autocommit mode when the query can't be run inside a transaction block
(e.g., VACUUM).
- Mutually exclusive with I(check_mode).
type: bool
default: false
encoding:
description:
- Set the client encoding for the current session (e.g. C(UTF-8)).
- The default is the encoding defined by the database.
type: str
version_added: '0.2.0'
trust_input:
description:
- If C(false), check whether a value of I(session_role) is potentially dangerous.
- It makes sense to use C(false) only when SQL injections via I(session_role) are possible.
type: bool
default: true
version_added: '0.2.0'
search_path:
description:
- List of schema names to look in.
type: list
elements: str
version_added: '1.0.0'
as_single_query:
description:
- This option has been B(deprecated) and will be removed in community.postgresql 3.0.0,
please use the M(community.postgresql.postgresql_script) module to execute
statements from scripts.
- If C(true), when reading from the I(path_to_script) file,
executes its whole content in a single query (not splitting it up
into separate queries by semicolons). It brings the following changes in
the module's behavior.
- When C(true), the C(query_all_results) return value
contains only the result of the last statement.
- Whether the state is reported as changed or not
is determined by the last statement of the file.
- Used only when I(path_to_script) is specified, otherwise ignored.
- If set to C(false), the script can contain only semicolon-separated queries.
(see the I(path_to_script) option documentation).
type: bool
default: true
version_added: '1.1.0'
seealso:
- module: community.postgresql.postgresql_script
- module: community.postgresql.postgresql_db
- name: PostgreSQL Schema reference
description: Complete reference of the PostgreSQL schema documentation.
link: https://www.postgresql.org/docs/current/ddl-schemas.html
author:
- Felix Archambault (@archf)
- Andrew Klychkov (@Andersson007)
- Will Rouesnel (@wrouesnel)
extends_documentation_fragment:
- community.postgresql.postgres
notes:
- Supports C(check_mode).
'''
EXAMPLES = r'''
- name: Simple select query to acme db
community.postgresql.postgresql_query:
db: acme
query: SELECT version()
- name: Select query to db acme with positional arguments and non-default credentials
community.postgresql.postgresql_query:
db: acme
login_user: django
login_password: mysecretpass
query: SELECT * FROM acme WHERE id = %s AND story = %s
positional_args:
- 1
- test
- name: Select query to test_db with named_args
community.postgresql.postgresql_query:
db: test_db
query: SELECT * FROM test WHERE id = %(id_val)s AND story = %(story_val)s
named_args:
id_val: 1
story_val: test
- name: Insert query to test_table in db test_db
community.postgresql.postgresql_query:
db: test_db
query: INSERT INTO test_table (id, story) VALUES (2, 'my_long_story')
- name: Use connect_params to add any additional connection parameters that libpg supports
community.postgresql.postgresql_query:
connect_params:
target_session_attrs: read-write
connect_timeout: 10
login_host: "host1,host2"
login_user: "test"
login_password: "test1234"
db: 'test'
query: 'insert into test (test) values (now())'
# WARNING: The path_to_script and as_single_query options have been deprecated
# and will be removed in community.postgresql 3.0.0, please
# use the community.postgresql.postgresql_script module instead.
# If your script contains semicolons as parts of separate objects
# like functions, procedures, and so on, use "as_single_query: true"
- name: Run queries from SQL script using UTF-8 client encoding for session
community.postgresql.postgresql_query:
db: test_db
path_to_script: /var/lib/pgsql/test.sql
positional_args:
- 1
encoding: UTF-8
- name: Example of using autocommit parameter
community.postgresql.postgresql_query:
db: test_db
query: VACUUM
autocommit: true
- name: >
Insert data to the column of array type using positional_args.
Note that we use quotes here, the same as for passing JSON, etc.
community.postgresql.postgresql_query:
query: INSERT INTO test_table (array_column) VALUES (%s)
positional_args:
- '{1,2,3}'
# Pass list and string vars as positional_args
- name: Set vars
ansible.builtin.set_fact:
my_list:
- 1
- 2
- 3
my_arr: '{1, 2, 3}'
- name: Select from test table by passing positional_args as arrays
community.postgresql.postgresql_query:
query: SELECT * FROM test_array_table WHERE arr_col1 = %s AND arr_col2 = %s
positional_args:
- '{{ my_list }}'
- '{{ my_arr|string }}'
# Select from test table looking into app1 schema first, then,
# if the schema doesn't exist or the table hasn't been found there,
# try to find it in the schema public
- name: Select from test using search_path
community.postgresql.postgresql_query:
query: SELECT * FROM test_array_table
search_path:
- app1
- public
# If you use a variable in positional_args / named_args that can
# be undefined and you wish to set it as NULL, the constructions like
# "{{ my_var if (my_var is defined) else none | default(none) }}"
# will not work as expected substituting an empty string instead of NULL.
# If possible, we suggest to use Ansible's DEFAULT_JINJA2_NATIVE configuration
# (https://docs.ansible.com/ansible/latest/reference_appendices/config.html#default-jinja2-native).
# Enabling it fixes this problem. If you cannot enable it, the following workaround
# can be used.
# You should precheck such a value and define it as NULL when undefined.
# For example:
- name: When undefined, set to NULL
set_fact:
my_var: NULL
when: my_var is undefined
# Then:
- name: Insert a value using positional arguments
community.postgresql.postgresql_query:
query: INSERT INTO test_table (col1) VALUES (%s)
positional_args:
- '{{ my_var }}'
'''
RETURN = r'''
query:
description:
- Executed query.
- When reading several queries from a file, it contains only the last one.
returned: always
type: str
sample: 'SELECT * FROM bar'
statusmessage:
description:
- Attribute containing the message returned by the command.
- When reading several queries from a file, it contains a message of the last one.
returned: always
type: str
sample: 'INSERT 0 1'
query_result:
description:
- List of dictionaries in column:value form representing returned rows.
- When running queries from a file, returns result of the last query.
returned: always
type: list
elements: dict
sample: [{"Column": "Value1"},{"Column": "Value2"}]
query_list:
description:
- This return value has been B(deprecated) and will be removed in
community.postgresql 3.0.0.
- List of executed queries.
Useful when reading several queries from a file.
returned: always
type: list
elements: str
sample: ['SELECT * FROM foo', 'SELECT * FROM bar']
query_all_results:
description:
- This return value has been B(deprecated) and will be removed in
community.postgresql 3.0.0.
- List containing results of all queries executed (one sublist for every query).
Useful when reading several queries from a file.
returned: always
type: list
elements: list
sample: [[{"Column": "Value1"},{"Column": "Value2"}], [{"Column": "Value1"},{"Column": "Value2"}]]
rowcount:
description:
- Number of produced or affected rows.
- When using a script with multiple queries,
it contains a total number of produced or affected rows.
returned: changed
type: int
sample: 5
'''
try:
from psycopg2 import ProgrammingError as Psycopg2ProgrammingError
from psycopg2.extras import DictCursor
except ImportError:
# it is needed for checking 'no result to fetch' in main(),
# psycopg2 availability will be checked by connect_to_db() into
# ansible.module_utils.postgres
pass
import re
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.postgresql.plugins.module_utils.database import (
check_input,
)
from ansible_collections.community.postgresql.plugins.module_utils.postgres import (
connect_to_db,
convert_elements_to_pg_arrays,
convert_to_supported,
ensure_required_libs,
get_conn_params,
postgres_common_argument_spec,
set_search_path,
TYPES_NEED_TO_CONVERT,
)
from ansible.module_utils._text import to_native
from ansible.module_utils.six import iteritems
# ===========================================
# Module execution.
#
def insane_query(string):
    """Return True when *string* contains no actual SQL (whitespace only).

    Used to drop empty fragments produced by splitting a script on ';'
    so that psycopg2 is never asked to execute an empty query.

    :param string: one query fragment taken from the script file.
    :return: True if the fragment is empty or whitespace-only.
    """
    # str.strip() covers all ASCII whitespace (' ', '\t', '\n', '\r',
    # '\v', '\f'). The previous hand-rolled character loop missed '\r',
    # so scripts with Windows (CRLF) line endings yielded bogus "\r"
    # fragments, and its tuple also contained a dead '' member (iterating
    # a string never yields an empty string).
    return not string.strip()
def main():
    """Entry point of the postgresql_query module.

    Reads SQL either from the ``query`` option or from ``path_to_script``
    (deprecated), executes it against the target database, derives the
    ``changed`` flag from the command status messages, and exits via
    module.exit_json()/fail_json().
    """
    argument_spec = postgres_common_argument_spec()
    argument_spec.update(
        query=dict(type='str'),
        db=dict(type='str', aliases=['login_db']),
        positional_args=dict(type='list', elements='raw'),
        named_args=dict(type='dict'),
        session_role=dict(type='str'),
        path_to_script=dict(type='path'),
        autocommit=dict(type='bool', default=False),
        encoding=dict(type='str'),
        trust_input=dict(type='bool', default=True),
        search_path=dict(type='list', elements='str'),
        as_single_query=dict(type='bool', default=True),
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        mutually_exclusive=(('positional_args', 'named_args'),),
        supports_check_mode=True,
    )

    query = module.params["query"]
    positional_args = module.params["positional_args"]
    named_args = module.params["named_args"]
    path_to_script = module.params["path_to_script"]
    autocommit = module.params["autocommit"]
    encoding = module.params["encoding"]
    session_role = module.params["session_role"]
    trust_input = module.params["trust_input"]
    search_path = module.params["search_path"]
    as_single_query = module.params["as_single_query"]

    if not trust_input:
        # Check input for potentially dangerous elements:
        check_input(module, session_role)

    # autocommit makes statements take effect immediately, so check_mode
    # could not roll them back — refuse the combination up front.
    if autocommit and module.check_mode:
        module.fail_json(msg="Using autocommit is mutually exclusive with check_mode")

    if path_to_script and query:
        module.fail_json(msg="path_to_script is mutually exclusive with query")

    query_list = []
    if path_to_script:
        depr_msg = ("The 'path_to_script' option is deprecated. Please use the "
                    "'community.postgresql.postgresql_script' module to execute "
                    "statements from scripts")
        module.deprecate(msg=depr_msg, version="3.0.0", collection_name="community.postgresql")
        try:
            with open(path_to_script, 'rb') as f:
                query = to_native(f.read())

                if not as_single_query:
                    depr_msg = ("The 'as_single_query' option is deprecated. Please use the "
                                "'community.postgresql.postgresql_script' module to execute "
                                "statements from scripts")
                    module.deprecate(msg=depr_msg, version="3.0.0", collection_name="community.postgresql")

                    # Split the script on ';' and drop whitespace-only
                    # fragments so empty statements are not executed.
                    if ';' in query:
                        for q in query.split(';'):
                            if insane_query(q):
                                continue
                            else:
                                query_list.append(q)
                    else:
                        query_list.append(query)

                else:
                    query_list.append(query)

        except Exception as e:
            module.fail_json(msg="Cannot read file '%s' : %s" % (path_to_script, to_native(e)))
    else:
        query_list.append(query)

    # Ensure psycopg2 libraries are available before connecting to DB:
    ensure_required_libs(module)
    conn_params = get_conn_params(module, module.params)
    db_connection, dummy = connect_to_db(module, conn_params, autocommit=autocommit)
    if encoding is not None:
        db_connection.set_client_encoding(encoding)
    cursor = db_connection.cursor(cursor_factory=DictCursor)

    if search_path:
        set_search_path(cursor, '%s' % ','.join([x.strip(' ') for x in search_path]))

    # Prepare args:
    # positional_args and named_args are mutually exclusive (enforced above),
    # so at most one of them is passed through to cursor.execute().
    if positional_args:
        args = positional_args
    elif named_args:
        args = named_args
    else:
        args = None

    # Convert elements of type list to strings
    # representing PG arrays
    if args:
        args = convert_elements_to_pg_arrays(args)

    # Set defaults:
    changed = False
    query_all_results = []
    rowcount = 0
    statusmessage = ''

    # Execute query:
    for query in query_list:
        try:
            cursor.execute(query, args)
            statusmessage = cursor.statusmessage
            if cursor.rowcount > 0:
                rowcount += cursor.rowcount

            query_result = []
            try:
                for row in cursor.fetchall():
                    # Ansible engine does not support decimals.
                    # An explicit conversion is required on the module's side
                    row = dict(row)
                    for (key, val) in iteritems(row):
                        if isinstance(val, TYPES_NEED_TO_CONVERT):
                            row[key] = convert_to_supported(val)

                    query_result.append(row)

            except Psycopg2ProgrammingError as e:
                # Non-SELECT statements have no result set; psycopg2
                # signals that with this specific ProgrammingError text.
                if to_native(e) == 'no results to fetch':
                    query_result = {}

            except Exception as e:
                module.fail_json(msg="Cannot fetch rows from cursor: %s" % to_native(e))

            query_all_results.append(query_result)

            # Derive 'changed' from the status message, e.g. "INSERT 0 1"
            # or "UPDATE 5": the last token is the affected-row count.
            if 'SELECT' not in statusmessage:
                if re.search(re.compile(r'(UPDATE|INSERT|DELETE)'), statusmessage):
                    s = statusmessage.split()
                    if len(s) == 3:
                        if s[2] != '0':
                            changed = True

                    elif len(s) == 2:
                        if s[1] != '0':
                            changed = True

                    else:
                        changed = True

                else:
                    # Any other non-SELECT command (DDL etc.) is assumed
                    # to have changed state.
                    changed = True

        except Exception as e:
            if not autocommit:
                db_connection.rollback()

            cursor.close()
            db_connection.close()
            module.fail_json(msg="Cannot execute SQL '%s' %s: %s, query list: %s" % (query, args, to_native(e), query_list))

    if module.check_mode:
        db_connection.rollback()
    else:
        if not autocommit:
            db_connection.commit()

    kw = dict(
        changed=changed,
        query=cursor.query,
        query_list=query_list,
        statusmessage=statusmessage,
        query_result=query_result,
        query_all_results=query_all_results,
        rowcount=rowcount,
    )

    cursor.close()
    db_connection.close()

    module.exit_json(**kw)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()

View File

@@ -0,0 +1,298 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: postgresql_schema
short_description: Add or remove PostgreSQL schema
description:
- Add or remove PostgreSQL schema.
options:
name:
description:
- Name of the schema to add or remove.
required: true
type: str
aliases:
- schema
database:
description:
- Name of the database to connect to and add or remove the schema.
type: str
default: postgres
aliases:
- db
- login_db
owner:
description:
- Name of the role to set as owner of the schema.
type: str
default: ''
session_role:
description:
- Switch to session_role after connecting.
- The specified session_role must be a role that the current login_user is a member of.
- Permissions checking for SQL commands is carried out as though the session_role
were the one that had logged in originally.
type: str
state:
description:
- The schema state.
type: str
default: present
choices: [ absent, present ]
cascade_drop:
description:
- Drop schema with CASCADE to remove child objects.
type: bool
default: false
ssl_mode:
description:
- Determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the server.
- See U(https://www.postgresql.org/docs/current/static/libpq-ssl.html) for more information on the modes.
- Default of C(prefer) matches libpq default.
type: str
default: prefer
choices: [ allow, disable, prefer, require, verify-ca, verify-full ]
ca_cert:
description:
- Specifies the name of a file containing SSL certificate authority (CA) certificate(s).
- If the file exists, the server's certificate will be verified to be signed by one of these authorities.
type: str
aliases: [ ssl_rootcert ]
trust_input:
description:
- If C(false), check whether values of parameters I(schema), I(owner), I(session_role) are potentially dangerous.
- It makes sense to use C(false) only when SQL injections via the parameters are possible.
type: bool
default: true
version_added: '0.2.0'
seealso:
- name: PostgreSQL schemas
description: General information about PostgreSQL schemas.
link: https://www.postgresql.org/docs/current/ddl-schemas.html
- name: CREATE SCHEMA reference
description: Complete reference of the CREATE SCHEMA command documentation.
link: https://www.postgresql.org/docs/current/sql-createschema.html
- name: ALTER SCHEMA reference
description: Complete reference of the ALTER SCHEMA command documentation.
link: https://www.postgresql.org/docs/current/sql-alterschema.html
- name: DROP SCHEMA reference
description: Complete reference of the DROP SCHEMA command documentation.
link: https://www.postgresql.org/docs/current/sql-dropschema.html
author:
- Flavien Chantelot (@Dorn-) <contact@flavien.io>
- Thomas O'Donnell (@andytom)
extends_documentation_fragment:
- community.postgresql.postgres
notes:
- Supports C(check_mode).
'''
EXAMPLES = r'''
- name: Create a new schema with name acme in test database
community.postgresql.postgresql_schema:
db: test
name: acme
- name: Create a new schema acme with a user bob who will own it
community.postgresql.postgresql_schema:
name: acme
owner: bob
- name: Drop schema "acme" with cascade
community.postgresql.postgresql_schema:
name: acme
state: absent
cascade_drop: true
'''
RETURN = r'''
schema:
description: Name of the schema.
returned: success, changed
type: str
sample: "acme"
queries:
description: List of executed queries.
returned: always
type: list
sample: ["CREATE SCHEMA \"acme\""]
'''
import traceback
try:
from psycopg2.extras import DictCursor
except ImportError:
# psycopg2 is checked by connect_to_db()
# from ansible.module_utils.postgres
pass
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.postgresql.plugins.module_utils.postgres import (
connect_to_db,
ensure_required_libs,
get_conn_params,
postgres_common_argument_spec,
)
from ansible_collections.community.postgresql.plugins.module_utils.database import (
check_input,
pg_quote_identifier,
SQLParseError,
)
from ansible.module_utils._text import to_native
# Queries executed during this run; returned to the user as 'queries'.
executed_queries = []


class NotSupportedError(Exception):
    """Raised when a requested operation is not supported."""
    pass
# ===========================================
# PostgreSQL module specific support methods.
#
def set_owner(cursor, schema, owner):
    """Make *owner* the owner of *schema*.

    Records the executed statement in ``executed_queries`` and always
    returns True to signal to the caller that a change was made.
    """
    sql = 'ALTER SCHEMA %s OWNER TO "%s"' % (pg_quote_identifier(schema, 'schema'), owner)
    cursor.execute(sql)
    executed_queries.append(sql)
    return True
def get_schema_info(cursor, schema):
    """Fetch the information_schema row for *schema*.

    Returns a row with the key 'owner', or None when the schema is absent.
    """
    params = {'schema': schema}
    cursor.execute(
        "SELECT schema_owner AS owner "
        "FROM information_schema.schemata "
        "WHERE schema_name = %(schema)s",
        params,
    )
    return cursor.fetchone()
def schema_exists(cursor, schema):
    """Return True when *schema* exists in the connected database."""
    cursor.execute(
        "SELECT schema_name FROM information_schema.schemata "
        "WHERE schema_name = %(schema)s",
        {'schema': schema},
    )
    return cursor.rowcount == 1
def schema_delete(cursor, schema, cascade):
    """Drop *schema* if it exists.

    When *cascade* is true, dependent objects are dropped as well.
    Returns True when the schema was dropped, False when it was absent.
    """
    # Guard clause: nothing to do for a non-existent schema.
    if not schema_exists(cursor, schema):
        return False

    fragments = ["DROP SCHEMA %s" % pg_quote_identifier(schema, 'schema')]
    if cascade:
        fragments.append("CASCADE")
    sql = " ".join(fragments)
    cursor.execute(sql)
    executed_queries.append(sql)
    return True
def schema_create(cursor, schema, owner):
    """Ensure *schema* exists, optionally owned by *owner*.

    Creates the schema when missing; when it already exists, the only
    possible change is re-assigning ownership. Returns True when a
    change was made, False otherwise.
    """
    if schema_exists(cursor, schema):
        # Already present: change the owner only if one was requested
        # and it differs from the current owner.
        current = get_schema_info(cursor, schema)
        if owner and owner != current['owner']:
            return set_owner(cursor, schema, owner)
        return False

    sql = 'CREATE SCHEMA %s' % pg_quote_identifier(schema, 'schema')
    if owner:
        sql = ' '.join([sql, 'AUTHORIZATION "%s"' % owner])
    cursor.execute(sql)
    executed_queries.append(sql)
    return True
def schema_matches(cursor, schema, owner):
    """Return True when *schema* exists and, if *owner* is given, owns it.

    Used by check mode to decide whether state=present would change anything.
    """
    if not schema_exists(cursor, schema):
        return False
    info = get_schema_info(cursor, schema)
    # Mismatch only when an owner was requested and differs from reality.
    return not (owner and owner != info['owner'])
# ===========================================
# Module execution.
#
def main():
    """Entry point of the postgresql_schema module.

    Connects to the target database (autocommit, since CREATE/DROP SCHEMA
    are executed individually) and creates, alters or drops the schema
    according to the ``state`` option, exiting via exit_json()/fail_json().
    """
    argument_spec = postgres_common_argument_spec()
    argument_spec.update(
        schema=dict(type="str", required=True, aliases=['name']),
        owner=dict(type="str", default=""),
        database=dict(type="str", default="postgres", aliases=["db", "login_db"]),
        cascade_drop=dict(type="bool", default=False),
        state=dict(type="str", default="present", choices=["absent", "present"]),
        session_role=dict(type="str"),
        trust_input=dict(type="bool", default=True),
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    schema = module.params["schema"]
    owner = module.params["owner"]
    state = module.params["state"]
    cascade_drop = module.params["cascade_drop"]
    session_role = module.params["session_role"]
    trust_input = module.params["trust_input"]

    if not trust_input:
        # Check input for potentially dangerous elements:
        check_input(module, schema, owner, session_role)

    changed = False

    # Ensure psycopg2 libraries are available before connecting to DB:
    ensure_required_libs(module)
    conn_params = get_conn_params(module, module.params)
    db_connection, dummy = connect_to_db(module, conn_params, autocommit=True)
    cursor = db_connection.cursor(cursor_factory=DictCursor)

    try:
        # In check mode only report whether a change WOULD happen;
        # exit_json() raises SystemExit, re-raised below on purpose.
        if module.check_mode:
            if state == "absent":
                changed = not schema_exists(cursor, schema)

            elif state == "present":
                changed = not schema_matches(cursor, schema, owner)

            module.exit_json(changed=changed, schema=schema)

        if state == "absent":
            try:
                changed = schema_delete(cursor, schema, cascade_drop)

            except SQLParseError as e:
                module.fail_json(msg=to_native(e), exception=traceback.format_exc())

        elif state == "present":
            try:
                changed = schema_create(cursor, schema, owner)

            except SQLParseError as e:
                module.fail_json(msg=to_native(e), exception=traceback.format_exc())

    except NotSupportedError as e:
        module.fail_json(msg=to_native(e), exception=traceback.format_exc())

    except SystemExit:
        # Avoid catching this on Python 2.4
        raise

    except Exception as e:
        module.fail_json(msg="Database query failed: %s" % to_native(e), exception=traceback.format_exc())

    db_connection.close()
    module.exit_json(changed=changed, schema=schema, queries=executed_queries)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()

View File

@@ -0,0 +1,353 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2022, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = r'''
---
module: postgresql_script
short_description: Run PostgreSQL statements from a file
description:
- Runs arbitrary PostgreSQL statements from a file.
- The module always reports that the state has changed.
- Does not run against backup files.
Use M(community.postgresql.postgresql_db) with I(state=restore)
to run queries on files made by pg_dump/pg_dumpall utilities.
version_added: '2.1.0'
options:
positional_args:
description:
- List of values to substitute variable placeholders within the file content.
- When the value is a list, it will be converted to PostgreSQL array.
- Mutually exclusive with I(named_args).
type: list
elements: raw
named_args:
description:
- Dictionary of key-value arguments to substitute
variable placeholders within the file content.
- When the value is a list, it will be converted to PostgreSQL array.
- Mutually exclusive with I(positional_args).
type: dict
path:
description:
- Path to a SQL script on the target machine.
- To upload dumps, the preferable way
is to use the M(community.postgresql.postgresql_db) module with I(state=restore).
type: path
session_role:
description:
- Switch to C(session_role) after connecting. The specified role must
be a role that the current C(login_user) is a member of.
- Permissions checking for SQL commands is carried out as though
the C(session_role) were the one that had logged in originally.
type: str
db:
description:
- Name of database to connect to and run queries against.
type: str
aliases:
- login_db
encoding:
description:
- Set the client encoding for the current session (e.g. C(UTF-8)).
- The default is the encoding defined by the database.
type: str
trust_input:
description:
- If C(false), check whether a value of I(session_role) is potentially dangerous.
- It makes sense to use C(false) only when SQL injections
via I(session_role) are possible.
type: bool
default: true
search_path:
description:
- Overrides the list of schemas to search for db objects in.
type: list
elements: str
seealso:
- module: community.postgresql.postgresql_db
- module: community.postgresql.postgresql_query
- name: PostgreSQL Schema reference
description: Complete reference of the PostgreSQL schema documentation.
link: https://www.postgresql.org/docs/current/ddl-schemas.html
author:
- Douglas J Hunley (@hunleyd)
- A. Hart (@jtelcontar)
- Daniel Scharon (@DanScharon)
- Andrew Klychkov (@Andersson007)
extends_documentation_fragment:
- community.postgresql.postgres
notes:
- Does not support C(check_mode).
'''
EXAMPLES = r'''
# Assuming that the file contains
# SELECT * FROM id_table WHERE id = %s,
# '%s' will be substituted with 1
- name: Run query from SQL script using UTF-8 client encoding for session and positional args
community.postgresql.postgresql_script:
db: test_db
path: /var/lib/pgsql/test.sql
positional_args:
- 1
encoding: UTF-8
# Assuming that the file contains
# SELECT * FROM test WHERE id = %(id_val)s AND story = %(story_val)s,
# %-values will be substituted with 1 and 'test'
- name: Select query to test_db with named_args
community.postgresql.postgresql_script:
db: test_db
path: /var/lib/pgsql/test.sql
named_args:
id_val: 1
story_val: test
- block:
# Assuming that the file contains
# SELECT * FROM test_array_table WHERE arr_col1 = %s AND arr_col2 = %s
# Pass list and string vars as positional_args
- name: Set vars
ansible.builtin.set_fact:
my_list:
- 1
- 2
- 3
my_arr: '{1, 2, 3}'
- name: Passing positional_args as arrays
community.postgresql.postgresql_script:
path: /var/lib/pgsql/test.sql
positional_args:
- '{{ my_list }}'
- '{{ my_arr|string }}'
# Assuming that the file contains
# SELECT * FROM test_table,
# look into app1 schema first, then,
# if the schema doesn't exist or the table hasn't been found there,
# try to find it in the schema public
- name: Select from test using search_path
community.postgresql.postgresql_script:
path: /var/lib/pgsql/test.sql
search_path:
- app1
- public
- block:
# If you use a variable in positional_args/named_args that can
# be undefined and you wish to set it as NULL, constructions like
# "{{ my_var if (my_var is defined) else none | default(none) }}"
# will not work as expected substituting an empty string instead of NULL.
# If possible, we suggest using Ansible's DEFAULT_JINJA2_NATIVE configuration
# (https://docs.ansible.com/ansible/latest/reference_appendices/config.html#default-jinja2-native).
# Enabling it fixes this problem. If you cannot enable it, the following workaround
# can be used.
# You should precheck such a value and define it as NULL when undefined.
# For example:
- name: When undefined, set to NULL
set_fact:
my_var: NULL
when: my_var is undefined
# Then, assuming that the file contains
# INSERT INTO test_table (col1) VALUES (%s)
- name: Insert a value using positional arguments
community.postgresql.postgresql_script:
path: /var/lib/pgsql/test.sql
positional_args:
- '{{ my_var }}'
'''
RETURN = r'''
query:
description:
- Executed query.
- When the C(positional_args) or C(named_args) options are used,
the query contains all variables that were substituted
inside the database connector.
returned: always
type: str
sample: 'SELECT * FROM bar'
statusmessage:
description:
- Attribute containing the message returned by the database connector
after executing the script content.
- When there are several statements in the script, returns a message
related to the last statement.
returned: always
type: str
sample: 'INSERT 0 1'
query_result:
description:
- List of dictionaries in the column:value form representing returned rows.
- When there are several statements in the script,
returns result of the last statement.
returned: always
type: list
elements: dict
sample: [{"Column": "Value1"},{"Column": "Value2"}]
rowcount:
description:
- Number of produced or affected rows.
- When there are several statements in the script,
returns a number of rows affected by the last statement.
returned: changed
type: int
sample: 5
'''
try:
from psycopg2 import ProgrammingError as Psycopg2ProgrammingError
from psycopg2.extras import DictCursor
except ImportError:
# it is needed for checking 'no result to fetch' in main(),
# psycopg2 availability will be checked by connect_to_db() into
# ansible.module_utils.postgres
pass
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.postgresql.plugins.module_utils.database import (
check_input,
)
from ansible_collections.community.postgresql.plugins.module_utils.postgres import (
connect_to_db,
convert_elements_to_pg_arrays,
convert_to_supported,
ensure_required_libs,
get_conn_params,
list_to_pg_array,
postgres_common_argument_spec,
set_search_path,
TYPES_NEED_TO_CONVERT,
)
from ansible.module_utils._text import to_native
from ansible.module_utils.six import iteritems
# ===========================================
# Module execution.
#
def main():
    """Entry point of the postgresql_script module.

    Reads the SQL script at ``path``, executes its full content as one
    cursor.execute() call (with optional positional/named parameter
    substitution) in autocommit mode, and returns the status message,
    row count and fetched rows. Always reports changed=True.
    """
    argument_spec = postgres_common_argument_spec()
    argument_spec.update(
        path=dict(type='path'),
        db=dict(type='str', aliases=['login_db']),
        positional_args=dict(type='list', elements='raw'),
        named_args=dict(type='dict'),
        session_role=dict(type='str'),
        encoding=dict(type='str'),
        trust_input=dict(type='bool', default=True),
        search_path=dict(type='list', elements='str'),
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        mutually_exclusive=(('positional_args', 'named_args'),),
        supports_check_mode=False,
    )

    path = module.params["path"]
    positional_args = module.params["positional_args"]
    named_args = module.params["named_args"]
    encoding = module.params["encoding"]
    session_role = module.params["session_role"]
    trust_input = module.params["trust_input"]
    search_path = module.params["search_path"]

    if not trust_input:
        # Check input for potentially dangerous elements:
        check_input(module, session_role)

    # Read the whole script before touching the database so a bad path
    # fails fast without opening a connection.
    try:
        with open(path, 'rb') as f:
            script_content = to_native(f.read())
    except Exception as e:
        module.fail_json(msg="Cannot read file '%s' : %s" % (path, to_native(e)))

    # Ensure psycopg2 libraries are available before connecting to DB:
    ensure_required_libs(module)
    conn_params = get_conn_params(module, module.params)
    db_connection, dummy = connect_to_db(module, conn_params, autocommit=True)
    if encoding is not None:
        db_connection.set_client_encoding(encoding)
    cursor = db_connection.cursor(cursor_factory=DictCursor)

    if search_path:
        set_search_path(cursor, '%s' % ','.join([x.strip(' ') for x in search_path]))

    # Prepare args:
    # positional_args and named_args are mutually exclusive (enforced above).
    if positional_args:
        args = positional_args
    elif named_args:
        args = named_args
    else:
        args = None

    # Convert elements of type list to strings
    # representing PG arrays
    if args:
        args = convert_elements_to_pg_arrays(args)

    # Execute script content:
    try:
        cursor.execute(script_content, args)
    except Exception as e:
        cursor.close()
        db_connection.close()
        module.fail_json(msg="Cannot execute SQL '%s' %s: %s" % (script_content, args, to_native(e)))

    statusmessage = cursor.statusmessage
    rowcount = cursor.rowcount

    query_result = []
    try:
        for row in cursor.fetchall():
            # Ansible engine does not support decimals.
            # An explicit conversion is required on the module's side
            row = dict(row)
            for (key, val) in iteritems(row):
                if isinstance(val, TYPES_NEED_TO_CONVERT):
                    row[key] = convert_to_supported(val)

            query_result.append(row)

    except Psycopg2ProgrammingError as e:
        # Non-SELECT scripts have no result set; psycopg2 signals that
        # with this specific ProgrammingError text.
        if to_native(e) == "no results to fetch":
            query_result = {}

    except Exception as e:
        module.fail_json(msg="Cannot fetch rows from cursor: %s" % to_native(e))

    kw = dict(
        changed=True,
        query=cursor.query,
        statusmessage=statusmessage,
        query_result=query_result,
        rowcount=rowcount,
    )

    cursor.close()
    db_connection.close()

    module.exit_json(**kw)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()

View File

@@ -0,0 +1,632 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Tobias Birkefeld (@tcraxs) <t@craxs.de>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = r'''
---
module: postgresql_sequence
short_description: Create, drop, or alter a PostgreSQL sequence
description:
- Allows to create, drop or change the definition of a sequence generator.
options:
sequence:
description:
- The name of the sequence.
required: true
type: str
aliases:
- name
state:
description:
- The sequence state.
- If I(state=absent) other options will be ignored except of I(name) and
I(schema).
default: present
choices: [ absent, present ]
type: str
data_type:
description:
- Specifies the data type of the sequence. Valid types are bigint, integer,
and smallint. bigint is the default. The data type determines the default
minimum and maximum values of the sequence. For more info see the
documentation
U(https://www.postgresql.org/docs/current/sql-createsequence.html).
- Supported from PostgreSQL 10.
choices: [ bigint, integer, smallint ]
type: str
increment:
description:
- Increment specifies which value is added to the current sequence value
to create a new value.
- A positive value will make an ascending sequence, a negative one a
descending sequence. The default value is 1.
type: int
minvalue:
description:
- Minvalue determines the minimum value a sequence can generate. The
default for an ascending sequence is 1. The default for a descending
sequence is the minimum value of the data type.
type: int
aliases:
- min
maxvalue:
description:
- Maxvalue determines the maximum value for the sequence. The default for
an ascending sequence is the maximum
value of the data type. The default for a descending sequence is -1.
type: int
aliases:
- max
start:
description:
- Start allows the sequence to begin anywhere. The default starting value
is I(minvalue) for ascending sequences and I(maxvalue) for descending
ones.
type: int
cache:
description:
- Cache specifies how many sequence numbers are to be preallocated and
stored in memory for faster access. The minimum value is 1 (only one
value can be generated at a time, i.e., no cache), and this is also
the default.
type: int
cycle:
description:
- The cycle option allows the sequence to wrap around when the I(maxvalue)
or I(minvalue) has been reached by an ascending or descending sequence
respectively. If the limit is reached, the next number generated will be
the minvalue or maxvalue, respectively.
- If C(false) (NO CYCLE) is specified, any calls to nextval after the sequence
has reached its maximum value will return an error. False (NO CYCLE) is
the default.
type: bool
default: false
cascade:
description:
- Automatically drop objects that depend on the sequence, and in turn all
objects that depend on those objects.
- Ignored if I(state=present).
- Only used with I(state=absent).
type: bool
default: false
rename_to:
description:
- The new name for the I(sequence).
- Works only for existing sequences.
type: str
owner:
description:
- Set the owner for the I(sequence).
type: str
schema:
description:
- The schema of the I(sequence). This is be used to create and relocate
a I(sequence) in the given schema.
default: public
type: str
newschema:
description:
- The new schema for the I(sequence). Will be used for moving a
I(sequence) to another I(schema).
- Works only for existing sequences.
type: str
session_role:
description:
- Switch to session_role after connecting. The specified I(session_role)
must be a role that the current I(login_user) is a member of.
- Permissions checking for SQL commands is carried out as though
the I(session_role) were the one that had logged in originally.
type: str
db:
description:
- Name of database to connect to and run queries against.
type: str
default: ''
aliases:
- database
- login_db
trust_input:
description:
- If C(false), check whether values of parameters I(sequence), I(schema), I(rename_to),
I(owner), I(newschema), I(session_role) are potentially dangerous.
- It makes sense to use C(false) only when SQL injections via the parameters are possible.
type: bool
default: true
version_added: '0.2.0'
notes:
- Supports C(check_mode).
- If you do not pass db parameter, sequence will be created in the database
named postgres.
seealso:
- module: community.postgresql.postgresql_table
- module: community.postgresql.postgresql_owner
- module: community.postgresql.postgresql_privs
- module: community.postgresql.postgresql_tablespace
- name: CREATE SEQUENCE reference
description: Complete reference of the CREATE SEQUENCE command documentation.
link: https://www.postgresql.org/docs/current/sql-createsequence.html
- name: ALTER SEQUENCE reference
description: Complete reference of the ALTER SEQUENCE command documentation.
link: https://www.postgresql.org/docs/current/sql-altersequence.html
- name: DROP SEQUENCE reference
description: Complete reference of the DROP SEQUENCE command documentation.
link: https://www.postgresql.org/docs/current/sql-dropsequence.html
author:
- Tobias Birkefeld (@tcraxs)
- Thomas O'Donnell (@andytom)
extends_documentation_fragment:
- community.postgresql.postgres
'''
EXAMPLES = r'''
- name: Create an ascending bigint sequence called foobar in the default
database
community.postgresql.postgresql_sequence:
name: foobar
- name: Create an ascending integer sequence called foobar, starting at 101
community.postgresql.postgresql_sequence:
name: foobar
data_type: integer
start: 101
- name: Create an descending sequence called foobar, starting at 101 and
preallocated 10 sequence numbers in cache
community.postgresql.postgresql_sequence:
name: foobar
increment: -1
cache: 10
start: 101
- name: Create an ascending sequence called foobar, which cycle between 1 to 10
community.postgresql.postgresql_sequence:
name: foobar
cycle: true
min: 1
max: 10
- name: Create an ascending bigint sequence called foobar in the default
database with owner foobar
community.postgresql.postgresql_sequence:
name: foobar
owner: foobar
- name: Rename an existing sequence named foo to bar
community.postgresql.postgresql_sequence:
name: foo
rename_to: bar
- name: Change the schema of an existing sequence to foobar
community.postgresql.postgresql_sequence:
name: foobar
newschema: foobar
- name: Change the owner of an existing sequence to foobar
community.postgresql.postgresql_sequence:
name: foobar
owner: foobar
- name: Drop a sequence called foobar
community.postgresql.postgresql_sequence:
name: foobar
state: absent
- name: Drop a sequence called foobar with cascade
community.postgresql.postgresql_sequence:
name: foobar
cascade: true
state: absent
'''
RETURN = r'''
state:
description: Sequence state at the end of execution.
returned: always
type: str
sample: 'present'
sequence:
description: Sequence name.
returned: always
type: str
sample: 'foobar'
queries:
description: List of queries that were tried to be executed.
returned: always
type: list
sample: [ "CREATE SEQUENCE \"foo\"" ]
schema:
description: Name of the schema of the sequence.
returned: always
type: str
sample: 'foo'
data_type:
description: Shows the current data type of the sequence.
returned: always
type: str
sample: 'bigint'
increment:
description: The value of increment of the sequence. A positive value will
make an ascending sequence, a negative one a descending
sequence.
returned: always
type: int
sample: '-1'
minvalue:
description: The value of minvalue of the sequence.
returned: always
type: int
sample: '1'
maxvalue:
description: The value of maxvalue of the sequence.
returned: always
type: int
sample: '9223372036854775807'
start:
description: The value of start of the sequence.
returned: always
type: int
sample: '12'
cycle:
description: Shows if the sequence cycle or not.
returned: always
type: str
sample: 'false'
owner:
description: Shows the current owner of the sequence
after the successful run of the task.
returned: always
type: str
sample: 'postgres'
newname:
description: Shows the new sequence name after rename.
returned: on success
type: str
sample: 'barfoo'
newschema:
description: Shows the new schema of the sequence after schema change.
returned: on success
type: str
sample: 'foobar'
'''
try:
from psycopg2.extras import DictCursor
except ImportError:
# psycopg2 is checked by connect_to_db()
# from ansible.module_utils.postgres
pass
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.postgresql.plugins.module_utils.database import (
check_input,
)
from ansible_collections.community.postgresql.plugins.module_utils.postgres import (
connect_to_db,
exec_sql,
ensure_required_libs,
get_conn_params,
postgres_common_argument_spec,
)
class Sequence(object):
    """Implements behavior of CREATE, ALTER or DROP SEQUENCE PostgreSQL command.

    Arguments:
        module (AnsibleModule) -- object of AnsibleModule class
        cursor (cursor) -- cursor object of psycopg2 library

    Attributes:
        module (AnsibleModule) -- object of AnsibleModule class
        cursor (cursor) -- cursor object of psycopg2 library
        executed_queries (list) -- executed queries
        name (str) -- name of the sequence
        owner (str) -- name of the owner of the sequence
        schema (str) -- name of the schema (default: public)
        data_type (str) -- data type of the sequence
        start_value (int) -- value of the sequence start
        minvalue (int) -- minimum value of the sequence
        maxvalue (int) -- maximum value of the sequence
        increment (int) -- increment value of the sequence
        cycle (bool) -- sequence can cycle or not
        new_name (str) -- name of the renamed sequence
        new_schema (str) -- name of the new schema
        exists (bool) -- sequence exists or not
    """

    def __init__(self, module, cursor):
        self.module = module
        self.cursor = cursor
        self.executed_queries = []
        self.name = self.module.params['sequence']
        self.owner = ''
        self.schema = self.module.params['schema']
        self.data_type = ''
        self.start_value = ''
        self.minvalue = ''
        self.maxvalue = ''
        self.increment = ''
        self.cycle = ''
        self.new_name = ''
        self.new_schema = ''
        self.exists = False
        # Collect info about the sequence from the database:
        self.get_info()

    def get_info(self):
        """Refresh the sequence attributes from the database.

        Sets self.exists and, when the sequence is found, fills the
        schema/name/owner/type/value attributes from the result row.
        Returns False when the sequence does not exist.
        """
        query = ("SELECT "
                 "s.sequence_schema AS schemaname, "
                 "s.sequence_name AS sequencename, "
                 "pg_get_userbyid(c.relowner) AS sequenceowner, "
                 "s.data_type::regtype AS data_type, "
                 "s.start_value AS start_value, "
                 "s.minimum_value AS min_value, "
                 "s.maximum_value AS max_value, "
                 "s.increment AS increment_by, "
                 "s.cycle_option AS cycle "
                 "FROM information_schema.sequences s "
                 "JOIN pg_class c ON c.relname = s.sequence_name "
                 "LEFT JOIN pg_namespace n ON n.oid = c.relnamespace "
                 "WHERE NOT pg_is_other_temp_schema(n.oid) "
                 "AND c.relkind = 'S'::\"char\" "
                 "AND sequence_name = %(name)s "
                 "AND sequence_schema = %(schema)s")
        res = exec_sql(self, query,
                       query_params={'name': self.name, 'schema': self.schema},
                       add_to_executed=False)
        if not res:
            self.exists = False
            return False
        self.exists = True
        self.schema = res[0]['schemaname']
        self.name = res[0]['sequencename']
        self.owner = res[0]['sequenceowner']
        self.data_type = res[0]['data_type']
        self.start_value = res[0]['start_value']
        self.minvalue = res[0]['min_value']
        self.maxvalue = res[0]['max_value']
        self.increment = res[0]['increment_by']
        self.cycle = res[0]['cycle']

    def create(self):
        """Implements CREATE SEQUENCE command behavior."""
        query = ['CREATE SEQUENCE']
        query.append(self.__add_schema())
        if self.module.params.get('data_type'):
            query.append('AS %s' % self.module.params['data_type'])
        # Compare the numeric options with None instead of relying on
        # truthiness: 0 is a legal value (e.g. "start: 0" with a
        # non-positive minvalue) but is falsy and would be dropped.
        if self.module.params.get('increment') is not None:
            query.append('INCREMENT BY %s' % self.module.params['increment'])
        if self.module.params.get('minvalue') is not None:
            query.append('MINVALUE %s' % self.module.params['minvalue'])
        if self.module.params.get('maxvalue') is not None:
            query.append('MAXVALUE %s' % self.module.params['maxvalue'])
        if self.module.params.get('start') is not None:
            query.append('START WITH %s' % self.module.params['start'])
        if self.module.params.get('cache'):
            query.append('CACHE %s' % self.module.params['cache'])
        if self.module.params.get('cycle'):
            query.append('CYCLE')
        return exec_sql(self, ' '.join(query), return_bool=True)

    def drop(self):
        """Implements DROP SEQUENCE command behavior."""
        query = ['DROP SEQUENCE']
        query.append(self.__add_schema())
        if self.module.params.get('cascade'):
            query.append('CASCADE')
        return exec_sql(self, ' '.join(query), return_bool=True)

    def rename(self):
        """Implements ALTER SEQUENCE RENAME TO command behavior."""
        query = ['ALTER SEQUENCE']
        query.append(self.__add_schema())
        query.append('RENAME TO "%s"' % self.module.params['rename_to'])
        return exec_sql(self, ' '.join(query), return_bool=True)

    def set_owner(self):
        """Implements ALTER SEQUENCE OWNER TO command behavior."""
        query = ['ALTER SEQUENCE']
        query.append(self.__add_schema())
        query.append('OWNER TO "%s"' % self.module.params['owner'])
        return exec_sql(self, ' '.join(query), return_bool=True)

    def set_schema(self):
        """Implements ALTER SEQUENCE SET SCHEMA command behavior."""
        query = ['ALTER SEQUENCE']
        query.append(self.__add_schema())
        query.append('SET SCHEMA "%s"' % self.module.params['newschema'])
        return exec_sql(self, ' '.join(query), return_bool=True)

    def __add_schema(self):
        # Return the double-quoted schema-qualified sequence name:
        return '"%s"."%s"' % (self.schema, self.name)
# ===========================================
# Module execution.
#
def main():
    """Entry point of the module: parse parameters, connect to the database
    and apply the requested state (create/drop/rename/alter) to the sequence."""
    argument_spec = postgres_common_argument_spec()
    argument_spec.update(
        sequence=dict(type='str', required=True, aliases=['name']),
        state=dict(type='str', default='present', choices=['absent', 'present']),
        data_type=dict(type='str', choices=['bigint', 'integer', 'smallint']),
        increment=dict(type='int'),
        minvalue=dict(type='int', aliases=['min']),
        maxvalue=dict(type='int', aliases=['max']),
        start=dict(type='int'),
        cache=dict(type='int'),
        cycle=dict(type='bool', default=False),
        schema=dict(type='str', default='public'),
        cascade=dict(type='bool', default=False),
        rename_to=dict(type='str'),
        owner=dict(type='str'),
        newschema=dict(type='str'),
        db=dict(type='str', default='', aliases=['login_db', 'database']),
        session_role=dict(type='str'),
        trust_input=dict(type="bool", default=True),
    )
    # rename_to and cascade are standalone operations: they cannot be combined
    # with each other or with attribute changes of the sequence.
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        mutually_exclusive=[
            ['rename_to', 'data_type'],
            ['rename_to', 'increment'],
            ['rename_to', 'minvalue'],
            ['rename_to', 'maxvalue'],
            ['rename_to', 'start'],
            ['rename_to', 'cache'],
            ['rename_to', 'cycle'],
            ['rename_to', 'cascade'],
            ['rename_to', 'owner'],
            ['rename_to', 'newschema'],
            ['cascade', 'data_type'],
            ['cascade', 'increment'],
            ['cascade', 'minvalue'],
            ['cascade', 'maxvalue'],
            ['cascade', 'start'],
            ['cascade', 'cache'],
            ['cascade', 'cycle'],
            ['cascade', 'owner'],
            ['cascade', 'newschema'],
        ]
    )
    if not module.params["trust_input"]:
        # Check input for potentially dangerous elements (SQL injection),
        # because identifiers are interpolated into the queries:
        check_input(
            module,
            module.params['sequence'],
            module.params['schema'],
            module.params['rename_to'],
            module.params['owner'],
            module.params['newschema'],
            module.params['session_role'],
        )
    # Note: we don't need to check mutually exclusive params here, because they are
    # checked automatically by AnsibleModule (mutually_exclusive=[] list above).
    # Change autocommit to False if check_mode, so every query can be rolled back:
    autocommit = not module.check_mode
    # Ensure psycopg2 libraries are available before connecting to DB:
    ensure_required_libs(module)
    # Connect to DB and make cursor object:
    conn_params = get_conn_params(module, module.params)
    db_connection, dummy = connect_to_db(module, conn_params, autocommit=autocommit)
    cursor = db_connection.cursor(cursor_factory=DictCursor)
    ##############
    # Create the object and do main job:
    data = Sequence(module, cursor)
    # Set defaults:
    changed = False
    # Create new sequence
    if not data.exists and module.params['state'] == 'present':
        # rename/newschema require an existing sequence, fail early:
        if module.params.get('rename_to'):
            module.fail_json(msg="Sequence '%s' does not exist, nothing to rename" % module.params['sequence'])
        if module.params.get('newschema'):
            module.fail_json(msg="Sequence '%s' does not exist, change of schema not possible" % module.params['sequence'])
        changed = data.create()
    # Drop non-existing sequence
    elif not data.exists and module.params['state'] == 'absent':
        # Nothing to do
        changed = False
    # Drop existing sequence
    elif data.exists and module.params['state'] == 'absent':
        changed = data.drop()
    # Rename sequence
    if data.exists and module.params.get('rename_to'):
        if data.name != module.params['rename_to']:
            changed = data.rename()
            if changed:
                data.new_name = module.params['rename_to']
    # Refresh information (attributes may have changed by create/rename above):
    if module.params['state'] == 'present':
        data.get_info()
    # Change owner, schema and settings
    if module.params['state'] == 'present' and data.exists:
        # change owner
        if module.params.get('owner'):
            if data.owner != module.params['owner']:
                changed = data.set_owner()
        # Set schema
        if module.params.get('newschema'):
            if data.schema != module.params['newschema']:
                changed = data.set_schema()
                if changed:
                    data.new_schema = module.params['newschema']
    # Rollback if it's possible and check_mode:
    if module.check_mode:
        db_connection.rollback()
    else:
        db_connection.commit()
    cursor.close()
    db_connection.close()
    # Make return values:
    kw = dict(
        changed=changed,
        state='present',
        sequence=data.name,
        queries=data.executed_queries,
        schema=data.schema,
        data_type=data.data_type,
        increment=data.increment,
        minvalue=data.minvalue,
        maxvalue=data.maxvalue,
        start=data.start_value,
        cycle=data.cycle,
        owner=data.owner,
    )
    if module.params['state'] == 'present':
        # Only report the new name/schema when a rename/move actually happened:
        if data.new_name:
            kw['newname'] = data.new_name
        if data.new_schema:
            kw['newschema'] = data.new_schema
    elif module.params['state'] == 'absent':
        kw['state'] = 'absent'
    module.exit_json(**kw)
if __name__ == '__main__':
main()

View File

@@ -0,0 +1,507 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: postgresql_set
short_description: Change a PostgreSQL server configuration parameter
description:
- Allows to change a PostgreSQL server configuration parameter.
- The module uses ALTER SYSTEM command and applies changes by reload server configuration.
- ALTER SYSTEM is used for changing server configuration parameters across the entire database cluster.
- It can be more convenient and safe than the traditional method of manually editing the postgresql.conf file.
- ALTER SYSTEM writes the given parameter setting to the $PGDATA/postgresql.auto.conf file,
which is read in addition to postgresql.conf.
- The module allows to reset parameter to boot_val (cluster initial value) by I(reset=true) or remove parameter
string from postgresql.auto.conf and reload I(value=default) (for settings with postmaster context restart is required).
- After change you can see in the ansible output the previous and
the new parameter value and other information using returned values and M(ansible.builtin.debug) module.
options:
name:
description:
- Name of PostgreSQL server parameter. Pay attention that parameters are case sensitive (see examples below).
type: str
required: true
value:
description:
- Parameter value to set.
- To remove parameter string from postgresql.auto.conf and
reload the server configuration you must pass I(value=default).
With I(value=default) the playbook always returns changed is true.
type: str
reset:
description:
- Restore parameter to initial state (boot_val). Mutually exclusive with I(value).
type: bool
default: false
session_role:
description:
- Switch to session_role after connecting. The specified session_role must
be a role that the current login_user is a member of.
- Permissions checking for SQL commands is carried out as though
the session_role were the one that had logged in originally.
type: str
db:
description:
- Name of database to connect.
type: str
aliases:
- login_db
trust_input:
description:
- If C(false), check whether values of parameters are potentially dangerous.
- It makes sense to use C(false) only when SQL injections are possible.
type: bool
default: true
version_added: '0.2.0'
notes:
- Supported version of PostgreSQL is 9.4 and later.
- Supports C(check_mode).
- Pay attention, change setting with 'postmaster' context can return changed is true
when actually nothing changes because the same value may be presented in
several different form, for example, 1024MB, 1GB, etc. However in pg_settings
system view it can be defined like 131072 number of 8kB pages.
The final check of the parameter value cannot compare it because the server was
not restarted and the value in pg_settings is not updated yet.
- For some parameters restart of PostgreSQL server is required.
See official documentation U(https://www.postgresql.org/docs/current/view-pg-settings.html).
seealso:
- module: community.postgresql.postgresql_info
- name: PostgreSQL server configuration
description: General information about PostgreSQL server configuration.
link: https://www.postgresql.org/docs/current/runtime-config.html
- name: PostgreSQL view pg_settings reference
description: Complete reference of the pg_settings view documentation.
link: https://www.postgresql.org/docs/current/view-pg-settings.html
- name: PostgreSQL ALTER SYSTEM command reference
description: Complete reference of the ALTER SYSTEM command documentation.
link: https://www.postgresql.org/docs/current/sql-altersystem.html
author:
- Andrew Klychkov (@Andersson007)
extends_documentation_fragment:
- community.postgresql.postgres
'''
EXAMPLES = r'''
- name: Restore wal_keep_segments parameter to initial state
community.postgresql.postgresql_set:
name: wal_keep_segments
reset: true
# Set work_mem parameter to 32MB and show what's been changed and restart is required or not
# (output example: "msg": "work_mem 4MB >> 64MB restart_req: False")
- name: Set work mem parameter
community.postgresql.postgresql_set:
name: work_mem
value: 32mb
register: set
- name: Print the result if the setting changed
ansible.builtin.debug:
msg: "{{ set.name }} {{ set.prev_val_pretty }} >> {{ set.value_pretty }} restart_req: {{ set.restart_required }}"
when: set.changed
# Ensure that the restart of PostgreSQL server must be required for some parameters.
# In this situation you see the same parameter in prev_val_pretty and value_pretty, but 'changed=True'
# (If you passed the value that was different from the current server setting).
- name: Set log_min_duration_statement parameter to 1 second
community.postgresql.postgresql_set:
name: log_min_duration_statement
value: 1s
- name: Set wal_log_hints parameter to default value (remove parameter from postgresql.auto.conf)
community.postgresql.postgresql_set:
name: wal_log_hints
value: default
- name: Set TimeZone parameter (careful, case sensitive)
community.postgresql.postgresql_set:
name: TimeZone
value: 'Europe/Paris'
'''
RETURN = r'''
name:
description: Name of PostgreSQL server parameter.
returned: always
type: str
sample: 'shared_buffers'
restart_required:
description: Information about parameter current state.
returned: always
type: bool
sample: true
prev_val_pretty:
description: Information about previous state of the parameter.
returned: always
type: str
sample: '4MB'
value_pretty:
description: Information about current state of the parameter.
returned: always
type: str
sample: '64MB'
value:
description:
- Dictionary that contains the current parameter value (at the time of playbook finish).
- Pay attention that for real change some parameters restart of PostgreSQL server is required.
- Returns the current value in the check mode.
returned: always
type: dict
sample: { "value": 67108864, "unit": "b" }
context:
description:
- PostgreSQL setting context.
returned: always
type: str
sample: user
'''
try:
from psycopg2.extras import DictCursor
except Exception:
# psycopg2 is checked by connect_to_db()
# from ansible.module_utils.postgres
pass
from copy import deepcopy
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.postgresql.plugins.module_utils.database import (
check_input,
)
from ansible_collections.community.postgresql.plugins.module_utils.postgres import (
connect_to_db,
ensure_required_libs,
get_conn_params,
postgres_common_argument_spec,
)
from ansible.module_utils._text import to_native
PG_REQ_VER = 90400
# To allow to set value like 1mb instead of 1MB, etc:
LOWERCASE_SIZE_UNITS = ("mb", "gb", "tb")
# ===========================================
# PostgreSQL module specific support methods.
#
def param_get(cursor, module, name):
    """Fetch the current state of the server parameter *name*.

    Returns a dict with the value as reported by SHOW ('current_val'),
    the raw setting converted to bytes for kB/MB-united parameters
    ('raw_val'), the unit, the boot-time default ('boot_val') and the
    setting context. Fails the module if the parameter is unknown or
    cannot be read.
    """
    query = ("SELECT name, setting, unit, context, boot_val "
             "FROM pg_settings WHERE name = %(name)s")
    try:
        cursor.execute(query, {'name': name})
        info = cursor.fetchone()
        cursor.execute("SHOW %s" % name)
        val = cursor.fetchone()
    except Exception as e:
        module.fail_json(msg="Unable to get %s value due to : %s" % (name, to_native(e)))
    if not info:
        module.fail_json(msg="No such parameter: %s. "
                             "Please check its spelling or presence in your PostgreSQL version "
                             "(https://www.postgresql.org/docs/current/runtime-config.html)" % name)
    raw_val, unit = info['setting'], info['unit']
    context, boot_val = info['context'], info['boot_val']
    # SHOW can report booleans as True/False; normalize to the on/off form:
    shown = val[name]
    if shown == 'True':
        val[name] = 'on'
    elif shown == 'False':
        val[name] = 'off'
    # Convert kB/MB-united settings to plain bytes so callers can compare them
    # (negative sentinel values such as -1 are deliberately left untouched):
    factor = {'kB': 1024, 'MB': 1024 * 1024}.get(unit)
    if factor is not None:
        if int(raw_val) > 0:
            raw_val = int(raw_val) * factor
        if int(boot_val) > 0:
            boot_val = int(boot_val) * factor
        unit = 'b'
    return {
        'current_val': val[name],
        'raw_val': raw_val,
        'unit': unit,
        'boot_val': boot_val,
        'context': context,
    }
def pretty_to_bytes(pretty_val):
    """Convert a human-readable size value to a number of bytes.

    Values with a 'B', 'kB', 'MB', 'GB' or 'TB' suffix (e.g. '64MB',
    '1.5GB') are converted to an integer number of bytes. Plain numbers
    are returned as int (or float). Anything else - empty values, values
    not starting with a digit, or unknown suffixes such as '100ms' - is
    returned unchanged, so the callers can also compare settings that
    are not sizes.
    """
    # It's sometimes possible to have an empty value
    if not pretty_val:
        return pretty_val
    # If the first char is not a digit, it does not make sense
    # to parse further, so just return a passed value
    if not pretty_val[0].isdigit():
        return pretty_val
    # If the last char is not an alphabetical symbol, it means that
    # it does not contain any suffixes, so it is a plain number
    if not pretty_val[-1].isalpha():
        try:
            return int(pretty_val)
        except ValueError:
            return float(pretty_val)
    # Extract the numeric prefix. Unlike a pure digit scan this also
    # keeps a decimal point, so values like '1.5GB' convert correctly
    # instead of silently truncating to '1GB'.
    num_chars = []
    for c in pretty_val:
        if c.isdigit() or c == '.':
            num_chars.append(c)
        else:
            break
    num_str = ''.join(num_chars)
    try:
        num_part = int(num_str)
    except ValueError:
        try:
            num_part = float(num_str)
        except ValueError:
            # Malformed numeric prefix (e.g. '1.2.3GB'), return as is
            return pretty_val
    val_in_bytes = None
    if len(pretty_val) >= 2:
        suffix = pretty_val[-2:]
        if 'kB' in suffix:
            val_in_bytes = num_part * 1024
        elif 'MB' in suffix:
            val_in_bytes = num_part * 1024 * 1024
        elif 'GB' in suffix:
            val_in_bytes = num_part * 1024 * 1024 * 1024
        elif 'TB' in suffix:
            val_in_bytes = num_part * 1024 * 1024 * 1024 * 1024
    # For cases like "1B"
    if not val_in_bytes and 'B' in pretty_val[-1]:
        val_in_bytes = num_part
    if val_in_bytes is not None:
        # Collapse float results such as 1610612736.0 to a plain int:
        if isinstance(val_in_bytes, float) and val_in_bytes.is_integer():
            val_in_bytes = int(val_in_bytes)
        return val_in_bytes
    return pretty_val
def param_set(cursor, module, name, value, context):
    """Set the server parameter *name* to *value* via ALTER SYSTEM.

    Reloads the server configuration afterwards unless the parameter
    requires a full restart (context 'postmaster'). Returns True on
    success, otherwise fails the module.
    """
    try:
        if str(value).lower() == 'default':
            # Remove the setting from postgresql.auto.conf:
            query = "ALTER SYSTEM SET %s = DEFAULT" % name
        else:
            if isinstance(value, str) and ',' in value:
                # Issue https://github.com/ansible-collections/community.postgresql/issues/78
                # Change value from 'one, two, three' -> "'one','two','three'"
                value = ','.join(["'" + elem.strip() + "'" for elem in value.split(',')])
                query = "ALTER SYSTEM SET %s = %s" % (name, value)
            else:
                query = "ALTER SYSTEM SET %s = '%s'" % (name, value)
        # NOTE: name/value are interpolated into the statement directly because
        # ALTER SYSTEM cannot take bind parameters; they are validated by
        # check_input() in main() when trust_input is false.
        cursor.execute(query)
        # Settings with 'postmaster' context only take effect after a server
        # restart, so reloading the configuration would be useless:
        if context != 'postmaster':
            cursor.execute("SELECT pg_reload_conf()")
    except Exception as e:
        # Bug fix: this is the "set" path, the message used to say "get":
        module.fail_json(msg="Unable to set %s value due to : %s" % (name, to_native(e)))
    return True
# ===========================================
# Module execution.
#
def main():
    """Entry point of the module: set, reset or remove a PostgreSQL server
    configuration parameter via ALTER SYSTEM and report the change."""
    argument_spec = postgres_common_argument_spec()
    argument_spec.update(
        name=dict(type='str', required=True),
        db=dict(type='str', aliases=['login_db']),
        value=dict(type='str'),
        reset=dict(type='bool', default=False),
        session_role=dict(type='str'),
        trust_input=dict(type='bool', default=True),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )
    name = module.params['name']
    value = module.params['value']
    reset = module.params['reset']
    session_role = module.params['session_role']
    trust_input = module.params['trust_input']
    if not trust_input:
        # Check input for potentially dangerous elements:
        check_input(module, name, value, session_role)
    if value:
        # Convert a value like 1mb (Postgres does not support) to 1MB, etc:
        if len(value) > 2 and value[:-2].isdigit() and value[-2:] in LOWERCASE_SIZE_UNITS:
            value = value.upper()
        # Convert a value like 1b (Postgres does not support) to 1B:
        elif len(value) > 1 and ('b' in value[-1] and value[:-1].isdigit()):
            value = value.upper()
    if value is not None and reset:
        module.fail_json(msg="%s: value and reset params are mutually exclusive" % name)
    if value is None and not reset:
        module.fail_json(msg="%s: at least one of value or reset param must be specified" % name)
    # Ensure psycopg2 libraries are available before connecting to DB:
    ensure_required_libs(module)
    conn_params = get_conn_params(module, module.params, warn_db_default=False)
    # autocommit=True because ALTER SYSTEM cannot run inside a transaction block:
    db_connection, dummy = connect_to_db(module, conn_params, autocommit=True)
    cursor = db_connection.cursor(cursor_factory=DictCursor)
    kw = {}
    # Check server version (needs 9.4 or later):
    ver = db_connection.server_version
    if ver < PG_REQ_VER:
        # Warn and exit with empty values instead of failing:
        module.warn("PostgreSQL is %s version but %s or later is required" % (ver, PG_REQ_VER))
        kw = dict(
            changed=False,
            restart_required=False,
            value_pretty="",
            prev_val_pretty="",
            value={"value": "", "unit": ""},
        )
        kw['name'] = name
        db_connection.close()
        module.exit_json(**kw)
    # Set default returned values:
    restart_required = False
    changed = False
    kw['name'] = name
    kw['restart_required'] = False
    # Get info about param state:
    res = param_get(cursor, module, name)
    current_val = res['current_val']
    raw_val = res['raw_val']
    unit = res['unit']
    boot_val = res['boot_val']
    context = res['context']
    # Normalize boolean values to the server's on/off form:
    if value == 'True':
        value = 'on'
    elif value == 'False':
        value = 'off'
    kw['prev_val_pretty'] = current_val
    kw['value_pretty'] = deepcopy(kw['prev_val_pretty'])
    kw['context'] = context
    # Do job
    if context == "internal":
        # Preset parameters cannot be changed at all:
        module.fail_json(msg="%s: cannot be changed (internal context). See "
                             "https://www.postgresql.org/docs/current/runtime-config-preset.html" % name)
    if context == "postmaster":
        restart_required = True
    # If check_mode, just compare and exit:
    if module.check_mode:
        # Compare sizes in bytes so '64MB' and '65536kB' count as equal:
        if pretty_to_bytes(value) == pretty_to_bytes(current_val):
            kw['changed'] = False
        else:
            kw['value_pretty'] = value
            kw['changed'] = True
        # Anyway returns current raw value in the check_mode:
        kw['value'] = dict(
            value=raw_val,
            unit=unit,
        )
        kw['restart_required'] = restart_required
        module.exit_json(**kw)
    # Set param (value can be an empty string):
    if value is not None and value != current_val:
        changed = param_set(cursor, module, name, value, context)
        kw['value_pretty'] = value
    # Reset param:
    elif reset:
        if raw_val == boot_val:
            # nothing to change, exit:
            kw['value'] = dict(
                value=raw_val,
                unit=unit,
            )
            module.exit_json(**kw)
        changed = param_set(cursor, module, name, boot_val, context)
    cursor.close()
    db_connection.close()
    # Reconnect and recheck current value:
    if context in ('sighup', 'superuser-backend', 'backend', 'superuser', 'user'):
        db_connection, dummy = connect_to_db(module, conn_params, autocommit=True)
        cursor = db_connection.cursor(cursor_factory=DictCursor)
        res = param_get(cursor, module, name)
        # f_ means 'final'
        f_value = res['current_val']
        f_raw_val = res['raw_val']
        # The real "changed" status is whether the raw value moved at all:
        if raw_val == f_raw_val:
            changed = False
        else:
            changed = True
        kw['value_pretty'] = f_value
        kw['value'] = dict(
            value=f_raw_val,
            unit=unit,
        )
        cursor.close()
        db_connection.close()
    kw['changed'] = changed
    kw['restart_required'] = restart_required
    if restart_required and changed:
        module.warn("Restart of PostgreSQL is required for setting %s" % name)
    module.exit_json(**kw)
if __name__ == '__main__':
main()

View File

@@ -0,0 +1,308 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, John Scalia (@jscalia), Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: postgresql_slot
short_description: Add or remove replication slots from a PostgreSQL database
description:
- Add or remove physical or logical replication slots from a PostgreSQL database.
options:
name:
description:
- Name of the replication slot to add or remove.
type: str
required: true
aliases:
- slot_name
slot_type:
description:
- Slot type.
type: str
default: physical
choices: [ logical, physical ]
state:
description:
- The slot state.
- I(state=present) implies the slot must be present in the system.
      - I(state=absent) implies the slot must be removed from the system.
type: str
default: present
choices: [ absent, present ]
immediately_reserve:
description:
- Optional parameter that when C(true) specifies that the LSN for this replication slot be reserved
immediately, otherwise the default, C(false), specifies that the LSN is reserved on the first connection
from a streaming replication client.
- Is available from PostgreSQL version 9.6.
- Uses only with I(slot_type=physical).
- Mutually exclusive with I(slot_type=logical).
type: bool
default: false
output_plugin:
description:
- All logical slots must indicate which output plugin decoder they're using.
- This parameter does not apply to physical slots.
- It will be ignored with I(slot_type=physical).
type: str
default: "test_decoding"
db:
description:
- Name of database to connect to.
type: str
aliases:
- login_db
session_role:
description:
- Switch to session_role after connecting.
The specified session_role must be a role that the current login_user is a member of.
- Permissions checking for SQL commands is carried out as though
the session_role were the one that had logged in originally.
type: str
trust_input:
description:
- If C(false), check the value of I(session_role) is potentially dangerous.
- It makes sense to use C(false) only when SQL injections via I(session_role) are possible.
type: bool
default: true
version_added: '0.2.0'
notes:
- Physical replication slots were introduced to PostgreSQL with version 9.4,
while logical replication slots were added beginning with version 10.0.
- Supports C(check_mode).
seealso:
- name: PostgreSQL pg_replication_slots view reference
description: Complete reference of the PostgreSQL pg_replication_slots view.
link: https://www.postgresql.org/docs/current/view-pg-replication-slots.html
- name: PostgreSQL streaming replication protocol reference
description: Complete reference of the PostgreSQL streaming replication protocol documentation.
link: https://www.postgresql.org/docs/current/protocol-replication.html
- name: PostgreSQL logical replication protocol reference
description: Complete reference of the PostgreSQL logical replication protocol documentation.
link: https://www.postgresql.org/docs/current/protocol-logical-replication.html
author:
- John Scalia (@jscalia)
- Andrew Klychkov (@Andersson007)
- Thomas O'Donnell (@andytom)
extends_documentation_fragment:
- community.postgresql.postgres
'''
EXAMPLES = r'''
- name: Create physical_one physical slot if doesn't exist
become_user: postgres
community.postgresql.postgresql_slot:
slot_name: physical_one
db: ansible
- name: Remove physical_one slot if exists
become_user: postgres
community.postgresql.postgresql_slot:
slot_name: physical_one
db: ansible
state: absent
- name: Create logical_one logical slot to the database acme if doesn't exist
community.postgresql.postgresql_slot:
name: logical_slot_one
slot_type: logical
state: present
output_plugin: custom_decoder_one
db: "acme"
- name: Remove logical_one slot if exists from the cluster running on another host and non-standard port
community.postgresql.postgresql_slot:
name: logical_one
login_host: mydatabase.example.org
port: 5433
login_user: ourSuperuser
login_password: thePassword
state: absent
'''
RETURN = r'''
name:
description: Name of the slot.
returned: always
type: str
sample: "physical_one"
queries:
description: List of executed queries.
returned: always
type: str
sample: [ "SELECT pg_create_physical_replication_slot('physical_one', False, False)" ]
'''
try:
from psycopg2.extras import DictCursor
except ImportError:
# psycopg2 is checked by connect_to_db()
# from ansible.module_utils.postgres
pass
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.postgresql.plugins.module_utils.database import (
check_input,
)
from ansible_collections.community.postgresql.plugins.module_utils.postgres import (
connect_to_db,
exec_sql,
ensure_required_libs,
get_conn_params,
postgres_common_argument_spec,
)
# ===========================================
# PostgreSQL module specific support methods.
#
class PgSlot(object):
    """Implements CREATE/DROP behavior for PostgreSQL replication slots.

    Arguments:
        module (AnsibleModule) -- object of AnsibleModule class
        cursor (cursor) -- cursor object of psycopg2 library
        name (str) -- name of the replication slot

    Attributes:
        exists (bool) -- the slot exists or not
        kind (str) -- type of the existing slot ('physical' or 'logical')
        changed (bool) -- something was changed after execution or not
        executed_queries (list) -- executed queries
    """

    def __init__(self, module, cursor, name):
        self.module = module
        self.cursor = cursor
        self.name = name
        self.exists = False
        self.kind = ''
        # Initialize bookkeeping attributes BEFORE running any query, so
        # helpers that receive self (exec_sql) can always rely on them:
        self.changed = False
        self.executed_queries = []
        self.__slot_exists()

    def create(self, kind='physical', immediately_reserve=False, output_plugin=False, just_check=False):
        """Create a replication slot of the given kind if it does not exist.

        Returns False without doing anything when the slot already exists
        (warning if the existing slot has a different type). With
        just_check=True (check mode) no query is executed.
        """
        if self.exists:
            if self.kind == kind:
                return False
            else:
                self.module.warn("slot with name '%s' already exists "
                                 "but has another type '%s'" % (self.name, self.kind))
                return False
        if just_check:
            # Check mode: the caller only wants to know whether a change
            # would happen, do not touch the server.
            return None
        if kind == 'physical':
            # Check server version (immediately_reserved needs 9.6+):
            if self.cursor.connection.server_version < 90600:
                query = "SELECT pg_create_physical_replication_slot(%(name)s)"
            else:
                query = "SELECT pg_create_physical_replication_slot(%(name)s, %(i_reserve)s)"
            self.changed = exec_sql(self, query,
                                    query_params={'name': self.name, 'i_reserve': immediately_reserve},
                                    return_bool=True)
        elif kind == 'logical':
            query = "SELECT pg_create_logical_replication_slot(%(name)s, %(o_plugin)s)"
            self.changed = exec_sql(self, query,
                                    query_params={'name': self.name, 'o_plugin': output_plugin}, return_bool=True)

    def drop(self):
        """Drop the replication slot if it exists."""
        if not self.exists:
            return False
        query = "SELECT pg_drop_replication_slot(%(name)s)"
        self.changed = exec_sql(self, query, query_params={'name': self.name}, return_bool=True)

    def __slot_exists(self):
        """Set self.exists and self.kind from pg_replication_slots."""
        query = "SELECT slot_type FROM pg_replication_slots WHERE slot_name = %(name)s"
        res = exec_sql(self, query, query_params={'name': self.name}, add_to_executed=False)
        if res:
            self.exists = True
            self.kind = res[0][0]
# ===========================================
# Module execution.
#
def main():
    """Entry point of the module: ensure a physical or logical replication
    slot is present or absent on the server."""
    argument_spec = postgres_common_argument_spec()
    argument_spec.update(
        db=dict(type="str", aliases=["login_db"]),
        name=dict(type="str", required=True, aliases=["slot_name"]),
        slot_type=dict(type="str", default="physical", choices=["logical", "physical"]),
        immediately_reserve=dict(type="bool", default=False),
        session_role=dict(type="str"),
        output_plugin=dict(type="str", default="test_decoding"),
        state=dict(type="str", default="present", choices=["absent", "present"]),
        trust_input=dict(type="bool", default=True),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )
    name = module.params["name"]
    slot_type = module.params["slot_type"]
    immediately_reserve = module.params["immediately_reserve"]
    state = module.params["state"]
    output_plugin = module.params["output_plugin"]
    if not module.params["trust_input"]:
        # Check input for potentially dangerous elements:
        check_input(module, module.params['session_role'])
    # immediately_reserve only makes sense for physical slots:
    if immediately_reserve and slot_type == 'logical':
        module.fail_json(msg="Module parameters immediately_reserve and slot_type=logical are mutually exclusive")
    # When slot_type is logical and parameter db is not passed,
    # the default database will be used to create the slot and
    # the user should know about this.
    # When the slot type is physical,
    # it doesn't matter which database will be used
    # because physical slots are global objects.
    if slot_type == 'logical':
        warn_db_default = True
    else:
        warn_db_default = False
    # Ensure psycopg2 libraries are available before connecting to DB:
    ensure_required_libs(module)
    conn_params = get_conn_params(module, module.params, warn_db_default=warn_db_default)
    db_connection, dummy = connect_to_db(module, conn_params, autocommit=True)
    cursor = db_connection.cursor(cursor_factory=DictCursor)
    ##################################
    # Create an object and do main job
    pg_slot = PgSlot(module, cursor, name)
    changed = False
    if module.check_mode:
        # Only predict whether a change would happen; nothing is executed:
        if state == "present":
            if not pg_slot.exists:
                changed = True
            pg_slot.create(slot_type, immediately_reserve, output_plugin, just_check=True)
        elif state == "absent":
            if pg_slot.exists:
                changed = True
    else:
        if state == "absent":
            pg_slot.drop()
        elif state == "present":
            pg_slot.create(slot_type, immediately_reserve, output_plugin)
        changed = pg_slot.changed
    db_connection.close()
    module.exit_json(changed=changed, name=name, queries=pg_slot.executed_queries)
if __name__ == '__main__':
main()

View File

@@ -0,0 +1,739 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: postgresql_subscription
short_description: Add, update, or remove PostgreSQL subscription
description:
- Add, update, or remove PostgreSQL subscription.
version_added: '0.2.0'
options:
name:
description:
- Name of the subscription to add, update, or remove.
type: str
required: true
db:
description:
- Name of the database to connect to and where
the subscription state will be changed.
aliases: [ login_db ]
type: str
required: true
state:
description:
- The subscription state.
- C(present) implies that if I(name) subscription doesn't exist, it will be created.
- C(absent) implies that if I(name) subscription exists, it will be removed.
- C(refresh) implies that if I(name) subscription exists, it will be refreshed.
Fetches missing table information from the publisher. Always returns ``changed=True``.
This will start replication of tables that were added to the subscribed-to publications
since the last invocation of REFRESH PUBLICATION or since CREATE SUBSCRIPTION.
The existing data in the publications that are being subscribed to
should be copied once the replication starts.
- For more information about C(refresh) see U(https://www.postgresql.org/docs/current/sql-altersubscription.html).
type: str
choices: [ absent, present, refresh ]
default: present
owner:
description:
- Subscription owner.
- If I(owner) is not defined, the owner will be set as I(login_user) or I(session_role).
- Ignored when I(state) is not C(present).
type: str
publications:
description:
- The publication names on the publisher to use for the subscription.
- Ignored when I(state) is not C(present).
type: list
elements: str
connparams:
description:
- The connection dict param-value to connect to the publisher.
- For more information see U(https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING).
- Ignored when I(state) is not C(present).
type: dict
cascade:
description:
- Drop subscription dependencies. Has effect with I(state=absent) only.
- Ignored when I(state) is not C(absent).
type: bool
default: false
subsparams:
description:
- Dictionary of optional parameters for a subscription, e.g. copy_data, enabled, create_slot, etc.
- For update the subscription allowed keys are C(enabled), C(slot_name), C(synchronous_commit), C(publication_name).
- See available parameters to create a new subscription
on U(https://www.postgresql.org/docs/current/sql-createsubscription.html).
- Ignored when I(state) is not C(present).
type: dict
session_role:
description:
- Switch to session_role after connecting. The specified session_role must
be a role that the current login_user is a member of.
- Permissions checking for SQL commands is carried out as though
the session_role were the one that had logged in originally.
type: str
version_added: '0.2.0'
trust_input:
description:
- If C(false), check whether values of parameters I(name), I(publications), I(owner),
I(session_role), I(connparams), I(subsparams) are potentially dangerous.
- It makes sense to use C(false) only when SQL injections via the parameters are possible.
type: bool
default: true
version_added: '0.2.0'
notes:
- PostgreSQL version must be 10 or greater.
- Supports C(check_mode).
seealso:
- module: community.postgresql.postgresql_publication
- module: community.postgresql.postgresql_info
- name: CREATE SUBSCRIPTION reference
description: Complete reference of the CREATE SUBSCRIPTION command documentation.
link: https://www.postgresql.org/docs/current/sql-createsubscription.html
- name: ALTER SUBSCRIPTION reference
description: Complete reference of the ALTER SUBSCRIPTION command documentation.
link: https://www.postgresql.org/docs/current/sql-altersubscription.html
- name: DROP SUBSCRIPTION reference
description: Complete reference of the DROP SUBSCRIPTION command documentation.
link: https://www.postgresql.org/docs/current/sql-dropsubscription.html
author:
- Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
extends_documentation_fragment:
- community.postgresql.postgres
'''
EXAMPLES = r'''
- name: >
Create acme subscription in mydb database using acme_publication and
the following connection parameters to connect to the publisher.
Set the subscription owner as alice.
community.postgresql.postgresql_subscription:
db: mydb
name: acme
state: present
publications: acme_publication
owner: alice
connparams:
host: 127.0.0.1
port: 5432
user: repl
password: replpass
dbname: mydb
- name: Assuming that acme subscription exists, try to change conn parameters
community.postgresql.postgresql_subscription:
db: mydb
name: acme
connparams:
host: 127.0.0.1
port: 5432
user: repl
password: replpass
connect_timeout: 100
- name: Refresh acme publication
community.postgresql.postgresql_subscription:
db: mydb
name: acme
state: refresh
- name: Drop acme subscription from mydb with dependencies (cascade=true)
community.postgresql.postgresql_subscription:
db: mydb
name: acme
state: absent
cascade: true
- name: Assuming that acme subscription exists and enabled, disable the subscription
community.postgresql.postgresql_subscription:
db: mydb
name: acme
state: present
subsparams:
enabled: false
'''
RETURN = r'''
name:
description:
- Name of the subscription.
returned: always
type: str
sample: acme
exists:
description:
- Flag indicates the subscription exists or not at the end of runtime.
returned: always
type: bool
sample: true
queries:
description: List of executed queries.
returned: always
type: str
sample: [ 'DROP SUBSCRIPTION "mysubscription"' ]
initial_state:
description: Subscription configuration at the beginning of runtime.
returned: always
type: dict
sample: {"conninfo": {}, "enabled": true, "owner": "postgres", "slotname": "test", "synccommit": true}
final_state:
description: Subscription configuration at the end of runtime.
returned: always
type: dict
sample: {"conninfo": {}, "enabled": true, "owner": "postgres", "slotname": "test", "synccommit": true}
'''
from copy import deepcopy
try:
from psycopg2.extras import DictCursor
except ImportError:
# psycopg2 is checked by connect_to_db()
# from ansible.module_utils.postgres
pass
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.postgresql.plugins.module_utils.database import check_input
from ansible_collections.community.postgresql.plugins.module_utils.postgres import (
connect_to_db,
exec_sql,
ensure_required_libs,
get_conn_params,
postgres_common_argument_spec,
)
from ansible.module_utils.six import iteritems
SUPPORTED_PG_VERSION = 10000
SUBSPARAMS_KEYS_FOR_UPDATE = ('enabled', 'synchronous_commit', 'slot_name')
################################
# Module functions and classes #
################################
def convert_conn_params(conn_dict):
    """Render a libpq connection dict as a space-separated 'key=value' string.

    Args:
        conn_dict (dict): Connection parameters to render.

    Returns:
        Connection string.
    """
    return ' '.join('%s=%s' % (key, value) for key, value in conn_dict.items())
def convert_subscr_params(params_dict):
    """Render a subscription-parameters dict as a WITH-clause string.

    Python booleans are lowered to SQL 'true'/'false' literals.

    Args:
        params_dict (dict): Parameters to render.

    Returns:
        Parameters string (comma separated 'key = value' pairs).
    """
    rendered = []
    for key, value in params_dict.items():
        if value is True:
            value = 'true'
        elif value is False:
            value = 'false'
        rendered.append('%s = %s' % (key, value))
    return ', '.join(rendered)
def cast_connparams(connparams_dict):
    """Cast string values of a connection-parameters dict to int where possible.

    Values that cannot be represented as an integer are kept unchanged.
    ``int()`` raises ValueError for non-numeric strings but TypeError for
    values such as None or lists, so both are caught to avoid crashing the
    module on such input.

    Note: the dict is modified in place and also returned for convenience.

    Returns:
        Dictionary
    """
    for param, val in connparams_dict.items():
        try:
            connparams_dict[param] = int(val)
        except (TypeError, ValueError):
            # Not an integer-like value -- keep it as is:
            connparams_dict[param] = val
    return connparams_dict
class PgSubscription():
    """Class to work with PostgreSQL subscription.

    Args:
        module (AnsibleModule): Object of AnsibleModule class.
        cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL.
        name (str): The name of the subscription.
        db (str): The database name the subscription will be associated with.

    Attributes:
        module (AnsibleModule): Object of AnsibleModule class.
        cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL.
        name (str): Name of subscription.
        executed_queries (list): List of executed queries.
        attrs (dict): Dict with subscription attributes.
        exists (bool): Flag indicates the subscription exists or not.
    """

    def __init__(self, module, cursor, name, db):
        self.module = module
        self.cursor = cursor
        self.name = name
        self.db = db
        self.executed_queries = []
        self.attrs = {
            'owner': None,
            'enabled': None,
            'synccommit': None,
            'conninfo': {},
            'slotname': None,
            'publications': [],
        }
        # Pristine copy used to reset attrs when the subscription disappears:
        self.empty_attrs = deepcopy(self.attrs)
        self.exists = self.check_subscr()

    def get_info(self):
        """Refresh the subscription information.

        Returns:
            ``self.attrs``.
        """
        self.exists = self.check_subscr()
        return self.attrs

    def check_subscr(self):
        """Check the subscription and refresh ``self.attrs`` subscription attribute.

        Returns:
            True if the subscription with ``self.name`` exists, False otherwise.
        """
        subscr_info = self.__get_general_subscr_info()

        if not subscr_info:
            # The subscription does not exist:
            self.attrs = deepcopy(self.empty_attrs)
            return False

        self.attrs['owner'] = subscr_info.get('rolname')
        self.attrs['enabled'] = subscr_info.get('subenabled')
        # BUGFIX: previously this read 'subenabled' a second time, so
        # 'synchronous_commit' updates were compared against the wrong
        # current value:
        self.attrs['synccommit'] = subscr_info.get('subsynccommit')
        self.attrs['slotname'] = subscr_info.get('subslotname')
        self.attrs['publications'] = subscr_info.get('subpublications')
        if subscr_info.get('subconninfo'):
            for param in subscr_info['subconninfo'].split(' '):
                # Split on the first '=' only: values (passwords, options)
                # may themselves contain '=' characters.
                tmp = param.split('=', 1)
                try:
                    self.attrs['conninfo'][tmp[0]] = int(tmp[1])
                except ValueError:
                    self.attrs['conninfo'][tmp[0]] = tmp[1]

        return True

    def create(self, connparams, publications, subsparams, check_mode=True):
        """Create the subscription.

        Args:
            connparams (str): Connection string in libpq style.
            publications (list): Publications on the primary to use.
            subsparams (str): Parameters string in WITH () clause style.

        Kwargs:
            check_mode (bool): If True, don't actually change anything,
                just make SQL, add it to ``self.executed_queries`` and return True.

        Returns:
            changed (bool): True if the subscription has been created, otherwise False.
        """
        query_fragments = []
        query_fragments.append("CREATE SUBSCRIPTION %s CONNECTION '%s' "
                               "PUBLICATION %s" % (self.name, connparams, ', '.join(publications)))

        if subsparams:
            query_fragments.append("WITH (%s)" % subsparams)

        changed = self.__exec_sql(' '.join(query_fragments), check_mode=check_mode)

        return changed

    def update(self, connparams, publications, subsparams, check_mode=True):
        """Update the subscription.

        Args:
            connparams (dict): Connection dict in libpq style.
            publications (list): Publications on the primary to use.
            subsparams (dict): Dictionary of optional parameters.

        Kwargs:
            check_mode (bool): If True, don't actually change anything,
                just make SQL, add it to ``self.executed_queries`` and return True.

        Returns:
            changed (bool): True if subscription has been updated, otherwise False.
        """
        changed = False

        if connparams:
            if connparams != self.attrs['conninfo']:
                changed = self.__set_conn_params(convert_conn_params(connparams),
                                                 check_mode=check_mode)

        if publications:
            if sorted(self.attrs['publications']) != sorted(publications):
                changed = self.__set_publications(publications, check_mode=check_mode)

        if subsparams:
            params_to_update = []

            for (param, value) in subsparams.items():
                if param == 'enabled':
                    if self.attrs['enabled'] and value is False:
                        changed = self.enable(enabled=False, check_mode=check_mode)
                    elif not self.attrs['enabled'] and value is True:
                        changed = self.enable(enabled=True, check_mode=check_mode)

                elif param == 'synchronous_commit':
                    if self.attrs['synccommit'] is True and value is False:
                        params_to_update.append("%s = false" % param)
                    elif self.attrs['synccommit'] is False and value is True:
                        params_to_update.append("%s = true" % param)

                elif param == 'slot_name':
                    if self.attrs['slotname'] and self.attrs['slotname'] != value:
                        params_to_update.append("%s = %s" % (param, value))

                else:
                    self.module.warn("Parameter '%s' is not in params supported "
                                     "for update '%s', ignored..." % (param, SUBSPARAMS_KEYS_FOR_UPDATE))

            if params_to_update:
                changed = self.__set_params(params_to_update, check_mode=check_mode)

        return changed

    def drop(self, cascade=False, check_mode=True):
        """Drop the subscription.

        Kwargs:
            cascade (bool): Flag indicates that the subscription needs to be deleted
                with its dependencies.
            check_mode (bool): If True, don't actually change anything,
                just make SQL, add it to ``self.executed_queries`` and return True.

        Returns:
            changed (bool): True if the subscription has been removed, otherwise False.
        """
        if self.exists:
            query_fragments = ["DROP SUBSCRIPTION %s" % self.name]
            if cascade:
                query_fragments.append("CASCADE")

            return self.__exec_sql(' '.join(query_fragments), check_mode=check_mode)

    def set_owner(self, role, check_mode=True):
        """Set a subscription owner.

        Args:
            role (str): Role (user) name that needs to be set as a subscription owner.

        Kwargs:
            check_mode (bool): If True, don't actually change anything,
                just make SQL, add it to ``self.executed_queries`` and return True.

        Returns:
            True if successful, False otherwise.
        """
        query = 'ALTER SUBSCRIPTION %s OWNER TO "%s"' % (self.name, role)
        return self.__exec_sql(query, check_mode=check_mode)

    def refresh(self, check_mode=True):
        """Refresh publication.

        Fetches missing table info from publisher.

        Kwargs:
            check_mode (bool): If True, don't actually change anything,
                just make SQL, add it to ``self.executed_queries`` and return True.

        Returns:
            True if successful, False otherwise.
        """
        query = 'ALTER SUBSCRIPTION %s REFRESH PUBLICATION' % self.name
        return self.__exec_sql(query, check_mode=check_mode)

    def __set_params(self, params_to_update, check_mode=True):
        """Update optional subscription parameters.

        Args:
            params_to_update (list): Parameters with values to update.

        Kwargs:
            check_mode (bool): If True, don't actually change anything,
                just make SQL, add it to ``self.executed_queries`` and return True.

        Returns:
            True if successful, False otherwise.
        """
        query = 'ALTER SUBSCRIPTION %s SET (%s)' % (self.name, ', '.join(params_to_update))
        return self.__exec_sql(query, check_mode=check_mode)

    def __set_conn_params(self, connparams, check_mode=True):
        """Update connection parameters.

        Args:
            connparams (str): Connection string in libpq style.

        Kwargs:
            check_mode (bool): If True, don't actually change anything,
                just make SQL, add it to ``self.executed_queries`` and return True.

        Returns:
            True if successful, False otherwise.
        """
        query = "ALTER SUBSCRIPTION %s CONNECTION '%s'" % (self.name, connparams)
        return self.__exec_sql(query, check_mode=check_mode)

    def __set_publications(self, publications, check_mode=True):
        """Update publications.

        Args:
            publications (list): Publications on the primary to use.

        Kwargs:
            check_mode (bool): If True, don't actually change anything,
                just make SQL, add it to ``self.executed_queries`` and return True.

        Returns:
            True if successful, False otherwise.
        """
        query = 'ALTER SUBSCRIPTION %s SET PUBLICATION %s' % (self.name, ', '.join(publications))
        return self.__exec_sql(query, check_mode=check_mode)

    def enable(self, enabled=True, check_mode=True):
        """Enable or disable the subscription.

        Kwargs:
            enabled (bool): Flag indicates that the subscription needs
                to be enabled or disabled.
            check_mode (bool): If True, don't actually change anything,
                just make SQL, add it to ``self.executed_queries`` and return True.

        Returns:
            True if successful, False otherwise.
        """
        if enabled:
            query = 'ALTER SUBSCRIPTION %s ENABLE' % self.name
        else:
            query = 'ALTER SUBSCRIPTION %s DISABLE' % self.name

        return self.__exec_sql(query, check_mode=check_mode)

    def __get_general_subscr_info(self):
        """Get and return general subscription information.

        Returns:
            Dict with subscription information if successful, False otherwise.
        """
        query = ("SELECT d.datname, r.rolname, s.subenabled, "
                 "s.subconninfo, s.subslotname, s.subsynccommit, "
                 "s.subpublications FROM pg_catalog.pg_subscription s "
                 "JOIN pg_catalog.pg_database d "
                 "ON s.subdbid = d.oid "
                 "JOIN pg_catalog.pg_roles AS r "
                 "ON s.subowner = r.oid "
                 "WHERE s.subname = %(name)s AND d.datname = %(db)s")

        result = exec_sql(self, query, query_params={'name': self.name, 'db': self.db}, add_to_executed=False)
        if result:
            return result[0]
        else:
            return False

    def __exec_sql(self, query, check_mode=False):
        """Execute SQL query.

        Note: If we need just to get information from the database,
            we use ``exec_sql`` function directly.

        Args:
            query (str): Query that needs to be executed.

        Kwargs:
            check_mode (bool): If True, don't actually change anything,
                just add ``query`` to ``self.executed_queries`` and return True.

        Returns:
            True if successful, False otherwise.
        """
        if check_mode:
            self.executed_queries.append(query)
            return True
        else:
            return exec_sql(self, query, return_bool=True)
# ===========================================
# Module execution.
#
def main():
    """Ansible module entry point: create, update, drop, or refresh a subscription."""
    argument_spec = postgres_common_argument_spec()
    argument_spec.update(
        name=dict(type='str', required=True),
        db=dict(type='str', required=True, aliases=['login_db']),
        state=dict(type='str', default='present', choices=['absent', 'present', 'refresh']),
        publications=dict(type='list', elements='str'),
        connparams=dict(type='dict'),
        cascade=dict(type='bool', default=False),
        owner=dict(type='str'),
        subsparams=dict(type='dict'),
        session_role=dict(type='str'),
        trust_input=dict(type='bool', default=True),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )
    # Parameters handling:
    db = module.params['db']
    name = module.params['name']
    state = module.params['state']
    publications = module.params['publications']
    cascade = module.params['cascade']
    owner = module.params['owner']
    subsparams = module.params['subsparams']
    connparams = module.params['connparams']
    session_role = module.params['session_role']
    trust_input = module.params['trust_input']
    if not trust_input:
        # Check input for potentially dangerous elements:
        # dict params are rendered to strings first so check_input can scan them.
        if not subsparams:
            subsparams_str = None
        else:
            subsparams_str = convert_subscr_params(subsparams)
        if not connparams:
            connparams_str = None
        else:
            connparams_str = convert_conn_params(connparams)
        check_input(module, name, publications, owner, session_role,
                    connparams_str, subsparams_str)
    # Warn about (but do not fail on) parameters ignored in the current state:
    if state == 'present' and cascade:
        module.warn('parameter "cascade" is ignored when state is not absent')
    if state != 'present':
        if owner:
            module.warn("parameter 'owner' is ignored when state is not 'present'")
        if publications:
            module.warn("parameter 'publications' is ignored when state is not 'present'")
        if connparams:
            module.warn("parameter 'connparams' is ignored when state is not 'present'")
        if subsparams:
            module.warn("parameter 'subsparams' is ignored when state is not 'present'")
    # Ensure psycopg2 libraries are available before connecting to DB:
    ensure_required_libs(module)
    # Connect to DB and make cursor object:
    pg_conn_params = get_conn_params(module, module.params)
    # We check subscription state without DML queries execution, so set autocommit:
    db_connection, dummy = connect_to_db(module, pg_conn_params, autocommit=True)
    cursor = db_connection.cursor(cursor_factory=DictCursor)
    # Check version (logical replication needs PostgreSQL 10+):
    if cursor.connection.server_version < SUPPORTED_PG_VERSION:
        module.fail_json(msg="PostgreSQL server version should be 10.0 or greater")
    # Set defaults:
    changed = False
    initial_state = {}
    final_state = {}
    ###################################
    # Create object and do rock'n'roll:
    subscription = PgSubscription(module, cursor, name, db)
    if subscription.exists:
        initial_state = deepcopy(subscription.attrs)
        final_state = deepcopy(initial_state)
    if state == 'present':
        if not subscription.exists:
            # CREATE SUBSCRIPTION takes the rendered string forms:
            if subsparams:
                subsparams = convert_subscr_params(subsparams)
            if connparams:
                connparams = convert_conn_params(connparams)
            changed = subscription.create(connparams,
                                          publications,
                                          subsparams,
                                          check_mode=module.check_mode)
        else:
            # ALTER path compares dicts, so cast values instead of rendering:
            if connparams:
                connparams = cast_connparams(connparams)
            changed = subscription.update(connparams,
                                          publications,
                                          subsparams,
                                          check_mode=module.check_mode)
        if owner and subscription.attrs['owner'] != owner:
            changed = subscription.set_owner(owner, check_mode=module.check_mode) or changed
    elif state == 'absent':
        changed = subscription.drop(cascade, check_mode=module.check_mode)
    elif state == 'refresh':
        if not subscription.exists:
            module.fail_json(msg="Refresh failed: subscription '%s' does not exist" % name)
        # Always returns True:
        changed = subscription.refresh(check_mode=module.check_mode)
    # Get final subscription info:
    final_state = subscription.get_info()
    # Connection is not needed any more:
    cursor.close()
    db_connection.close()
    # Return ret values and exit:
    module.exit_json(changed=changed,
                     name=name,
                     exists=subscription.exists,
                     queries=subscription.executed_queries,
                     initial_state=initial_state,
                     final_state=final_state)
if __name__ == '__main__':
main()

View File

@@ -0,0 +1,615 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: postgresql_table
short_description: Create, drop, or modify a PostgreSQL table
description:
- Allows to create, drop, rename, truncate a table, or change some table attributes.
options:
table:
description:
- Table name.
required: true
aliases:
- name
type: str
state:
description:
- The table state. I(state=absent) is mutually exclusive with I(tablespace), I(owner), I(unlogged),
I(like), I(including), I(columns), I(truncate), I(storage_params) and, I(rename).
type: str
default: present
choices: [ absent, present ]
tablespace:
description:
- Set a tablespace for the table.
type: str
owner:
description:
- Set a table owner.
type: str
unlogged:
description:
- Create an unlogged table.
type: bool
default: false
like:
description:
- Create a table like another table (with similar DDL).
Mutually exclusive with I(columns), I(rename), and I(truncate).
type: str
including:
description:
- Keywords that are used with like parameter, may be DEFAULTS, CONSTRAINTS, INDEXES, STORAGE, COMMENTS or ALL.
Needs I(like) specified. Mutually exclusive with I(columns), I(rename), and I(truncate).
type: str
columns:
description:
- Columns that are needed.
type: list
elements: str
rename:
description:
- New table name. Mutually exclusive with I(tablespace), I(owner),
I(unlogged), I(like), I(including), I(columns), I(truncate), and I(storage_params).
type: str
truncate:
description:
- Truncate a table. Mutually exclusive with I(tablespace), I(owner), I(unlogged),
I(like), I(including), I(columns), I(rename), and I(storage_params).
type: bool
default: false
storage_params:
description:
- Storage parameters like fillfactor, autovacuum_vacuum_threshold, etc.
Mutually exclusive with I(rename) and I(truncate).
type: list
elements: str
db:
description:
- Name of database to connect and where the table will be created.
type: str
default: ''
aliases:
- login_db
session_role:
description:
- Switch to session_role after connecting.
The specified session_role must be a role that the current login_user is a member of.
- Permissions checking for SQL commands is carried out as though
the session_role were the one that had logged in originally.
type: str
cascade:
description:
- Automatically drop objects that depend on the table (such as views).
Used with I(state=absent) only.
type: bool
default: false
trust_input:
description:
- If C(false), check whether values of parameters are potentially dangerous.
- It makes sense to use C(false) only when SQL injections are possible.
type: bool
default: true
version_added: '0.2.0'
notes:
- Supports C(check_mode).
- If you do not pass db parameter, tables will be created in the database
named postgres.
- PostgreSQL allows to create columnless table, so columns param is optional.
- Unlogged tables are available from PostgreSQL server version 9.1.
seealso:
- module: community.postgresql.postgresql_sequence
- module: community.postgresql.postgresql_idx
- module: community.postgresql.postgresql_info
- module: community.postgresql.postgresql_tablespace
- module: community.postgresql.postgresql_owner
- module: community.postgresql.postgresql_privs
- module: community.postgresql.postgresql_copy
- name: CREATE TABLE reference
description: Complete reference of the CREATE TABLE command documentation.
link: https://www.postgresql.org/docs/current/sql-createtable.html
- name: ALTER TABLE reference
description: Complete reference of the ALTER TABLE command documentation.
link: https://www.postgresql.org/docs/current/sql-altertable.html
- name: DROP TABLE reference
description: Complete reference of the DROP TABLE command documentation.
link: https://www.postgresql.org/docs/current/sql-droptable.html
- name: PostgreSQL data types
description: Complete reference of the PostgreSQL data types documentation.
link: https://www.postgresql.org/docs/current/datatype.html
author:
- Andrei Klychkov (@Andersson007)
extends_documentation_fragment:
- community.postgresql.postgres
'''
EXAMPLES = r'''
- name: Create tbl2 in the acme database with the DDL like tbl1 with testuser as an owner
community.postgresql.postgresql_table:
db: acme
name: tbl2
like: tbl1
owner: testuser
- name: Create tbl2 in the acme database and tablespace ssd with the DDL like tbl1 including comments and indexes
community.postgresql.postgresql_table:
db: acme
table: tbl2
like: tbl1
including: comments, indexes
tablespace: ssd
- name: Create test_table with several columns in ssd tablespace with fillfactor=10 and autovacuum_analyze_threshold=1
community.postgresql.postgresql_table:
name: test_table
columns:
- id bigserial primary key
- num bigint
- stories text
tablespace: ssd
storage_params:
- fillfactor=10
- autovacuum_analyze_threshold=1
- name: Create an unlogged table in schema acme
community.postgresql.postgresql_table:
name: acme.useless_data
columns: waste_id int
unlogged: true
- name: Rename table foo to bar
community.postgresql.postgresql_table:
table: foo
rename: bar
- name: Rename table foo from schema acme to bar
community.postgresql.postgresql_table:
name: acme.foo
rename: bar
- name: Set owner to someuser
community.postgresql.postgresql_table:
name: foo
owner: someuser
- name: Change tablespace of foo table to new_tablespace and set owner to new_user
community.postgresql.postgresql_table:
name: foo
tablespace: new_tablespace
owner: new_user
- name: Truncate table foo
community.postgresql.postgresql_table:
name: foo
truncate: true
- name: Drop table foo from schema acme
community.postgresql.postgresql_table:
name: acme.foo
state: absent
- name: Drop table bar cascade
community.postgresql.postgresql_table:
name: bar
state: absent
cascade: true
'''
RETURN = r'''
table:
description: Name of a table.
returned: always
type: str
sample: 'foo'
state:
description: Table state.
returned: always
type: str
sample: 'present'
owner:
description: Table owner.
returned: always
type: str
sample: 'postgres'
tablespace:
description: Tablespace.
returned: always
type: str
sample: 'ssd_tablespace'
queries:
description: List of executed queries.
returned: always
type: str
sample: [ 'CREATE TABLE "test_table" (id bigint)' ]
storage_params:
description: Storage parameters.
returned: always
type: list
sample: [ "fillfactor=100", "autovacuum_analyze_threshold=1" ]
'''
try:
from psycopg2.extras import DictCursor
except ImportError:
# psycopg2 is checked by connect_to_db()
# from ansible.module_utils.postgres
pass
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.postgresql.plugins.module_utils.database import (
check_input,
pg_quote_identifier,
)
from ansible_collections.community.postgresql.plugins.module_utils.postgres import (
connect_to_db,
exec_sql,
ensure_required_libs,
get_conn_params,
postgres_common_argument_spec,
)
# ===========================================
# PostgreSQL module specific support methods.
#
class Table(object):
    """Represent a PostgreSQL table and implement operations on it.

    Args:
        name (str) -- table name, optionally schema-qualified as "schema.table"
        module (AnsibleModule) -- object of AnsibleModule class
        cursor (cursor) -- cursor object of psycopg2 library

    Attrs:
        info (dict) -- table metadata: owner, tablespace, storage parameters
        exists (bool) -- True if the table exists in the database
        executed_queries (list) -- list of executed queries
    """

    def __init__(self, name, module, cursor):
        self.name = name
        self.module = module
        self.cursor = cursor
        # Default metadata; refreshed from the catalog by __exists_in_db():
        self.info = {
            'owner': '',
            'tblspace': '',
            'storage_params': [],
        }
        self.exists = False
        self.__exists_in_db()
        self.executed_queries = []

    def get_info(self):
        """Getter to refresh and get table info"""
        self.__exists_in_db()

    def __exists_in_db(self):
        """Check table exists and refresh info"""
        # Split a qualified name into schema and table parts;
        # assume the "public" schema when the name is not qualified:
        if "." in self.name:
            schema = self.name.split('.')[-2]
            tblname = self.name.split('.')[-1]
        else:
            schema = 'public'
            tblname = self.name

        query = ("SELECT t.tableowner, t.tablespace, c.reloptions "
                 "FROM pg_tables AS t "
                 "INNER JOIN pg_class AS c ON c.relname = t.tablename "
                 "INNER JOIN pg_namespace AS n ON c.relnamespace = n.oid "
                 "WHERE t.tablename = %(tblname)s "
                 "AND n.nspname = %(schema)s")
        res = exec_sql(self, query, query_params={'tblname': tblname, 'schema': schema},
                       add_to_executed=False)
        if res:
            self.exists = True
            self.info = dict(
                owner=res[0][0],
                # Normalize NULL tablespace/reloptions to '' / []:
                tblspace=res[0][1] if res[0][1] else '',
                storage_params=res[0][2] if res[0][2] else [],
            )

            return True

        else:
            self.exists = False
            return False

    def create(self, columns='', params='', tblspace='',
               unlogged=False, owner=''):
        """
        Create table.
        If table exists, check passed args (params, tblspace, owner) and,
        if they're different from current, change them.

        Arguments:
        params - storage params (passed by "WITH (...)" in SQL),
            comma separated.
        tblspace - tablespace.
        owner - table owner.
        unlogged - create unlogged table.
        columns - column string (comma separated).
        """
        name = pg_quote_identifier(self.name, 'table')

        changed = False

        if self.exists:
            # An empty tablespace value in pg_tables means the table lives
            # in the default tablespace, so tablespace=pg_default is a no-op.
            # __exists_in_db() normalizes a missing tablespace to '' (never
            # None), so check falsiness here: the previous "is None"
            # comparison was dead code and caused a needless
            # ALTER TABLE ... SET TABLESPACE "pg_default" and changed=True.
            if tblspace == 'pg_default' and not self.info['tblspace']:
                pass  # Because they have the same meaning
            elif tblspace and self.info['tblspace'] != tblspace:
                self.set_tblspace(tblspace)
                changed = True

            if owner and self.info['owner'] != owner:
                self.set_owner(owner)
                changed = True

            if params:
                param_list = [p.strip(' ') for p in params.split(',')]

                # Apply storage params only if at least one of them is new:
                new_param = False
                for p in param_list:
                    if p not in self.info['storage_params']:
                        new_param = True

                if new_param:
                    self.set_stor_params(params)
                    changed = True

            if changed:
                return True
            return False

        # The table does not exist yet, build CREATE TABLE piece by piece:
        query = "CREATE"
        if unlogged:
            query += " UNLOGGED TABLE %s" % name
        else:
            query += " TABLE %s" % name

        if columns:
            query += " (%s)" % columns
        else:
            # CREATE TABLE with an empty column list is valid SQL:
            query += " ()"

        if params:
            query += " WITH (%s)" % params

        if tblspace:
            query += ' TABLESPACE "%s"' % tblspace

        if exec_sql(self, query, return_bool=True):
            changed = True

        if owner:
            changed = self.set_owner(owner)

        return changed

    def create_like(self, src_table, including='', tblspace='',
                    unlogged=False, params='', owner=''):
        """
        Create table like another table (with similar DDL).

        Arguments:
        src_table - source table.
        including - corresponds to optional INCLUDING expression
            in CREATE TABLE ... LIKE statement.
        params - storage params (passed by "WITH (...)" in SQL),
            comma separated.
        tblspace - tablespace.
        owner - table owner.
        unlogged - create unlogged table.
        """
        changed = False

        name = pg_quote_identifier(self.name, 'table')

        query = "CREATE"
        if unlogged:
            query += " UNLOGGED TABLE %s" % name
        else:
            query += " TABLE %s" % name

        query += " (LIKE %s" % pg_quote_identifier(src_table, 'table')

        if including:
            including = including.split(',')
            for i in including:
                query += " INCLUDING %s" % i

        query += ')'

        if params:
            query += " WITH (%s)" % params

        if tblspace:
            query += ' TABLESPACE "%s"' % tblspace

        if exec_sql(self, query, return_bool=True):
            changed = True

        if owner:
            changed = self.set_owner(owner)

        return changed

    def truncate(self):
        """Truncate the table. Return True on success."""
        query = "TRUNCATE TABLE %s" % pg_quote_identifier(self.name, 'table')
        return exec_sql(self, query, return_bool=True)

    def rename(self, newname):
        """Rename the table to newname. Return True on success."""
        query = "ALTER TABLE %s RENAME TO %s" % (pg_quote_identifier(self.name, 'table'),
                                                 pg_quote_identifier(newname, 'table'))
        return exec_sql(self, query, return_bool=True)

    def set_owner(self, username):
        """Change the table owner. Return True on success."""
        query = 'ALTER TABLE %s OWNER TO "%s"' % (pg_quote_identifier(self.name, 'table'), username)
        return exec_sql(self, query, return_bool=True)

    def drop(self, cascade=False):
        """Drop the table, optionally with CASCADE. Return True on success."""
        if not self.exists:
            return False

        query = "DROP TABLE %s" % pg_quote_identifier(self.name, 'table')
        if cascade:
            query += " CASCADE"
        return exec_sql(self, query, return_bool=True)

    def set_tblspace(self, tblspace):
        """Move the table to another tablespace. Return True on success."""
        query = 'ALTER TABLE %s SET TABLESPACE "%s"' % (pg_quote_identifier(self.name, 'table'), tblspace)
        return exec_sql(self, query, return_bool=True)

    def set_stor_params(self, params):
        """Set storage parameters of the table. Return True on success."""
        query = "ALTER TABLE %s SET (%s)" % (pg_quote_identifier(self.name, 'table'), params)
        return exec_sql(self, query, return_bool=True)
# ===========================================
# Module execution.
#
def main():
    """Entry point of the postgresql_table module.

    Parses module parameters, connects to the database, and creates,
    alters, truncates, renames, or drops the target table, returning
    the resulting table facts via exit_json().
    """
    argument_spec = postgres_common_argument_spec()
    argument_spec.update(
        table=dict(type='str', required=True, aliases=['name']),
        state=dict(type='str', default='present', choices=['absent', 'present']),
        db=dict(type='str', default='', aliases=['login_db']),
        tablespace=dict(type='str'),
        owner=dict(type='str'),
        unlogged=dict(type='bool', default=False),
        like=dict(type='str'),
        including=dict(type='str'),
        rename=dict(type='str'),
        truncate=dict(type='bool', default=False),
        columns=dict(type='list', elements='str'),
        storage_params=dict(type='list', elements='str'),
        session_role=dict(type='str'),
        cascade=dict(type='bool', default=False),
        trust_input=dict(type='bool', default=True),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    table = module.params['table']
    state = module.params['state']
    tablespace = module.params['tablespace']
    owner = module.params['owner']
    unlogged = module.params['unlogged']
    like = module.params['like']
    including = module.params['including']
    newname = module.params['rename']
    storage_params = module.params['storage_params']
    truncate = module.params['truncate']
    columns = module.params['columns']
    cascade = module.params['cascade']
    session_role = module.params['session_role']
    trust_input = module.params['trust_input']

    if not trust_input:
        # Check input for potentially dangerous elements:
        check_input(module, table, tablespace, owner, like, including,
                    newname, storage_params, columns, session_role)

    if state == 'present' and cascade:
        module.warn("cascade=true is ignored when state=present")

    # Check mutual exclusive parameters:
    if state == 'absent' and (truncate or newname or columns or tablespace or like or storage_params or unlogged or owner or including):
        module.fail_json(msg="%s: state=absent is mutually exclusive with: "
                             "truncate, rename, columns, tablespace, "
                             "including, like, storage_params, unlogged, owner" % table)

    if truncate and (newname or columns or like or unlogged or storage_params or owner or tablespace or including):
        module.fail_json(msg="%s: truncate is mutually exclusive with: "
                             "rename, columns, like, unlogged, including, "
                             "storage_params, owner, tablespace" % table)

    if newname and (columns or like or unlogged or storage_params or owner or tablespace or including):
        module.fail_json(msg="%s: rename is mutually exclusive with: "
                             "columns, like, unlogged, including, "
                             "storage_params, owner, tablespace" % table)

    if like and columns:
        module.fail_json(msg="%s: like and columns params are mutually exclusive" % table)

    if including and not like:
        module.fail_json(msg="%s: including param needs like param specified" % table)

    # Ensure psycopg2 libraries are available before connecting to DB:
    ensure_required_libs(module)
    conn_params = get_conn_params(module, module.params)
    # autocommit=False so the DDL can be rolled back in check mode:
    db_connection, dummy = connect_to_db(module, conn_params, autocommit=False)
    cursor = db_connection.cursor(cursor_factory=DictCursor)

    # Collapse the list parameters into the comma-separated strings
    # expected by the SQL-building methods of Table:
    if storage_params:
        storage_params = ','.join(storage_params)

    if columns:
        columns = ','.join(columns)

    ##############
    # Do main job:
    table_obj = Table(table, module, cursor)

    # Set default returned values:
    changed = False
    kw = {}
    kw['table'] = table
    kw['state'] = ''
    if table_obj.exists:
        kw = dict(
            table=table,
            state='present',
            owner=table_obj.info['owner'],
            tablespace=table_obj.info['tblspace'],
            storage_params=table_obj.info['storage_params'],
        )

    if state == 'absent':
        changed = table_obj.drop(cascade=cascade)

    elif truncate:
        changed = table_obj.truncate()

    elif newname:
        changed = table_obj.rename(newname)
        # Recreate the object under the new name, carrying over the
        # query log so RETURN reports all executed queries:
        q = table_obj.executed_queries
        table_obj = Table(newname, module, cursor)
        table_obj.executed_queries = q

    elif state == 'present' and not like:
        changed = table_obj.create(columns, storage_params,
                                   tablespace, unlogged, owner)

    elif state == 'present' and like:
        # NOTE(review): owner is not passed here, so owner is silently
        # ignored when like is used -- confirm this is intentional.
        changed = table_obj.create_like(like, including, tablespace,
                                        unlogged, storage_params)

    if changed:
        if module.check_mode:
            # Check mode: undo the DDL instead of committing it:
            db_connection.rollback()
        else:
            db_connection.commit()

        # Refresh table info for RETURN.
        # Note, if table has been renamed, it gets info by newname:
        table_obj.get_info()
        # This commit only closes the read-only refresh transaction;
        # in check mode the DDL above was already rolled back:
        db_connection.commit()

        if table_obj.exists:
            kw = dict(
                table=table,
                state='present',
                owner=table_obj.info['owner'],
                tablespace=table_obj.info['tblspace'],
                storage_params=table_obj.info['storage_params'],
            )
        else:
            # We just change the table state here
            # to keep other information about the dropped table:
            kw['state'] = 'absent'

    kw['queries'] = table_obj.executed_queries
    kw['changed'] = changed
    db_connection.close()
    module.exit_json(**kw)


if __name__ == '__main__':
    main()

View File

@@ -0,0 +1,544 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Flavien Chantelot (@Dorn-)
# Copyright: (c) 2018, Antoine Levy-Lambert (@antoinell)
# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = r'''
---
module: postgresql_tablespace
short_description: Add or remove PostgreSQL tablespaces from remote hosts
description:
- Adds or removes PostgreSQL tablespaces from remote hosts.
options:
tablespace:
description:
- Name of the tablespace to add or remove.
required: true
type: str
aliases:
- name
location:
description:
- Path to the tablespace directory in the file system.
- Ensure that the location exists and has right privileges.
type: path
aliases:
- path
state:
description:
- Tablespace state.
- I(state=present) implies the tablespace must be created if it doesn't exist.
- I(state=absent) implies the tablespace must be removed if present.
      I(state=absent) is mutually exclusive with I(location), I(owner), I(set).
- See the Notes section for information about check mode restrictions.
type: str
default: present
choices: [ absent, present ]
owner:
description:
- Name of the role to set as an owner of the tablespace.
- If this option is not specified, the tablespace owner is a role that creates the tablespace.
type: str
set:
description:
- Dict of tablespace options to set. Supported from PostgreSQL 9.0.
- For more information see U(https://www.postgresql.org/docs/current/sql-createtablespace.html).
- When reset is passed as an option's value, if the option was set previously, it will be removed.
type: dict
rename_to:
description:
- New name of the tablespace.
- The new name cannot begin with pg_, as such names are reserved for system tablespaces.
type: str
session_role:
description:
- Switch to session_role after connecting. The specified session_role must
be a role that the current login_user is a member of.
- Permissions checking for SQL commands is carried out as though
the session_role were the one that had logged in originally.
type: str
db:
description:
- Name of database to connect to and run queries against.
type: str
aliases:
- login_db
trust_input:
description:
- If C(false), check whether values of parameters I(tablespace), I(location), I(owner),
I(rename_to), I(session_role), I(settings_list) are potentially dangerous.
- It makes sense to use C(false) only when SQL injections via the parameters are possible.
type: bool
default: true
version_added: '0.2.0'
notes:
- I(state=absent) and I(state=present) (the second one if the tablespace doesn't exist) do not
support check mode because the corresponding PostgreSQL DROP and CREATE TABLESPACE commands
can not be run inside the transaction block.
seealso:
- name: PostgreSQL tablespaces
description: General information about PostgreSQL tablespaces.
link: https://www.postgresql.org/docs/current/manage-ag-tablespaces.html
- name: CREATE TABLESPACE reference
description: Complete reference of the CREATE TABLESPACE command documentation.
link: https://www.postgresql.org/docs/current/sql-createtablespace.html
- name: ALTER TABLESPACE reference
description: Complete reference of the ALTER TABLESPACE command documentation.
link: https://www.postgresql.org/docs/current/sql-altertablespace.html
- name: DROP TABLESPACE reference
description: Complete reference of the DROP TABLESPACE command documentation.
link: https://www.postgresql.org/docs/current/sql-droptablespace.html
author:
- Flavien Chantelot (@Dorn-)
- Antoine Levy-Lambert (@antoinell)
- Andrew Klychkov (@Andersson007)
extends_documentation_fragment:
- community.postgresql.postgres
'''
EXAMPLES = r'''
- name: Create a new tablespace called acme and set bob as an its owner
community.postgresql.postgresql_tablespace:
name: acme
owner: bob
location: /data/foo
- name: Create a new tablespace called bar with tablespace options
community.postgresql.postgresql_tablespace:
name: bar
set:
random_page_cost: 1
seq_page_cost: 1
- name: Reset random_page_cost option
community.postgresql.postgresql_tablespace:
name: bar
set:
random_page_cost: reset
- name: Rename the tablespace from bar to pcie_ssd
community.postgresql.postgresql_tablespace:
name: bar
rename_to: pcie_ssd
- name: Drop tablespace called bloat
community.postgresql.postgresql_tablespace:
name: bloat
state: absent
'''
RETURN = r'''
queries:
  description: List of queries that were tried to be executed.
  returned: always
  type: list
sample: [ "CREATE TABLESPACE bar LOCATION '/incredible/ssd'" ]
tablespace:
description: Tablespace name.
returned: always
type: str
sample: 'ssd'
owner:
description: Tablespace owner.
returned: always
type: str
sample: 'Bob'
options:
description: Tablespace options.
returned: always
type: dict
sample: { 'random_page_cost': 1, 'seq_page_cost': 1 }
location:
description: Path to the tablespace in the file system.
returned: always
type: str
sample: '/incredible/fast/ssd'
newname:
description: New tablespace name.
returned: if existent
type: str
sample: new_ssd
state:
description: Tablespace state at the end of execution.
returned: always
type: str
sample: 'present'
'''
try:
from psycopg2 import __version__ as PSYCOPG2_VERSION
from psycopg2.extras import DictCursor
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT as AUTOCOMMIT
from psycopg2.extensions import ISOLATION_LEVEL_READ_COMMITTED as READ_COMMITTED
except ImportError:
# psycopg2 is checked by connect_to_db()
# from ansible.module_utils.postgres
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems
from ansible_collections.community.postgresql.plugins.module_utils.database import (
check_input,
pg_quote_identifier,
)
from ansible_collections.community.postgresql.plugins.module_utils.postgres import (
connect_to_db,
exec_sql,
ensure_required_libs,
get_conn_params,
postgres_common_argument_spec,
)
class PgTablespace(object):
    """Class for working with PostgreSQL tablespaces.

    Args:
        module (AnsibleModule) -- object of AnsibleModule class
        cursor (cursor) -- cursor object of psycopg2 library
        name (str) -- name of the tablespace

    Attrs:
        module (AnsibleModule) -- object of AnsibleModule class
        cursor (cursor) -- cursor object of psycopg2 library
        name (str) -- name of the tablespace
        exists (bool) -- flag the tablespace exists in the DB or not
        owner (str) -- tablespace owner
        settings (dict) -- tablespace options parsed from pg_tablespace
        location (str) -- path to the tablespace directory in the file system
        executed_queries (list) -- list of executed queries
        new_name (str) -- new name for the tablespace
        opt_not_supported (bool) -- flag indicates a tablespace option is supported or not
    """

    def __init__(self, module, cursor, name):
        self.module = module
        self.cursor = cursor
        self.name = name
        self.exists = False
        self.owner = ''
        self.settings = {}
        self.location = ''
        self.executed_queries = []
        self.new_name = ''
        self.opt_not_supported = False
        # Collect info:
        self.get_info()

    def get_info(self):
        """Get tablespace information."""
        # Check that spcoptions exists:
        opt = exec_sql(self, "SELECT 1 FROM information_schema.columns "
                             "WHERE table_name = 'pg_tablespace' "
                             "AND column_name = 'spcoptions'", add_to_executed=False)

        # For 9.1 version and earlier:
        location = exec_sql(self, "SELECT 1 FROM information_schema.columns "
                                  "WHERE table_name = 'pg_tablespace' "
                                  "AND column_name = 'spclocation'", add_to_executed=False)
        if location:
            location = 'spclocation'
        else:
            location = 'pg_tablespace_location(t.oid)'

        if not opt:
            # Old servers without spcoptions -- select NULL in its place:
            self.opt_not_supported = True
            query = ("SELECT r.rolname, (SELECT Null), %s "
                     "FROM pg_catalog.pg_tablespace AS t "
                     "JOIN pg_catalog.pg_roles AS r "
                     "ON t.spcowner = r.oid " % location)
        else:
            query = ("SELECT r.rolname, t.spcoptions, %s "
                     "FROM pg_catalog.pg_tablespace AS t "
                     "JOIN pg_catalog.pg_roles AS r "
                     "ON t.spcowner = r.oid " % location)

        res = exec_sql(self, query + "WHERE t.spcname = %(name)s",
                       query_params={'name': self.name}, add_to_executed=False)

        if not res:
            self.exists = False
            return False

        if res[0][0]:
            self.exists = True
            self.owner = res[0][0]

            if res[0][1]:
                # Options exist:
                # NOTE(review): assumes every option is "key=value" with no
                # '=' inside the value -- confirm for complex option values.
                for i in res[0][1]:
                    i = i.split('=')
                    self.settings[i[0]] = i[1]

            if res[0][2]:
                # Location exists:
                self.location = res[0][2]

    def create(self, location):
        """Create tablespace.

        Return True if success, otherwise, return False.

        args:
            location (str) -- tablespace directory path in the FS
        """
        query = ('CREATE TABLESPACE "%s" LOCATION \'%s\'' % (self.name, location))
        return exec_sql(self, query, return_bool=True)

    def drop(self):
        """Drop tablespace.

        Return True if success, otherwise, return False.
        """
        return exec_sql(self, 'DROP TABLESPACE "%s"' % self.name, return_bool=True)

    def set_owner(self, new_owner):
        """Set tablespace owner.

        Return True if success, otherwise, return False
        (including when the new owner is already the current owner).

        args:
            new_owner (str) -- name of a new owner for the tablespace
        """
        if new_owner == self.owner:
            return False

        query = 'ALTER TABLESPACE "%s" OWNER TO "%s"' % (self.name, new_owner)
        return exec_sql(self, query, return_bool=True)

    def rename(self, newname):
        """Rename tablespace.

        Return True if success, otherwise, return False.

        args:
            newname (str) -- new name for the tablespace
        """
        query = 'ALTER TABLESPACE "%s" RENAME TO "%s"' % (self.name, newname)
        self.new_name = newname
        return exec_sql(self, query, return_bool=True)

    def set_settings(self, new_settings):
        """Set tablespace settings (options).

        If some setting has been changed, set changed = True.
        After all settings list is handling, return changed.

        args:
            new_settings (dict) -- dict of settings; the special value
                'reset' removes a previously set option
        """
        # settings must be a dict {'key': 'value'}
        if self.opt_not_supported:
            return False

        changed = False

        # Apply new settings.
        # Accumulate the flag with "or": previously each iteration
        # overwrote "changed", so a later unchanged option discarded
        # the True result of an earlier one:
        for i in new_settings:
            if new_settings[i] == 'reset':
                if i in self.settings:
                    changed = self.__reset_setting(i) or changed
                    self.settings[i] = None

            elif (i not in self.settings) or (str(new_settings[i]) != self.settings[i]):
                changed = self.__set_setting("%s = '%s'" % (i, new_settings[i])) or changed

        return changed

    def __reset_setting(self, setting):
        """Reset tablespace setting.

        Return True if success, otherwise, return False.

        args:
            setting (str) -- name of the setting to reset
        """
        query = 'ALTER TABLESPACE "%s" RESET (%s)' % (self.name, setting)
        return exec_sql(self, query, return_bool=True)

    def __set_setting(self, setting):
        """Set tablespace setting.

        Return True if success, otherwise, return False.

        args:
            setting (str) -- string in format "setting_name = 'setting_value'"
        """
        query = 'ALTER TABLESPACE "%s" SET (%s)' % (self.name, setting)
        return exec_sql(self, query, return_bool=True)
# ===========================================
# Module execution.
#
def main():
    """Entry point: create, alter, rename, or drop a PostgreSQL tablespace."""
    argument_spec = postgres_common_argument_spec()
    argument_spec.update(
        tablespace=dict(type='str', required=True, aliases=['name']),
        state=dict(type='str', default="present", choices=["absent", "present"]),
        location=dict(type='path', aliases=['path']),
        owner=dict(type='str'),
        set=dict(type='dict'),
        rename_to=dict(type='str'),
        db=dict(type='str', aliases=['login_db']),
        session_role=dict(type='str'),
        trust_input=dict(type='bool', default=True),
    )
    # The previous mutually_exclusive=(('positional_args', 'named_args'),)
    # referenced options this module does not declare (copy-paste leftover
    # from another module), so it had no effect and has been removed:
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    tablespace = module.params["tablespace"]
    state = module.params["state"]
    location = module.params["location"]
    owner = module.params["owner"]
    rename_to = module.params["rename_to"]
    settings = module.params["set"]
    session_role = module.params["session_role"]
    trust_input = module.params["trust_input"]

    if state == 'absent' and (location or owner or rename_to or settings):
        module.fail_json(msg="state=absent is mutually exclusive with location, "
                             "owner, rename_to, and set")

    if not trust_input:
        # Check input for potentially dangerous elements:
        if not settings:
            settings_list = None
        else:
            settings_list = ['%s = %s' % (k, v) for k, v in iteritems(settings)]

        check_input(module, tablespace, location, owner,
                    rename_to, session_role, settings_list)

    # Ensure psycopg2 libraries are available before connecting to DB:
    ensure_required_libs(module)
    conn_params = get_conn_params(module, module.params, warn_db_default=False)
    db_connection, dummy = connect_to_db(module, conn_params, autocommit=True)
    cursor = db_connection.cursor(cursor_factory=DictCursor)

    # Change autocommit to False if check_mode:
    if module.check_mode:
        if PSYCOPG2_VERSION >= '2.4.2':
            db_connection.set_session(autocommit=False)
        else:
            db_connection.set_isolation_level(READ_COMMITTED)

    # Set defaults:
    autocommit = False
    changed = False

    ##############
    # Create PgTablespace object and do main job:
    tblspace = PgTablespace(module, cursor, tablespace)

    # If tablespace exists with different location, exit:
    if tblspace.exists and location and location != tblspace.location:
        module.fail_json(msg="Tablespace '%s' exists with "
                             "different location '%s'" % (tblspace.name, tblspace.location))

    # Create new tablespace:
    if not tblspace.exists and state == 'present':
        if rename_to:
            module.fail_json(msg="Tablespace %s does not exist, nothing to rename" % tablespace)

        if not location:
            module.fail_json(msg="'location' parameter must be passed with "
                                 "state=present if the tablespace doesn't exist")

        # Because CREATE TABLESPACE can not be run inside the transaction block:
        autocommit = True
        if PSYCOPG2_VERSION >= '2.4.2':
            db_connection.set_session(autocommit=True)
        else:
            db_connection.set_isolation_level(AUTOCOMMIT)

        changed = tblspace.create(location)

    # Drop non-existing tablespace:
    elif not tblspace.exists and state == 'absent':
        # Nothing to do:
        module.fail_json(msg="Tries to drop nonexistent tablespace '%s'" % tblspace.name)

    # Drop existing tablespace:
    elif tblspace.exists and state == 'absent':
        # Because DROP TABLESPACE can not be run inside the transaction block:
        autocommit = True
        if PSYCOPG2_VERSION >= '2.4.2':
            db_connection.set_session(autocommit=True)
        else:
            db_connection.set_isolation_level(AUTOCOMMIT)

        changed = tblspace.drop()

    # Rename tablespace:
    elif tblspace.exists and rename_to:
        if tblspace.name != rename_to:
            changed = tblspace.rename(rename_to)

    if state == 'present':
        # Refresh information:
        tblspace.get_info()

    # Change owner and settings:
    if state == 'present' and tblspace.exists:
        if owner:
            # Accumulate with "or" so a True from an earlier step (e.g. a
            # rename) is not overwritten by a no-op owner/settings change:
            changed = tblspace.set_owner(owner) or changed

        if settings:
            changed = tblspace.set_settings(settings) or changed

        tblspace.get_info()

    # Rollback if it's possible and check_mode:
    if not autocommit:
        if module.check_mode:
            db_connection.rollback()
        else:
            db_connection.commit()

    cursor.close()
    db_connection.close()

    # Make return values:
    kw = dict(
        changed=changed,
        state='present',
        tablespace=tblspace.name,
        owner=tblspace.owner,
        queries=tblspace.executed_queries,
        options=tblspace.settings,
        location=tblspace.location,
    )

    if state == 'present':
        kw['state'] = 'present'

        if tblspace.new_name:
            kw['newname'] = tblspace.new_name

    elif state == 'absent':
        kw['state'] = 'absent'

    module.exit_json(**kw)


if __name__ == '__main__':
    main()

View File

@@ -0,0 +1,339 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2020, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: postgresql_user_obj_stat_info
short_description: Gather statistics about PostgreSQL user objects
description:
- Gathers statistics about PostgreSQL user objects.
version_added: '0.2.0'
options:
filter:
description:
- Limit the collected information by comma separated string or YAML list.
- Allowable values are C(functions), C(indexes), C(tables).
- By default, collects all subsets.
- Unsupported values are ignored.
type: list
elements: str
schema:
description:
- Restrict the output by certain schema.
type: str
db:
description:
- Name of database to connect.
type: str
aliases:
- login_db
session_role:
description:
- Switch to session_role after connecting. The specified session_role must
be a role that the current login_user is a member of.
- Permissions checking for SQL commands is carried out as though
the session_role were the one that had logged in originally.
type: str
trust_input:
description:
- If C(false), check the value of I(session_role) is potentially dangerous.
- It makes sense to use C(false) only when SQL injections via I(session_role) are possible.
type: bool
default: true
version_added: '0.2.0'
notes:
- C(size) and C(total_size) returned values are presented in bytes.
- For tracking function statistics the PostgreSQL C(track_functions) parameter must be enabled.
See U(https://www.postgresql.org/docs/current/runtime-config-statistics.html) for more information.
- Supports C(check_mode).
seealso:
- module: community.postgresql.postgresql_info
- module: community.postgresql.postgresql_ping
- name: PostgreSQL statistics collector reference
description: Complete reference of the PostgreSQL statistics collector documentation.
link: https://www.postgresql.org/docs/current/monitoring-stats.html
author:
- Andrew Klychkov (@Andersson007)
- Thomas O'Donnell (@andytom)
extends_documentation_fragment:
- community.postgresql.postgres
'''
EXAMPLES = r'''
- name: Collect information about all supported user objects of the acme database
community.postgresql.postgresql_user_obj_stat_info:
db: acme
- name: Collect information about all supported user objects in the custom schema of the acme database
community.postgresql.postgresql_user_obj_stat_info:
db: acme
schema: custom
- name: Collect information about user tables and indexes in the acme database
community.postgresql.postgresql_user_obj_stat_info:
db: acme
filter: tables, indexes
'''
RETURN = r'''
indexes:
description: User index statistics.
returned: always
type: dict
sample: {"public": {"test_id_idx": {"idx_scan": 0, "idx_tup_fetch": 0, "idx_tup_read": 0, "relname": "test", "size": 8192, ...}}}
tables:
description: User table statistics.
returned: always
type: dict
sample: {"public": {"test": {"analyze_count": 3, "n_dead_tup": 0, "n_live_tup": 0, "seq_scan": 2, "size": 0, "total_size": 8192, ...}}}
functions:
description: User function statistics.
returned: always
type: dict
sample: {"public": {"inc": {"calls": 1, "funcid": 26722, "self_time": 0.23, "total_time": 0.23}}}
'''
try:
from psycopg2.extras import DictCursor
except ImportError:
# psycopg2 is checked by connect_to_db()
# from ansible.module_utils.postgres
pass
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.postgresql.plugins.module_utils.database import (
check_input,
)
from ansible_collections.community.postgresql.plugins.module_utils.postgres import (
connect_to_db,
exec_sql,
ensure_required_libs,
get_conn_params,
postgres_common_argument_spec,
)
from ansible.module_utils.six import iteritems
# ===========================================
# PostgreSQL module specific support methods.
#
class PgUserObjStatInfo():
    """Class to collect information about PostgreSQL user objects.

    Args:
        module (AnsibleModule): Object of AnsibleModule class.
        cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL.

    Attributes:
        module (AnsibleModule): Object of AnsibleModule class.
        cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL.
        info (dict): Statistics dictionary.
        obj_func_mapping (dict): Mapping of object types to corresponding functions.
        schema (str): Name of a schema to restrict stat collecting.
    """

    def __init__(self, module, cursor):
        self.module = module
        self.cursor = cursor
        self.info = {
            'functions': {},
            'indexes': {},
            'tables': {},
        }
        self.obj_func_mapping = {
            'functions': self.get_func_stat,
            'indexes': self.get_idx_stat,
            'tables': self.get_tbl_stat,
        }
        self.schema = None

    def collect(self, filter_=None, schema=None):
        """Collect statistics information of user objects.

        Kwargs:
            filter_ (list): List of subsets which need to be collected.
            schema (str): Restrict stat collecting by certain schema.

        Returns:
            ``self.info``.
        """
        if schema:
            self.set_schema(schema)

        if filter_:
            for obj_type in filter_:
                obj_type = obj_type.strip()
                obj_func = self.obj_func_mapping.get(obj_type)

                if obj_func is not None:
                    obj_func()
                else:
                    # Unsupported filter values are ignored with a warning:
                    self.module.warn("Unknown filter option '%s'" % obj_type)
        else:
            # No filter -- collect all supported subsets:
            for obj_func in self.obj_func_mapping.values():
                obj_func()

        return self.info

    def get_func_stat(self):
        """Get function statistics and fill out self.info dictionary."""
        # Pass query parameters only when the query contains a placeholder.
        # Previously (self.schema,) was always passed, i.e. (None,) for a
        # placeholder-less query when no schema was set, which makes
        # cursor.execute() fail on the extra argument:
        query = "SELECT * FROM pg_stat_user_functions"
        query_params = None
        if self.schema:
            query = "SELECT * FROM pg_stat_user_functions WHERE schemaname = %s"
            query_params = (self.schema,)

        result = exec_sql(self, query, query_params=query_params,
                          add_to_executed=False)

        if not result:
            return

        self.__fill_out_info(result,
                             info_key='functions',
                             schema_key='schemaname',
                             name_key='funcname')

    def get_idx_stat(self):
        """Get index statistics and fill out self.info dictionary."""
        # See the note in get_func_stat() about conditional parameters:
        query = "SELECT * FROM pg_stat_user_indexes"
        query_params = None
        if self.schema:
            query = "SELECT * FROM pg_stat_user_indexes WHERE schemaname = %s"
            query_params = (self.schema,)

        result = exec_sql(self, query, query_params=query_params,
                          add_to_executed=False)

        if not result:
            return

        self.__fill_out_info(result,
                             info_key='indexes',
                             schema_key='schemaname',
                             name_key='indexrelname')

    def get_tbl_stat(self):
        """Get table statistics and fill out self.info dictionary."""
        # See the note in get_func_stat() about conditional parameters:
        query = "SELECT * FROM pg_stat_user_tables"
        query_params = None
        if self.schema:
            query = "SELECT * FROM pg_stat_user_tables WHERE schemaname = %s"
            query_params = (self.schema,)

        result = exec_sql(self, query, query_params=query_params,
                          add_to_executed=False)

        if not result:
            return

        self.__fill_out_info(result,
                             info_key='tables',
                             schema_key='schemaname',
                             name_key='relname')

    def __fill_out_info(self, result, info_key=None, schema_key=None, name_key=None):
        """Arrange raw stat rows into self.info[info_key][schema][name]."""
        # Convert result to list of dicts to handle it easier:
        result = [dict(row) for row in result]

        for elem in result:
            # Add schema name as a key if not presented:
            if not self.info[info_key].get(elem[schema_key]):
                self.info[info_key][elem[schema_key]] = {}

            # Add object name key as a subkey
            # (they must be uniq over a schema, so no need additional checks):
            self.info[info_key][elem[schema_key]][elem[name_key]] = {}

            # Add other attributes to a certain object:
            for key, val in iteritems(elem):
                if key not in (schema_key, name_key):
                    self.info[info_key][elem[schema_key]][elem[name_key]][key] = val

            if info_key in ('tables', 'indexes'):
                schemaname = elem[schema_key]
                if self.schema:
                    schemaname = self.schema

                relname = '%s.%s' % (schemaname, elem[name_key])

                # Use a dedicated name for the size queries so the outer
                # "result" list being iterated is not rebound mid-loop:
                size_res = exec_sql(self, "SELECT pg_relation_size (%s)",
                                    query_params=(relname,),
                                    add_to_executed=False)

                self.info[info_key][elem[schema_key]][elem[name_key]]['size'] = size_res[0][0]

                if info_key == 'tables':
                    size_res = exec_sql(self, "SELECT pg_total_relation_size (%s)",
                                        query_params=(relname,),
                                        add_to_executed=False)

                    self.info[info_key][elem[schema_key]][elem[name_key]]['total_size'] = size_res[0][0]

    def set_schema(self, schema):
        """If schema exists, sets self.schema, otherwise fails."""
        query = ("SELECT 1 FROM information_schema.schemata "
                 "WHERE schema_name = %s")
        result = exec_sql(self, query, query_params=(schema,),
                          add_to_executed=False)

        if result and result[0][0]:
            self.schema = schema
        else:
            self.module.fail_json(msg="Schema '%s' does not exist" % (schema))
# ===========================================
# Module execution.
#
def main():
    """Module entry point: gather user object statistics and exit."""
    arg_spec = postgres_common_argument_spec()
    arg_spec.update(
        db=dict(type='str', aliases=['login_db']),
        filter=dict(type='list', elements='str'),
        session_role=dict(type='str'),
        schema=dict(type='str'),
        trust_input=dict(type="bool", default=True),
    )
    module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True)

    subsets = module.params["filter"]
    target_schema = module.params["schema"]

    if not module.params["trust_input"]:
        # Guard against SQL injection through session_role:
        check_input(module, module.params['session_role'])

    # Ensure psycopg2 libraries are available before connecting to DB:
    ensure_required_libs(module)

    # Connect and create a dict-style cursor; the module only reads,
    # so nothing needs to be committed:
    conn_params = get_conn_params(module, module.params)
    db_connection, dummy = connect_to_db(module, conn_params, autocommit=False)
    cursor = db_connection.cursor(cursor_factory=DictCursor)

    # Collect the requested statistics:
    stat_info = PgUserObjStatInfo(module, cursor).collect(subsets, target_schema)

    # Clean up:
    cursor.close()
    db_connection.close()

    # Return information:
    module.exit_json(**stat_info)


if __name__ == '__main__':
    main()