forked from sclorg/postgresql-container
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathcommon.sh
429 lines (361 loc) · 14.6 KB
/
common.sh
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
# Configuration settings.
# Tunables below follow the pattern VAR=${VAR:-default}: a value supplied by
# the user via the environment always wins over the computed/default one.
export POSTGRESQL_MAX_CONNECTIONS=${POSTGRESQL_MAX_CONNECTIONS:-100}
export POSTGRESQL_MAX_PREPARED_TRANSACTIONS=${POSTGRESQL_MAX_PREPARED_TRANSACTIONS:-0}
# Perform auto-tuning based on the container cgroups limits (only when the
# limits are set).
# Users can still override this by setting the POSTGRESQL_SHARED_BUFFERS
# and POSTGRESQL_EFFECTIVE_CACHE_SIZE variables.
# NOTE(review): NO_MEMORY_LIMIT and MEMORY_LIMIT_IN_BYTES are presumably
# injected by the container platform / run script -- confirm against caller.
if [[ "${NO_MEMORY_LIMIT:-}" == "true" || -z "${MEMORY_LIMIT_IN_BYTES:-}" ]]; then
# No (known) memory limit: fall back to conservative static defaults.
export POSTGRESQL_SHARED_BUFFERS=${POSTGRESQL_SHARED_BUFFERS:-32MB}
export POSTGRESQL_EFFECTIVE_CACHE_SIZE=${POSTGRESQL_EFFECTIVE_CACHE_SIZE:-128MB}
else
# Use 1/4 of given memory for shared buffers
shared_buffers_computed="$(($MEMORY_LIMIT_IN_BYTES/1024/1024/4))MB"
# Setting effective_cache_size to 1/2 of total memory would be a normal conservative setting,
effective_cache="$(($MEMORY_LIMIT_IN_BYTES/1024/1024/2))MB"
export POSTGRESQL_SHARED_BUFFERS=${POSTGRESQL_SHARED_BUFFERS:-$shared_buffers_computed}
export POSTGRESQL_EFFECTIVE_CACHE_SIZE=${POSTGRESQL_EFFECTIVE_CACHE_SIZE:-$effective_cache}
fi
# Generated config fragments live under $HOME so they are writable even when
# the container runs under an arbitrary (random OpenShift) UID.
export POSTGRESQL_RECOVERY_FILE=$HOME/openshift-custom-recovery.conf
export POSTGRESQL_CONFIG_FILE=$HOME/openshift-custom-postgresql.conf
# Comma-separated list of post-initdb actions; populated by check_env_vars().
postinitdb_actions=
# Accepted shapes for user/database identifiers and for passwords.
psql_identifier_regex='^[a-zA-Z_][a-zA-Z0-9_]*$'
psql_password_regex='^[a-zA-Z0-9_~!@#$%^&*()-=<>,.?;:|]+$'
# match . files when moving userdata below
shopt -s dotglob
# extglob enables the !(userdata) glob pattern below.
shopt -s extglob
# Print an optional error message followed by the container usage text to
# stderr, then terminate with a non-zero status.
function usage() {
  if [[ $# -eq 1 ]]; then
    printf >&2 '%s\n' "error: $1"
  fi
  cat >&2 <<EOF
For general container run, you must either specify the following environment
variables:
POSTGRESQL_USER (regex: '$psql_identifier_regex')
POSTGRESQL_PASSWORD (regex: '$psql_password_regex')
POSTGRESQL_DATABASE (regex: '$psql_identifier_regex')
Or the following environment variable:
POSTGRESQL_ADMIN_PASSWORD (regex: '$psql_password_regex')
Or both.
To migrate data from different PostgreSQL container:
POSTGRESQL_MIGRATION_REMOTE_HOST (hostname or IP address)
POSTGRESQL_MIGRATION_ADMIN_PASSWORD (password of remote 'postgres' user)
And optionally:
POSTGRESQL_MIGRATION_IGNORE_ERRORS=yes (default is 'no')
Optional settings:
POSTGRESQL_MAX_CONNECTIONS (default: 100)
POSTGRESQL_MAX_PREPARED_TRANSACTIONS (default: 0)
POSTGRESQL_SHARED_BUFFERS (default: 32MB)
For more information see /usr/share/container-scripts/postgresql/README.md
within the container or visit https://github.com/sclorg/postgresql-container.
EOF
  exit 1
}
# Validate the user-supplied environment variables and record the requested
# post-initdb actions in the global $postinitdb_actions.
# Reads:  POSTGRESQL_{USER,PASSWORD,DATABASE,ADMIN_PASSWORD},
#         POSTGRESQL_MIGRATION_{REMOTE_HOST,ADMIN_PASSWORD},
#         $psql_identifier_regex, $psql_password_regex
# Writes: $postinitdb_actions
# Exits through usage() when a value or combination is invalid.
function check_env_vars() {
  if [[ -v POSTGRESQL_USER || -v POSTGRESQL_PASSWORD || -v POSTGRESQL_DATABASE ]]; then
    # one var means all three must be specified
    [[ -v POSTGRESQL_USER && -v POSTGRESQL_PASSWORD && -v POSTGRESQL_DATABASE ]] || usage
    [[ "$POSTGRESQL_USER" =~ $psql_identifier_regex ]] || usage
    [[ "$POSTGRESQL_PASSWORD" =~ $psql_password_regex ]] || usage
    [[ "$POSTGRESQL_DATABASE" =~ $psql_identifier_regex ]] || usage
    # PostgreSQL truncates identifiers longer than 63 bytes (NAMEDATALEN - 1).
    [ ${#POSTGRESQL_USER} -le 63 ] || usage "PostgreSQL username too long (maximum 63 characters)"
    [ ${#POSTGRESQL_DATABASE} -le 63 ] || usage "Database name too long (maximum 63 characters)"
    postinitdb_actions+=",simple_db"
  fi

  if [[ -v POSTGRESQL_ADMIN_PASSWORD ]]; then
    [[ "$POSTGRESQL_ADMIN_PASSWORD" =~ $psql_password_regex ]] || usage
    postinitdb_actions+=",admin_pass"
  fi

  # '[[ a && b ]]' replaces the deprecated, ambiguous '[ a -a b ]' (SC2166).
  if [[ -v POSTGRESQL_MIGRATION_REMOTE_HOST && \
        -v POSTGRESQL_MIGRATION_ADMIN_PASSWORD ]]; then
    postinitdb_actions+=",migration"
  fi

  # Only these action combinations are supported; anything else is an error.
  case "$postinitdb_actions" in
    ,simple_db,admin_pass) ;;
    ,migration|,simple_db|,admin_pass) ;;
    *) usage ;;
  esac
}
# Scrub the credential-carrying variables from the environment so they are
# not inherited by the PostgreSQL server process.
function unset_env_vars() {
  unset POSTGRESQL_DATABASE
  unset POSTGRESQL_USER
  unset POSTGRESQL_PASSWORD
  unset POSTGRESQL_ADMIN_PASSWORD
}
# postgresql_master_addr looks up the 'postgresql-master' DNS name and prints
# the first available endpooint's address (no trailing newline). Each endpoint
# is a PostgreSQL container running the 'master' server.
# Reads:  POSTGRESQL_MASTER_SERVICE_NAME (optional), POSTGRESQL_MASTER_IP
#         (debug override for plain 'docker run')
# Exits:  status 3 when no endpoint can be resolved.
function postgresql_master_addr() {
  local service_name=${POSTGRESQL_MASTER_SERVICE_NAME:-postgresql-master}
  local endpoints first
  # FIXME: POSTGRESQL_MASTER_IP is for debugging (docker run); when it is
  # set, skip the DNS lookup entirely instead of discarding its result.
  if [ -v POSTGRESQL_MASTER_IP ]; then
    endpoints=${POSTGRESQL_MASTER_IP:-}
  else
    # Declared above, assigned here, so a dig failure isn't masked (SC2155).
    endpoints=$(dig "${service_name}" A +search | grep ";${service_name}" | cut -d ';' -f 2 2>/dev/null)
  fi
  if [ -z "$endpoints" ]; then
    >&2 echo "Failed to resolve PostgreSQL master IP address"
    exit 3
  fi
  # First whitespace-separated token, printed without a trailing newline.
  read -r first _ <<< "$endpoints"
  printf '%s' "$first"
}
# New config is generated on every container start. It holds only our
# additional custom settings and is included from $PGDATA/postgresql.conf.
# Replication settings are appended when ENABLE_REPLICATION=true.
function generate_postgresql_config() {
  envsubst <"${CONTAINER_SCRIPTS_PATH}/openshift-custom-postgresql.conf.template" \
    >"${POSTGRESQL_CONFIG_FILE}"

  if [ "${ENABLE_REPLICATION}" == "true" ]; then
    envsubst <"${CONTAINER_SCRIPTS_PATH}/openshift-custom-postgresql-replication.conf.template" \
      >>"${POSTGRESQL_CONFIG_FILE}"
  fi
}
# Render the recovery.conf fragment from its template, expanding the
# current environment into $POSTGRESQL_RECOVERY_FILE.
function generate_postgresql_recovery_config() {
  envsubst <"${CONTAINER_SCRIPTS_PATH}/openshift-custom-recovery.conf.template" \
    >"${POSTGRESQL_RECOVERY_FILE}"
}
# Generate an nss_wrapper passwd file based on the current uid so that the
# (possibly arbitrary) container UID resolves to the 'postgres' user.
function generate_passwd_file() {
  USER_ID=$(id -u)
  GROUP_ID=$(id -g)
  export USER_ID GROUP_ID
  # Start from the system passwd, dropping any pre-existing postgres entry,
  # then append our synthetic one for the current uid/gid.
  grep -v ^postgres /etc/passwd > "$HOME/passwd"
  printf 'postgres:x:%s:%s:PostgreSQL Server:%s:/bin/bash\n' \
    "${USER_ID}" "${GROUP_ID}" "${HOME}" >> "$HOME/passwd"
  export LD_PRELOAD=libnss_wrapper.so
  export NSS_WRAPPER_PASSWD="${HOME}/passwd"
  export NSS_WRAPPER_GROUP=/etc/group
}
# Run the given initdb command line with utf8 support enabled by default
# (callers may pre-set LANG to override). This might affect performance, see:
# http://www.postgresql.org/docs/9.4/static/locale.html
initdb_wrapper () {
  LANG=${LANG:-en_US.utf8} "$@"
}
function initialize_database() {
# Create the cluster files under $PGDATA (UTF-8 locale via the wrapper).
initdb_wrapper initdb

# PostgreSQL configuration.
# Append an include of our generated fragment so custom settings are picked
# up without editing the stock postgresql.conf further.
cat >> "$PGDATA/postgresql.conf" <<EOF
# Custom OpenShift configuration:
include '${POSTGRESQL_CONFIG_FILE}'
EOF

# Access control configuration.
# FIXME: would be nice-to-have if we could allow connections only from
# specific hosts / subnet
cat >> "$PGDATA/pg_hba.conf" <<EOF
#
# Custom OpenShift configuration starting at this point.
#
# Allow connections from all hosts.
host all all all md5
# Allow replication connections from all hosts.
host replication all all md5
EOF
}
# Create the application role and its database when the 'simple_db' action
# was requested, plus the replication user when POSTGRESQL_MASTER_USER is set.
function create_users() {
  case ",$postinitdb_actions," in
    *,simple_db,*)
      createuser "$POSTGRESQL_USER"
      createdb --owner="$POSTGRESQL_USER" "$POSTGRESQL_DATABASE"
      ;;
  esac

  if [ -v POSTGRESQL_MASTER_USER ]; then
    createuser "$POSTGRESQL_MASTER_USER"
  fi
}
# Migrate all databases from a remote PostgreSQL server into this one by
# streaming a full pg_dumpall through the local psql. No-op unless
# 'migration' is the only requested post-initdb action.
migrate_db ()
{
test "$postinitdb_actions" = ",migration" || return 0

# Migration path.
(
# Unless the user asked to ignore errors, make psql abort on the first one.
if [ ${POSTGRESQL_MIGRATION_IGNORE_ERRORS-no} = no ]; then
echo '\set ON_ERROR_STOP on'
fi
# initdb automatically creates 'postgres' role; creating it again would
# fail the whole migration so we drop it here
PGPASSWORD="$POSTGRESQL_MIGRATION_ADMIN_PASSWORD" \
pg_dumpall -h "$POSTGRESQL_MIGRATION_REMOTE_HOST" \
| grep -v '^CREATE ROLE postgres;'
) | psql
}
# Export PGDATA pointing at the 'userdata' subdirectory of the data volume,
# migrating any legacy layout (data directly under $HOME/data) into it.
function set_pgdata ()
{
export PGDATA=$HOME/data/userdata
# create a subdirectory that the user owns
mkdir -p "$PGDATA"
# backwards compatibility case, we used to put the data here,
# move it into our new expected location (userdata)
if [ -e ${HOME}/data/PG_VERSION ]; then
pushd "${HOME}/data"
# move everything except the userdata directory itself, into the userdata directory.
# NOTE: the !(userdata) pattern requires the file-level 'shopt -s extglob'
# (and dotglob, so hidden files are moved too) set at the top of this file.
mv !(userdata) "userdata"
popd
fi
# ensure sane perms for postgresql startup
chmod 700 "$PGDATA"
}
# Block until the PostgreSQL master accepts connections, probing once per
# second. Uses the admin account when POSTGRESQL_ADMIN_PASSWORD is set,
# otherwise the application user's credentials. Returns 0 on first success.
function wait_for_postgresql_master() {
while true; do
# Re-resolve each iteration in case the master endpoint changes.
master_fqdn=$(postgresql_master_addr)
echo "Waiting for PostgreSQL master (${master_fqdn}) to accept connections ..."
if [ -v POSTGRESQL_ADMIN_PASSWORD ]; then
PGPASSWORD=${POSTGRESQL_ADMIN_PASSWORD} psql "postgresql://postgres@${master_fqdn}" -c "SELECT 1;" && return 0
else
PGPASSWORD=${POSTGRESQL_PASSWORD} psql "postgresql://${POSTGRESQL_USER}@${master_fqdn}/${POSTGRESQL_DATABASE}" -c "SELECT 1;" && return 0
fi
sleep 1
done
}
# Upgrade $PGDATA in place from $POSTGRESQL_PREV_VERSION to
# $POSTGRESQL_VERSION with pg_upgrade. Defined with a subshell body '( )' so
# the 'cd' and 'set' changes below do not leak into the caller.
# Reads: POSTGRESQL_UPGRADE (copy|hardlink),
#        POSTGRESQL_UPGRADE_INITDB_OPTIONS, POSTGRESQL_UPGRADE_PGUPGRADE_OPTIONS
run_pgupgrade ()
(
# 'optimized' selects the pg_upgrade --link (hardlink) mode below.
optimized=false
# Strip dots from versions, e.g. '9.5' -> '95', for SCL collection names.
old_raw_version=${POSTGRESQL_PREV_VERSION//\./}
new_raw_version=${POSTGRESQL_VERSION//\./}

# 9.2 shipped as the unprefixed 'postgresql92' collection; later versions
# use the 'rh-postgresqlNN' naming.
if test "$old_raw_version" = 92; then
old_collection=postgresql92
else
old_collection=rh-postgresql$old_raw_version
fi

old_pgengine=/opt/rh/$old_collection/root/usr/bin
new_pgengine=/opt/rh/rh-postgresql${new_raw_version}/root/usr/bin
PGDATA_new="${PGDATA}-new"

printf >&2 "\n========== \$PGDATA upgrade: %s -> %s ==========\n\n" \
"$POSTGRESQL_PREV_VERSION" \
"$POSTGRESQL_VERSION"

# Small stderr banner helper used to delimit the upgrade phases.
info_msg () { printf >&2 "\n===> $*\n\n" ;}

# pg_upgrade writes logs to cwd, so go to the persistent storage first
cd "$HOME"/data

# disable this because of scl_source, 'set +u' just makes the code ugly
# anyways
set +u

# we need to have the old SCL enabled, otherwise the $old_pgengine is not
# working. The scl_source script doesn't pay attention to non-zero exit
# statuses, so use 'set +e'.
set +e
source scl_source enable $old_collection
set -e

case $POSTGRESQL_UPGRADE in
copy) # we accept this
;;
hardlink)
optimized=:
;;
*)
echo >&2 "Unsupported value: \$POSTGRESQL_UPGRADE=$POSTGRESQL_UPGRADE"
false
;;
esac

# Ensure $PGDATA_new doesn't exist yet, so we can immediately remove it if
# there's some problem.
test ! -e "$PGDATA_new"

# initialize the database
info_msg "Initialize new data directory; we will migrate to that."
initdb_cmd=( initdb_wrapper "$new_pgengine"/initdb "$PGDATA_new" )
eval "\${initdb_cmd[@]} ${POSTGRESQL_UPGRADE_INITDB_OPTIONS-}" || \
{ rm -rf "$PGDATA_new" ; false ; }

upgrade_cmd=(
"$new_pgengine"/pg_upgrade
"--old-bindir=$old_pgengine"
"--new-bindir=$new_pgengine"
"--old-datadir=$PGDATA"
"--new-datadir=$PGDATA_new"
)

# Dangerous --link option, we lose $DATADIR if something goes wrong.
! $optimized || upgrade_cmd+=(--link)

# User-specified options for pg_upgrade.
eval "upgrade_cmd+=(${POSTGRESQL_UPGRADE_PGUPGRADE_OPTIONS-})"

# the upgrade
info_msg "Starting the pg_upgrade process."

# Once we stop support for PostgreSQL 9.4, we don't need
# REDHAT_PGUPGRADE_FROM_RHEL hack as we don't upgrade from 9.2 -- that means
# that we don't need to fiddle with unix_socket_director{y,ies} option.
REDHAT_PGUPGRADE_FROM_RHEL=1 \
"${upgrade_cmd[@]}" || { rm -rf "$PGDATA_new" && false ; }

# Move the important configuration and remove old data. This is highly
# careless, but we can't do more for this over-automatized process.
info_msg "Swap the old and new PGDATA and cleanup."
mv "$PGDATA"/*.conf "$PGDATA_new"
rm -rf "$PGDATA"
mv "$PGDATA_new" "$PGDATA"
info_msg "Upgrade DONE."
)
# Run right after container startup, when the data volume is already initialized
# (not initialized by this container run) and thus there exists a chance that
# the data was generated by incompatible PostgreSQL major version.
# Decides between: start normally, refuse to start, or call run_pgupgrade.
try_pgupgrade ()
{
local versionfile="$PGDATA"/PG_VERSION version upgrade_available

# This file always exists.
test -f "$versionfile"
version=$(cat "$versionfile")

# If we don't support pg_upgrade, skip.
test -z "${POSTGRESQL_PREV_VERSION-}" && return 0

if test "$POSTGRESQL_VERSION" = "$version"; then
# No need to call pg_upgrade.

# Mistakenly requests upgrade? If not, just start the DB.
test -z "${POSTGRESQL_UPGRADE-}" && return 0

# Make _sure_ we have this safety-belt here, otherwise our users would
# just specify '-e POSTGRESQL_UPGRADE=hardlink' permanently, even for
# re-deployment cases when upgrade is not needed. Setting such
# unfortunate default could mean that pg_upgrade might (after some user
# mistake) migrate (or even destruct, especially with --link) the old data
# directory with limited rollback options, if any.
echo >&2
echo >&2 "== WARNING!! =="
echo >&2 "PostgreSQL server version matches the datadir PG_VERSION."
echo >&2 "The \$POSTGRESQL_UPGRADE makes no sense and you probably"
echo >&2 "made some mistake, keeping the variable set you might"
echo >&2 "risk a data loss in future!"
echo >&2 "==============="
echo >&2

# Exit here, but allow _really explicit_ foot-shot.
# (POSTGRESQL_UPGRADE_FORCE defaults to 'false', which fails the function;
# setting it to 'true' lets startup continue despite the warning.)
${POSTGRESQL_UPGRADE_FORCE-false}
return 0
fi

# At this point in code we know that PG_VERSION doesn't match the PostgreSQL
# server major version; this might mean that user either (a) mistakenly
# deploys from a bad image, or (b) user wants to perform upgrade. For the
# upgrade we require explicit request -- just to avoid disasters in (a)-cases.
if test -z "${POSTGRESQL_UPGRADE-}"; then
echo >&2 "Incompatible data directory. This container image provides"
echo >&2 "PostgreSQL '$POSTGRESQL_VERSION', but data directory is of"
echo >&2 "version '$version'."
echo >&2
echo >&2 "This image supports automatic data directory upgrade from"
echo >&2 "'$POSTGRESQL_PREV_VERSION', please _carefully_ consult image documentation"
echo >&2 "about how to use the '\$POSTGRESQL_UPGRADE' startup option."
# We could wait for postgresql startup failure (there's no risk of data dir
# corruption), but fail rather early.
false
fi

# We support pg_upgrade process only from previous version of this container
# (upgrade to N to N+1 is possible, so e.g. 9.4 to 9.5).
if test "$POSTGRESQL_PREV_VERSION" != "$version"; then
echo >&2 "With this container image you can only upgrade from data directory"
echo >&2 "of version '$POSTGRESQL_PREV_VERSION', not '$version'."
false
fi

run_pgupgrade
}
# get_matched_files finds files for image extending: prints, one base name
# per line, the files in default_dir and (if it exists) custom_dir matching
# the given glob. Callers deduplicate via 'sort -u'. Always returns 0.
function get_matched_files() {
  local custom_dir default_dir files_matched
  custom_dir="$1"
  default_dir="$2"
  # Was missing 'local' above: the assignment leaked a global variable.
  files_matched="$3"

  find "$default_dir" -maxdepth 1 -type f -name "$files_matched" -printf "%f\n"
  # Guarded with 'if' so a missing custom_dir no longer makes the function
  # itself return non-zero (the original bare '[ -d ] && find' did).
  if [ -d "$custom_dir" ]; then
    find "$custom_dir" -maxdepth 1 -type f -name "$files_matched" -printf "%f\n"
  fi
}
# process_extending_files processes extending files in the $1 and $2
# directories:
# - source all *.sh files
#   (if a file exists under both names, only the one from $1 is sourced)
# Sourcing happens in the current shell so the scripts can set variables
# and define functions for the caller.
function process_extending_files() {
  local custom_dir default_dir filename
  custom_dir=$1
  default_dir=$2

  # 'read -r' keeps backslashes literal (SC2162).
  while read -r filename ; do
    # The here-string yields one empty line when nothing matched; skip it
    # instead of announcing "sourcing ..." for an empty name.
    [ -n "$filename" ] || continue
    echo "=> sourcing $filename ..."
    # Custom file is prefered; expansions quoted so paths with spaces work.
    if [ -f "$custom_dir/$filename" ]; then
      source "$custom_dir/$filename"
    elif [ -f "$default_dir/$filename" ]; then
      source "$default_dir/$filename"
    fi
  done <<<"$(get_matched_files "$custom_dir" "$default_dir" '*.sh' | sort -u)"
}