#
# sample slony_logshipper configuration
#

# Where the logshipper itself will leave its messages.
logfile = './offline_logs/logshipper.log';

# The logshipper needs to know the cluster name in order to
# look up what the last applied logfile was.
cluster name = 'T1';

# The conninfo for the destination database is optional. If
# given, the logshipper will connect to it and apply the logs.
# In addition, it will scan the archive dir on startup for any
# log files that need to be applied before the ones given on
# the command line.
destination database = 'dbname=slony_test4';

# The archive dir is needed when running in database mode to
# scan for missing (unapplied) archives.
archive dir = './offline_logs';

# If a destination dir is specified, the logshipper will write
# the result of the data massaging into result logfiles.
## destination dir = './offline_result';

# To have something in place to fight possible resource leakage, the
# daemon will enter smart shutdown mode automatically after this many
# archives.
max archives = 3600;

# Some examples of advanced processing options:
# One can filter out and rename single tables or entire namespaces.
## ignore table "public"."history";
## rename namespace "public" to "site_001";

# Pre and post processing commands are executed via system(3).
# An @ as the first character causes the exit code to be ignored.
# Otherwise, a nonzero exit code is treated as an error and causes
# processing to abort. Pre and post processing commands have two
# special variables defined: $inarchive and $outarchive.
post processing command = 'gzip -9 $inarchive';

# Send email to the dba mailing list on error. All logging since
# the last successful completion of an archive is available in
# the $errortext variable.
error command = ' (
                   echo "archive=$inarchive"
                   echo "error messages:"
                   echo "$errortext"
                  ) | mail -s "Slony log shipping failed" postgres@localhost';
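
# Hedged usage sketch (not part of the original sample; the archive
# file name below is made up for illustration): the logshipper reads
# this configuration file and, optionally, applies archive files named
# after it on the command line, along the lines of
#
#   slony_logshipper ./slony_logshipper.conf ./offline_logs/slony1_log_1_00000000000000000042.sql
#
# Check the slony_logshipper documentation shipped with your Slony-I
# release for the exact options it accepts.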