Cleanup trailing whitespace

Jerome Charaoui 2021-01-22 11:24:14 -05:00
parent bbf488879a
commit 0ec7752dd3
18 changed files with 203 additions and 203 deletions

View File

@@ -33,8 +33,8 @@
## Default:
# testconnect = yes
## default is not to limit bandwidth.
## set to a number in kiBytes/second to limit bandwidth usage.
##
## Default:
# bwlimit = 0
@@ -164,7 +164,7 @@ exclude = /var/lib/mysql
## for more info see : borg help create
##
## Default:
# archive = {now:%Y-%m-%dT%H:%M:%S}
## compression algorithm
## can be "none", "lz4", "zstd[,L]", "zlib[,L]", "lzma[,L]", "auto,C[,L]".
@@ -213,4 +213,4 @@ exclude = /var/lib/mysql
## sshoptions = -i /root/.ssh/id_rsa_borg
##
## Default:
# sshoptions =
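
For context, a minimal borg handler config built from the options shown in this hunk; the values are illustrative choices, not defaults:

    ## example borg settings (illustrative values)
    testconnect = yes
    bwlimit = 1024
    archive = {now:%Y-%m-%dT%H:%M:%S}
    compression = zstd,5
    sshoptions = -i /root/.ssh/id_rsa_borg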

View File

@@ -1,4 +1,4 @@
## This is an example duplicity configuration file.
##
## Here you can find all the possible duplicity options, details of
## what the options provide and possible settings. The defaults are set
@@ -12,7 +12,7 @@
## options = --s3-european-buckets --s3-use-new-style
##
## Default:
# options =
## default is 0, but set to something like 19 if you want to lower the priority.
##
@@ -37,7 +37,7 @@
## temporary directory used by duplicity, set to some other location if your /tmp is small
## default is either /tmp or /usr/tmp, depending on the system
##
## Default:
# tmpdir = /tmp
@@ -78,14 +78,14 @@
## encryptkey = 04D9EA79
##
## Default:
# encryptkey =
## ID of the GnuPG private key used for data signing.
## if not set, encryptkey will be used, an example setting would be:
## signkey = 04D9EA79
##
## Default:
# signkey =
## password used to unlock the encryption key
## NB: neither quote this, nor should it contain any quotes,
@@ -93,7 +93,7 @@
## password = a_very_complicated_passphrase
##
## Default:
# password =
## password used to unlock the signature key, used only if
## it differs from the encryption key
@@ -150,12 +150,12 @@ exclude = /var/cache/backupninja/duplicity
## perform an incremental backup? (default = yes)
## if incremental = no, perform a full backup in order to start a new backup set
##
## Default:
# incremental = yes
## how many days of incremental backups before doing a full backup again ;
## default is 30 days (one can also use the time format of duplicity).
## if increments = keep, never automatically perform a new full backup ;
## only perform incremental backups.
##
## Default:
@@ -188,7 +188,7 @@ exclude = /var/cache/backupninja/duplicity
## the default value of this configuration option is not set:
##
## Default:
# desturl =
## Amazon Web Services Access Key ID and Secret Access Key, needed for backups
## to S3 buckets.
@@ -196,8 +196,8 @@ exclude = /var/cache/backupninja/duplicity
## awssecretaccesskey = YOUR_AWS_SECRET_KEY
##
## Default:
# awsaccesskeyid =
# awssecretaccesskey =
## RackSpace's CloudFiles username, API key, and authentication URL.
## cfusername = YOUR_CF_USERNAME
@@ -205,9 +205,9 @@ exclude = /var/cache/backupninja/duplicity
## cfauthurl = YOUR_CF_AUTH_URL
##
## Default:
# cfusername =
# cfapikey =
# cfauthurl =
## Dropbox requires a valid authentication token. To obtain one, you will need
## to create a Dropbox API application at https://www.dropbox.com/developers/apps/create.
@@ -222,7 +222,7 @@ exclude = /var/cache/backupninja/duplicity
## FTP password, needed for backups using desturl = ftp://...
##
## Default:
# ftp_password =
## bandwith limit, in KB/s ; default is 0, i.e. no limit
## if using 'desturl' above, 'bandwidthlimit' must not be set
@@ -247,25 +247,25 @@ exclude = /var/cache/backupninja/duplicity
## warning: requires no space beetween "-o" and "IdentityFile=...".
##
## Default:
# sshoptions =
## put the backups under this destination directory
## if using 'desturl' above, this must not be set
## in all other cases, this must be set!
## an example setting would be:
## destdir = /backups
##
## Default:
# destdir =
## the machine which will receive the backups
## if using 'desturl' above, this must not be set
## in all other cases, this must be set!
## an example setting would be:
## desthost = backuphost
##
## Default:
# desthost =
## make the files owned by this user
## if using 'desturl' above, this must not be set
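
Taken together, a minimal duplicity handler config using the options documented above; values are illustrative (nicelevel is assumed to be the key the priority note refers to, and the sshoptions value follows the no-space warning for -o):

    ## example duplicity settings (illustrative values)
    nicelevel = 19
    encryptkey = 04D9EA79
    password = a_very_complicated_passphrase
    incremental = yes
    increments = 30
    desthost = backuphost
    destdir = /backups
    sshoptions = -oIdentityFile=/root/.ssh/id_rsa_backup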

View File

@@ -1,5 +1,5 @@
##
## configuration file for openldap backups
##
## If the method is set to "slapcat", the LDIFs generated are
## suitable for use with slapadd. As the entries are in database
@@ -25,19 +25,19 @@
## if your backend is ldbm and your method is slapcat, but unnecessary otherwise.
# restart = no
## method (default ldapsearch): either 'ldapsearch' or 'slapcat'
## ldapsearch is the safer method to do backups, but is slow, slapcat
## is much faster, but should not be done on an ldbm backend unless you have
## restart set to yes
## NOTE: with the ldapsearch method passwordfile and binddn need to be set
# method = ldapsearch
## passwordfile (no default): this should be set to the file that contains
## your ldap password, this is required for ldapsearch and not needed for slapcat
## this file should have no newlines in it, echo -n "password" > passfile works.
## NOTE: be sure to set the permissions on your password file appropriately
## (hint: world readable is not appropriate)
# passwordfile =
## binddn (no default): set this to the DN of the user that the ldapsearch binds
## to, not needed for slapcat
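
The two methods described above translate into configs like this sketch; the passwordfile path and binddn are made-up examples:

    ## slapcat: fast, needs no credentials (set restart = yes first if the backend is ldbm)
    # method = slapcat
    ## ldapsearch: safer but slower, needs credentials
    method = ldapsearch
    passwordfile = /etc/backupninja/ldap.passwd
    binddn = cn=admin,dc=example,dc=com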

View File

@@ -13,10 +13,10 @@
## We handle each maildir individually because it becomes very
## unweldy to hardlink and rsync many hundreds of thousands
## of files at once. It is much faster to take on smaller
## chunks at a time.
##
## Any maildir which is deleted from the source will be moved to
## "deleted" directory in the destination. It is up to you to
## periodically remove this directory or old maildirs in it.
##
## Note: This handler assumes that the remote shell is set to bash
@@ -27,7 +27,7 @@
when = everyday at 21:00
## each users maildir will contain these files:
## daily.1, daily.2, daily.3, daily.4, daily.5, weekly.1, weekly.2,
## weekly.3, monthly.1
## if keepdaily is 5, keepweekly is 3, and keepmonthly is 1
keepdaily = 5
@@ -53,7 +53,7 @@ destuser = backer
# For alternate ports from the default 22, specify here
destport = 4444
# If you need to specify an alternate ssh public key authentication file
# do that here. Default: /root/.ssh/id_rsa
destid_file = /home/backupkeys/.ssh/maildirbackup_id_rsa
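
Pulling the settings shown in this hunk together: with the rotation values below, each user's maildir ends up holding daily.1 through daily.5, weekly.1 through weekly.3, and monthly.1:

    when = everyday at 21:00
    keepdaily = 5
    keepweekly = 3
    keepmonthly = 1
    destuser = backer
    destport = 4444
    destid_file = /home/backupkeys/.ssh/maildirbackup_id_rsa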

View File

@@ -30,11 +30,11 @@ compress = yes
# configfile = < path/to/file > (default = /etc/mysql/debian.cnf)
# The config file is passed to mysql with --defaults-file.
# On debian, this default will allow backupninja to make backups
# of mysql without configuring any additional options.
# (this option is not compatible with "user" or "dbusername").
#
# user = <user> (default = root)
# Run mysql commands as 'user'. A valid .my.cnf must exist with a
# database username and password in the user's home directory.
# (this option is not compatible with "configfile" or "dbusername").
#
@@ -43,14 +43,14 @@ compress = yes
# (this option is not compatible with "configfile" or "user").
#
# dbpassword = <dbpass> (no default)
# The password used with dbusername. this password will NOT be passed
# on the command line and is not readable using "ps aux".
#
# dbhost = <host> (default = localhost)
# only localhost works right now.
#
# databases = < all | db1 db2 db3 > (default = all)
# which databases to backup. should either be the word 'all' or a
# space separated list of database names.
#
# nodata = < db.table1 db.table2 db.table3 > (no default)
@@ -60,10 +60,10 @@ compress = yes
# isn't necessary to backup, but you still need the structure to exist
# on a restore. You *must* specify the table as part of a database, such
# as "drupal.cache", where the database name is "drupal" and the table that
# you do not want to dump the data for is called "cache".
#
# backupdir = < path/to/destination > (default = /var/backups/mysql)
# where to dump the backups. hotcopy backups will be in a subdirectory
# 'hotcopy' and sqldump backups will be in a subdirectory 'sqldump'
#
# hotcopy = < yes | no > (default = no)
@@ -78,4 +78,4 @@ compress = yes
# arguments to pass to mysqldump
#
# compress = < yes | no > (default = yes)
# if yes, compress the sqldump output.
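
A minimal mysql handler config combining the options above; since configfile is incompatible with user and dbusername, pick one authentication style (values illustrative):

    configfile = /etc/mysql/debian.cnf
    databases = all
    nodata = drupal.cache
    backupdir = /var/backups/mysql
    hotcopy = no
    compress = yes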

View File

@@ -4,19 +4,19 @@
# where to dump the backups
# databases = < all | db1 db2 db3 > (default = all)
# which databases to backup. should either be the word 'all' or a
# space separated list of database names.
# Note: when using 'all', pg_dumpall is used instead of pg_dump, which means
# that cluster-wide data (such as users and groups) are saved.
# compress = < yes | no > (default = yes)
# if yes, compress the pg_dump/pg_dumpall output.
# format = < plain | tar | custom > (default = plain)
# plain - Output a plain-text SQL script file with the extension .sql.
# When dumping all databases, a single file is created via pg_dumpall.
# tar - Output a tar archive suitable for input into pg_restore. More
# flexible than plain and can be manipulated by standard Unix tools
# such as tar. Creates a globals.sql file and an archive per database.
# custom - Output a custom PostgreSQL pg_restore archive. This is the most
# flexible format allowing selective import and reordering of database
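
As a sketch, a minimal pgsql handler config based on the options above (backupdir is assumed to be the "where to dump the backups" key; values illustrative):

    backupdir = /var/backups/postgres
    databases = all
    compress = yes
    format = custom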

View File

@@ -6,17 +6,17 @@
## as the commented out option, uncomment and change when
## necessary. Options which are uncommented in this example do not have
## defaults, and the settings provided are recommended.
##
## The defaults are useful in most cases, just make sure to configure the
## destination host and user.
##
## passed directly to rdiff-backup
## an example setting would be:
## options = --force
##
## Default:
# options =
## default is 0, but set to 19 if you want to lower the priority.
## an example setting would be:
@@ -40,10 +40,10 @@
## Default:
# testconnect = no
## default is not to limit bandwidth.
## set to a number in bytes/second to limit bandwidth usage. Use a negative
## number to set a limit that will never be exceeded, or a positive number
## to set a target average bandwidth use. cstream is required. See cstream's
## -t option for more information. 62500 bytes = 500 Kb (.5 Mb)
## an example setting would be:
## bwlimit = 62500
@@ -57,9 +57,9 @@
## on remote and local side are different, and you are certain there are no
## problems in using mis-matched versions and want to get beyond this check.
## An example usage could be the remote side has its authorized_keys configured
## with command="rdiff-backup --server" to allow for restricted yet automated
## password-less backups
##
## Default:
# ignore_version = no
@@ -149,7 +149,7 @@ exclude = /var/cache/backupninja/duplicity
## put the backups under this directory, this must be set!
## an example setting would be:
## directory = /backups
##
## Default:
# directory =
@@ -175,4 +175,4 @@ exclude = /var/cache/backupninja/duplicity
## sshoptions = -o IdentityFile=/root/.ssh/id_rsa_duplicity
##
## Default:
# sshoptions =
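
As a concrete example, an rdiff handler config drawn from the options in this hunk (values illustrative; 62500 bytes/s is the 500 Kb example given above):

    options = --force
    testconnect = no
    bwlimit = 62500
    ignore_version = no
    directory = /backups
    sshoptions = -o IdentityFile=/root/.ssh/id_rsa_duplicity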

View File

@@ -14,13 +14,13 @@
# just use this option if your data is backed up in a separate partition and
# you want backupninja to fsck it; this option will just be used if fscheck
# (see below) is set to 'yes'
#partition =
# set to 1 if fsck should run on partition after the backup is made
#fscheck =
# set to 1 if partition is mounted read-only
#read_only =
# backup partition mountpoint or backup main folder
# this doesn't need to be a real partition, but should be at least the
@@ -40,15 +40,15 @@ backupdir = myserver
# if you want to have incremental backups for longer periods (like months) you
# have to configure rotations for 30 or more using the "days" parameter at the
# [general] section in the handler config.
#
# The short format is better described here:
# http://www.mikerubel.org/computers/rsync_snapshots/#Incremental
#
# The long format is inspired by the maildir handler and allows keeping backups
# of longer periods (weeks and months) using less rotations as it stores
# the increments in folders like daily.1, weekly.1, monthly.1 and has three
# rotation parameters:
#
# keepdaily = number of daily backup increments
# keepweekly = number of weekly backup increments
# keepmonthly = number of monthly backup increments
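
For the long rotation format described above, a sketch of the relevant settings; the partition path is illustrative, and "format = long" is assumed to be the switch between the two formats:

    partition = /dev/sdb1
    fscheck = 1
    read_only = 0
    backupdir = myserver
    format = long
    keepdaily = 7
    keepweekly = 4
    keepmonthly = 6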

View File

@@ -1,6 +1,6 @@
##
## Perform a hot backup of subversion repositories.
##
## REQUIRES: apt-get install subversion-tools
##
## This file can be empty, the defaults are usually good.

View File

@@ -12,17 +12,17 @@
# (2) a list of all the packages installed and removed.
# this file can be used to restore the state of installed packages
# by running "dpkg --set-selections < dpkg-selections.txt and
# then run "apt-get -u dselect-upgrade". If you have the
# debconf-set-selections file from (1), you should restore those first.
#
# (3) the partition table of all disks.
# this partition table can be used to format another disk of
# the same size. this can be handy if using software raid and
# you have a disk go bad. just replace the disk and partition it
# by running "sfdisk /dev/sdb < partitions.sdb.txt"
# (MAKE SURE YOU PARTITION THE CORRECT DISK!!!)
#
# (4) hardware information.
# detailed information on most important aspects of the hardware.
#
# (5) the Luks header of every Luks block device, if option luksheaders
@@ -53,9 +53,9 @@
# partitions = yes
# NOTE: the __star__ below will be replaced by the disks found on the
# system (e.g. partitions.sda.txt, partitions.sdb.txt). If you change
# the partitionsfile default below, be sure to include the __star__
# replacement in the filename, or you will get one file for only one disk,
# the others being written to the same file, and then overwritten by the next.
# partitionsfile = /var/backups/partitions.__star__.txt
# dosfdisk = yes
@@ -66,8 +66,8 @@
# luksheaders = no
# NOTE: the __star__ below will be replaced by the Luks partitions found on the
# system (e.g. luksheader.sda2.bin, luksheader.sdb3.bin). If you change
# the luksheadersfile default below, be sure to include the __star__
# replacement in the filename, or you will get one file for only one partition,
# the others being written to the same file, and then overwritten by the next.
# luksheadersfile = /var/backups/luksheader.__star__.bin
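
A sys handler config exercising the options above, keeping the required __star__ placeholder in both filenames (the values are the documented defaults, with luksheaders switched on):

    partitions = yes
    partitionsfile = /var/backups/partitions.__star__.txt
    dosfdisk = yes
    luksheaders = yes
    luksheadersfile = /var/backups/luksheader.__star__.bin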

View File

@@ -1,6 +1,6 @@
##
## Perform backups of trac environment
##
## REQUIRES: apt-get install trac
##
## This file can be empty, the defaults are usually good.

View File

@@ -26,18 +26,18 @@
# For the backup rotation to work, destuser must be able to run
# arbitrary bash commands on the desthost.
#
# If 'remove' is set to 'yes' (default), then any mail directory
# which is deleted from the source will be moved to a "deleted"
# directory in the destination. It is up to you to periodically
# remove this directory or old maildirs in it.
#
# Limitations:
# . because we are not dynamically looking up anything with
# dovecot's userdb, we expect all data to be under the same
# tree on both the source and destination
#
# . we are assuming a backup to a backup server, so the
# destination host should have its dovecot mail_location
# configured to put the mail into
# $stripped_destdir/$letter/$user/$current_backup
#
@@ -53,7 +53,7 @@ getconf keepmonthly 1
getconf srcconffile
getconf destconffile
getconf srcdir
getconf destdir
getconf current_backup current_backup
getconf desthost
@@ -89,7 +89,7 @@ fi
function do_user() {
    local user=$1
    local btype=$2
    local letter=${user:0:1}
    local target="$stripped_destdir/$letter/$user/$btype.1"
    local failedcount=0
    local ret=0
@@ -202,7 +202,7 @@ function do_rotate() {
                echo "Debug: skipping rotation of \$dir.\$i because it was created" \$(( (now-created)/86400)) "days ago ("\$(( (now-cutoff_time)/86400))" needed)."
            fi
        fi
    done
done
max=\$((keepdaily+1))
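
The debug line above encodes the rotation guard: an increment is only rotated once it is old enough. A minimal bash sketch of that age check, assuming created and cutoff_time hold epoch seconds read from the increment's metadata (names taken from the message above):

    now=$(date +%s)
    if [ "$created" -gt "$cutoff_time" ]; then
        # too recent: rotating now would discard a backup earlier than the policy allows
        echo "Debug: skipping rotation; created $(( (now-created)/86400 )) days ago," \
            "$(( (now-cutoff_time)/86400 )) needed."
    fi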

View File

@@ -356,7 +356,7 @@ incremental = $dup_incremental
# how many days of incremental backups before doing a full backup again ;
# default is 30 days (one can also use the time format of duplicity).
# if increments = keep, never automatically perform a new full backup ;
# only perform incremental backups.
#increments = 30
#increments = keep
@@ -398,9 +398,9 @@ keepincroffulls = $dup_keepincroffulls
## cfauthurl = YOUR_CF_AUTH_URL
##
## Default:
# cfusername =
# cfapikey =
# cfauthurl =
## Dropbox requires a valid authentication token. To obtain one, you will need
## to create a Dropbox API application at https://www.dropbox.com/developers/apps/create.
@@ -413,7 +413,7 @@ keepincroffulls = $dup_keepincroffulls
# dropboxaccesstoken =
# FTP password, needed for backups using desturl = ftp://...
#ftp_password =
# bandwith limit, in KB/s ; default is 0, i.e. no limit
# if using 'desturl' above, 'bandwidthlimit' must not be set
@@ -436,7 +436,7 @@ bandwidthlimit = $dup_bandwidth
## warning: requires no space beetween "-o" and "IdentityFile=...".
##
## Default:
# sshoptions =
sshoptions = $dup_sshoptions
# put the backups under this destination directory

View File

@@ -63,7 +63,7 @@ then
    home=`getent passwd "root" | @AWK@ -F: '{print $6}'`
    [ -d $home ] || fatal "Can't find root's home directory ($home)."
    mycnf="$home/.my.cnf"
    workcnf="$mycnf"
@@ -75,7 +75,7 @@ then
        debug "mv $workcnf $tmpcnf"
        mv $workcnf $tmpcnf
    fi
    oldmask=`umask`
    umask 077
    cat > $workcnf <<EOF
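
The excerpt tightens the umask so the generated credentials file is unreadable to other users; a sketch of the complete pattern (the heredoc body and the umask restore fall outside the hunk, so the lines below are assumptions, with placeholder credentials):

    oldmask=`umask`
    umask 077            # the file below is created mode 600
    printf '[client]\nuser = root\npassword = PLACEHOLDER\n' > $workcnf
    umask $oldmask       # restore the previous umask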

View File

@@ -66,7 +66,7 @@ pgsql_wizard() {
            *) pgsql_format = "format = plain";;
        esac
    fi
    # write config file
    get_next_filename $configdirectory/20.pgsql
@@ -91,8 +91,8 @@ $pgsql_compress
# format = < plain | tar | custom > (default = plain)
# plain - Output a plain-text SQL script file with the extension .sql.
# When dumping all databases, a single file is created via pg_dumpall.
# tar - Output a tar archive suitable for input into pg_restore. More
# flexible than plain and can be manipulated by standard Unix tools
# such as tar. Creates a globals.sql file and an archive per database.
# custom - Output a custom PostgreSQL pg_restore archive. This is the most
# flexible format allowing selective import and reordering of database

View File

@@ -101,18 +101,18 @@
# function definitions
function eval_config {
    # system section
    setsection system
    getconf rm rm
    getconf cp cp
    getconf touch touch
    getconf mv mv
    getconf fsck fsck
    # general section
    setsection general
    getconf log /var/log/backup/rsync.log
    getconf partition
@@ -130,14 +130,14 @@ function eval_config {
    getconf enable_mv_timestamp_bug no
    getconf tmp /tmp
    getconf multiconnection no
    # source section
    setsection source
    getconf from local
    getconf rsync $RSYNC
    getconf rsync_options "-av --delete --recursive"
    if [ "$from" == "remote" ]; then
        getconf testconnect no
        getconf protocol ssh
@@ -156,7 +156,7 @@ function eval_config {
        getconf remote_rsync rsync
        getconf id_file /root/.ssh/id_rsa
    fi
    getconf batch no
    if [ "$batch" == "yes" ]; then
@@ -172,13 +172,13 @@ function eval_config {
    getconf exclude
    getconf numericids 0
    getconf compress 0
    # dest section
    setsection dest
    getconf dest local
    getconf fakesuper no
    if [ "$dest" == "remote" ]; then
        getconf testconnect no
        getconf protocol ssh
@@ -197,7 +197,7 @@ function eval_config {
        getconf remote_rsync rsync
        getconf id_file /root/.ssh/id_rsa
    fi
    getconf batch no
    if [ "$batch" != "yes" ]; then
@@ -212,9 +212,9 @@ function eval_config {
    getconf numericids 0
    getconf compress 0
    # services section
    setsection services
    getconf initscripts /etc/init.d
    getconf service
@@ -231,7 +231,7 @@ function eval_config {
    backupdir="$mountpoint/$backupdir"
    if [ "$dest" == "local" ] && [ ! -d "$backupdir" ]; then
        fatal "Backupdir $backupdir does not exist"
    fi
@@ -247,9 +247,9 @@ function eval_config {
        fi
    fi
    if [ ! -z "$nicelevel" ]; then
        nice="nice -n $nicelevel"
    else
        nice=""
    fi
@@ -419,7 +419,7 @@ function rotate_long {
        warning "Warning: metadata does not exist for $dir.1. This backup may be only partially completed. Skipping rotation."
        continue 1
    fi
    # Rotate the current list of backups, if we can.
    oldest=`find $backuproot -maxdepth 1 -type d -name $rottype'.*' | @SED@ -e 's/^.*\.//' | sort -n | tail -1`
    [ "$oldest" == "" ] && oldest=0
@@ -488,7 +488,7 @@ function rotate_long {
    max=$((keep${rottype}+1))
    dir="$backuproot/$rottype"
    oldest=`find $backuproot -maxdepth 1 -type d -name $rottype'.*' | @SED@ -e 's/^.*\.//' | sort -n | tail -1`
    [ "$oldest" == "" ] && oldest=0
    # if we've rotated the last backup off the stack, remove it.
    for (( i=$oldest; i >= $max; i-- )); do
        if [ -d $dir.$i ]; then
@@ -546,7 +546,7 @@ function rotate_long_remote {
        echo "Warning: metadata does not exist for \$dir.1. This backup may be only partially completed. Skipping rotation."
        continue 1
    fi
    # Rotate the current list of backups, if we can.
    oldest=\`find $backuproot -maxdepth 1 -type d -name \$rottype'.*' | @SED@ -e 's/^.*\.//' | sort -n | tail -1\`
    [ "\$oldest" == "" ] && oldest=0
@@ -615,7 +615,7 @@ function rotate_long_remote {
    max=\$((keep\${rottype}+1))
    dir="$backuproot/\$rottype"
    oldest=\`find $backuproot -maxdepth 1 -type d -name \$rottype'.*' | @SED@ -e 's/^.*\.//' | sort -n | tail -1\`
    [ "\$oldest" == "" ] && oldest=0
    # if we've rotated the last backup off the stack, remove it.
    for (( i=\$oldest; i >= \$max; i-- )); do
        if [ -d \$dir.\$i ]; then
@@ -847,7 +847,7 @@ function set_orig {
}
function set_dest {
    if [ "$dest" == "local" ]; then
        dest_path="$backupdir/$SECTION/$suffix/"
@@ -981,7 +981,7 @@ function set_rsync_options {
    if [ ! -z "$bandwidthlimit" ]; then
        rsync_options="$rsync_options --bwlimit=$bandwidthlimit"
    fi
    if [ "$fakesuper" == "yes" ]; then
        remote_rsync="$remote_rsync --fake-super"
    fi
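
eval_config reads a sectioned ini config: each setsection call selects a section, and the getconf calls that follow read keys from it, with the second argument as the default. A hedged sketch of a config it would parse, using only keys visible in this hunk (values illustrative):

    [general]
    log = /var/log/backup/rsync.log
    multiconnection = no

    [source]
    from = remote
    rsync_options = -av --delete --recursive
    id_file = /root/.ssh/id_rsa

    [dest]
    dest = local
    fakesuper = no

    [services]
    initscripts = /etc/init.d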

View File

@@ -11,7 +11,7 @@ edit = sed \
	-e "s,@AWK\@,$(AWK),g" \
	-e "s,@SED\@,$(SED),g" \
	-e "s,@MKTEMP\@,$(MKTEMP),g" \
	-e "s,@libdir\@,$(pkglibdir),g"
easydialog: $(srcdir)/easydialog.in
	rm -f easydialog
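
The edit macro above is a sed pipeline that expands @AWK@-style placeholders when scripts are generated from their .in sources. The recipe is cut off by the hunk; a typical completion would look like the sketch below, where the $(edit) invocation and chmod step are assumptions, not shown in the diff (recipe lines must be tab-indented):

    easydialog: $(srcdir)/easydialog.in
    	rm -f easydialog
    	$(edit) $(srcdir)/easydialog.in > easydialog
    	chmod +x easydialog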

View File

@@ -1,5 +1,5 @@
# -*- mode: awk; indent-tabs-mode: nil; -*-
#
# parseini --- parses 'ini' style configuration files.
#
# Usage:
@@ -8,123 +8,123 @@
# if section is an empty string, then we use the default section
#
# example ini file:
#
# fruit = apple
# fruit = pear
# multiline = this is a multiline \
# parameter
#
# # this is a comment
# [colors]
# red = yes
# green = no
# blue = maybe
#
# [ocean]
# fish = red
# fish = blue
#
# example usage:
# > awk -f parseini S=ocean P=fish testfile.ini
# would return:
# red
# blue
#
BEGIN {
    readlines = 1
    implied = 1
}
# remove lines starting with #, but not #!
/^#[^!]/ {next}
# skip blank
/^[ \r\t]*$/ {next}
# we want to read the lines of the matched section
# and disable for other sections
/^\[.+\][ \r\t]*$/ {
    continueline = 0
    if (S && implied) {
        nline = 0
        implied = 0
    }
    if (S && match($0, "^\\[" S "\\][ \n]*")) {
        # we found the section, so start reading.
        readlines = 1
    }
    else {
        # no section, so stop reading lines
        if (readlines) readlines = 0
    }
    next
}
# when reading, store lines.
{
    if (!readlines) next
    line[nline++] = $0
    if ($0 ~ /\\[ \r\t]*$/)
        continueline = 1
    else
        continueline = 0
}
# process the read lines lines, matching parameters
END {
    # if section is set but implied is still true
    # then we never found the section, so use everything
    if (S && implied) {
        nline = 0
    }
    # if have P then find P in read lines and get values
    if (P) {
        MATCH = "^[ \r\t]*" P "[ \r\t]*="
        continueline = 0
        for (x = 0; x < nline; ++x) {
            v = line[x]
            if (continueline) {
                sub(/[ \r\t]+$/, "", v)
                if (v ~ /\\$/) {
                    v = substr(v, 1, length(v)-1)
                    sub(/[ \r\t]+$/, "", v)
                }
                if (v) value[nvalue++] = v
            }
            else if (v ~ MATCH) {
                sub(MATCH, "", v)
                sub(/^[ \r\t]+/, "", v)
                sub(/[ \r\t]+$/, "", v)
                if (v ~ /\\$/) {
                    continueline = 1
                    v = substr(v, 1, length(v)-1)
                    sub(/[ \r\t]+$/, "", v)
                }
                if (v) value[nvalue++] = v
            }
        }
        # copy parameter definition to output array
        nline = nvalue
        for (x = 0; x < nvalue; ++x)
            line[x] = value[x]
    }
    # trim all leading & trailing whitespace;
    # except for leading whitespace in continuation lines,
    for (x = 0; x < nline; ++x) {
        sub(/^[ \r\t]+/, "", line[x])
        sub(/[ \r\t]+$/, "", line[x])
    }
    # output the final result
    for (x = 0; x < nline; ++x)
        print line[x]
    if (nline) exit 0
    else exit 1
}
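
To see the continuation-line handling end to end, a quick usage example in the same style as the header comments (test.ini is a made-up file name):

# > printf 'multiline = one \\\n two\n' > test.ini
# > awk -f parseini S= P=multiline test.ini
# would return:
# one
# two

The backslash-continued value is split into pieces, each piece is trimmed, and the pieces are printed on separate lines; the script exits 0 because at least one value was found.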