Clean up trailing whitespace

Jerome Charaoui 2021-01-22 11:24:14 -05:00
parent bbf488879a
commit 0ec7752dd3
18 changed files with 203 additions and 203 deletions

View File

@@ -33,8 +33,8 @@
## Default:
# testconnect = yes
## default is not to limit bandwidth.
## set to a number in kiBytes/second to limit bandwidth usage.
##
## Default:
# bwlimit = 0
@@ -164,7 +164,7 @@ exclude = /var/lib/mysql
## for more info see: borg help create
##
## Default:
# archive = {now:%Y-%m-%dT%H:%M:%S}
## compression algorithm
## can be "none", "lz4", "zstd[,L]", "zlib[,L]", "lzma[,L]", "auto,C[,L]".
@@ -213,4 +213,4 @@ exclude = /var/lib/mysql
## sshoptions = -i /root/.ssh/id_rsa_borg
##
## Default:
# sshoptions =
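
A rough sketch of the borg invocation the settings above translate into; the repository location and source paths are hypothetical, and the handler assembles the real command itself:

# sshoptions is passed via BORG_RSH; the archive name uses the template above
BORG_RSH="ssh -i /root/.ssh/id_rsa_borg" \
borg create --compression lz4 \
    "backupuser@backuphost:repo::{now:%Y-%m-%dT%H:%M:%S}" \
    /etc /home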

View File

@@ -1,4 +1,4 @@
## This is an example duplicity configuration file.
##
## Here you can find all the possible duplicity options, details of
## what the options provide and possible settings. The defaults are set
@@ -12,7 +12,7 @@
## options = --s3-european-buckets --s3-use-new-style
##
## Default:
# options =
## default is 0, but set to something like 19 if you want to lower the priority.
##
@@ -37,7 +37,7 @@
## temporary directory used by duplicity, set to some other location if your /tmp is small
## default is either /tmp or /usr/tmp, depending on the system
##
## Default:
# tmpdir = /tmp
@@ -78,14 +78,14 @@
## encryptkey = 04D9EA79
##
## Default:
# encryptkey =
## ID of the GnuPG private key used for data signing.
## if not set, encryptkey will be used, an example setting would be:
## signkey = 04D9EA79
##
## Default:
# signkey =
## password used to unlock the encryption key
## NB: neither quote this, nor should it contain any quotes,
@@ -93,7 +93,7 @@
## password = a_very_complicated_passphrase
##
## Default:
# password =
## password used to unlock the signature key, used only if
## it differs from the encryption key
@@ -150,12 +150,12 @@ exclude = /var/cache/backupninja/duplicity
## perform an incremental backup? (default = yes)
## if incremental = no, perform a full backup in order to start a new backup set
##
## Default:
# incremental = yes
## how many days of incremental backups before doing a full backup again ;
## default is 30 days (one can also use the time format of duplicity).
## if increments = keep, never automatically perform a new full backup ;
## only perform incremental backups.
##
## Default:
@@ -188,7 +188,7 @@ exclude = /var/cache/backupninja/duplicity
## the default value of this configuration option is not set:
##
## Default:
# desturl =
## Amazon Web Services Access Key ID and Secret Access Key, needed for backups
## to S3 buckets.
@@ -196,8 +196,8 @@ exclude = /var/cache/backupninja/duplicity
## awssecretaccesskey = YOUR_AWS_SECRET_KEY
##
## Default:
# awsaccesskeyid =
# awssecretaccesskey =
## RackSpace's CloudFiles username, API key, and authentication URL.
## cfusername = YOUR_CF_USERNAME
@@ -205,9 +205,9 @@ exclude = /var/cache/backupninja/duplicity
## cfauthurl = YOUR_CF_AUTH_URL
##
## Default:
# cfusername =
# cfapikey =
# cfauthurl =
## Dropbox requires a valid authentication token. To obtain one, you will need
## to create a Dropbox API application at https://www.dropbox.com/developers/apps/create.
@@ -222,7 +222,7 @@ exclude = /var/cache/backupninja/duplicity
## FTP password, needed for backups using desturl = ftp://...
##
## Default:
# ftp_password =
## bandwidth limit, in KB/s ; default is 0, i.e. no limit
## if using 'desturl' above, 'bandwidthlimit' must not be set
@@ -247,25 +247,25 @@ exclude = /var/cache/backupninja/duplicity
## warning: requires no space between "-o" and "IdentityFile=...".
##
## Default:
# sshoptions =
## put the backups under this destination directory
## if using 'desturl' above, this must not be set
## in all other cases, this must be set!
## an example setting would be:
## destdir = /backups
##
## Default:
# destdir =
## the machine which will receive the backups
## if using 'desturl' above, this must not be set
## in all other cases, this must be set!
## an example setting would be:
## desthost = backuphost
##
## Default:
# desthost =
## make the files owned by this user
## if using 'desturl' above, this must not be set
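
For orientation, a duplicity command roughly equivalent to a destdir/desthost configuration; the host, paths, and the key ID (reusing the example above) are hypothetical, and the handler builds the real command line:

duplicity --encrypt-key 04D9EA79 \
    --exclude /var/cache/backupninja/duplicity \
    /etc scp://backupuser@backuphost//backups/myhost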

View File

@@ -1,5 +1,5 @@
##
## configuration file for openldap backups
##
## If the method is set to "slapcat", the LDIFs generated are
## suitable for use with slapadd. As the entries are in database
@@ -25,19 +25,19 @@
## if your backend is ldbm and your method is slapcat, but unnecessary otherwise.
# restart = no
## method (default ldapsearch): either 'ldapsearch' or 'slapcat'
## ldapsearch is the safer method to do backups, but is slow, slapcat
## is much faster, but should not be done on an ldbm backend unless you have
## restart set to yes
## NOTE: with the ldapsearch method passwordfile and binddn need to be set
# method = ldapsearch
## passwordfile (no default): this should be set to the file that contains
## your ldap password, this is required for ldapsearch and not needed for slapcat
## this file should have no newlines in it, echo -n "password" > passfile works.
## NOTE: be sure to set the permissions on your password file appropriately
## (hint: world readable is not appropriate)
# passwordfile =
## binddn (no default): set this to the DN of the user that the ldapsearch binds
## to, not needed for slapcat
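
Creating the password file as the comments above describe (the path is hypothetical; use whatever passwordfile points at):

# no trailing newline, not world readable
echo -n "password" > /etc/backupninja/ldap.pass
chmod 600 /etc/backupninja/ldap.pass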

View File

@@ -13,10 +13,10 @@
## We handle each maildir individually because it becomes very
## unwieldy to hardlink and rsync many hundreds of thousands
## of files at once. It is much faster to take on smaller
## chunks at a time.
##
## Any maildir which is deleted from the source will be moved to
## a "deleted" directory in the destination. It is up to you to
## periodically remove this directory or old maildirs in it.
##
## Note: This handler assumes that the remote shell is set to bash
@@ -27,7 +27,7 @@
when = everyday at 21:00
## each user's maildir will contain these files:
## daily.1, daily.2, daily.3, daily.4, daily.5, weekly.1, weekly.2,
## weekly.3, monthly.1
## if keepdaily is 5, keepweekly is 3, and keepmonthly is 1
keepdaily = 5
@@ -53,7 +53,7 @@ destuser = backer
# For alternate ports from the default 22, specify here
destport = 4444
# If you need to specify an alternate ssh public key authentication file
# do that here. Default: /root/.ssh/id_rsa
destid_file = /home/backupkeys/.ssh/maildirbackup_id_rsa
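
To verify the alternate port and key before a run, a manual connection test along these lines can help (the desthost value is hypothetical):

ssh -p 4444 -i /home/backupkeys/.ssh/maildirbackup_id_rsa backer@backuphost true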

View File

@@ -30,11 +30,11 @@ compress = yes
# configfile = < path/to/file > (default = /etc/mysql/debian.cnf)
# The config file is passed to mysql with --defaults-file.
# On debian, this default will allow backupninja to make backups
# of mysql without configuring any additional options.
# (this option is not compatible with "user" or "dbusername").
#
# user = <user> (default = root)
# Run mysql commands as 'user'. A valid .my.cnf must exist with a
# database username and password in the user's home directory.
# (this option is not compatible with "configfile" or "dbusername").
#
@@ -43,14 +43,14 @@ compress = yes
# (this option is not compatible with "configfile" or "user").
#
# dbpassword = <dbpass> (no default)
# The password used with dbusername. this password will NOT be passed
# on the command line and is not readable using "ps aux".
#
# dbhost = <host> (default = localhost)
# only localhost works right now.
#
# databases = < all | db1 db2 db3 > (default = all)
# which databases to backup. should either be the word 'all' or a
# space separated list of database names.
#
# nodata = < db.table1 db.table2 db.table3 > (no default)
@@ -60,10 +60,10 @@ compress = yes
# isn't necessary to backup, but you still need the structure to exist
# on a restore. You *must* specify the table as part of a database, such
# as "drupal.cache", where the database name is "drupal" and the table that
# you do not want to dump the data for is called "cache".
#
# backupdir = < path/to/destination > (default = /var/backups/mysql)
# where to dump the backups. hotcopy backups will be in a subdirectory
# 'hotcopy' and sqldump backups will be in a subdirectory 'sqldump'
#
# hotcopy = < yes | no > (default = no)
@@ -78,4 +78,4 @@ compress = yes
# arguments to pass to mysqldump
#
# compress = < yes | no > (default = yes)
# if yes, compress the sqldump output.
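
A minimal sketch of the .my.cnf that the "user" option expects, with placeholder credentials:

cat > ~/.my.cnf <<'EOF'
[client]
user = backupninja
password = CHANGE_ME
EOF
chmod 600 ~/.my.cnf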

View File

@@ -4,19 +4,19 @@
# where to dump the backups
# databases = < all | db1 db2 db3 > (default = all)
# which databases to backup. should either be the word 'all' or a
# space separated list of database names.
# Note: when using 'all', pg_dumpall is used instead of pg_dump, which means
# that cluster-wide data (such as users and groups) are saved.
# compress = < yes | no > (default = yes)
# if yes, compress the pg_dump/pg_dumpall output.
# format = < plain | tar | custom > (default = plain)
# plain - Output a plain-text SQL script file with the extension .sql.
# When dumping all databases, a single file is created via pg_dumpall.
# tar - Output a tar archive suitable for input into pg_restore. More
# flexible than plain and can be manipulated by standard Unix tools
# such as tar. Creates a globals.sql file and an archive per database.
# custom - Output a custom PostgreSQL pg_restore archive. This is the most
# flexible format allowing selective import and reordering of database
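
How the three formats restore, for reference (file names are hypothetical):

psql -f mydb.sql mydb            # plain
pg_restore -d mydb mydb.tar      # tar or custom archives
psql -f globals.sql postgres     # globals.sql as created by the tar format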

View File

@@ -6,17 +6,17 @@
## as the commented out option, uncomment and change when
## necessary. Options which are uncommented in this example do not have
## defaults, and the settings provided are recommended.
##
## The defaults are useful in most cases, just make sure to configure the
## destination host and user.
##
## passed directly to rdiff-backup
## an example setting would be:
## options = --force
##
## Default:
# options =
## default is 0, but set to 19 if you want to lower the priority.
## an example setting would be:
@@ -40,10 +40,10 @@
## Default:
# testconnect = no
## default is not to limit bandwidth.
## set to a number in bytes/second to limit bandwidth usage. Use a negative
## number to set a limit that will never be exceeded, or a positive number
## to set a target average bandwidth use. cstream is required. See cstream's
## -t option for more information. 62500 bytes = 500 Kb (.5 Mb)
## an example setting would be:
## bwlimit = 62500
@@ -57,9 +57,9 @@
## on remote and local side are different, and you are certain there are no
## problems in using mis-matched versions and want to get beyond this check.
## An example usage could be the remote side has its authorized_keys configured
## with command="rdiff-backup --server" to allow for restricted yet automated
## password-less backups
##
## Default:
# ignore_version = no
@@ -149,7 +149,7 @@ exclude = /var/cache/backupninja/duplicity
## put the backups under this directory, this must be set!
## an example setting would be:
## directory = /backups
##
## Default:
# directory =
@@ -175,4 +175,4 @@ exclude = /var/cache/backupninja/duplicity
## sshoptions = -o IdentityFile=/root/.ssh/id_rsa_duplicity
##
## Default:
# sshoptions =
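
The restricted setup mentioned above corresponds to an entry like this in the backup server's ~/.ssh/authorized_keys (key material elided):

command="rdiff-backup --server" ssh-rsa AAAA... root@client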

View File

@@ -14,13 +14,13 @@
# just use this option if your data is backed up in a separate partition and
# you want backupninja to fsck it; this option will just be used if fscheck
# (see below) is set to 'yes'
#partition =
# set to 1 if fsck should run on partition after the backup is made
#fscheck =
# set to 1 if partition is mounted read-only
#read_only =
# backup partition mountpoint or backup main folder
# this doesn't need to be a real partition, but should be at least the
@@ -40,15 +40,15 @@ backupdir = myserver
# if you want to have incremental backups for longer periods (like months) you
# have to configure rotations for 30 or more using the "days" parameter at the
# [general] section in the handler config.
#
# The short format is better described here:
# http://www.mikerubel.org/computers/rsync_snapshots/#Incremental
#
# The long format is inspired by the maildir handler and allows keeping backups
# of longer periods (weeks and months) using less rotations as it stores
# the increments in folders like daily.1, weekly.1, monthly.1 and has three
# rotation parameters:
#
# keepdaily = number of daily backup increments
# keepweekly = number of weekly backup increments
# keepmonthly = number of monthly backup increments
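
A hypothetical long-format rotation keeping roughly three months of increments:

keepdaily = 7
keepweekly = 4
keepmonthly = 3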

View File

@@ -1,6 +1,6 @@
##
## Perform a hot backup of subversion repositories.
##
## REQUIRES: apt-get install subversion-tools
##
## This file can be empty, the defaults are usually good.

View File

@@ -12,17 +12,17 @@
# (2) a list of all the packages installed and removed.
# this file can be used to restore the state of installed packages
# by running "dpkg --set-selections < dpkg-selections.txt and
# then run "apt-get -u dselect-upgrade". If you have the
# debconf-set-selections file from (1), you should restore those first.
#
# (3) the partition table of all disks.
# this partition table can be used to format another disk of
# the same size. this can be handy if using software raid and
# you have a disk go bad. just replace the disk and partition it
# by running "sfdisk /dev/sdb < partitions.sdb.txt"
# (MAKE SURE YOU PARTITION THE CORRECT DISK!!!)
#
# (4) hardware information.
# detailed information on most important aspects of the hardware.
#
# (5) the Luks header of every Luks block device, if option luksheaders
@@ -53,9 +53,9 @@
# partitions = yes
# NOTE: the __star__ below will be replaced by the disks found on the
# system (e.g. partitions.sda.txt, partitions.sdb.txt). If you change
# the partitionsfile default below, be sure to include the __star__
# replacement in the filename, or you will get one file for only one disk,
# the others being written to the same file, and then overwritten by the next.
# partitionsfile = /var/backups/partitions.__star__.txt
# dosfdisk = yes
@@ -66,8 +66,8 @@
# luksheaders = no
# NOTE: the __star__ below will be replaced by the Luks partitions found on the
# system (e.g. luksheader.sda2.bin, luksheader.sdb3.bin). If you change
# the luksheadersfile default below, be sure to include the __star__
# replacement in the filename, or you will get one file for only one partition,
# the others being written to the same file, and then overwritten by the next.
# luksheadersfile = /var/backups/luksheader.__star__.bin
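
Collecting the restore commands from the comments above in one place; the debconf file name for (1) is hypothetical, the others follow the defaults (again, make sure sfdisk targets the correct disk):

debconf-set-selections < debconfsel.txt
dpkg --set-selections < dpkg-selections.txt
apt-get -u dselect-upgrade
sfdisk /dev/sdb < partitions.sdb.txt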

View File

@@ -1,6 +1,6 @@
##
## Perform backups of trac environment
##
## REQUIRES: apt-get install trac
##
## This file can be empty, the defaults are usually good.

View File

@@ -26,18 +26,18 @@
# For the backup rotation to work, destuser must be able to run
# arbitrary bash commands on the desthost.
#
# If 'remove' is set to 'yes' (default), then any mail directory
# which is deleted from the source will be moved to a "deleted"
# directory in the destination. It is up to you to periodically
# remove this directory or old maildirs in it.
#
# Limitations:
# . because we are not dynamically looking up anything with
# dovecot's userdb, we expect all data to be under the same
# tree on both the source and destination
#
# . we are assuming a backup to a backup server, so the
# destination host should have its dovecot mail_location
# configured to put the mail into
# $stripped_destdir/$letter/$user/$current_backup
#
@@ -53,7 +53,7 @@ getconf keepmonthly 1
getconf srcconffile
getconf destconffile
getconf srcdir
getconf destdir
getconf current_backup current_backup
getconf desthost
@@ -89,7 +89,7 @@ fi
function do_user() {
local user=$1
local btype=$2
local letter=${user:0:1}
local target="$stripped_destdir/$letter/$user/$btype.1"
local failedcount=0
local ret=0
@@ -202,7 +202,7 @@ function do_rotate() {
echo "Debug: skipping rotation of \$dir.\$i because it was created" \$(( (now-created)/86400)) "days ago ("\$(( (now-cutoff_time)/86400))" needed)."
fi
fi
done
done
max=\$((keepdaily+1))
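
The first-letter bucketing used by do_user above, shown in isolation (user and destination are hypothetical):

user=alice
letter=${user:0:1}    # "a"
echo "/backups/$letter/$user/daily.1"
# prints: /backups/a/alice/daily.1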

View File

@@ -356,7 +356,7 @@ incremental = $dup_incremental
# how many days of incremental backups before doing a full backup again ;
# default is 30 days (one can also use the time format of duplicity).
# if increments = keep, never automatically perform a new full backup ;
# only perform incremental backups.
#increments = 30
#increments = keep
@@ -398,9 +398,9 @@ keepincroffulls = $dup_keepincroffulls
## cfauthurl = YOUR_CF_AUTH_URL
##
## Default:
# cfusername =
# cfapikey =
# cfauthurl =
## Dropbox requires a valid authentication token. To obtain one, you will need
## to create a Dropbox API application at https://www.dropbox.com/developers/apps/create.
@@ -413,7 +413,7 @@ keepincroffulls = $dup_keepincroffulls
# dropboxaccesstoken =
# FTP password, needed for backups using desturl = ftp://...
#ftp_password =
# bandwidth limit, in KB/s ; default is 0, i.e. no limit
# if using 'desturl' above, 'bandwidthlimit' must not be set
@@ -436,7 +436,7 @@ bandwidthlimit = $dup_bandwidth
## warning: requires no space between "-o" and "IdentityFile=...".
##
## Default:
# sshoptions =
sshoptions = $dup_sshoptions
# put the backups under this destination directory

View File

@@ -63,7 +63,7 @@ then
home=`getent passwd "root" | @AWK@ -F: '{print $6}'`
[ -d $home ] || fatal "Can't find root's home directory ($home)."
mycnf="$home/.my.cnf"
workcnf="$mycnf"
@@ -75,7 +75,7 @@ then
debug "mv $workcnf $tmpcnf"
mv $workcnf $tmpcnf
fi
oldmask=`umask`
umask 077
cat > $workcnf <<EOF

View File

@@ -66,7 +66,7 @@ pgsql_wizard() {
*) pgsql_format="format = plain";;
esac
fi
# write config file
get_next_filename $configdirectory/20.pgsql
@@ -91,8 +91,8 @@ $pgsql_compress
# format = < plain | tar | custom > (default = plain)
# plain - Output a plain-text SQL script file with the extension .sql.
# When dumping all databases, a single file is created via pg_dumpall.
# tar - Output a tar archive suitable for input into pg_restore. More
# flexible than plain and can be manipulated by standard Unix tools
# such as tar. Creates a globals.sql file and an archive per database.
# custom - Output a custom PostgreSQL pg_restore archive. This is the most
# flexible format allowing selective import and reordering of database

View File

@@ -101,18 +101,18 @@
# function definitions
function eval_config {
# system section
setsection system
getconf rm rm
getconf cp cp
getconf touch touch
getconf mv mv
getconf fsck fsck
# general section
setsection general
getconf log /var/log/backup/rsync.log
getconf partition
@@ -130,14 +130,14 @@ function eval_config {
getconf enable_mv_timestamp_bug no
getconf tmp /tmp
getconf multiconnection no
# source section
setsection source
getconf from local
getconf rsync $RSYNC
getconf rsync_options "-av --delete --recursive"
if [ "$from" == "remote" ]; then
getconf testconnect no
getconf protocol ssh
@@ -156,7 +156,7 @@ function eval_config {
getconf remote_rsync rsync
getconf id_file /root/.ssh/id_rsa
fi
getconf batch no
if [ "$batch" == "yes" ]; then
@@ -172,13 +172,13 @@ function eval_config {
getconf exclude
getconf numericids 0
getconf compress 0
# dest section
setsection dest
getconf dest local
getconf fakesuper no
if [ "$dest" == "remote" ]; then
getconf testconnect no
getconf protocol ssh
@@ -197,7 +197,7 @@ function eval_config {
getconf remote_rsync rsync
getconf id_file /root/.ssh/id_rsa
fi
getconf batch no
if [ "$batch" != "yes" ]; then
@@ -212,9 +212,9 @@ function eval_config {
getconf numericids 0
getconf compress 0
# services section
setsection services
getconf initscripts /etc/init.d
getconf service
@@ -231,7 +231,7 @@ function eval_config {
backupdir="$mountpoint/$backupdir"
if [ "$dest" == "local" ] && [ ! -d "$backupdir" ]; then
if [ "$dest" == "local" ] && [ ! -d "$backupdir" ]; then
fatal "Backupdir $backupdir does not exist"
fi
@@ -247,9 +247,9 @@ function eval_config {
fi
fi
if [ ! -z "$nicelevel" ]; then
nice="nice -n $nicelevel"
else
nice=""
fi
@@ -419,7 +419,7 @@ function rotate_long {
warning "Warning: metadata does not exist for $dir.1. This backup may be only partially completed. Skipping rotation."
continue 1
fi
# Rotate the current list of backups, if we can.
oldest=`find $backuproot -maxdepth 1 -type d -name $rottype'.*' | @SED@ -e 's/^.*\.//' | sort -n | tail -1`
[ "$oldest" == "" ] && oldest=0
@@ -488,7 +488,7 @@ function rotate_long {
max=$((keep${rottype}+1))
dir="$backuproot/$rottype"
oldest=`find $backuproot -maxdepth 1 -type d -name $rottype'.*' | @SED@ -e 's/^.*\.//' | sort -n | tail -1`
[ "$oldest" == "" ] && oldest=0
[ "$oldest" == "" ] && oldest=0
# if we've rotated the last backup off the stack, remove it.
for (( i=$oldest; i >= $max; i-- )); do
if [ -d $dir.$i ]; then
@@ -546,7 +546,7 @@ function rotate_long_remote {
echo "Warning: metadata does not exist for \$dir.1. This backup may be only partially completed. Skipping rotation."
continue 1
fi
# Rotate the current list of backups, if we can.
oldest=\`find $backuproot -maxdepth 1 -type d -name \$rottype'.*' | @SED@ -e 's/^.*\.//' | sort -n | tail -1\`
[ "\$oldest" == "" ] && oldest=0
@@ -615,7 +615,7 @@ function rotate_long_remote {
max=\$((keep\${rottype}+1))
dir="$backuproot/\$rottype"
oldest=\`find $backuproot -maxdepth 1 -type d -name \$rottype'.*' | @SED@ -e 's/^.*\.//' | sort -n | tail -1\`
[ "\$oldest" == "" ] && oldest=0
[ "\$oldest" == "" ] && oldest=0
# if we've rotated the last backup off the stack, remove it.
for (( i=\$oldest; i >= \$max; i-- )); do
if [ -d \$dir.\$i ]; then
@@ -847,7 +847,7 @@ function set_orig {
}
function set_dest {
if [ "$dest" == "local" ]; then
dest_path="$backupdir/$SECTION/$suffix/"
@@ -981,7 +981,7 @@ function set_rsync_options {
if [ ! -z "$bandwidthlimit" ]; then
rsync_options="$rsync_options --bwlimit=$bandwidthlimit"
fi
if [ "$fakesuper" == "yes" ]; then
remote_rsync="$remote_rsync --fake-super"
fi
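
The handler appends --fake-super to the remote rsync command; run standalone, the effect is roughly this (host and paths hypothetical):

rsync -av --delete --rsync-path="rsync --fake-super" \
    /etc backup@backuphost:/backups/myserver/daily.1/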

View File

@@ -11,7 +11,7 @@ edit = sed \
-e "s,@AWK\@,$(AWK),g" \
-e "s,@SED\@,$(SED),g" \
-e "s,@MKTEMP\@,$(MKTEMP),g" \
-e "s,@libdir\@,$(pkglibdir),g"
-e "s,@libdir\@,$(pkglibdir),g"
easydialog: $(srcdir)/easydialog.in
rm -f easydialog

View File

@@ -1,5 +1,5 @@
# -*- mode: awk; indent-tabs-mode: nil; -*-
#
# parseini --- parses 'ini' style configuration files.
#
# Usage:
@@ -8,123 +8,123 @@
# if section is an empty string, then we use the default section
#
# example ini file:
#
# fruit = apple
# fruit = pear
# multiline = this is a multiline \
# parameter
#
# # this is a comment
# [colors]
# red = yes
# green = no
# blue = maybe
#
# [ocean]
# fish = red
# fish = blue
#
# example usage:
# > awk -f parseini S=ocean P=fish testfile.ini
# would return:
# red
# blue
#
BEGIN {
readlines = 1
implied = 1
}
# remove lines starting with #, but not #!
/^#[^!]/ {next}
# skip blank
/^[ \r\t]*$/ {next}
# we want to read the lines of the matched section
# and disable for other sections
/^\[.+\][ \r\t]*$/ {
continueline = 0
if (S && implied) {
nline = 0
implied = 0
}
if (S && match($0, "^\\[" S "\\][ \n]*")) {
# we found the section, so start reading.
readlines = 1
}
else {
# no section, so stop reading lines
if (readlines) readlines = 0
}
next
}
# when reading, store lines.
{
if (!readlines) next
line[nline++] = $0
if ($0 ~ /\\[ \r\t]*$/)
continueline = 1
else
continueline = 0
}
# process the read lines, matching parameters
END {
# if section is set but implied is still true
# then we never found the section, so use everything
if (S && implied) {
nline = 0
}
# if have P then find P in read lines and get values
if (P) {
MATCH = "^[ \r\t]*" P "[ \r\t]*="
continueline = 0
for (x = 0; x < nline; ++x) {
v = line[x]
if (continueline) {
sub(/[ \r\t]+$/, "", v)
if (v ~ /\\$/) {
v = substr(v, 1, length(v)-1)
sub(/[ \r\t]+$/, "", v)
}
if (v) value[nvalue++] = v
}
else if (v ~ MATCH) {
sub(MATCH, "", v)
sub(/^[ \r\t]+/, "", v)
sub(/[ \r\t]+$/, "", v)
if (v ~ /\\$/) {
continueline = 1
v = substr(v, 1, length(v)-1)
sub(/[ \r\t]+$/, "", v)
}
if (v) value[nvalue++] = v
}
}
# copy parameter definition to output array
nline = nvalue
for (x = 0; x < nvalue; ++x)
line[x] = value[x]
}
# trim all leading & trailing whitespace;
# except for leading whitespace in continuation lines,
for (x = 0; x < nline; ++x) {
sub(/^[ \r\t]+/, "", line[x])
sub(/[ \r\t]+$/, "", line[x])
}
# output the final result
for (x = 0; x < nline; ++x)
print line[x]
if (nline) exit 0
else exit 1
}
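
Exercising the example from the header comment:

cat > testfile.ini <<'EOF'
fruit = apple
[ocean]
fish = red
fish = blue
EOF
awk -f parseini S=ocean P=fish testfile.ini
# prints:
# red
# blue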