#!/usr/bin/env bash
# Back up every MySQL database (except information_schema/performance_schema)
# into one dump file per database, then archive everything into a single
# .tar.gz under ${BACKUPDIR}. Credentials come from a [client] option file.
set -euo pipefail

# define common vars
readonly OPTIONS="--verbose --lock-tables --flush-logs --force --quick --single-transaction"
readonly AUTHFILE="/etc/mysql/rootauth.cnf"   # must be readable by root/backup user only
readonly BACKUPDIR="/srv/backup/mysql"
BACKUPDATE=$(date +"%y%m%d%H")
readonly BACKUPDATE

# Create a private temp folder. mktemp gives an unpredictable name, closing
# the race/symlink hole the original hard-coded ${BACKUPDIR}/tmp/ had.
TMPWORK=$(mktemp -d "${BACKUPDIR}/tmp.XXXXXX")
cleanup() { rm -rf -- "${TMPWORK}"; }
trap cleanup EXIT   # temp files are removed on every exit path

# get a list of all the databases on the system (internal schemas excluded)
DBSQL="SELECT SCHEMA_NAME FROM information_schema.SCHEMATA WHERE SCHEMA_NAME!='information_schema' \
AND SCHEMA_NAME!='performance_schema' ORDER BY SCHEMA_NAME"
DBS=$(/usr/bin/mysql --defaults-extra-file="${AUTHFILE}" --batch \
  --skip-column-names --execute "$DBSQL")

for DB in $DBS; do
  # perform a per-database dump into its own subdirectory
  BACKUPDIRDB="${TMPWORK}/${DB}"
  mkdir -p -- "${BACKUPDIRDB}"
  # ${OPTIONS} is intentionally unquoted: it must word-split into flags
  /usr/bin/mysqldump --defaults-extra-file="${AUTHFILE}" \
    ${OPTIONS} "$DB" > "${BACKUPDIRDB}/backup_${BACKUPDATE}"
done

# create archive of everything; -C keeps absolute paths out of the tarball
tar -czvf "${BACKUPDIR}/backup_${BACKUPDATE}.tar.gz" -C "${TMPWORK}" .
# temp files are removed by the EXIT trap above
# Dump every user database in parallel (one background mysqldump per DB),
# gzip-compressed, into a dated directory.
# Credentials come from an option file instead of -uroot -pPASSWORD:
# a password in argv is visible to every local user via `ps`.
AUTHFILE="/etc/mysql/rootauth.cnf"
DBLIST=$(mysql --defaults-extra-file="${AUTHFILE}" -ANe"SELECT GROUP_CONCAT(schema_name) FROM information_schema.schemata WHERE schema_name NOT IN ('information_schema','performance_schema')" | sed 's/,/ /g')
MYSQLDUMP_OPTIONS="--single-transaction --routines --triggers"
BACKUP_DEST="/home/backup/db/$(date +%G-%m-%d)"
mkdir -p "${BACKUP_DEST}"   # -p: don't fail if today's dir already exists
for DB in ${DBLIST}; do
  # ${MYSQLDUMP_OPTIONS} unquoted on purpose: it must word-split into flags
  mysqldump --defaults-extra-file="${AUTHFILE}" ${MYSQLDUMP_OPTIONS} "${DB}" | gzip > "${BACKUP_DEST}/${DB}.sql.gz" &
done
wait   # barrier: all background dumps must finish before the script exits
# Then run this script from crontab.
# If you have too many databases, you can dump five at a time like this:
# Same parallel dump, but throttled: at most COMMIT_LIMIT mysqldump
# pipelines run concurrently; `wait` acts as a per-batch barrier.
# Credentials come from an option file instead of -uroot -pPASSWORD:
# a password in argv is visible to every local user via `ps`.
AUTHFILE="/etc/mysql/rootauth.cnf"
DBLIST=$(mysql --defaults-extra-file="${AUTHFILE}" -ANe"SELECT GROUP_CONCAT(schema_name) FROM information_schema.schemata WHERE schema_name NOT IN ('information_schema','performance_schema')" | sed 's/,/ /g')
MYSQLDUMP_OPTIONS="--single-transaction --routines --triggers"
BACKUP_DEST="/home/backup/db/$(date +%G-%m-%d)"
mkdir -p "${BACKUP_DEST}"   # -p: don't fail if today's dir already exists
COMMIT_COUNT=0
COMMIT_LIMIT=5   # maximum number of concurrent dumps
for DB in ${DBLIST}; do
  # ${MYSQLDUMP_OPTIONS} unquoted on purpose: it must word-split into flags
  mysqldump --defaults-extra-file="${AUTHFILE}" ${MYSQLDUMP_OPTIONS} "${DB}" | gzip > "${BACKUP_DEST}/${DB}.sql.gz" &
  # $(( )) assignment instead of (( COMMIT_COUNT++ )): the latter returns
  # exit status 1 when the pre-increment value is 0 and would abort
  # the script if it is ever run under `set -e`.
  COMMIT_COUNT=$((COMMIT_COUNT + 1))
  if [ "${COMMIT_COUNT}" -ge "${COMMIT_LIMIT}" ]; then
    COMMIT_COUNT=0
    wait   # drain the current batch before launching the next one
  fi
done
if [ "${COMMIT_COUNT}" -gt 0 ]; then
  wait   # reap any dumps left over from a partial final batch
fi
# Something like this should work. It is untested, but it differs only
# slightly from what I use for backups on my own systems.
# Create a script like the one above to mysqldump all databases in parallel.
# Then run this script from crontab.
# If you have too many databases, you can dump five at a time as shown above.
# Then you can add the tar command to the script.
# I would say you can do it by making a script that runs from cron, performs
# a dump of each database, and whose final action archives all the files
# together into a single .tar.gz.
# So in your case you would drop the --all-databases option and put the name
# of one database there instead. Then repeat that line for every database
# you have. Once all dumps are done, create a tar containing all those files
# and compress it. Last but not least, perform any necessary cleanup. Put
# all of this in a script and run the script from cron.