Add database scripts

Michael RICOIS 2017-03-29 14:14:18 +02:00
parent 37ccda9cb8
commit 81eb41ce85
7 changed files with 1067 additions and 0 deletions

121
scripts/README Normal file

@@ -0,0 +1,121 @@
Backup and Restore Database
---------------------------
dbSchema.php
============
Get schema
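Dumps the CREATE TABLE statement of every table, one .sql file per table, into the given path.
Example invocation (host, credentials and path are placeholders):
  php dbSchema.php --host 127.0.0.1 --user user --pass password --path /backup/schema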
dbDump.php
==========
Simple dump
dbBackup.php
============
Useful queries to check table engines before a backup:
SELECT TABLE_NAME, ENGINE FROM information_schema.TABLES WHERE TABLE_SCHEMA = 'database';
SHOW TABLE STATUS LIKE 'table';
Backup big tables.
A backup is a directory named TYPE-YYYY.MM.DD.HHMMSS, with one SQL file per table (dbname.tablename.sql).
Backups are first written to a process dir, which you can define outside the dir where you keep all your backups,
e.g. so that an rsync only picks up completed backups.
--type TYPE
[--config FILENAME]
The filename must be in the same dir as this script.
The file must return an array defining the backup options (default: dbBackupConfig.php):
return array(
'mysql' => array(
'host' => "127.0.0.1",
'port' => 3306,
'user' => "user",
'pass' => "password",
// --- If the server is a master else a slave
'master' => 1,
'options' => "--quick --add-drop-table", // or "--opt"
// --- Compression method: none, gzip, pigz, 7z, xz, pxz
'compress' => "gzip",
// --- backup dir
'dir' => "/backup/keep",
// --- Number of backup to keep
'max' => 3,
// --- In process dir
'processdir' => "/backup/inprocess"
),
'backup type/name' => array(
/* List of databases to backup - always include a database so its tables can be listed */
'db' => array(...),
/**
Rules to include or exclude databases and tables by name.
Each rule is a regular expression tested with preg_match against "db.table", e.g.:
db\.table
.*\..*_tmp
.*\..*_old
.*\..*_mvt
.*\..*_backup
.*\..*_histo
.*\..*_table
.*\..*_user
.*\..*_[0-9]{8}
.*\..*_v[0-9]{1,}
*/
'rules' => array(
'in' => array(),
'ex' => array(),
),
),
);
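Example invocation, assuming a backup type named FULL is defined in the config file (the type name is a placeholder):
  php dbBackup.php --type FULL --config dbBackupConfig.php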
dbImport.php
============
Import or restore a backup.
To import in parallel, split the work into several types and/or databases.
--name DIRNAME
Which backup to restore, named TYPE-YYYY.MM.DD.HHMMSS
[--type TYPE]
Specify a type so that the rules defined for it are applied
[--db name]
Restore only this database
[--config FILENAME]
The filename must be in the same dir as this script.
The file must return an array defining the restore options (default: dbImportConfig.php):
return array(
'mysql' => array(
'host' => "127.0.0.1",
'port' => 3306,
'user' => "user",
'pass' => "password",
'options' => "",
// --- Compression method used by the backup: gzip, pigz, 7z, xz
'compress' => "gzip",
// --- backup dir
'dir' => "/backup",
),
'backup type/name' => array(
/* List of databases to restore */
'db' => array(...),
/**
Rules to include or exclude tables by name (regex tested against "db.table"), e.g.:
.*\..*_tmp
.*\..*_old
.*\..*_mvt
.*\..*_backup
.*\..*_histo
.*\..*_table
.*\..*_user
.*\..*_[0-9]{8}
.*\..*_v[0-9]{1,}
*/
'rules' => array(
'in' => array(),
'ex' => array(),
),
),
);
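Example invocation (the backup dir name and database are placeholders):
  php dbImport.php --name FULL-2017.03.29.143000 --db mydb --config dbImportConfig.php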

322
scripts/dbBackup.php Normal file

@@ -0,0 +1,322 @@
<?php
/**
Backup big databases from a MASTER or SLAVE server.
A backup is a directory named TYPE-YYYY.MM.DD.HHMMSS, with one SQL file per table (dbname.tablename.sql).
Backups are first written to a process dir, which you can define outside the dir where you keep all your backups,
e.g. so that an rsync only picks up completed backups.
MEMORY tables are not backed up.
The "mysql", "test", "phpmyadmin", "information_schema" and "performance_schema" databases are not backed up.
--type TYPE
[--config FILENAME]
The filename must be in the same dir as this script.
Define the backup options as an array returned by a file named dbBackupConfig.php:
return array(
'mysql' => array(
'host' => "127.0.0.1",
'port' => 3306,
'user' => "user",
'pass' => "password",
'master' => 1, // 1 if the server is a master, 0 if it is a slave
'options' => "--quick --add-drop-table", // or "--opt"
'compress' => "gzip", // Compression method: none, gzip, pigz, 7z, xz, pxz
'dir' => "/backup/keep", // Backup dir
'max' => 3, // Number of backups to keep
'processdir' => "/backup/inprocess", // In-process dir
),
'backup type/name' => array(
'db' => array(...), // List of databases to backup - always include a database so its tables can be listed
'rules' => array( // Rules to include or exclude databases and tables by name, tested with preg_match('/^rule$/') against "db.table"
'in' => array(),
'ex' => array(),
),
),
);
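Example of the resulting layout (names and dates are illustrative), assuming gzip compression on a master:
  /backup/keep/FULL-2017.03.29.143000/dbname.tablename.sql.gz
  /backup/keep/FULL-2017.03.29.143000/position-master-FULL.info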
Parallel restore
----------------
cd /to/backup/dir
echo *.sql | xargs -n1 -P 16 -I % sh -c 'mysql -uuser -ppassword -hIP mydb < %'
ls *.sql | xargs -n1 -P 16 -I % sh -c 'mysql -uuser -ppassword -hIP mydb < %'
*/
$shortopts = '';
$longopts = array(
'type:',
'config:',
);
$options = getopt($shortopts, $longopts);
if ($options === false || count($options) == 0) {
echo "Nothing to do...\n";
exit;
}
$configFile = 'dbBackupConfig.php';
if (array_key_exists('config', $options)) {
$configFile = $options['config'];
}
if (file_exists(__DIR__.'/'.$configFile)) {
$config = include __DIR__.'/'.$configFile;
} else {
echo date('Y-m-d H:i:s')." - No config.\n"; exit;
}
if (!array_key_exists('mysql', $config)) {
echo date('Y-m-d H:i:s')." - No config.\n"; exit;
}
// --- Define mysql option
$backupMysql = $config['mysql'];
// --- Type backup
$type = strtoupper($options['type']);
if (!array_key_exists($type, $config)) {
echo date('Y-m-d H:i:s')." - Backup type not found !\n"; exit;
}
// --- Backup Options
$backupParams = $config[$type];
// --- Host
$backupMysqlHost = $backupMysql['host'];
$backupMysqlPort = $backupMysql['port'];
$backupMysqlUser = $backupMysql['user'];
$backupMysqlPass = $backupMysql['pass'];
$backupMysqlMaster = $backupMysql['master'];
$backupMysqlOptions = $backupMysql['options'];
// --- Backup Directory Options
$backupCompress = $backupMysql['compress'];
$backupDir = $backupMysql['dir'];
$backupMax = $backupMysql['max'];
// --- Backup temporary dir
$backupDirProcess = $backupDir;
if (array_key_exists('processdir', $backupMysql)) {
$backupDirProcess = $backupMysql['processdir'];
}
if (!is_dir($backupDirProcess)) {
echo date('Y-m-d H:i:s')." - Backup dir not found $backupDirProcess\n"; exit;
}
// --- Master or Slave information
if ($backupMysqlMaster) {
$filePosInfo = 'position-master-'.$type.'.info';
} else {
$filePosInfo = 'position-slave-'.$type.'.info';
}
echo date('Y-m-d H:i:s')." - Start Backup $type.\n";
$tInit = microtime(true);
$dirname = $type.'-'.date('Y.m.d.His');
$pathBackup = $backupDirProcess . '/' . $dirname;
mkdir($pathBackup);
// --- Connection
$link = new mysqli($backupMysqlHost, $backupMysqlUser, $backupMysqlPass, null, $backupMysqlPort);
if ($link->connect_errno) {
    echo date('Y-m-d H:i:s')." - Error can't connect to MySQL : (" . $link->connect_errno . ") " . $link->connect_error."\n";
    exit;
}
// --- List databases
$result = $link->query('SHOW DATABASES');
$databases = $tables = $files = array();
while ($row = $result->fetch_assoc()) {
$databases[] = $row['Database'];
}
// --- Stop Slave
if ( !$backupMysqlMaster ) {
echo date('Y-m-d H:i:s')." - STOP SLAVE SERVER\n";
$link->query('STOP SLAVE;');
}
// --- Save replication position
putMasterStatus($link, "$pathBackup/$filePosInfo", 'START', $backupMysqlMaster, 0);
// --- Backup for each databases
foreach ($databases as $database) {
if (in_array($database, $backupParams['db'])) {
$link->query('USE '.$database);
$result = $link->query('SHOW TABLES');
while ($row = $result->fetch_array()) {
$table = $row[0];
// --- Don't backup MEMORY table
$statusResult = $link->query("SHOW TABLE STATUS LIKE '".$table."'");
$statusRow = $statusResult->fetch_assoc();
if (strtoupper($statusRow['Engine']) == 'MEMORY') {
continue;
}
// --- Don't backup special database
if ( in_array($database, array('mysql', 'test', 'information_schema', 'performance_schema', 'phpmyadmin')) ) {
continue;
}
// --- Excluded table
$stop = 0;
foreach ($backupParams['rules']['ex'] as $rule) {
if ( preg_match('/^'.$rule.'$/', $database.'.'.$table) ) {
$stop = 1;
break;
}
}
if ($stop == 1) {
continue;
}
// --- Included table
$stop = 0;
foreach ($backupParams['rules']['in'] as $rule) {
if ( !preg_match('/^'.$rule.'$/', $database.'.'.$table) ) {
$stop = 1;
break;
}
}
if ($stop == 1) {
continue;
}
$files[] = $database.'.'.$table;
$tDeb = microtime(true);
// --- Optimize the table to remove holes (reclaim free space before dumping)
if ( $backupMysqlMaster ) {
if ( $statusRow['Data_free'] > 0 ) {
echo date('Y-m-d H:i:s') ." - OPTIMIZE on '$database.$table'\n";
$link->query('OPTIMIZE TABLE '.$database.'.'.$table);
}
}
echo date('Y-m-d H:i:s') ." - Backup '$database.$table'";
// --- Cmd to backup the table
$cmd = 'mysqldump -h' . $backupMysqlHost . ' -P' . $backupMysqlPort .
' -u' . $backupMysqlUser . ' -p' . $backupMysqlPass .
' ' . $backupMysqlOptions . ' ' . $database . ' --tables ' . $table . ' > ' .
$pathBackup.'/'.$database.'.'.$table.'.sql';
// --- Execute
exec($cmd);
// --- Set Master or Slave position
putMasterStatus($link, "$pathBackup/$filePosInfo", "$database.$table", $backupMysqlMaster, FILE_APPEND);
$period = round(microtime(true)-$tDeb,3);
echo " in $period s.\n";
}
}
}
// --- Save replication position
putMasterStatus($link, "$pathBackup/$filePosInfo", 'END', $backupMysqlMaster, FILE_APPEND);
// --- Restart slave
if ( !$backupMysqlMaster ) {
echo date('Y-m-d H:i:s')." - RESTART SLAVE SERVER\n";
$link->query('START SLAVE;');
}
// --- Close mysql
$link->close();
// --- Compress the dump files
if (in_array($backupCompress, array('gzip', 'pigz', '7z', 'xz', 'pxz'))) {
foreach($files as $item) {
$tDeb = microtime(true);
switch($backupCompress) {
case 'gzip':
exec("gzip $pathBackup/$item.sql");
break;
case 'pigz':
exec("pigz -9 -f $pathBackup/$item.sql");
break;
case '7z':
exec("7za a $pathBackup/$item.7z $pathBackup/$item.sql");
break;
case 'xz':
exec("xz $pathBackup/$item.sql");
break;
case 'pxz':
exec("pxz -9 -f $pathBackup/$item.sql");
break;
default: break;
}
if (file_exists("$pathBackup/$item.sql")) {
exec("rm -f $pathBackup/$item.sql");
}
$period = round(microtime(true)-$tDeb,3);
echo date('Y-m-d H:i:s') ." - Compress '$item.sql' in $period s.\n";
}
}
// --- Rename if process dir is in use
if ($backupDirProcess != $backupDir) {
rename($pathBackup, $backupDir.'/'.$dirname);
}
// --- Delete old backup
if ($backupMax > 0) {
    $backupDelete = array();
    foreach ( glob("$backupDir/$type-*", GLOB_ONLYDIR) as $filename ) {
$date = substr(basename($filename), strlen($type) + 1);
$backupDelete[$date] = $filename;
}
krsort($backupDelete);
$i = 0;
foreach ( $backupDelete as $k => $delete ) {
if ( $i >= $backupMax ) {
passthru("rm -rf $delete");
echo date('Y-m-d H:i:s') ." - Delete backup $delete\n";
}
$i++;
}
}
echo date('Y-m-d H:i:s') ." - End Backup.\n";
$period = round(microtime(true) - $tInit);
$hours = floor($period / 3600);
$mins = floor(($period - ($hours * 3600)) / 60);
$secs = floor($period % 60);
$message = "Backup Databases in $hours h $mins min $secs s ($period s) :\n";
$message.= print_r($backupParams['db'], 1);
sendMail( array('supportdev@scores-decisions.com'), 'Backup MySQL '.$backupMysqlHost, $message);
// --- End
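// --- putMasterStatus appends one CSV line per call to the position .info file:
// ---   date, db.table, binlog file, binlog position (from SHOW MASTER STATUS on a master)
// ---   date, db.table, relay master log file, exec master log position (from SHOW SLAVE STATUS on a slave)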
function putMasterStatus($link, $file, $dbTable, $master, $flags=0) {
if ( $master ) {
$result = $link->query('SHOW MASTER STATUS');
while ($row = $result->fetch_assoc()){
file_put_contents($file, date('Y-m-d H:i:s').", $dbTable,".$row['File'].','.$row['Position']."\n", $flags);
}
} else {
$result = $link->query('SHOW SLAVE STATUS');
while ($row = $result->fetch_assoc()){
file_put_contents($file, date('Y-m-d H:i:s').", $dbTable,".$row['Relay_Master_Log_File'].','.$row['Exec_Master_Log_Pos']."\n", $flags);
}
}
}
function sendMail($emails, $sujet, $message) {
$from = 'supportdev@scores-decisions.com';
$headers = 'Reply-To: '.$from."\n"; // Reply-to address
$headers .= 'From: "Support DEV"<'.$from.'>'."\n"; // Sender
$to = join(', ', $emails); // Recipients
$mail_body = $message; //mail body
$subject = $sujet; //subject
mail($to, $subject, $mail_body, $headers);
}

121
scripts/dbImport.php Normal file

@@ -0,0 +1,121 @@
<?php
/**
* Automated SQL dump import script
* Name of backup dir : TYPE-YYYY.MM.DD.HHMMSS
* Name of file dump : dbname.tablename.sql
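* Files are imported in alphabetical order; the target database is created with
* CREATE DATABASE IF NOT EXISTS before each dump is piped to the mysql client.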
*/
// --- Options
$shortopts = '';
$longopts = array(
    'name:',
    'db:',
    'config:',
    'decompress', // optional flag: decompress .gz/.7z/.xz dumps before import
);
$options = getopt($shortopts, $longopts);
if ($options === false || count($options) == 0) {
echo "Nothing to do...\n";
exit;
}
/**
* Format file size to display
* @param int $size
*/
function format_bytes($size) {
$units = array(' B', ' KB', ' MB', ' GB', ' TB');
for ($i = 0; $size >= 1024 && $i < 4; $i++) $size /= 1024;
return round($size, 2).$units[$i];
}
$configFile = 'dbImportConfig.php';
if (array_key_exists('config', $options)) {
$configFile = $options['config'];
}
if (file_exists(__DIR__.'/'.$configFile)) {
$config = include __DIR__.'/'.$configFile;
} else {
echo "No config."; exit;
}
$path = $config['dir'];
$name = $options['name'];
$excludeDb = array();
$dir = $path.'/'.$name;
if ( ! is_dir($dir) ) {
echo "Directory '$path/$name' not found!"; exit;
}
if (array_key_exists('decompress', $options)) {
    // --- Walk the directory and decompress the dump files
    $pattern = '/([^\s_]+)\.(.*)\.sql\.(gz|bzip2|xz|7z)\Z/';
    if ( $dh = opendir($dir) ) {
        while ( ($file = readdir($dh)) !== false ) {
            if ( $file == '.' || $file == '..' || !is_file($dir.'/'.$file) ) {
                continue;
            }
            if ( preg_match($pattern, $file, $matches) ) {
                // Typical decompression commands for the formats produced by dbBackup.php (assumed)
                $cmd = '';
                switch ($matches[3]) {
                    case 'gz':
                        $cmd = "gzip -d -f $dir/$file";
                        break;
                    case '7z':
                        $cmd = "7za e $dir/$file -o$dir -y";
                        break;
                    case 'xz':
                        $cmd = "xz -d -f $dir/$file";
                        break;
                    default:
                        break;
                }
                if (!empty($cmd)) {
                    echo date('Y-m-d H:i:s')." - Decompressing $file";
                    passthru($cmd);
                    echo " - Done\n";
                }
            }
        }
        closedir($dh);
    }
}
// --- Walk the directory to build the import list
$pattern = '/([^\s_]+)\.(.*)\.sql\Z/';
$import = array();
if ( $dh = opendir($dir) ) {
while ( ($file = readdir($dh)) !== false ) {
if ( $file == '.' || $file == '..' || !is_file($dir.'/'.$file) ) {
continue;
}
if (array_key_exists('db', $options)) {
if (substr($file, 0, strlen($options['db'])) != $options['db']) {
continue;
}
}
if ( preg_match($pattern, $file, $matches) ) {
if ( !in_array($matches[1], $excludeDb) && ( empty($database) || $matches[1]==$database ) ) {
$import[$file] = array(
'database' => $matches[1],
'source' => $dir.'/'.$file,
'size' => format_bytes(filesize($dir.'/'.$file)),
);
}
}
}
closedir($dh);
}
// --- Import
ksort($import);
$nb = count($import);
if ($nb>0) {
$i = 0;
foreach($import as $file => $info) {
$i++;
echo date('Y-m-d H:i:s').' : '.$i.'/'.$nb.' - '.$info['database'].' '.$file.' ('.$info['size'].')', "\n";
$cmdDB = "mysql -h ".$config['host']." -u".$config['user']." -p".$config['pass']." -e \"CREATE DATABASE IF NOT EXISTS ".$info['database']."\"";
$output = shell_exec($cmdDB);
$cmd = "mysql ".$config['options']." -h ".$config['host']." -u".$config['user']." -p".$config['pass']." ".$info['database']." < ".$info['source'];
$output = shell_exec($cmd);
if (!empty($output)) echo $output, "\n";
}
}

60
scripts/dbSchema.php Normal file

@@ -0,0 +1,60 @@
<?php
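/**
 * Dump the schema (CREATE TABLE only, no data) of each table to PATH/dbname.tablename.sql,
 * stripping SET statements, conditional comments and AUTO_INCREMENT counters.
 * Options: --host --user --pass --path [--database] (see scripts/README for an example invocation).
 */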
$shortopts = '';
$longopts = array(
'host:',
'user:',
'pass:',
'path:',
'database::',
);
$options = getopt($shortopts, $longopts);
if ($options === false || count($options) == 0) {
echo "Oups ! options manquantes\n";
exit;
}
$link = new mysqli($options['host'], $options['user'], $options['pass']);
if ($link->connect_errno) {
    echo "Failed to connect to MySQL: (" . $link->connect_errno . ") " . $link->connect_error;
    exit;
}
$databases = $tables = array();
// --- Only the selected database
if (array_key_exists('database', $options) && !empty($options['database']) ) {
$databases[] = $options['database'];
}
// --- List all databases
else {
$result = $link->query('SHOW DATABASES');
while ($row = $result->fetch_assoc()) {
$databases[] = $row['Database'];
}
}
$dbExcluded = array(
'mysql',
'information_schema',
);
// --- Dump the schema of each table of each database
foreach ($databases as $database) {
if ( !in_array($database, $dbExcluded) ) {
$link->query('USE '.$database);
$result = $link->query('SHOW TABLES');
while ($row = $result->fetch_array()) {
$table = $row[0];
echo date('Y/m/d - H:i:s') ." - Schema of '$database.$table'";
// --- Dump schema
exec(
'mysqldump -h' . $options['host'] .' -u' . $options['user'] .' -p' . $options['pass'] .
' --compact --no-data ' . $database . ' --tables ' . $table .
' | egrep -v "(^SET|^/\*\!)" | sed \'s/ AUTO_INCREMENT=[0-9]*\b//\' > ' .
$options['path'] . '/'.$database.'.'.$table.'.sql'
);
echo "\n";
}
}
}
// --- Close the mysql link
mysqli_close($link);

98
scripts/dbSlaveCheck.php Normal file

@@ -0,0 +1,98 @@
#!/usr/bin/env php
<?php
function sendMail($emails, $sujet, $message)
{
$from = 'supportdev@scores-decisions.com';
$headers = 'Reply-To: '.$from."\n"; // Reply-to address
$headers.= 'From: "Support DEV"<'.$from.'>'."\n"; // Sender
$to = join(', ', $emails); // Recipients
$mail_body = $message; //mail body
$subject = $sujet; //subject
mail($to, $subject, $mail_body, $headers);
}
define('MYSQL_HOST', 'localhost');
define('MYSQL_USER', 'user');
define('MYSQL_PASS', 'password');
define('SERVER_NAME', 'sd-13408');
// Parameters
if ( $argc < 2 || $argv[1] != 'info' && $argv[1] != 'mail' && $argv[1] != 'check'
    || in_array($argv[1], array('--help', '-help', '-h', '-?')) ) {
?>
Check the status of the MySQL replication.
With the options --help, -help, -h or -? you get this help text.
Manual usage: <?php echo $argv[0]; ?> info
Force sending the replication status mail: <?php echo $argv[0]; ?> mail
<?php
exit;
}
$display = false;
$mail = false;
$check = false;
if ($argv[1]=='info'){ $display = true; }
if ($argv[1]=='mail'){ $mail = true; }
if ($argv[1]=='check'){ $check = true; }
$message = "";
// --- Connection
$link = new mysqli(MYSQL_HOST, MYSQL_USER, MYSQL_PASS);
if ($link->connect_errno) {
$message.= "Error can't connect to MySQL : (" . $mysqli->connect_errno . ") " . $mysqli->connect_error."\n";
} else {
$result = $link->query('SHOW SLAVE STATUS');
$status = $result->fetch_assoc();
$message.= "Master_Log_File : ".$status['Master_Log_File']."\n";
$message.= "Read_Master_Log_Pos : ".$status['Read_Master_Log_Pos']."\n";
$message.= "Relay_Master_Log_File: ".$status['Relay_Master_Log_File']."\n";
$message.= "Exec_Master_Log_Pos : ".$status['Exec_Master_Log_Pos']."\n";
$message.= "Seconds_Behind_Master : ".$status['Seconds_Behind_Master']."\n";
$message.= "Slave_IO_Running : ".$status['Slave_IO_Running']."\n";
$message.= "Slave_SQL_Running : ".$status['Slave_SQL_Running']."\n";
$message.= "Last_Error : ".$status['Last_Error']."\n";
}
$sujet = 'Replication MySQL - '.SERVER_NAME.' - ';
$erreurReplication = false;
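// Replication is flagged as an error when the IO thread is stopped, the SQL thread is stopped,
// or the last replicated statement failed; it is flagged as lagging when the exec position
// trails the read position and the slave is more than 60 seconds behind the master.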
if ( $status['Slave_IO_Running']!='Yes'
|| $status['Slave_SQL_Running']=='No'
|| ( $status['Last_Errno']!=0 && $status['Last_Error']!='' ) ){
$erreurReplication = true;
$sujet.= 'Error';
} elseif ( $status['Exec_Master_Log_Pos']!=$status['Read_Master_Log_Pos'] && intval($status['Seconds_Behind_Master'])>60 ){
$erreurReplication = true;
$sujet.= 'Lagging';
} else {
$sujet.= 'OK';
}
/*
 * Display the replication information
 */
if ($display){
print $message;
}
/*
 * Force sending the replication information mail
 */
elseif($mail)
{
sendMail( array('mricois@scores-decisions.com'), $sujet, $message);
}
/*
 * Send the mail only if replication is in error,
 * lagging,
 * or the slave process is stopped.
 */
elseif ( $check ){
if ( $erreurReplication ){
sendMail( array('supportdev@scores-decisions.com'), $sujet, $message);
}
}
mysqli_close($link);

222
scripts/pDump.py Normal file

@@ -0,0 +1,222 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
__title__ = "pDump"
__version__ = "0.5"
__author__= "oxomichael"
__email__= "oxomichael@hotmail.com"
__website__= ""
import threading, Queue
import MySQLdb
from optparse import OptionParser
import commands
import sys
import os
import gzip
class Log:
"""Simple class for logging"""
def __init__(self, verbose):
self.verbose = verbose
def log(self, line):
"""Logs an especified line"""
if self.verbose:
sys.stderr.write (" - " + str(line) + "\n")
class Database:
"""Class to handle database connection"""
def __init__(self, log, mysqluser, mysqlpass, mysqlhost):
self.user = mysqluser
self.password = mysqlpass
self.host = mysqlhost
self.log = log
self.log.log("Connecting to database")
self.db=MySQLdb.connect(user=mysqluser,passwd=mysqlpass,host=mysqlhost)
self.cursor = self.db.cursor()
def close(self):
self.log.log("Closing database connection")
self.db.close()
def lock(self):
"""Locks all tables for read/write"""
self.log.log("Locking all tables")
self.cursor.execute("FLUSH TABLES WITH READ LOCK;")
def unlock(self):
"""Unlocks all tables in the database"""
self.log.log("Unlocking all tables")
self.cursor.execute("UNLOCK TABLES")
def get_databases(self, included, excluded):
"""Return all the databases. Included and excluded databases can be specified"""
self.cursor.execute("show databases;")
result = self.cursor.fetchall()
databases = []
for database in result:
if len(included) == 0:
if database[0] not in excluded:
databases.append(database[0])
else:
if (database[0] in included) and (database[0] not in excluded):
databases.append(database[0])
return databases
def get_tables(self, database):
"""Return all tables for a given database"""
self.cursor.execute("show tables from " + str(database) + ";")
result = self.cursor.fetchall()
tables = []
for table in result:
tables.append(table[0])
return tables
def get_slave_status(self):
"""Return slave status"""
self.cursor.execute("show slave status;")
result = self.cursor.fetchall()
return result
def get_change_master_to(self, slave_status):
try:
return "CHANGE MASTER TO MASTER_HOST=\'" + slave_status[0][1] + "\', MASTER_LOG_FILE=\'" + slave_status[0][5] + "\', MASTER_LOG_POS=" + str(slave_status[0][6]) + ";"
except:
return ""
def mysqldump(self, database, table, destination, custom_parameters="", stdout=False, gzip=False, mysqldump="/usr/bin/mysqldump"):
"""Dumps a specified table.
It can dump it to a file or just return all the dumped data.
        It can waste a lot of memory when returning a big table."""
default_parameters = "--skip-lock-tables"
cmd=mysqldump + " " + default_parameters
if custom_parameters != "":
cmd = cmd + " " + custom_parameters
cmd = cmd + " -u" + self.user + " -p" + self.password + " -h" + self.host + " " + database + " " + table
if stdout:
return commands.getstatusoutput(cmd)
else:
file = destination + "/" + database + "-" + table + ".sql"
if gzip:
cmd = cmd + " | gzip -c > " + file + ".gz"
else:
cmd = cmd + " > " + file
os.system(cmd)
return (None, None)
class Worker(threading.Thread):
def __init__(self, queue, log, db, event_dict, destination, custom_parameters="", stdout=False, gzip=False, ):
threading.Thread.__init__(self)
self.queue = queue
self.log = log
self.db = db
self.event_dict = event_dict
self.stdout = stdout
self.gzip = gzip
self.destination = destination
self.custom_parameters = custom_parameters
def run(self):
self.log.log("Worker " + self.getName() + " started")
while True:
try:
num, database, table = self.queue.get(True, 1)
except Queue.Empty:
break
self.event_dict[num] = threading.Event()
self.event_dict[num].clear()
self.log.log(self.getName() + " dumping " + database + " " + table)
status, output = self.db.mysqldump(database, table, custom_parameters=self.custom_parameters, stdout=self.stdout, gzip=self.gzip, destination=self.destination)
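            # When dumping to stdout, wait for the previous queue item's event so dumps are printed in queue order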
if self.stdout:
if num > 0:
while not self.event_dict[num-1].isSet():
self.event_dict[num-1].wait()
self.log.log(self.getName() + " dumped " + database + " " + table)
if output:
print output
self.event_dict[num].set()
def main():
try:
current_user = os.getlogin()
except:
current_user = "nobody"
usage = "usage: %prog [options]\n Run mysqldump in paralel"
parser = OptionParser(usage, version=__version__)
parser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False, help="verbose output.")
parser.add_option("-u", "--user", action="store", dest="user", type="string", default=current_user, help="User for login.")
parser.add_option("-p", "--password", action="store", dest="password", type="string", default='', help="Password for login.")
parser.add_option("-H", "--host", action="store", dest="host", type="string", default='localhost', help="Connect to host.")
parser.add_option("-t", "--threads", action="store", dest="threads", type="int", default=5, help="Threads used. Default = 5")
parser.add_option("-s", "--stdout", action="store_true", dest="stdout", default=False, help="Output dumps to stdout instead to files. WARNING: It can exaust all your memory!")
parser.add_option("-g", "--gzip", action="store_true", dest="gzip", default=False, help="Add gzip compression to files.")
parser.add_option("-m", "--master-data", action="store_true", dest="master_data", default=False, help="This causes the binary log position and filename to be written to the file 00_master_data.sql.")
parser.add_option("-d", "--destination", action="store", dest="destination", type="string", default=".", help="Path where to store generated dumps.")
parser.add_option("-P", "--parameters", action="store", dest="parameters", type="string", default="", help="Pass parameters directly to mysqldump.")
parser.add_option("-i", "--include_database", action="append", dest="included_databases", default=[], help="Databases to be dumped. By default, all databases are dumped. Can be called more than one time.")
parser.add_option("-e", "--exclude_database", action="append", dest="excluded_databases", default=[], help="Databases to be excluded from the dump. No database is excluded by default. Can be called more than one time.")
(options, args) = parser.parse_args()
log = Log(options.verbose)
try:
db = Database(log, options.user, options.password, options.host)
except:
parser.error("Cannot connect to database")
db.lock()
queue = Queue.Queue()
x = 0
if options.master_data:
if options.gzip:
f=gzip.open(options.destination + '/00_master_data.sql.gz', 'w')
else:
f=open(options.destination + '/00_master_data.sql', 'w')
f.write(db.get_change_master_to(db.get_slave_status()))
f.write('\n')
f.close()
for database in db.get_databases(options.included_databases, options.excluded_databases):
for table in db.get_tables(database):
queue.put([x,database,table])
x = x + 1
event_dict = {}
threads = []
x = 0
for i in range(options.threads):
threads.append(Worker(queue, log, db, event_dict, custom_parameters=options.parameters, stdout=options.stdout, gzip=options.gzip, destination=options.destination))
threads[x].setDaemon(True)
threads[x].start()
x = x + 1
# Wait for all threads to finish
for thread in threads:
thread.join()
db.unlock()
db.close()
if __name__ == "__main__":
main()

123
scripts/pImport.py Normal file

@@ -0,0 +1,123 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__version__ = "0.1"
from optparse import OptionParser
import sys, os, commands, yaml, re, threading, Queue
class Log:
"""Simple class for logging"""
def __init__(self, verbose):
self.verbose = verbose
def log(self, line):
"""Logs an especified line"""
if self.verbose:
sys.stderr.write (" - " + str(line) + "\n")
class ImportConfig:
"""Class to handle config"""
def get_config(self):
path = os.path.dirname(os.path.abspath(__file__))
documents = open(path + '/pImportConfig.yml', 'r').read()
data = yaml.load(documents)
return data
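# Example pImportConfig.yml (keys as read in main(); values are placeholders):
#   dir: /backup
#   host: 127.0.0.1
#   port: 3306
#   user: user
#   pass: password
#   options: ""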
class ImportFile:
"""Class to handle SQL files"""
def __init__(self, log, mysqluser, mysqlpass, mysqlhost, mysqlport=3306, mysqloptions=""):
self.user = mysqluser
self.password = mysqlpass
self.host = mysqlhost
self.port = mysqlport
self.options = mysqloptions
self.log = log
def cli(self, database, file, cli="/usr/bin/mysql"):
cmd=cli
if self.options != "":
cmd = cmd + " " + self.options
cmd = cmd + " -h" + self.host + " -P" + self.port + " -u" + self.user + " -p" + self.password + " " + database + " < " + file
#print cmd
os.system(cmd)
return (None, None)
def get_files(self, path):
"""Return all files in path"""
files = []
for element in sorted(os.listdir(path)):
if os.path.isfile(path + '/' + element) and element.endswith('.sql'):
files.append(element)
return files
class Worker(threading.Thread):
def __init__(self, queue, log, cli, event_dict, ):
threading.Thread.__init__(self)
self.queue = queue
self.log = log
self.cli = cli
self.event_dict = event_dict
def run(self):
self.log.log("Worker " + self.getName() + " started")
while True:
try:
num, database, file = self.queue.get(True, 1)
except Queue.Empty:
break
self.event_dict[num] = threading.Event()
self.event_dict[num].clear()
self.log.log(self.getName() + " importing " + database + " " + file)
status, output = self.cli.cli(database, file)
self.log.log(self.getName() + " import " + database + " " + file)
if output:
print output
self.event_dict[num].set()
def main():
usage = "usage: %prog [options]\n Run mysql import in paralel"
parser = OptionParser(usage, version=__version__)
parser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False, help="verbose output.")
parser.add_option("-t", "--threads", action="store", dest="threads", type="int", default=4, help="Threads used. Default = 4")
parser.add_option("-n", "--name", action="store", dest="name", type="string", default=False, help="Name/Type of backup dir")
(options, args) = parser.parse_args()
log = Log(options.verbose)
queue = Queue.Queue()
x = 0
print "=== Start import with " + str(options.threads) + " threads ==="
importconfig = ImportConfig()
config = importconfig.get_config()
path = config['dir'] + '/' + options.name
cli = ImportFile(log, config['user'], config['pass'], config['host'], config['port'], config['options'])
for file in cli.get_files(path):
match = re.match('([^\s_]+)\.(.*)\.sql\Z', file)
database = match.group(1)
queue.put([x, database, path + "/" + file])
x = x + 1
event_dict = {}
threads = []
x = 0
for i in range(options.threads):
threads.append(Worker(queue, log, cli, event_dict))
threads[x].setDaemon(True)
threads[x].start()
x = x + 1
# Wait for all threads to finish
for thread in threads:
thread.join()
print "=== End of import ==="
if __name__ == "__main__":
main()