# Existing production servers
**proddb-mast01 private ip 10.166.152.12**
**proddb-slav01 private ip 10.166.152.13**
# New servers
**proddb-mast02 private ip 10.166.152.23**
**proddb-slav02 private ip 10.166.152.15**
== Step1 ==
Put maintenance page
Log in as root on both servers, prodapp03 and prodapp04
cp /etc/fstab /etc/fstab-bakcup
Unmount the existing gluster folders
umount /deploy/crossbow/shared/assets
umount /deploy/crossbow/shared/system/uploaded_datas
umount /deploy/crossbow/current/public/system/images
umount /deploy/crossbow/shared/system/site_backgrounds
umount /deploy/crossbow/shared/system/site_logos
umount /deploy/crossbow/shared/system/styles
umount /deploy/crossbow/shared/system/sheets
umount /deploy/crossbow/shared/system/templates
df -h
vi /etc/fstab #change the lines below
from
10.166.152.13:/asset-volume /deploy/crossbow/shared/assets glusterfs defaults 1 0
10.166.152.13:/data-volume /deploy/crossbow/shared/system/uploaded_datas glusterfs defaults 1 0
10.166.152.13:/images-volume /deploy/crossbow/current/public/system/images glusterfs defaults 1 0
10.166.152.13:/site_backgrounds /deploy/crossbow/shared/system/site_backgrounds glusterfs defaults 1 0
10.166.152.13:/site_logos /deploy/crossbow/shared/system/site_logos glusterfs defaults 1 0
10.166.152.13:/styles /deploy/crossbow/shared/system/styles glusterfs defaults 1 0
10.166.152.13:/sheets /deploy/crossbow/shared/system/sheets glusterfs defaults 1 0
10.166.152.13:/templates /deploy/crossbow/shared/system/templates glusterfs defaults 1 0
to
10.166.152.15:/asset-volume /deploy/crossbow/shared/assets glusterfs defaults 1 0
10.166.152.15:/data-volume /deploy/crossbow/shared/system/uploaded_datas glusterfs defaults 1 0
10.166.152.15:/images-volume /deploy/crossbow/current/public/system/images glusterfs defaults 1 0
10.166.152.15:/site_backgrounds /deploy/crossbow/shared/system/site_backgrounds glusterfs defaults 1 0
10.166.152.15:/site_logos /deploy/crossbow/shared/system/site_logos glusterfs defaults 1 0
10.166.152.15:/styles /deploy/crossbow/shared/system/styles glusterfs defaults 1 0
10.166.152.15:/sheets /deploy/crossbow/shared/system/sheets glusterfs defaults 1 0
10.166.152.15:/templates /deploy/crossbow/shared/system/templates glusterfs defaults 1 0
mount -a ##mount new gluster
Log in as appuser on both servers, prodapp03 and prodapp04,
make the changes below and save the file
cp /deploy/crossbow/current/config/database.yml /deploy/crossbow/current/config/database.yml-bakcup
cp /deploy/crossbow/current/config/sunspot.yml /deploy/crossbow/current/config/sunspot.yml-bakcup
vi /deploy/crossbow/current/config/database.yml
from
production:
adapter: mysql
encoding: utf8
database: cbprod
username: cbpuser
port: 3306
password: prd_cbmydb1
host: 10.166.152.12
pool: 23
production_slave_database:
adapter: mysql
encoding: utf8
database: cbprod
username: cbpuser
port: 3306
password: prd_cbmydb1
host: 10.166.152.13
pool: 23
to
production:
adapter: mysql
encoding: utf8
database: cbprod
username: cbpuser
port: 3306
password: prd_cbmydb1
host: 10.166.152.23
pool: 23
production_slave_database:
adapter: mysql
encoding: utf8
database: cbprod
username: cbpuser
port: 3306
password: prd_cbmydb1
host: 10.166.152.15
pool: 23
vi /deploy/crossbow/current/config/sunspot.yml #do below changes and save the file
from
production:
solr:
hostname: 10.166.152.13
port: 8080
log_level: WARNING
path: /solr
to
production:
solr:
hostname: 10.166.152.15
port: 8080
log_level: WARNING
path: /solr
== Step2 ==
If a database named cbprod already exists on the new server, drop it first (DROP DATABASE cbprod;)
###mysql dump from running production mysql slave server
ssh 10.166.152.13
switch user to expprodl
cd bin/
./runbackup.sh
ls -ltr /data00/backups/
copy the latest mysqldump.gz to the 10.166.152.23 server
login to
proddb-mast02 private ip 10.166.152.23
mysql -u root -p
create database cbprod DEFAULT CHARACTER SET utf8 COLLATE utf8_unicode_ci;
#extract from zip
gunzip mysqldump.gz
mysql -u root -p cbprod < mysqldump ##import the dump taken from the live prod mysql server into this new mysql server (gunzip above produces the file "mysqldump")
#to verify triggers and function
select distinct definer from information_schema.triggers;
select distinct definer from mysql.proc;
The mysql commands above should not return any IP address in their output other than 127.0.0.1 or localhost
== Step3 ==
login to
proddb-slav02 private ip 10.166.152.15
mysql -u root -p
show slave status\G
expected output
Slave_IO_Running: Yes
Slave_SQL_Running: Yes
show databases;
expected output
cbprod
== Step4 ==
Log in to prodapp03 and prodapp04 as application user expprodl and execute the commands below
/deploy/systasks/god.sh stop ##god service stop
/deploy/systasks/god.sh start ##god service start
/opt/apache2/bin/httpd -k stop ##apache service stop
/opt/apache2/bin/httpd -k start ##apache service start
##restart passenger
cd /deploy/crossbow/current/
touch tmp/restart.txt
##log in to prodapp04 as appuser and execute only the below cmd for a full reindex
bundle exec rake sunspot:solr:reindex RAILS_ENV=production
##wait a couple of minutes; the site will come up at the login page
== Step5 ==
ssh expprodl@10.166.152.15
cd /data00/ #to copy data from old to new gluster execute below cmds
rsync -avz expprodl@10.166.152.13:/data00/sheets .
rsync -avz expprodl@10.166.152.13:/data00/site_backgrounds .
rsync -avz expprodl@10.166.152.13:/data00/styles .
rsync -avz expprodl@10.166.152.13:/data00/assets .
rsync -avz expprodl@10.166.152.13:/data00/images .
rsync -avz expprodl@10.166.152.13:/data00/site_logos .
rsync -avz expprodl@10.166.152.13:/data00/templates .
rsync -avz expprodl@10.166.152.13:/data00/sharedpub .
Verify all contents copied successfully by comparing folder sizes with du -sh
if results positive
remove maintenance page
else follow below steps for rollback
== rollback option ==
Log in as appuser on both servers, prodapp03 and prodapp04,
make the changes below and save the file
cp /deploy/crossbow/current/config/database.yml-bakcup /deploy/crossbow/current/config/database.yml
cp /deploy/crossbow/current/config/sunspot.yml-bakcup /deploy/crossbow/current/config/sunspot.yml
Log in to prodapp03 and prodapp04 as application user expprodl and execute the commands below
/deploy/systasks/god.sh stop ##god service stop
/deploy/systasks/god.sh start ##god service start
/opt/apache2/bin/httpd -k stop ##apache service stop
/opt/apache2/bin/httpd -k start ##apache service start
##restart passenger
cd /deploy/crossbow/current/
touch tmp/restart.txt
if results positive
remove maintenance page else follow rollback steps
== Rollback ==
login as root prodapp03 and prodapp04 both server execute below commands
UNMOUNT existing gluster folder
umount /deploy/crossbow/shared/assets
umount /deploy/crossbow/shared/system/uploaded_datas
umount /deploy/crossbow/current/public/system/images
umount /deploy/crossbow/shared/system/site_backgrounds
umount /deploy/crossbow/shared/system/site_logos
umount /deploy/crossbow/shared/system/styles
umount /deploy/crossbow/shared/system/sheets
umount /deploy/crossbow/shared/system/templates
df -h
cp /etc/fstab-bakcup /etc/fstab
mount -a ##mount old gluster
Log in as appuser on both servers, prodapp03 and prodapp04,
make the changes below and save the file
cp /deploy/crossbow/current/config/database.yml-bakcup /deploy/crossbow/current/config/database.yml
cp /deploy/crossbow/current/config/sunspot.yml-bakcup /deploy/crossbow/current/config/sunspot.yml
Log in to prodapp03 and prodapp04 as application user expprodl and execute the commands below
/deploy/systasks/god.sh stop ##god service stop
/deploy/systasks/god.sh start ##god service start
/opt/apache2/bin/httpd -k stop ##apache service stop
/opt/apache2/bin/httpd -k start ##apache service start
##restart passenger
cd /deploy/crossbow/current/
touch tmp/restart.txt
##log in to prodapp04 as appuser and execute only the below cmd for a full reindex
bundle exec rake sunspot:solr:reindex RAILS_ENV=production
if results positive remove maintenance page