ha-cluster

naos 2017/11/17 22:52

HA-Cluster

testé sur CentOS 7

SUR TOUS LES POSTES

# vim /etc/hosts

192.168.20.146	vm1.DOMAIN.TLD
192.168.20.147	vm2.DOMAIN.TLD
192.168.20.158	vip.DOMAIN.TLD

SUR LA VM1

# hostnamectl set-hostname vm1.DOMAIN.TLD

$ ssh-keygen -t dsa

# ssh-copy-id -i /home/USER/.ssh/id_dsa.pub USER@192.168.20.147

SUR LA VM2

# hostnamectl set-hostname vm2.DOMAIN.TLD

$ ssh-keygen -t dsa

# ssh-copy-id -i /home/USER/.ssh/id_dsa.pub USER@192.168.20.146

POUR CHAQUE VM

# firewall-cmd --permanent --add-service=high-availability

# firewall-cmd --reload

# yum install -y pacemaker pcs psmisc policycoreutils-python

# systemctl start pcsd.service

# systemctl enable pcsd.service

Le mot de passe hacluster doit être le même sur les deux vm

# passwd hacluster

# pcs cluster auth vm1.DOMAIN.TLD vm2.DOMAIN.TLD

# pcs cluster setup --name clusterDOMAIN vm1.DOMAIN.TLD vm2.DOMAIN.TLD

Suivre le manuel « Clusters from Scratch » à partir de la page 19 : http://clusterlabs.org/doc/Cluster_from_Scratch.pdf

# pcs cluster start --all

# pcs status

# corosync-cfgtool -s

# corosync-cmapctl | grep members

# pcs status corosync

# ps axf

# journalctl | grep -i error

# pcs cluster cib

# crm_verify -L -V

# pcs property set stonith-enabled=false

Uniquement pour le lab : en production, ne jamais désactiver STONITH.

# crm_verify -L

# pcs resource create vip.DOMAIN.TLD ocf:heartbeat:IPaddr2 ip=192.168.20.158 cidr_netmask=24 op monitor interval=30s

# pcs cluster stop vm2.DOMAIN.TLD

# pcs status

# pcs cluster start vm2.DOMAIN.TLD

# pcs status

# pcs resource defaults resource-stickiness=100

# pcs resource defaults

SUR LES DEUX VMS

# yum install httpd wget mariadb php

# firewall-cmd --permanent --add-service=http

# firewall-cmd --reload

# cat <<-END >/var/www/html/index.html

 <html>
   <body>My Test Site - $(hostname)</body>
 </html>
END

# cat <<-END >/etc/httpd/conf.d/status.conf

  <Location /server-status>
     SetHandler server-status
     Order deny,allow
     Deny from all
     Allow from 127.0.0.1
  </Location>
END

# pcs resource create WebSite ocf:heartbeat:apache configfile=/etc/httpd/conf/httpd.conf statusurl="http://localhost/server-status" op monitor interval=1min

# pcs resource op defaults timeout=240s

# pcs resource op defaults

# pcs status

SUR LES DEUX VMS

# systemctl stop httpd

# pcs resource enable WebSite

# pcs constraint colocation add WebSite with vip.DOMAIN.TLD INFINITY

# pcs constraint

DEPUIS LA MACHINE PHYSIQUE

# wget -O - vip.DOMAIN.TLD

# rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org

# rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-2.el7.elrepo.noarch.rpm

# yum install -y kmod-drbd84 drbd84-utils

# semanage permissive -a drbd_t

# firewall-cmd --permanent --add-rich-rule='rule family="ipv4" source address="192.168.20.146" port port="7789" protocol="tcp" accept'

# firewall-cmd --permanent --add-rich-rule='rule family="ipv4" source address="192.168.20.147" port port="7789" protocol="tcp" accept'

# firewall-cmd --reload

SUR LA VM1

# mkdir /mnt/disk1
# fdisk /dev/sdb 
# mkfs.ext4 /dev/sdb1
# mount -t ext4 /dev/sdb1 /mnt/disk1
# umount /dev/sdb1
# pvcreate /dev/sdb1
# vgcreate vg_disk /dev/sdb1
# lvcreate --name donnes -l 100%FREE vg_disk
# mkfs.ext4 /dev/vg_disk/donnes 
# mount /dev/vg_disk/donnes /mnt/disk1/
# fdisk -l

SUR LA VM2

# mkdir /mnt/disk2
# fdisk /dev/sdb 
# mkfs.ext4 /dev/sdb1
# mount -t ext4 /dev/sdb1 /mnt/disk2
# umount /dev/sdb1
# pvcreate /dev/sdb1
# vgcreate vg_disk2 /dev/sdb1
# fdisk -l

SUR LES DEUX VM

# vgdisplay | grep -e Name -e Free
# lvs
# lvdisplay

SUR LA VM1

# ssh vm2.DOMAIN.TLD -- lvcreate --name donnes --size 0.8G vg_disk2

# cat <<END >/etc/drbd.d/wwwdata.res

resource wwwdata {
protocol C;
meta-disk internal;
device /dev/drbd1;
syncer {
verify-alg sha1;
}
net {
allow-two-primaries;
}
on vm1.DOMAIN.TLD {
disk
/dev/vg_disk/donnes;
address 192.168.20.146:7789;
}
on vm2.DOMAIN.TLD {
disk
/dev/vg_disk2/donnes;
address 192.168.20.147:7789;
}
}

# drbdadm create-md wwwdata

Si erreur Command 'drbdmeta 1 v08 /dev/vg_disk/donnes internal create-md' terminated with exit code 40
Alors dd if=/dev/zero of=/dev/sdb1 bs=1M count=128

# modprobe drbd

# drbdadm up wwwdata

# cat /proc/drbd

version: 8.4.10-1 (api:1/proto:86-101)
GIT-hash: a4d5de01fffd7e4cde48a080e2c686f9e8cebf4c build by mockbuild@, 2017-09-15 14:23:22
 
 1: cs:WFConnection ro:Secondary/Unknown ds:Inconsistent/DUnknown C r----s
    ns:0 nr:0 dw:0 dr:0 al:8 bm:0 lo:0 pe:0 ua:0 ap:0 ep:1 wo:f oos:1044412

SUR LA VM2

# cat <<END >/etc/drbd.d/wwwdata.res
resource wwwdata {
protocol C;
meta-disk internal;
device /dev/drbd1;
syncer {
verify-alg sha1;
}
net {
allow-two-primaries;
}
on vm1.DOMAIN.TLD {
disk
/dev/vg_disk/donnes;
address 192.168.20.146:7789;
}
on vm2.DOMAIN.TLD {
disk
/dev/vg_disk2/donnes;
address 192.168.20.147:7789;
}
}

# drbdadm create-md wwwdata

Si erreur Command 'drbdmeta 1 v08 /dev/vg_disk/donnes internal create-md' terminated with exit code 40
Alors dd if=/dev/zero of=/dev/sdb1 bs=1M count=128

# modprobe drbd

# drbdadm up wwwdata

# cat /proc/drbd

GIT-hash: a4d5de01fffd7e4cde48a080e2c686f9e8cebf4c build by mockbuild@, 2017-09-15 14:23:22
 
 1: cs:Connected ro:Secondary/Secondary ds:Inconsistent/Inconsistent C r-----
    ns:0 nr:0 dw:0 dr:0 al:8 bm:0 lo:0 pe:0 ua:0 ap:0 ep:1 wo:f oos:839616

SUR LA VM1

# drbdadm primary --force wwwdata

# cat /proc/drbd

GIT-hash: a4d5de01fffd7e4cde48a080e2c686f9e8cebf4c build by mockbuild@, 2017-09-15 14:23:22
 
 1: cs:SyncSource ro:Primary/Secondary ds:UpToDate/Inconsistent C r-----
    ns:253344 nr:0 dw:0 dr:255464 al:16 bm:0 lo:0 pe:0 ua:0 ap:0 ep:1 wo:f oos:586272
	[=====>..............] sync'ed: 30.3% (586272/839616)K
	finish: 0:00:46 speed: 12,516 (10,132) K/sec

SUR LA VM2 # cat /proc/drbd

version: 8.4.10-1 (api:1/proto:86-101)
GIT-hash: a4d5de01fffd7e4cde48a080e2c686f9e8cebf4c build by mockbuild@, 2017-09-15 14:23:22
 
 1: cs:Connected ro:Secondary/Primary ds:UpToDate/UpToDate C r-----
    ns:0 nr:839616 dw:839616 dr:0 al:8 bm:0 lo:0 pe:0 ua:0 ap:0 ep:1 wo:f oos:0

SUR LA VM1

# mkfs.xfs /dev/drbd1

meta-data=/dev/drbd1             isize=512    agcount=4, agsize=52476 blks
         =                       sectsz=512   attr=2, projid32bit=1
         =                       crc=1        finobt=0, sparse=0
data     =                       bsize=4096   blocks=209904, imaxpct=25
         =                       sunit=0      swidth=0 blks
naming   =version 2              bsize=4096   ascii-ci=0 ftype=1
log      =internal log           bsize=4096   blocks=855, version=2
         =                       sectsz=512   sunit=0 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0

# mkdir /mnt/drbd1

# mount /dev/drbd1 /mnt/drbd1

# cat <<-END >/mnt/drbd1/index.html

<html>
<body>My Test Site - DRBD</body>
</html>
END

# chcon -R --reference=/var/www/html /mnt/drbd1/

# umount /dev/drbd1

# pcs cluster start --all

# pcs cluster cib drbd_cfg

# pcs -f drbd_cfg resource create WebData ocf:linbit:drbd \

drbd_resource=wwwdata op monitor interval=60s

# pcs -f drbd_cfg resource master WebDataClone WebData \

master-max=1 master-node-max=1 clone-max=2 clone-node-max=1 \
notify=true

# pcs -f drbd_cfg resource show

 vip.DOMAIN.TLD	(ocf::heartbeat:IPaddr2):	Stopped
 WebSite	(ocf::heartbeat:apache):	Stopped
 Master/Slave Set: WebDataClone [WebData]
     Stopped: [ vm1.DOMAIN.TLD vm2.DOMAIN.TLD ]

# pcs cluster cib-push drbd_cfg

# pcs status

Cluster name: CLUSTERNAME
Stack: corosync
Current DC: vm1.DOMAIN.TLD (version 1.1.16-12.el7_4.4-94ff4df) - partition with quorum
Last updated: Mon Nov 20 11:57:06 2017
Last change: Mon Nov 20 11:56:43 2017 by root via cibadmin on vm1.DOMAIN.TLD
 
2 nodes configured
4 resources configured
 
Online: [ vm1.DOMAIN.TLD vm2.DOMAIN.TLD ]
 
Full list of resources:
 
 vip.DOMAIN.TLD	(ocf::heartbeat:IPaddr2):	Started vm1.DOMAIN.TLD
 WebSite	(ocf::heartbeat:apache):	Started vm2.DOMAIN.TLD
 Master/Slave Set: WebDataClone [WebData]
     Masters: [ vm1.DOMAIN.TLD ]
     Slaves: [ vm2.DOMAIN.TLD ]
 
Daemon Status:
  corosync: active/disabled
  pacemaker: active/disabled
  pcsd: active/enabled

SUR LES DEUX VM

# echo drbd >/etc/modules-load.d/drbd.conf

SUR LA VM1

# pcs cluster cib fs_cfg

# pcs -f fs_cfg resource create WebFS Filesystem device="/dev/drbd1" directory="/var/www/html" fstype="xfs"

Assumed agent name 'ocf:heartbeat:Filesystem' (deduced from 'Filesystem')

# pcs -f fs_cfg constraint colocation add WebFS with WebDataClone INFINITY with-rsc-role=Master

# pcs -f fs_cfg constraint order promote WebDataClone then start WebFS

Adding WebDataClone WebFS (kind: Mandatory) (Options: first-action=promote then-action=start)

# pcs -f fs_cfg constraint colocation add WebSite with WebFS INFINITY

# pcs -f fs_cfg constraint order WebFS then WebSite

Adding WebFS WebSite (kind: Mandatory) (Options: first-action=start then-action=start)

# pcs -f fs_cfg constraint

Location Constraints:
Ordering Constraints:
  promote WebDataClone then start WebFS (kind:Mandatory)
  start WebFS then start WebSite (kind:Mandatory)
Colocation Constraints:
  WebFS with WebDataClone (score:INFINITY)
  WebFS with WebDataClone (score:INFINITY) (with-rsc-role:Master)
  WebSite with WebFS (score:INFINITY)
Ticket Constraints:

# pcs -f fs_cfg resource show

 vip.DOMAIN.TLD	(ocf::heartbeat:IPaddr2):	Started vm1.DOMAIN.TLD
 WebSite	(ocf::heartbeat:apache):	Started vm2.DOMAIN.TLD
 Master/Slave Set: WebDataClone [WebData]
     Masters: [ vm1.DOMAIN.TLD ]
     Slaves: [ vm2.DOMAIN.TLD ]
 WebFS	(ocf::heartbeat:Filesystem):	Stopped

# pcs cluster cib-push fs_cfg

CIB updated

# pcs status

Cluster name: CLUSTERNAME
Stack: corosync
Current DC: vm1.DOMAIN.TLD (version 1.1.16-12.el7_4.4-94ff4df) - partition with quorum
Last updated: Mon Nov 20 12:05:04 2017
Last change: Mon Nov 20 12:04:39 2017 by root via cibadmin on vm1.DOMAIN.TLD
 
2 nodes configured
5 resources configured
 
Online: [ vm1.DOMAIN.TLD vm2.DOMAIN.TLD ]
 
Full list of resources:
 
 vip.DOMAIN.TLD	(ocf::heartbeat:IPaddr2):	Started vm1.DOMAIN.TLD
 WebSite	(ocf::heartbeat:apache):	Started vm2.DOMAIN.TLD
 Master/Slave Set: WebDataClone [WebData]
     Masters: [ vm2.DOMAIN.TLD ]
     Slaves: [ vm1.DOMAIN.TLD ]
 WebFS	(ocf::heartbeat:Filesystem):	Started vm2.DOMAIN.TLD
Daemon Status:
  corosync: active/disabled
  pacemaker: active/disabled
  pcsd: active/enabled

SUR LA VM1 - POUR TESTER DRBD

# pcs cluster standby vm1.DOMAIN.TLD

# pcs status

Cluster name: CLUSTERNAME
Stack: corosync
Current DC: vm1.DOMAIN.TLD (version 1.1.16-12.el7_4.4-94ff4df) - partition with quorum
Last updated: Mon Nov 20 12:06:31 2017
Last change: Mon Nov 20 12:06:01 2017 by root via cibadmin on vm1.DOMAIN.TLD
 
2 nodes configured
5 resources configured
 
Node vm1.DOMAIN.TLD: standby
Online: [ vm2.DOMAIN.TLD ]
 
Full list of resources:
 
 vip.DOMAIN.TLD	(ocf::heartbeat:IPaddr2):	Started vm2.DOMAIN.TLD
 WebSite	(ocf::heartbeat:apache):	Started vm2.DOMAIN.TLD
 Master/Slave Set: WebDataClone [WebData]
     Masters: [ vm2.DOMAIN.TLD ]
     Stopped: [ vm1.DOMAIN.TLD ]
 WebFS	(ocf::heartbeat:Filesystem):	Started vm2.DOMAIN.TLD
 
Daemon Status:
  corosync: active/disabled
  pacemaker: active/disabled
  pcsd: active/enabled

# pcs cluster unstandby vm1.DOMAIN.TLD

# pcs status

Cluster name: CLUSTERNAME
Stack: corosync
Current DC: vm1.DOMAIN.TLD (version 1.1.16-12.el7_4.4-94ff4df) - partition with quorum
Last updated: Mon Nov 20 12:07:29 2017
Last change: Mon Nov 20 12:07:13 2017 by root via cibadmin on vm1.DOMAIN.TLD
 
2 nodes configured
5 resources configured
 
Online: [ vm1.DOMAIN.TLD vm2.DOMAIN.TLD ]
 
Full list of resources:
 
 vip.DOMAIN.TLD	(ocf::heartbeat:IPaddr2):	Started vm2.DOMAIN.TLD
 WebSite	(ocf::heartbeat:apache):	Started vm2.DOMAIN.TLD
 Master/Slave Set: WebDataClone [WebData]
     Masters: [ vm2.DOMAIN.TLD ]
     Slaves: [ vm1.DOMAIN.TLD ]
 WebFS	(ocf::heartbeat:Filesystem):	Started vm2.DOMAIN.TLD
 
Daemon Status:
  corosync: active/disabled
  pacemaker: active/disabled
  pcsd: active/enabled

LA PARTIE STONITH N'EST PAS TRAITEE DANS LA DOC CAR C'EST UN LAB, MAIS EN PROD CETTE OPTION EST NECESSAIRE

  • ha-cluster.txt
  • Dernière modification: 2017/11/25 22:37
  • par naos