
gfs-cluster configuration


On all nodes:

Prerequisites: set up a local yum repository, stop iptables, disable SELinux, set the hostnames, and populate the hosts file.

more /etc/hosts|grep -v local

192.168.100.230 storage

192.168.100.231 node1

192.168.100.232 node2


# Install a time-sync tool and sync against internet time servers

cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime    # set the time zone

date -R    # check the current time zone offset

[root@node1 ~]# date -R

Tue, 01 Aug 2017 10:47:16 +0800

[root@node1 ~]# 


yum install -y ntpdate

/usr/sbin/ntpdate pool.ntp.org

# Sync against internet time every five minutes

echo '*/5 * * * * /usr/sbin/ntpdate pool.ntp.org > /dev/null 2>&1' >> /var/spool/cron/root
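To confirm the cron entry took effect, you can list root's crontab and force a one-off sync (a quick verification, not part of the original steps):

crontab -l -u root                # the */5 ntpdate line should be listed
/usr/sbin/ntpdate pool.ntp.org    # trigger an immediate sync
hwclock -w                        # optionally write the synced time to the hardware clock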


1. Disk configuration

Storage partition plan

Server-side storage setup (add a 30 GB disk to the storage server)

yum -y install kernel-headers kernel-devel flex patch

yum -y install wget

wget https://nchc.dl.sourceforge.net/project/iscsitarget/iscsitarget/1.4.20.2/iscsitarget-1.4.20.2.tar.gz

tar zxvf iscsitarget-1.4.20.2.tar.gz 

yum install -y gcc*

cd iscsitarget-1.4.20.2

make 

make install

cd /etc/iet/

cp ietd.conf ietd.conf.bak

Enable the iscsi-target service at boot:

chkconfig --add iscsi-target

chkconfig iscsi-target on

chkconfig --list |grep iscsi

fdisk /dev/sdb

n        (new partition)
e        (extended)
1        (partition number 1)
(press Enter twice to accept the default first and last cylinders)
n        (new partition)
l        (logical)
(press Enter to accept the default first cylinder)
+5G      (5 GB logical partition, becomes /dev/sdb5)
n
l
(press Enter)
+5G      (5 GB logical partition, becomes /dev/sdb6)
w        (write the partition table and exit)

cat >> /etc/iet/ietd.conf << EOF

Target iqn.2014-05.cn.vqiu:datastore.public

Lun 0 Path=/dev/sdb5,Type=fileio,ScsiId=xyz,ScsiSN=xyz

Lun 1 Path=/dev/sdb6,Type=fileio,ScsiId=xyz,ScsiSN=xyz

EOF

[root@storage ~]# fdisk -l /dev/sdb |grep dev

Disk /dev/sdb: 32.2 GB, 32212254720 bytes

/dev/sdb1               1        3916    31455238+   5  Extended

/dev/sdb5               1         654     5253192   83  Linux

/dev/sdb6             655        1308     5253223+  83  Linux

[root@storage ~]# 

[root@storage ~]# /etc/init.d/iscsi-target start
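To verify both LUNs are exported, IET publishes its state under /proc/net/iet; a quick sanity check might look like this (not part of the original steps):

cat /proc/net/iet/volume     # exported targets and LUNs
cat /proc/net/iet/session    # initiators currently logged in
netstat -ntlp | grep 3260    # confirm the target listens on the default iSCSI port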

Client-side procedure to attach the storage:

yum install -y iscsi-initiator-utils

iscsiadm -m discovery -t sendtargets -p storage

iscsiadm -m node -T iqn.2014-05.cn.vqiu:datastore.public -p  storage:3260 -l
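After the login you can confirm the session and see which local disks it mapped in (a sanity check using iscsiadm's session mode):

iscsiadm -m session                                      # one line per active session
iscsiadm -m session -P 3 | grep "Attached scsi disk"     # which sdX device each LUN became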

Storage devices visible on node2:

[root@node2 ~]#  fdisk -l |grep /dev/ |awk '{print $2" " $3 $4}' |egrep "dev/s"|grep -v "sda"

/dev/sdb: 5379MB,

/dev/sdc: 5379MB,

[root@node2 ~]# 


Storage devices visible on node1:

[root@node1 ~]# fdisk -l |grep /dev/ |awk '{print $2" " $3 $4}' |egrep "dev/s"|grep -v "sda"

/dev/sdb: 5379MB,

/dev/sdc: 5379MB,

[root@node1 ~]#

To detach the storage:

cd /var/lib/iscsi/nodes/

rm -rf *

cd /var/lib/iscsi/send_targets

rm -rf *

cd 

/etc/init.d/iscsi stop
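Alternatively, rather than deleting the record directories by hand, iscsiadm can log out and drop the node records itself (an equivalent, cleaner sketch):

iscsiadm -m node -T iqn.2014-05.cn.vqiu:datastore.public -p storage:3260 -u          # log out of the target
iscsiadm -m node -T iqn.2014-05.cn.vqiu:datastore.public -p storage:3260 -o delete   # remove the stored record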

##################################################################################################################################

2. Install the required packages (node1, node2)

yum install cman openais gfs* kmod-gfs lvm2* rgmanager system-config-cluster scsi-target-utils cluster-snmp -y

##################################################################################################################################

3. Modify the LVM configuration (node1, node2): switch to cluster-wide locking so both nodes can read and write concurrently, and disable fallback to local locking, to prevent split-brain

sed -i 's/locking_type = 1/locking_type = 3/g' /etc/lvm/lvm.conf

sed -i 's/fallback_to_local_locking = 1/fallback_to_local_locking = 0/g' /etc/lvm/lvm.conf

more /etc/lvm/lvm.conf |grep "locking_type ="

more /etc/lvm/lvm.conf |grep "fallback_to_local_locking ="

##################################################################################################################################

4. The cluster.conf configuration file (node1, node2)

cat > /etc/cluster/cluster.conf << EOF

<?xml version="1.0"?>

<cluster config_version="2" name="gfs_test">

<fence_daemon post_fail_delay="0" post_join_delay="3"/>

<clusternodes>

<clusternode name="node1" nodeid="1" votes="1">

<fence>

<method name="1">

<device name="manual_fence" nodename="node1"/>

</method>

</fence>

</clusternode>

<clusternode name="node2" nodeid="2" votes="2">

<fence>

<method name="1">

<device name="manual_fence" nodename="node2"/>

</method>

</fence>

</clusternode>

</clusternodes>

<cman expected_votes="1" two_node="1"/>

<fencedevices>

<fencedevice agent="fence_manual" name="manual_fence"/>

</fencedevices>

<rm>

<failoverdomains/>

<resources/>

</rm>

</cluster>

EOF

##################################################################################################################################

5. Validate the cluster configuration (node1, node2)

[root@node1 ~]# ccs_config_validate 

Configuration validates    (the configuration is valid)

[root@node1 ~]# 
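Note that any later edit to cluster.conf must increment config_version; on a running cluster the new version can then be validated and pushed to all nodes (a sketch using cman's reload option):

vi /etc/cluster/cluster.conf    # e.g. bump config_version="2" to "3"
ccs_config_validate             # re-validate the edited file
cman_tool version -r            # distribute and activate the new version cluster-wide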

##################################################################################################################################

6. Start the services (node1, node2)

node1:

/etc/init.d/cman start

/etc/init.d/clvmd start

node2:

/etc/init.d/cman start

/etc/init.d/clvmd start

node1:

/etc/init.d/rgmanager start

node2:

/etc/init.d/rgmanager start

Enable the cluster services to start at boot (node1, node2):

chkconfig cman on

chkconfig rgmanager on

chkconfig gfs2 on

chkconfig clvmd on   

Check the status (node1, node2):

[root@node1 ~]# clustat -f

Cluster Status for gfs_test @ Tue Aug  1 10:37:08 2017

Member Status: Quorate


 Member Name                                                     ID   Status

 ------ ----                                                     ---- ------

 node1                                                               1 Online, Local

 node2                                                               2 Online
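If clustat looks wrong, cman_tool gives a lower-level view of quorum and membership (extra checks beyond the original steps):

cman_tool status    # votes, quorum and cluster generation
cman_tool nodes     # per-node membership as cman sees it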

##################################################################################################################################

7. Logical volume configuration on the storage clients

[root@node1 ~]# 

[root@node1 ~]# fdisk -l |grep /dev/ |awk '{print $2" " $3 $4}' |egrep "dev/s"|grep -v "sda"

/dev/sdb: 5379MB,

/dev/sdc: 5379MB,

[root@node1 ~]# pvcreate /dev/sdb

  Physical volume "/dev/sdb" successfully created

[root@node1 ~]# pvdisplay /dev/sdb 

  "/dev/sdb" is a new physical volume of "5.01 GiB"

  --- NEW Physical volume ---

  PV Name               /dev/sdb

  VG Name               

  PV Size               5.01 GiB

  Allocatable           NO

  PE Size               0   

  Total PE              0

  Free PE               0

  Allocated PE          0

  PV UUID               0OJn2K-FHV0-oy2h-eOTr-nK6Z-toPQ-r8Uar4

   

[root@node1 ~]# vgcreate gfs_test /dev/sdb

  Clustered volume group "gfs_test" successfully created

[root@node1 ~]# vgdisplay gfs_test

  --- Volume group ---

  VG Name               gfs_test

  System ID             

  Format                lvm2

  Metadata Areas        1

  Metadata Sequence No  1

  VG Access             read/write

  VG Status             resizable

  Clustered             yes

  Shared                no

  MAX LV                0

  Cur LV                0

  Open LV               0

  Max PV                0

  Cur PV                1

  Act PV                1

  VG Size               5.01 GiB

  PE Size               4.00 MiB

  Total PE              1282

  Alloc PE / Size       0 / 0   

  Free  PE / Size       1282 / 5.01 GiB

  VG UUID               UYt9tC-bYZT-g00L-52he-6M1t-XvEO-9bMRME

   


[root@node1 ~]# lvcreate --size 2048M -n test gfs_test

  Logical volume "test" created.

   

[root@node1 ~]# lvdisplay /dev/gfs_test/test

  --- Logical volume ---

  LV Path                /dev/gfs_test/test

  LV Name                test

  VG Name                gfs_test

  LV UUID                Enw4kn-oxot-D0dj-Zzg3-RCej-R3z4-0CWfq0

  LV Write Access        read/write

  LV Creation host, time node1, 2017-08-01 11:40:42 +0800

  LV Status              available

  # open                 0

  LV Size                2.00 GiB

  Current LE             512

  Segments               1

  Allocation             inherit

  Read ahead sectors     auto

  - currently set to     256

  Block device           253:1

Extend the volume group with the second disk:

[root@node1 ~]# pvcreate /dev/sdc

  Physical volume "/dev/sdc" successfully created

[root@node1 ~]# vgextend gfs_test /dev/sdc

  Volume group "gfs_test" successfully extended

[root@node1 ~]# 

[root@node1 ~]# vgdisplay gfs_test

  --- Volume group ---

  VG Name               gfs_test

  System ID             

  Format                lvm2

  Metadata Areas        2

  Metadata Sequence No  3

  VG Access             read/write

  VG Status             resizable

  Clustered             yes

  Shared                no

  MAX LV                0

  Cur LV                1

  Open LV               0

  Max PV                0

  Cur PV                2

  Act PV                2

  VG Size               10.02 GiB    (5 GB larger than the original VG)

  PE Size               4.00 MiB     (each PE is 4 MiB)

  Total PE              2564

  Alloc PE / Size       512 / 2.00 GiB

  Free  PE / Size       2052 / 8.02 GiB

  VG UUID               UYt9tC-bYZT-g00L-52he-6M1t-XvEO-9bMRME

   

[root@node1 ~]# 

[root@node1 ~]# lvextend -l +50 /dev/gfs_test/test    (grow by 50 PEs, i.e. 200 MiB)

  Size of logical volume gfs_test/test changed from 2.00 GiB (512 extents) to 2.20 GiB (562 extents).

  Logical volume test successfully resized.

[root@node1 ~]# lvdisplay /dev/gfs_test/test       

  --- Logical volume ---

  LV Path                /dev/gfs_test/test

  LV Name                test

  VG Name                gfs_test

  LV UUID                Enw4kn-oxot-D0dj-Zzg3-RCej-R3z4-0CWfq0

  LV Write Access        read/write

  LV Creation host, time node1, 2017-08-01 11:40:42 +0800

  LV Status              available

  # open                 0

  LV Size                2.20 GiB

  Current LE             562

  Segments               1

  Allocation             inherit

  Read ahead sectors     auto

  - currently set to     256

  Block device           253:1

   

[root@node1 ~]# 

[root@node1 ~]# lvextend --size +500M /dev/gfs_test/test    (grow by another 500 MiB; the GiB figures shown are rounded, so the displayed increase is about 0.48 GiB)

  Size of logical volume gfs_test/test changed from 2.20 GiB (562 extents) to 2.68 GiB (687 extents).

  Logical volume test successfully resized.

[root@node1 ~]# lvdisplay /dev/gfs_test/test             

  --- Logical volume ---

  LV Path                /dev/gfs_test/test

  LV Name                test

  VG Name                gfs_test

  LV UUID                Enw4kn-oxot-D0dj-Zzg3-RCej-R3z4-0CWfq0

  LV Write Access        read/write

  LV Creation host, time node1, 2017-08-01 11:40:42 +0800

  LV Status              available

  # open                 0

  LV Size                2.68 GiB

  Current LE             687

  Segments               1

  Allocation             inherit

  Read ahead sectors     auto

  - currently set to     256

  Block device           253:1

   

[root@node1 ~]# vgdisplay /dev/gfs_test |grep Free    (the VG still has 7.33 GiB free)

  Free  PE / Size       1877 / 7.33 GiB

[root@node1 ~]# lvs |grep test

  test    gfs_test -wi-a-----  2.68g                                                    

################################################################################################################################## 

# GFS2 file system setup

node1

[root@node1 ~]# mkdir -p /gfs

[root@node1 ~]# mkfs.gfs2 -j 2 -p lock_dlm -t gfs_test:gfs /dev/gfs_test/test 

This will destroy any data on /dev/gfs_test/test.

It appears to contain: symbolic link to `../dm-1'


Are you sure you want to proceed? [y/n] y


Device:                    /dev/gfs_test/test

Blocksize:                 4096

Device Size                2.68 GB (703488 blocks)

Filesystem Size:           2.68 GB (703485 blocks)

Journals:                  2

Resource Groups:           11

Locking Protocol:          "lock_dlm"

Lock Table:                "gfs_test:gfs"

UUID:                      13efb5fb-bffa-58a8-b558-9ff34b5e2729

[root@node1 ~]# blkid  |grep gfs2|awk '{print $1 " /gfs gfs2 rw,relatime 0 0"}'|sed 's/://g' >> /etc/fstab

[root@node1 ~]# mount -a

[root@node1 ~]# df -h|grep gfs

/dev/mapper/gfs_test-test

                      2.7G  259M  2.5G  10% /gfs

[root@node1 ~]# more /etc/fstab |grep gfs

/dev/mapper/gfs_test-test /gfs gfs2 rw,relatime 0 0

[root@node1 ~]# 

[root@node1 ~]# /etc/init.d/gfs2 start

Mounting GFS2 filesystem (/gfs): already mounted           [  OK  ]

[root@node1 ~]# 
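Note that mkfs.gfs2 was run with -j 2, so the filesystem has exactly two journals and at most two nodes can mount it concurrently. To bring a third node in later, a journal has to be added first; a sketch, run on any node with /gfs mounted:

gfs2_tool journals /gfs    # show the current journal count
gfs2_jadd -j 1 /gfs        # add one more journal for an additional node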

################################################################################################################################## 

node2

mkdir -p /gfs

blkid  |grep gfs2|awk '{print $1 " /gfs gfs2 rw,relatime 0 0"}'|sed 's/://g' >> /etc/fstab

mount -a

/etc/init.d/gfs2 start
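If the underlying LV is extended again later, the mounted GFS2 filesystem can be grown online; gfs2_grow only needs to run on one node (a sketch with a hypothetical 1 GiB extension):

lvextend --size +1G /dev/gfs_test/test    # grow the clustered LV
gfs2_grow /gfs                            # expand the filesystem to fill the LV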

#################################################################################################################################

Test

[root@node1 ~]#

cat > /gfs/chenliang << EOF

HELLO WORLD!

EOF

 

[root@node2 ~]#

 more /gfs/chenliang 

HELLO WORLD!

[root@node2 ~]# 
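A slightly stronger test is to write from both nodes into the shared filesystem and confirm each side immediately sees the other's file (hypothetical file names):

[root@node1 ~]# echo "from node1" > /gfs/test-node1
[root@node2 ~]# echo "from node2" > /gfs/test-node2
[root@node1 ~]# ls /gfs    # both files should be listed on both nodes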


This article comes from the "chenliang8507" blog; please retain this attribution: http://408461.blog.51cto.com/398461/1952660
