前面安装时由于时间关系没有来得及整理,今天闲下来,整理了安装步骤,还是有一些收获的,下面附上步骤:
1.安装操作系统最小化安装即可
2.关闭防火墙
3.替换yum
4.添加共享磁盘
5.调整内核参数
6.安装依赖包
7.配置hosts
8.配置ssh 无密码访问
9.调整swap
10.配置asm共享磁盘
11.grid安装预检
12.grid安装
13.asm磁盘组创建
14.database安装
15.数据库实例创建
16.rac 状态查看和维护
esxi主机创建共享磁盘:
http://www.linuxfly.org/post/673/
VMware12上面安装11g rac:
在vmware安装目录 创建磁盘:
vmware-vdiskmanager.exe -c -s 1000Mb -a lsilogic -t 2 E:\VMwarecp\VMWARE\racsharedisk\ocr.vmdk
vmware-vdiskmanager.exe -c -s 1000Mb -a lsilogic -t 2 E:\VMwarecp\VMWARE\racsharedisk\ocr2.vmdk
vmware-vdiskmanager.exe -c -s 1000Mb -a lsilogic -t 2 E:\VMwarecp\VMWARE\racsharedisk\votingdisk.vmdk
vmware-vdiskmanager.exe -c -s 10000Mb -a lsilogic -t 2 E:\VMwarecp\VMWARE\racsharedisk\data2.vmdk
vmware-vdiskmanager.exe -c -s 5000Mb -a lsilogic -t 2 E:\VMwarecp\VMWARE\racsharedisk\backup.vmdk
scsi1.present = "TRUE"
scsi1.virtualDev = "lsilogic"
scsi1.sharedBus = "virtual"
scsi2.present = "TRUE"
scsi2.virtualDev = "lsilogic1"
scsi2.sharedBus = "virtual"
scsi1:1.present = "TRUE"
scsi1:1.mode = "independent-persistent"
scsi1:1.filename = "D:\VMWARE\racsharedisk\ocr.vmdk"
scsi1:1.deviceType = "plainDisk"
scsi1:2.present = "TRUE"
scsi1:2.mode = "independent-persistent"
scsi1:2.filename = "D:\VMWARE\racsharedisk\votingdisk.vmdk"
scsi1:2.deviceType = "plainDisk"
scsi1:3.present = "TRUE"
scsi1:3.mode = "independent-persistent"
scsi1:3.filename = "D:\VMWARE\racsharedisk\data.vmdk"
scsi1:3.deviceType = "plainDisk"
scsi1:4.present = "TRUE"
scsi1:4.mode = "independent-persistent"
scsi1:4.filename = "D:\VMWARE\racsharedisk\backup.vmdk"
scsi1:4.deviceType = "plainDisk"
scsi1:5.present = "TRUE"
scsi1:5.mode = "independent-persistent"
scsi1:5.filename = "D:\VMWARE\racsharedisk\ocr2.vmdk"
scsi1:5.deviceType = "plainDisk"
scsi2:2.present = "TRUE"
scsi2:2.mode = "independent-persistent"
scsi2:2.filename = "D:\VMWARE\racsharedisk\data3.vmdk"
scsi2:2.deviceType = "plainDisk"
disk.locking = "false"
diskLib.dataCacheMaxSize = "0"
diskLib.dataCacheMaxReadAheadSize = "0"
diskLib.DataCacheMinReadAheadSize = "0"
diskLib.dataCachePageSize = "4096"
diskLib.maxUnsyncedWrites = "0"
rac1和rac2上都要创建:
1.创建用户和组:
/usr/sbin/groupadd -g 1000 oinstall
/usr/sbin/groupadd -g 1020 asmadmin
/usr/sbin/groupadd -g 1021 asmdba
/usr/sbin/groupadd -g 1022 asmoper
/usr/sbin/groupadd -g 1031 dba
/usr/sbin/groupadd -g 1032 oper
useradd -u 1100 -g oinstall -G asmadmin,asmdba,asmoper,oper,dba grid
useradd -u 1101 -g oinstall -G dba,oper oracle
mkdir -p /u01/app/11.2.0/grid
mkdir -p /u01/app/grid
mkdir /u01/app/oracle
chown -R grid:oinstall /u01
chown oracle:oinstall /u01/app/oracle
chmod -R 775 /u01/
2.内核参数设置:
[root@rac1 ~]# vi /etc/sysctl.conf
kernel.msgmnb = 65536
kernel.msgmax = 65536
kernel.shmmax = 68719476736
kernel.shmall = 4294967296
fs.aio-max-nr = 1048576
fs.file-max = 6815744
kernel.shmall = 2097152
kernel.shmmax = 68719476736
kernel.shmmni = 4096
kernel.sem = 250 32000 100 128
net.ipv4.ip_local_port_range = 9000 65500
net.core.rmem_default = 262144
net.core.rmem_max = 4194304
net.core.wmem_default = 262144
net.core.wmem_max = 1048586
net.ipv4.tcp_wmem = 262144 262144 262144
net.ipv4.tcp_rmem = 4194304 4194304 4194304
3.配置oracle、grid用户的shell限制
[root@rac1 ~]# vi /etc/security/limits.conf
grid soft nproc 2047
grid hard nproc 16384
grid soft nofile 1024
grid hard nofile 65536
oracle soft nproc 2047
oracle hard nproc 16384
oracle soft nofile 1024
oracle hard nofile 65536
4.配置login
[root@rac1 ~]# vi /etc/pam.d/login
session required pam_limits.so
5.依赖软件:
安装需要的软件包
binutils-2.20.51.0.2-5.11.el6 (x86_64)
compat-libcap1-1.10-1 (x86_64)
compat-libstdc++-33-3.2.3-69.el6 (x86_64)
compat-libstdc++-33-3.2.3-69.el6.i686
gcc-4.4.4-13.el6 (x86_64)
gcc-c++-4.4.4-13.el6 (x86_64)
glibc-2.12-1.7.el6 (i686)
glibc-2.12-1.7.el6 (x86_64)
glibc-devel-2.12-1.7.el6 (x86_64)
glibc-devel-2.12-1.7.el6.i686
ksh
libgcc-4.4.4-13.el6 (i686)
libgcc-4.4.4-13.el6 (x86_64)
libstdc++-4.4.4-13.el6 (x86_64)
libstdc++-4.4.4-13.el6.i686
libstdc++-devel-4.4.4-13.el6 (x86_64)
libstdc++-devel-4.4.4-13.el6.i686
libaio-0.3.107-10.el6 (x86_64)
libaio-0.3.107-10.el6.i686
libaio-devel-0.3.107-10.el6 (x86_64)
libaio-devel-0.3.107-10.el6.i686
make-3.81-19.el6
sysstat-9.0.4-11.el6 (x86_64)
安装:
yum install gcc gcc-c++ libaio* glibc* glibc-devel* ksh libgcc* libstdc++* libstdc++-devel* make sysstat unixODBC* compat-libstdc++-33.x86_64 elfutils-libelf-devel glibc.i686 compat-libcap1 smartmontools unzip openssh* parted cvuqdisk -y
##cvuqdisk 这个软件是在安装grid的时候 使用fixup.sh 安装即可,yum安装不上没有关系
6.
##注意 rpm安装一定要在yum安装之后
rpm -ivh --force --nodeps libaio-0.3.106-5.i386.rpm
rpm -ivh --force --nodeps libaio-devel-0.3.106-5.i386.rpm
rpm -ivh --force --nodeps pdksh-5.2.14-36.el5.i386.rpm
rpm -ivh --force --nodeps libstdc++-4.1.2-48.el5.i386.rpm
rpm -ivh --force --nodeps libgcc-4.1.2-48.el5.i386.rpm
rpm -ivh --force --nodeps unixODBC-devel-2.2.11-7.1.i386.rpm
rpm -ivh --force --nodeps compat-libstdc++-33-3.2.3-61.i386.rpm
rpm -ivh --force --nodeps unixODBC-2.2.11-7.1.i386.rpm
7.配置hosts文件:
192.168.20.220 rac1
192.168.166.220 rac1-priv
192.168.20.223 rac1-vip
192.168.20.221 rac2
192.168.166.221 rac2-priv
192.168.20.224 rac2-vip
192.168.20.222 scan-ip
8.添加环境变量
Oracle_sid需要根据节点不同进行修改
##grid变量
[root@rac1 ~]# su - grid
[grid@rac1 ~]$ vi .bash_profile
##rac1:
export TMP=/tmp
export TMPDIR=$TMP
export ORACLE_SID=+ASM1
export ORACLE_BASE=/u01/app/grid
export ORACLE_HOME=/u01/app/11.2.0/grid
export PATH=/usr/sbin:$PATH
export PATH=$ORACLE_HOME/bin:$PATH
export LD_LIBRARY_PATH=$ORACLE_HOME/lib:/lib:/usr/lib
export CLASSPATH=$ORACLE_HOME/JRE:$ORACLE_HOME/jlib:$ORACLE_HOME/rdbms/jlib
umask 022
##rac2:
export TMP=/tmp
export TMPDIR=$TMP
export ORACLE_SID=+ASM2
export ORACLE_BASE=/u01/app/grid
export ORACLE_HOME=/u01/app/11.2.0/grid
export PATH=/usr/sbin:$PATH
export PATH=$ORACLE_HOME/bin:$PATH
export LD_LIBRARY_PATH=$ORACLE_HOME/lib:/lib:/usr/lib
export CLASSPATH=$ORACLE_HOME/JRE:$ORACLE_HOME/jlib:$ORACLE_HOME/rdbms/jlib
umask 022
##需要注意的是ORACLE_UNQNAME是数据库名,创建数据库时指定多个节点是会创建多个实例,ORACLE_SID指的是数据库实例名
##oracle 环境变量
[root@rac1 ~]# su - oracle
[oracle@rac1 ~]$ vi .bash_profile
#rac1:
export TMP=/tmp
export TMPDIR=$TMP
export ORACLE_SID=orcl1
export ORACLE_UNQNAME=orcl
export ORACLE_BASE=/u01/app/oracle
export ORACLE_HOME=$ORACLE_BASE/product/11.2.0/db_1
export TNS_ADMIN=$ORACLE_HOME/network/admin
export PATH=/usr/sbin:$PATH
export PATH=$ORACLE_HOME/bin:$PATH
export LD_LIBRARY_PATH=$ORACLE_HOME/lib:/lib:/usr/lib
##rac2:
export TMP=/tmp
export TMPDIR=$TMP
export ORACLE_SID=orcl2
export ORACLE_UNQNAME=orcl
export ORACLE_BASE=/u01/app/oracle
export ORACLE_HOME=$ORACLE_BASE/product/11.2.0/db_1
export TNS_ADMIN=$ORACLE_HOME/network/admin
export PATH=/usr/sbin:$PATH
export PATH=$ORACLE_HOME/bin:$PATH
export LD_LIBRARY_PATH=$ORACLE_HOME/lib:/lib:/usr/lib
$ source .bash_profile   #使配置文件生效
9.配置 rac1 rac2 中的 root grid oracle 三个用户直接相互无密码ssh访问
每一个用户目录下都要配置:
ssh-keygen -t rsa
ssh rac1 cat ~/.ssh/id_rsa.pub >> authorized_keys
ssh rac2 cat ~/.ssh/id_rsa.pub >> authorized_keys
scp authorized_keys rac2:~/.ssh/
ssh rac1 date
ssh rac2 date
ssh rac1-priv date
ssh rac2-priv date
10.##如果swap太小,swap调整方法:
通过此种方式进行swap 的扩展,首先要计算出block的数目。具体为根据需要扩展的swapfile的大小,以M为单位。block=swap分区大小*1024,例如,需要扩展64M的swapfile,则:block=64*1024=65536.
然后做如下步骤:
dd if=/dev/zero of=/swapfile bs=1024 count=9216000
#mkswap /swapfile
#swapon /swapfile
#vi /etc/fstab
增加/swapf swap swap defaults 0 0
# cat /proc/swaps 或者# free �Cm //查看swap分区大小
# swapoff /swapf //关闭扩展的swap分区
11.共享磁盘配置:
rac1 和rac2配置共享磁盘 (esxi主机上面 2个总线都要选择共享才不会报错):
rac2 需要重启才可以在 ll /dev/raw 下面看到磁盘
所以只需root.sh 的时候 如果报错找不到raw磁盘,需要重启rac2,所以在配置好共享磁盘之后,重启rac2
在rac1格式化之后,
cat /etc/udev/rules.d/60-raw.rules
# Enter raw device bindings here.
#
# An example would be:
# ACTION=="add",KERNEL=="sda",RUN+="/bin/raw /dev/raw/raw1 %N"
# to bind /dev/raw/raw1 to /dev/sda,or
# ACTION=="add",ENV{MAJOR}=="8",ENV{MINOR}=="1",RUN+="/bin/raw /dev/raw/raw2 %M %m"
# to bind /dev/raw/raw2 to the device with major 8,minor 1.
ACTION=="add",KERNEL=="/dev/sdb1",RUN+='/bin/raw /dev/raw/raw1 %N"
ACTION=="add",ENV{MINOR}=="17",RUN+="/bin/raw /dev/raw/raw1 %M %m"
ACTION=="add",KERNEL=="/dev/sdc1",RUN+='/bin/raw /dev/raw/raw2 %N"
ACTION=="add",ENV{MINOR}=="33",RUN+="/bin/raw /dev/raw/raw2 %M %m"
ACTION=="add",KERNEL=="/dev/sdd1",RUN+='/bin/raw /dev/raw/raw3 %N"
ACTION=="add",ENV{MINOR}=="49",RUN+="/bin/raw /dev/raw/raw3 %M %m"
ACTION=="add",KERNEL=="/dev/sde1",RUN+='/bin/raw /dev/raw/raw4 %N"
ACTION=="add",ENV{MINOR}=="65",RUN+="/bin/raw /dev/raw/raw4 %M %m"
ACTION=="add",KERNEL=="/dev/sdf1",RUN+='/bin/raw /dev/raw/raw5 %N"
ACTION=="add",ENV{MINOR}=="81",RUN+="/bin/raw /dev/raw/raw5 %M %m"
KERNEL=="raw[1-5]",OWNER="grid",GROUP="asmadmin",MODE="660"
注意ENV{MINOR} 的值相差16,增加的值也得相差16 要不然识别不出来
然后执行 start_udev
ll /dev/raw
rac2上要是没有 执行 partprobe
12.grid 预检
集群预检查 grid用户:
./runcluvfy.sh stage -pre crsinst -n rac1,rac2 -fixup -verbose
乱码请设置字符编码:
export LC_CTYPE=en_US.UTF-8
13.安装grid
./runInstaller 按照操作安装即可
##注意1:安装实现root.sh 的脚本的时候 需要在途中执行,CRS-4124: Oracle High Availability Services startup Failed.
CRS-4000: Command Start Failed,or completed with errors.
ohasd Failed to start: 对设备不适当的 ioctl 操作
ohasd Failed to start at /u01/app/11.2.0/grid/crs/install/rootcrs.pl line 443.
解决方法竟然是出现pa user cert的时候在另一个窗口不停的执行下面的命令,直到命令执行成功,真是变态啊。
##执行root.sh 的时候 出现 adding demo to inttab的时
候 执行,要不然可能需要重现安装系统来安装rac
/bin/dd if=/var/tmp/.oracle/npohasd of=/dev/null bs=1024 count=1
注意2:安装最后报一个dns解析错误,忽略即可。[INS-20802]错误,忽略即可
14. 创建asm磁盘
su - grid
设置字符集:
export LC_CTYPE=en_US.UTF-8
运行 asmca
按照提示创建即可
15.安装database
16.rac维护
1.查看服务状态
忽略gsd问题
[root@rac1 ~]# su - grid
[grid@rac1 ~]$ crs_stat -t
检查集群实例运行状态
[grid@rac1 ~]$ srvctl status database -d orcl
Instance orcl1 is running on node rac1
Instance orcl2 is running on node rac2
检查本地节点的CRS状态
[grid@rac1 ~]$ crsctl check crs
CRS-4638: Oracle High Availability Services is online
CRS-4537: Cluster Ready Services is online
CRS-4529: Cluster Synchronization Services is online
CRS-4533: Event Manager is online
检查集群的CRS状态
[grid@rac1 ~]$ crsctl check cluster
CRS-4537: Cluster Ready Services is online
CRS-4529: Cluster Synchronization Services is online
CRS-4533: Event Manager is online
2.查看集群中节点配置信息
[grid@rac1 ~]$ olsnodes
rac1
rac2
[grid@rac1 ~]$ olsnodes -n
rac1 1
rac2 2
[grid@rac1 ~]$ olsnodes -n -i -s -t
rac1 1 rac1-vip Active Unpinned
rac2 2 rac2-vip Active Unpinned
3.查看集群件的表决磁盘信息
[grid@rac1 ~]$ crsctl query css votedisk
## STATE File Universal Id File Name Disk group
-- ----- ----------------- --------- ---------
1. ONLINE 496abcfc4e214fc9bf85cf755e0cc8e2 (/dev/raw/raw1) [OCR]
Located 1 voting disk(s).
4.查看集群SCAN VIP信息
[grid@rac1 ~]$ srvctl config scan
SCAN name: scan-ip,Network: 1/192.168.248.0/255.255.255.0/eth0
SCAN VIP name: scan1,IP: /scan-ip/192.168.248.110
查看集群SCAN Listener信息
[grid@rac1 ~]$ srvctl config scan_listener
SCAN Listener LISTENER_SCAN1 exists. Port: TCP:1521
5.asm 增加磁盘:
方式1. grid 用户下:
sqlplus / as sysasm
alter diskgroup FRA add disk '/dev/raw/raw4' rebalance power 5;
方式2. 运行asmca,直接添加磁盘
6. 集群关闭和开启:
一 关闭rac
1.关闭数据实例
[grid@rac1 ~]$ srvctl status database -d RACDB
实例 RACDB1 正在节点 rac1 上运行
实例 RACDB2 正在节点 rac2 上运行
[grid@rac1 ~]$ ps -ef|grep smon
oracle 3676 1 0 06:05 ? 00:00:02 ora_smon_RACDB1
grid 12840 1 0 01:54 ? 00:00:00 asm_smon_+ASM1
grid 27890 27621 0 07:52 pts/3 00:00:00 grep smon
[grid@rac1 ~]$ srvctl stop database -d RACDB
[grid@rac1 ~]$ srvctl status database -d RACDB
3.切换到root用户,source grid用户的环境变量
[root@rac1 ~]# cd /home/grid
[root@rac1 grid]# source .bash_profile
4,使用crs_stat 确认集群各项资源和服务运行状态
[root@rac1 bin]# /u01/app/11.2.0/grid/bin/crs_stat -t -v
5,使用crsctl 指令关闭集群
[root@rac1 bin]# /u01/app/11.2.0/grid/bin/crsctl stop cluster -all
6,使用crs_stat 确认集群各项资源和服务运行状态
[root@rac1 bin]# /u01/app/11.2.0/grid/bin/crs_stat -t -v
[root@rac2 ~]# /u01/app/11.2.0/grid/bin/crs_stat -t -v
CRS-0184: Cannot communicate with the CRS daemon.
说明顺利关闭
二 。RAC 开启
1,root 执行grid 下面的环境变量 (可以不执行直接到/u01/app/11.2.0/grid/bin/模式下)
2,查看crs 集群状态
[root@rac1 bin]# /u01/app/11.2.0/grid/bin/crs_stat -t -v
3,开启集群
[root@rac1 ~]# /u01/app/11.2.0/grid/bin/crsctl start cluster -all
检查状态
[root@rac2 ~]# /u01/app/11.2.0/grid/bin/crs_stat -t -v
4,使用srvctl 确认数据库实例状态
[root@rac1 ~]# /u01/app/11.2.0/grid/bin/srvctl status database -d RACDB
实例 RACDB1 没有在 rac1 节点上运行
实例 RACDB2 没有在 rac2 节点上运行
5,打开RAC 实例集群
[root@rac1 ~]# /u01/app/11.2.0/grid/bin/srvctl start database -d RACDB
确认状态
[root@rac2 ~]# /u01/app/11.2.0/grid/bin/srvctl status database -d RACDB
实例 RACDB1 正在节点 rac1 上运行
实例 RACDB2 正在节点 rac2 上运行
6,打开OEM
[root@rac1 ~]# su - oracle -c "/u01/app/oracle/product/11.2.0/db_1/bin/emctl start dbconsole"
参考资料:
http://www.linuxidc.com/Linux/2017-03/141543.htm
17.遇到的问题及处理办法
[root@rac2 ~]# sh /tmp/CVU_11.2.0.4.0_grid/runfixup.sh
Response file being used is :/tmp/CVU_11.2.0.4.0_grid/fixup.response
Enable file being used is :/tmp/CVU_11.2.0.4.0_grid/fixup.enable
Log file location: /tmp/CVU_11.2.0.4.0_grid/orarun.log
Installing Package /tmp/CVU_11.2.0.4.0_grid//cvuqdisk-1.0.9-1.rpm
Preparing... ########################################### [100%]
ls: 无法访问/usr/sbin/smartctl: 没有那个文件或目录
/usr/sbin/smartctl not found.
error: %pre(cvuqdisk-1.0.9-1.x86_64) scriptlet Failed,exit status 1
error: install: %pre scriptlet Failed (2),skipping cvuqdisk-1.0.9-1
yum install smartmontools
Creating trace directory
/u01/app/11.2.0/grid/bin/clscfg.bin: error while loading shared libraries: libcap.so.1: cannot open shared object file: No such file or directory
Failed to create keys in the OLR,rc = 127,32512
OLR configuration Failed
解决:
yum install compat-libcap1 -y
Entries will be added to the /etc/oratab file as needed by
Database Configuration Assistant when a database is created
Finished running generic part of root.sh script.
Now product-specific root actions will be performed.
2017-09-01 18:18:52: Parsing the host name
2017-09-01 18:18:52: Checking for super user privileges
2017-09-01 18:18:52: User has super user privileges
Using configuration parameter file: /u01/app/11.2.0/grid/crs/install/crsconfig_params
Improper Oracle Clusterware configuration found on this host
Deconfigure the existing cluster configuration before starting
to configure a new Clusterware
run '/u01/app/11.2.0/grid/crs/install/rootcrs.pl -deconfig'
to configure existing Failed configuration and then rerun root.sh
解决:
/u01/app/11.2.0/grid/crs/install
./roothas.pl -delete -force -verbose
CRS-4124: Oracle High Availability Services startup Failed.
CRS-4000: Command Start Failed,or completed with errors.
ohasd Failed to start: 对设备不适当的 ioctl 操作
ohasd Failed to start at /u01/app/11.2.0/grid/crs/install/rootcrs.pl line 443.
解决方法竟然是出现pa user cert的时候在另一个窗口不停的执行下面的命令,直到命令执行成功,真是变态啊。
##执行root.sh 的时候 出现 adding demo to inttab的时
候 执行,要不然可能需要重现安装系统来安装rac
/bin/dd if=/var/tmp/.oracle/npohasd of=/dev/null bs=1024 count=1
错误:
ORA-12545: Connect Failed because target host or object does not exist
解决:
经测试,其实只需要在客户端主机的/etc/hosts文件中,添加目标数据库的vip的解析信息便可以解决(测试数据库版本11G R2)
RAC维护
在工作上有时候忘记ASM磁盘组所对应的ASM磁盘和设备名,需要查看时可以在ASM实例中使用以下命令:
sql> select name,path from v$asm_disk_stat;
原文链接:/oracle/207461.html