#! /bin/sh
#
# $Id: cluster_config.sh,v 6.79.16.2 2007/02/02 20:14:12 laim Exp $
#
# Copyright (c) 2005-2007 EMC Corporation.
#
# All rights reserved. This is an UNPUBLISHED work, and
# comprises proprietary and confidential information of EMC.
# Unauthorized use, disclosure, and distribution are strictly
# prohibited. Use, duplication, or disclosure of the software
# and documentation by the U.S. Government is subject to
# restrictions set forth in a license agreement between the
# Government and EMC or other written agreement specifying
# the Government's rights to use the software and any applicable
# FAR provisions, such as FAR 52.227-19.
#
#---------------------------------------------------------------------
#
# High Availability NetWorker installation procedure.  This procedure
# should be used after a proper installation of NetWorker on all the
# nodes of the relevant cluster to activate the failover and cluster-
# aware capabilities of NetWorker.
#
# CONFIGURATION
#
# To configure the NetWorker product as one that is cluster-aware and
# has the ability to fail over within the mechanisms provided by the
# operating system, the following are the minimal steps necessary:
#
# 1. The NetWorker database area (/nsr) must reside on a local disk in
#    such a way that it is a symbolic link to the actual local directory.
#    This enables the cluster start/stop mechanisms to easily move the
#    configuration between being local (for cluster-aware client ops)
#    and full-blown cluster server capability.  (The cluster server
#    database moves to whatever host the logical host operates upon.)
#
# 2. The ${CLU_TAG} file must exist on the system.
#
# DE-CONFIGURATION
#
# This procedure can also be called with a "-r" option to remove all
# the configuration steps that have been done by a previous use of the
# script for cluster configuration.  Note that a second optional param
# is given with the "-r" which indicates where the nsr binaries are
# kept on the system.
#
# OBS: For TruCluster 5.0a this script performs the same major
# steps and also runs 5.0a-specific actions.
#
#---------------------------------------------------------------------
#
#---------------------------------------------------------------------
# Restart syslog
#
# Restart the syslog daemon after the /nsr directory has been moved.
#---------------------------------------------------------------------
#
restart_syslog()
{
	pid=0
	if [ X${ARCH} = "XSOLARIS" ]; then
		ps_output="`ps -ax | egrep 'syslogd'`"
		if [ -f /etc/syslog.pid ]; then
			pid=`cat /etc/syslog.pid`
		fi
	elif [ X${ARCH} = "XDIGITAL" ]; then
		ps_output="`ps -eopid,tt,time,comm | egrep 'syslogd'`"
		if [ -f /var/run/syslog.pid ]; then
			pid=`cat /var/run/syslog.pid`
		fi
	elif [ X${ARCH} = "XHPUX" ]; then
		ps_output="`ps -e | egrep 'syslogd'`"
		if [ -f /var/run/syslog.pid ]; then
			pid=`cat /var/run/syslog.pid`
		fi
	elif [ X${ARCH} = "XLINUX" ]; then
		ps_output="`ps -e | egrep 'syslogd'`"
		if [ -f /var/run/syslogd.pid ]; then
			pid=`cat /var/run/syslogd.pid`
		fi
	elif [ X${ARCH} = "XAIX" ]; then
		ps_output="`ps -e | egrep 'syslogd'`"
		if [ -f /etc/syslog.pid ]; then
			pid=`cat /etc/syslog.pid`
		fi
	else
		ps_output="`ps -e | egrep 'syslogd'`"
	fi
	if [ ! -z "${ps_output}" ]; then
		if [ ! -z "${pid}" ]; then
			echo "Restarting syslog daemon..."
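			# kill -1 sends SIGHUP, which makes syslogd re-read its
			# configuration and reopen its log files at the relocated
			# /nsr location.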
kill -1 ${pid} # Give syslog a chance to restart sleep 5 fi else echo "The syslog daemon is not currently running" fi } # #--------------------------------------------------------------------- # yesno function # # Generic function to get yes/no answer # # inputs: # prompt= string to prompt with (excluding default value) # default= name of default value if no response # # outputs: # result= 'y' or 'n' #--------------------------------------------------------------------- # yesno() { while true ; do if [ -z "${default}" ]; then echo_n "${prompt}? " else echo_n "${prompt} [${default}]? " fi read ans if [ "X${ans}" = X ]; then result="${default}" else result="${ans}" fi if [ `expr "X${result}" : 'X[yY]'` -ne 0 ]; then result=y break elif [ `expr "X${result}" : 'X[nN]'` -ne 0 ]; then result=n break else echo "Please respond \`yes' or \`no'" fi done } # #--------------------------------------------------------------------- # echo_n function # # Generic function for output of lines without a carriage return. #--------------------------------------------------------------------- # echo_n() { if [ ${ARCH} = SOLARIS -o ${ARCH} = LINUX ]; then echo -n "$*" else echo "$*\c" fi } # #--------------------------------------------------------------------- # prompt_and_answer function # # Generic function to prompt user for answers contained within ${prompt} # # inputs: # prompt= string to prompt with (excluding default value) # default= name of default value, if any # outputs: # result= resultant value #--------------------------------------------------------------------- # prompt_and_answer() { echo "" while true ; do if [ -z "${default}" ]; then echo_n "${prompt}? " read ans else echo_n "${prompt} [${default}]? " read ans fi if [ -z "${ans}" ]; then if [ -z "${default}" ]; then continue fi ans="${default}" fi result="${ans}" return done } # #--------------------------------------------------------------------- # find_utility function # # Generic function to prompt user for where a particular utility is # located # # inputs: # $1 directory that is a guess of where ${tool} is # $2 name of utility to find # outputs: # result= full pathname of utility #--------------------------------------------------------------------- # find_utility() { guess=$1 tool=$2 if [ -f ${guess}/${tool} ] ; then result=${guess}/${tool} else while true; do default=$guess prompt="What directory contains ${tool}" prompt_and_answer if [ -d "${result}" -a -f "${result}/${tool}" ]; then result=${result}/${tool} break fi echo "% Cannot find ${tool} in given directory ($result)" done fi } # #--------------------------------------------------------------------- # Find processes # # INPUTS: # process_list= The list of processes names to find # OUTPUTS: # active_processes= The ps output of the processes we targeted. #--------------------------------------------------------------------- # find_processes() { if [ ${ARCH} = SOLARIS ]; then ps_output="`ps -ax | egrep 'nsr|save|nwadmin'`" elif [ ${ARCH} = DIGITAL ]; then ps_output="`ps -eopid,tt,time,comm | egrep 'nsr|save|nwadmin'`" else ps_output="`ps -e | egrep 'nsr|save|nwadmin'`" fi # build up an egrep style pattern we can feed to awk which will match # all the various daemons. pattern= for daemon in $process_list ; do if [ -z "${pattern}" ]; then pattern='[0-9] [-a-zA-Z\+\/._0-9]*('"${daemon}" else pattern="${pattern}|${daemon}" fi done pattern="${pattern}"')' if [ ! 
-z "${ps_output}" ]; then
		active_processes="`echo "${ps_output}" | awk '/awk/ { next } $0 ~ /'"$pattern"'/ { print $0 }'`"
	else
		active_processes=
	fi
}
#
#------------------------------------------------------------------
# modfile function
#
# Scan $file for $searchstr.
# If installing, and no match is found, this function's behavior
# depends upon the setting of ${insertmode}:
#	If ${insertmode} = "y", then ${insertcmd} is used to change
#	${file}.
#	If insertmode != y, then the contents of ${appendstr} are appended
#	to ${file}.
#
# If removing, and a match is found,
# use $sedcmd to strip out lines of a given type or range.
#
# INPUTS
#	op=		"install" or "remove"
#	file=		file to egrep thru and modify
#	searchstr=	string to egrep for to determine if already nsr-ized
#
# INPUTS when in install mode:
#	appendstr=	string to append to ${file} if ${searchstr} not found
#	insertcmd=	sed command to stick lines in file if in insert mode
#	insertmode=	y if lines are to be inserted in ${file}.
#
# INPUTS when in remove mode:
#	sedcmd=		sed command to use to remove changes if searchstr found
#
# OUTPUTS when in install mode:
#	filechange=	y if file changed
#			n if file is not changed
#
#---------------------------------------------------------------------
#
modfile()
{
	egrep "${searchstr}" ${file} >/dev/null 2>&1
	grepstatus=$?
	filechange=n
	if [ ${grepstatus} -eq 2 ]; then
		echo "could not read ${file} for modification"
		filechange=n
	elif [ ${grepstatus} -eq 0 ]; then
		# ${searchstr} was found
		if [ ${op} = install ]; then
			echo "${file} already modified for NetWorker"
			return
		fi
		# remove the lines in ${file} using ${sedcmd}
		tfile=${LGTO_TMP1_FILE}
		# copy so the new file has the correct permissions
		cp ${file} ${tfile}
		sed -e "${sedcmd}" ${file} > ${tfile}
		mv -f ${tfile} ${file}
		filechange=y
	else
		# ${searchstr} not found
		if [ ${op} = remove ]; then
			return
		fi
		if [ "${insertmode}" = "y" ]; then
			# apply ${insertcmd} to ${file}
			tfile=/tmp/file.$$
			# copy so the new file has the correct permissions
			cp ${file} ${tfile}
			sed -e "${insertcmd}" ${file} > ${tfile}
			mv -f ${tfile} ${file}
			filechange=y
		else
			# Append the new data
			cat >> ${file} << EOF
${appendstr}
EOF
			filechange=y
		fi
	fi
}
#
#------------------------------------------------------------------
# pre_nwinstcreate function
#
# This function is for LGTpa81539: preparing for running nwinstcreate
#------------------------------------------------------------------
#
pre_nwinstcreate()
{
	NWINST_PERL="${NSR_BIN}/nwinst"
	if [ ! -f ${NWINST_PERL} ]; then
		echo "${NWINST_PERL} not found - check local node's installation"
		myname=`basename $0`
		echo "${myname} exiting"
		exit 1
	fi
	NWINST="${NSR_BIN}/nwinst.sh"
	echo ""
	echo "Creating ${NWINST} file ..."
	cat > ${NWINST} << EOF
#!/bin/sh
${NSR_BIN}/nsr_shutdown -q
PATH=$PATH:/bin
export PATH
FT_DIR=${FT_DIR}
FT_DOMAIN=${FT_DOMAIN}
export FT_DIR FT_DOMAIN
${FT_DIR}/bin/ftPerl ${NWINST_PERL} ${shared_nsr_mntpt} ${vhostname}
if [ \$? -ne 0 ]; then
	echo "Script execution failed."
fi
EOF
	chmod 775 ${NWINST}
	cat ${NWINST}
	echo "Finished creating ${NWINST} file."
	echo ""
}
#
#---------------------------------------------------------------------
# Main Program:
#---------------------------------------------------------------------
#	Determine if creating a cluster installation makes sense....
#	1. Check to see if we've got a cluster utility area, and if
#	   not, we can check to see if the correct package has been
#	   installed.  In that case, we need to prompt the user where
#	   he has installed the cluster utilities, as it's in a non-
#	   standard location.
# 2. Check for the version of the NetWorker binaries. We can # do this by checking what the version is from the LGTOserv # package. # 3. If the "-r" option has been given, execute the remove # procedure and then exit. # 4. Check that the local NSR database directory has not been # created. If this has been done, this install procedure # has either already been executed, or we need to instruct # the user to modify some of the default NetWorker cluster # start/stop scripts. # 5. Finally, check for the ${CLU_TAG} file's existence. If # it exists, we may also be a rerun'd command that needs # some additional user cleanup. # 6. Once all these sanity checks are done, we document the # the information that will be required and ask the user if # we should continue with the changes. #--------------------------------------------------------------------- # # # 0. Initializations: # #--------------------------------------------------------------------- # NOTE: some systems put "Berkeley" utilities in /usr/ucb, others (e.g. # SGI) put them in /usr/bsd. Also, some systems use /usr/etc and other # use /usr/sbin. We include all variants in addition to the path to # this program to be safe. #--------------------------------------------------------------------- mypath="`expr X\"${0}\" : X'\(.*\)/.*' \| X\"${0}\" : X'\(/\)[^/]*$' \| '.'`" PATH=/usr/ucb:/usr/bsd:/bin:/usr/bin:/etc:/usr/etc:/usr/sbin:$PATH export PATH NSR_GLOBAL=/nsr NSR_LOCAL=/nsr.NetWorker.local CLU_TAG=NetWorker.clustersvr NWCLUSTPLPATH=${FT_DIR}/bin/nwclust.pl PRODUCTNAME="Sun StorageTek(TM) Enterprise Backup" VERSION="`echo "7.4.Build.187" | sed 's/\.Build\.[0-9]*[a-z]*//'`" CLUTYPE="X" #for platforms with more than one NW-supported cluster product FTCLI_IMPORT="/usr/sbin/nw_ux.lc.aam5.imp" LCNAME="EMC AutoStart" LCSHORT=EMCas FIVE_CLUVER="5.0" uname=`uname` if [ "X${uname}" = "XSunOS" ]; then ARCH=SOLARIS # Determine whether EMC NetWorker or Sun StorEdge(TM) Enterprise Backup has # been installed and which package to integrate with the cluster software. pkginfo -q LGTOclnt if [ $? -eq 0 ]; then # EMC NetWorker NWCLNT="LGTOclnt" NWSERV="LGTOserv" NSR_BIN="/usr/sbin" fi pkginfo -q SUNWebsc if [ $? -eq 0 ]; then # Sun StorEdge(TM) Enterprise Backup NWCLNT="SUNWebsc" NWSERV="SUNWebss" NSR_BIN="/usr/sbin/nsr" # Turn off version checking for EBS. It breaks maintenance releases # because they are done as patches for EBS. NWVER_BYPASS=true fi CLUINFO_SC=`pkginfo -l SUNWsc 2>/dev/null` #Native Solaris Sun Cluster 2.2 if [ $? -ne 0 ]; then CLUINFO_SC="" fi CLUINFO_SC30=`pkginfo -l SUNWscr 2>/dev/null` #Native Solaris SC 3.0+ if [ $? -ne 0 ]; then CLUINFO_SC30="" fi CLUINFO_LC=`pkginfo -l EMCasa 2>/dev/null` # EMC AutoStart 5.2 if [ $? -ne 0 ]; then CLUINFO_LC=`pkginfo -l LGTOaama 2>/dev/null` # AAM 4.8 to 5.1.2 if [ $? -eq 0 ]; then LCNAME="Availability Manager" LCSHORT=AM else CLUINFO_LC="" fi fi CLUINFO_VCS=`pkginfo -l VRTSvcs 2>/dev/null` # Veritas Cluster Server if [ $? -ne 0 ]; then CLUINFO_VCS="" fi result= if [ \( "${CLUINFO_SC}" != "" -o "${CLUINFO_SC30}" != "" \) -a "${CLUINFO_LC}" != "" ]; then echo "Both Sun Cluster (SC) and ${LCNAME} (${LCSHORT}) Products detected." if [ "X$1" != "X-r" ]; then echo "Only one will be integrated under NetWorker, so you should" echo "first uninstall one. If not uninstalling, you must choose one." 
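			# The choice below maps 1 to ${LCNAME} and 2 to Sun Cluster;
			# the matching CLUTYPE is set further down.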
prompt="Do you wish to continue" default="No" yesno if [ ${result} = n ]; then myname=`basename $0` echo "${myname} exiting" exit 1 fi fi prompt="Which one would you like to configure NetWorker for: 1-${LCSHORT} 2-SC" default=1 while true; do prompt_and_answer; if [ "${result}" = 1 -o "${result}" = 2 ]; then break fi done elif [ "${CLUINFO_LC}" != "" ]; then result=1 elif [ "${CLUINFO_SC30}" != "" ]; then #Do it this way so if both SC2 and SC3 are installed, use SC3. result=3 elif [ "${CLUINFO_SC}" != "" ]; then result=2 elif [ "${CLUINFO_VCS}" != "" ]; then result=4 fi if [ "${result}" = 1 ]; then CLUTYPE="LC" MIN_CLUVER="4.8" #not currently enforced CLUINFO="${CLUINFO_LC}" CLUVER="`echo "${CLUINFO}" | awk '/VERSION/ { print $2 }'`" if [ "${CLUVER}" -ge "${FIVE_CLUVER}" ]; then FTCLI_IMPORT="/usr/sbin/nw_ux.lc.aam5.imp" else FTCLI_IMPORT="/usr/sbin/nw_ux.lc.imp" fi LCMAP="/usr/sbin/lcmap" elif [ "${result}" = 2 ]; then CLUTYPE="SC" ABS_CLUVER="2.2" CLUINFO="${CLUINFO_SC}" find_utility /opt/SUNWcluster/bin haget HAGET=${result} find_utility `dirname ${HAGET}` hareg HAREG=${result} elif [ "${result}" = 3 ]; then CLUTYPE="SC30" MIN_CLUVER="3.0.0" #not currently enforced CLUINFO="${CLUINFO_SC30}" LCMAP="/usr/sbin/lcmap" LCMAPSTATIC="${NSR_BIN}/lcmap.sc30" PATH=$PATH:/usr/cluster/bin export PATH elif [ "${result}" = 4 ]; then CLUTYPE="VCS" MIN_CLUVER="3.5" CLUINFO="${CLUINFO_VCS}" LCMAP="/usr/sbin/lcmap" LCMAPSTATIC="${NSR_BIN}/lcmap.vcs" if [ "${VCS_HOME}" = "" ]; then VCS_HOME=/opt/VRTSvcs fi if [ "${VCS_CONF}" = "" ]; then VCS_CONF=/etc/VRTSvcs fi PATH=$PATH:${VCS_HOME}/bin export PATH fi CLUVER="`echo "${CLUINFO}" | awk '/VERSION/ { print $2 }'`" NWVER="`pkginfo -l ${NWCLNT} | awk '/VERSION/ { print $2 }'`" NWVER="`echo "${NWVER}" | sed 's/\.Build\.[0-9]*[a-z]*//' | sed 's/,REV=[0-9]*[a-z]*//'`" elif [ "X${uname}" = XHP-UX ]; then ARCH="HPUX" NSR_BIN="/opt/networker/bin" CLUTYPE= CLUINFO_MCSG=`swlist -l product | grep -i 'Service.*Guard'` CLUINFO_LM=`swlist | grep 'Lock Manager'` CLUINFO_LC=`swlist | grep 'EMCasa'` # EMC AutoStart 5.2 if [ -z "${CLUINFO_LC}" ]; then CLUINFO_LC=`swlist | grep 'LGTOaama'` # AAM 4.8 to 5.1.2 if [ ! -z "${CLUINFO_LC}" ]; then LCNAME="Availability Manager" LCSHORT=AM fi fi # Since MCSG and LM native cluster s/w cannot be installed at the same # time don't have to check for that instance. Just check for LC with either MCSG or LM. if [ ! -z "${CLUINFO_LC}" -a \( ! -z "${CLUINFO_MCSG}" -o ! -z "${CLUINFO_LM}" \) -a "X$1" != "X-r" ]; then echo "Both native MCSG/LM clustering s/w and ${LCNAME} clustering s/w have been detected." echo "Cluster aware NetWorker should be installed in only one clustering environment." echo "Either abort this NetWorker cluster setup and uninstall one clustering s/w," echo "or continue with this setup and choose one of the clustering s/w to which NetWorker" echo "should be integrated into." prompt="Do you want to continue with the installation" default="n" yesno if [ "${result}" = "n" ]; then echo "Aborting NetWorker cluster setup" exit 1 fi prompt="Choose one of the following cluster software to configure NetWorker 1-${LCSHORT} or 2-MCSG/LM" default=1 while true; do prompt_and_answer if [ "${result}" = "1" -o "${result}" = "2" ]; then break fi done if [ "${result}" = "1" ]; then CLUINFO_MCSG= CLUINFO_LM= else CLUINFO_LC= fi elif [ ! -z "${CLUINFO_LC}" -a \( ! -z "${CLUINFO_MCSG}" -o ! -z "${CLUINFO_LM}" \) -a "X$1" = "X-r" ]; then echo "Both native MCSG/LM clustering s/w and ${LCNAME} clustering s/w have been detected." 
echo "Cluster aware NetWorker should be removed or unconfigured from only one clustering" echo "s/w at a time." echo "Continuing with this NetWorker cluster de-config will require you to choose one" echo "clustering s/w from which NetWorker cluster config will be de-configured." prompt="Do you want to continue with the removal or de-configuration?" default="n" yesno if [ "${result}" = "n" ]; then echo "Aborting NetWorker cluster de-config." exit 1 fi prompt="Choose one of the following cluster software. 1-${LCSHORT} or 2-MCSG/LM" default=1 while true; do prompt_and_answer if [ "${result}" = "1" -o "${result}" = "2" ]; then break fi done if [ "${result}" = "1" ]; then CLUINFO_MCSG= CLUINFO_LM= else CLUINFO_LC= fi elif [ -z "${CLUINFO_LC}" -a -z "${CLUINFO_MCSG}" -a -z "${CLUINFO_LM}" ]; then echo "No clustering software has been detected." echo "Aborting NetWorker cluster setup." exit 1 fi if [ ! -z "${CLUINFO_MCSG}" ]; then echo "MC Service Guard clustering software has been detected." CLUINFO="${CLUINFO_MCSG}" CLUTYPE="MCSG" MIN_CLUVER="A.10.05" CLUVER="$(/usr/bin/what /usr/lbin/cmcld | /usr/bin/grep "Date" | \ /usr/bin/egrep '[AB]\...\...|NTT\...\...' | \ cut -f2 -d" ")" elif [ ! -z "${CLUINFO_LM}" ]; then echo "Lock manager clustering software has been detected." CLUINFO="${CLUINFO_LM}" CLUTYPE="LM" MIN_CLUVER="A.10.05" CLUVER="$(/usr/bin/what /usr/lbin/cmcld | /usr/bin/grep "Date" | \ /usr/bin/egrep '[AB]\...\...|NTT\...\...' | \ cut -f2 -d" ")" elif [ ! -z "${CLUINFO_LC}" ]; then echo "${LCNAME} clustering software has been detected." CLUINFO="${CLUINFO_LC}" CLUTYPE="LC" MIN_CLUVER="4.8" CLUVER="`echo "${CLUINFO}" | awk '{ print $2 }'`" LCMAP="/opt/networker/bin/lcmap" if [[ "${CLUVER#.}" -ge "${FIVE_CLUVER#.}" ]]; then FTCLI_IMPORT="/opt/networker/bin/nw_ux.lc.aam5.imp" else FTCLI_IMPORT="/opt/networker/bin/nw_ux.lc.imp" fi fi NWVER="`swlist | awk '/NetWorker/ { if ($1 == "NetWorker") print $2 }'`" NWVER="`echo "${NWVER}" | sed 's/\.Build\.[0-9]*[a-z]*//'`" elif [ "X${uname}" = XOSF1 ]; then ARCH=DIGITAL # For compatibility, make sure this and the packaging (LPS) code # will come up with exactly the same VERSION string. 
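	# For example, VERSION "7.4" parses to MAJOR=7, MINOR=4, INCR=0 and
	# becomes "740"; a five-character value such as "7.4.1" picks up
	# INCR=1 and becomes "741".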
if [ `expr index $VERSION .` -ne 0 ] then MAJOR=`expr substr $VERSION 1 1` MINOR=`expr substr $VERSION 3 1` if [ `expr length $VERSION` -eq 5 ] then INCR=`expr substr $VERSION 5 1` else INCR=0 fi else MAJOR=9 MINOR=9 INCR=9 fi VERSION=${MAJOR}${MINOR}${INCR} # which fails for DIGITAL NSR_BIN="/usr/opt/networker/bin" NSR_LOCAL="/var/cluster/members/{memb}/nsr" NSR_LINK="/cluster/members/{memb}/nsr" CLUALIAS="`/usr/sbin/clu_get_info -full | grep "Cluster.*information" | \ awk '/Cluster/ { print $5 }'`" OSFBASEMIN="505" CLUVERMIN="505" NWVER_OSFBASE_CLUINFO="`setld -i | grep -E \"((^|[[:space:]])OSFBASE[0-9]+[[:space:]]+installed|(^|[[:space:]])LGTOCLNT[0-9]+[[:space:]]+installed|(^|[[:space:]])TCRBASE[0-9]+[[:space:]]+installed)\" | awk '/OSFBASE/ { print $1 } /LGTOCLNT/ { print $1 } /TCRBASE/ { print $1 }'`" NWVER="`echo "${NWVER_OSFBASE_CLUINFO}" | sed -n '/LGTOCLNT/p'`" OSFBASE="`echo "${NWVER_OSFBASE_CLUINFO}" | sed -n '/OSFBASE/p'`" CLUINFO="`echo "${NWVER_OSFBASE_CLUINFO}" | sed -n '/TCRBASE/p'`" NWVER="`echo "${NWVER}" | sed 's/LGTOCLNT//'`" OSFBASE="`echo "${OSFBASE}" | sed 's/OSFBASE//'`" if [ ${OSFBASE} -lt ${OSFBASEMIN} ]; then echo "% This script is not running on Tru64 ${OSFBASE}" exit 1 fi # 5.0a has a sym link as /etc/syslog.conf SYSLOG="/cluster/members/{memb}/etc/syslog.conf" LGTO_EDIT_BEGIN="%LGT${VERSION}_BEGIN%" LGTO_EDIT_END="%LGT${VERSION}_END%" LGTO_TMP1_FILE=/tmp/lgto_tmp_1.$$ elif [ "X${uname}" = XLinux ]; then ARCH="LINUX" NSR_BIN="/usr/sbin" CLUINFO_LC=`rpm -q EMCasa -i 2>/dev/null` # EMC AutoStart 5.2 if [ $? -ne 0 ]; then CLUINFO_LC=`rpm -q LGTOaama -i 2>/dev/null` # AAM 4.8 to 5.1.2 if [ $? -eq 0 ]; then LCNAME="Availability Manager" LCSHORT=AM else CLUINFO_LC="" fi fi #Now only LC is supported on NW for Linux #Did the above to provide for multiple checks for other clustering s/w in future. #Also CLUINFO_LC contains LGTOlca/LGTOaama info or contains nothing. Easy for checking if [ "${CLUINFO_LC}" = "" ]; then echo "${LCNAME} clustering s/w not installed." echo "Aborting cluster aware NW install." exit 1 else CLUTYPE="LC" #The cluster version will be set below in the general code to keep #with previous implementation formats. CLUINFO="${CLUINFO_LC}" #So assign specific cluinfo_lc to generic cluinfo CLUVER="`echo "${CLUINFO}" | awk '/Version/ { print $3 }'`" if [ "${CLUVER}" \< "${FIVE_CLUVER}" ]; then FTCLI_IMPORT="/usr/sbin/nw_ux.lc.imp" else FTCLI_IMPORT="/usr/sbin/nw_ux.lc.aam5.imp" fi MIN_CLUVER="4.8" #not currently enforced LCMAP="/usr/sbin/lcmap" echo "% Detected ${LCNAME} installation." fi #Find out what cluster version is running CLUVER="`echo "${CLUINFO}" | awk '/Version/ { print $3 }'`" NWVER="`rpm -q lgtoclnt -i 2>/dev/null | awk '/Version/ { print $3 }' | awk -F.Build '{ print $1 }'`" # Thats all for Linux (RedHat 6.0) detection stage. elif [ "X${uname}" = XAIX ]; then ARCH=AIX NSR_BIN="/usr/bin" #Native AIX HACMP Cluster CLUINFO_HACMP=`lslpp -LI cluster.base.server.rte 2>/dev/null | awk '{if ($3 == "C") print $0}'` if [ -z "${CLUINFO_HACMP}" ]; then # Check if it's the ES version CLUINFO_HACMP=`lslpp -LI cluster.es.server.rte 2>/dev/null | awk '{if ($3 == "C") print $0}'` fi if [ $? -ne 0 ]; then CLUINFO_HACMP="" fi #AutoStart/AAM CLUINFO_LC=`lslpp -LI EMCas.agent.rte 2>/dev/null | awk '{if ($3 == "C") print $0}'` # EMC AutoStart if [ -z "${CLUINFO_LC}" ]; then CLUINFO_LC=`lslpp -LI LGTOaam.agent.rte 2>/dev/null | awk '{if ($3 == "C") print $0}'` # AAM 4.8 to 5.1.2 if [ ! 
-z "${CLUINFO_LC}" ]; then LCNAME="Availability Manager" LCSHORT=AM fi fi result= if [ "${CLUINFO_HACMP}" != "" -a "${CLUINFO_LC}" != "" ]; then echo "Both HACMP server and ${LCNAME} detected." if [ "X$1" != "X-r" ]; then echo "Only one will be integrated with NetWorker. Either abort" echo "this NetWorker cluster setup and uninstall one" echo "or continue with this setup and choose one clustering s/w." prompt="Do you want to continue with the installation" default="No" yesno if [ ${result} = n ]; then myname=`basename $0` echo "${myname} exiting" exit 1 fi fi if [ "X$1" = "X-r" ]; then prompt="Remove configuration for which cluster product? (1-${LCSHORT} 2-HACMP)" else prompt="Which one would you like to configure NetWorker for: 1-${LCSHORT} 2-HACMP" fi default=1 while true; do prompt_and_answer; if [ "${result}" = 1 -o "${result}" = 2 ]; then break fi done elif [ "${CLUINFO_LC}" != "" ]; then result=1 elif [ "${CLUINFO_HACMP}" != "" ]; then result=2 else echo "No installed clustering software detected." echo "Aborting NW cluster configuration." exit 1 fi if [ "${result}" = 1 ]; then CLUTYPE="LC" MIN_CLUVER="4.8" #not currently enforced CLUINFO="${CLUINFO_LC}" CLUVER="`echo "${CLUINFO}" | awk '{ print $2 }' | awk -F. '{ print $1"."$2 }'`" if [ "${CLUVER}" -ge "${FIVE_CLUVER}" ]; then FTCLI_IMPORT="/usr/bin/nw_ux.lc.aam5.imp" else FTCLI_IMPORT="/usr/bin/nw_ux.lc.imp" fi LCMAP="/usr/bin/lcmap" else CLUTYPE="HACMP" CLUINFO="${CLUINFO_HACMP}" LCMAP="/usr/bin/lcmap" LCMAPSTATIC="/usr/bin/lcmap.hacmp" fi CLUVER="`echo "${CLUINFO}" | awk '{ print $2 }'`" NWVER="`/usr/bin/what /usr/bin/save | grep Release | awk '{ print $2 }'`" NWVER="`echo ${NWVER} | sed 's/\.Build\.[0-9]*[a-z]*//'`" else echo "$0 not written for this platform." exit 1 fi # 1. Check for system's cluster software on this host if [ -z "${CLUINFO}" ]; then echo "% Cluster packages missing. Cannot configure." exit 1 elif [ ${ARCH} = SOLARIS -a ${CLUTYPE} = SC ]; then # Have to make sure that the Sun Cluster software is # exactly the one we expect. PARSED="`echo ${CLUVER} | sed 's/,REV=.*$//'`" if [ ${PARSED} != ${ABS_CLUVER} ]; then echo "% Sun Cluster software is incompatible--expecting SC ${ABS_CLUVER}" exit 1 fi elif [ \( ${ARCH} = LINUX -o ${ARCH} = HPUX -o ${ARCH} = SOLARIS -o ${ARCH} = AIX \) -a ${CLUTYPE} = LC -a "X$1" != "X-r" ]; then if [ "${FT_DOMAIN}" = "" -o "${FT_DIR}" = "" ]; then echo "% You must set the FT_DIR and FT_DOMAIN environment variables" echo "% prior to running this script." exit 1 elif [ ! -f "${NWCLUSTPLPATH}" ]; then echo "% ${NWCLUSTPLPATH} does not exist." 
		exit 1
	fi
elif [ $ARCH = HPUX -a \( ${CLUTYPE} = MCSG -o ${CLUTYPE} = LM \) ]; then
	# Have to make sure the Service Guard or Lock Manager is a supported version
	if [[ "${CLUVER}" = "" ]] || \
	   [[ "${CLUVER#*.}" < "${MIN_CLUVER#*.}" ]]; then
		echo "% Service Guard ${CLUVER} is not supported by NetWorker"
		echo "% Minimum version is ${MIN_CLUVER}"
		exit 1
	fi
elif [ $ARCH = AIX -a ${CLUTYPE} = HACMP ]; then
	hacmp_utils=`lslpp -lc cluster.base.server.utils 2>/dev/null`
	if [ "${hacmp_utils}" = "" ]; then
		# maybe ES is installed
		hacmp_utils=`lslpp -lc cluster.es.server.utils 2>/dev/null`
		if [ "${hacmp_utils}" = "" ]; then
			echo "HACMP Server Utilities must be installed in order"
			echo "to configure NetWorker"
			echo "Aborting NetWorker cluster aware install"
			exit 1
		fi
	fi
elif [ $ARCH = HPUX -a ${CLUTYPE} = LC ]; then
	if [[ "${CLUVER#.}" < "${MIN_CLUVER#.}" ]]; then
		echo "${LCNAME} version ${CLUVER} is not supported by NetWorker"
		echo "Minimum supported version is ${MIN_CLUVER}"
		echo "Aborting NetWorker cluster aware install"
		exit 1
	fi
elif [ $ARCH = DIGITAL ]; then
	# Have to make sure the TruCluster is a supported version
	CLUVER="`echo "${CLUINFO}" | sed 's/TCRBASE//'`"
	if [ ${CLUVER} -lt ${CLUVERMIN} ]; then
		echo "% TruCluster ${CLUVER} is not supported by NetWorker ${NWVER}"
	fi
fi

# 2. What NetWorker version are we using on this host.  It had better
#    be the version which this script was designed for.
#    BYPASS var for quick non-gmake-release-pkg testing, etc.
if [ -z "${NWVER_BYPASS}" -a ${NWVER} != ${VERSION} ]; then
	echo "% NetWorker software incompatible with script."
	exit 1
fi

# 3. If we are requesting the removal of any prior cluster configuration,
#    then do the deconfiguration here and then exit.
MV_TAG=`basename ${NSR_LOCAL}`
if [ "X$1" = "X-r" ]; then
	echo "% Removing cluster configuration on this node."

	# SC 2.2: On Solaris SC, need to shut down and deregister the
	# networker data service agent if it is mastered by this cluster node.
	if [ ${ARCH} = SOLARIS -a ${CLUTYPE} = SC ]; then
		lh=`${HAREG} -q networker -H`
		if [ ! -z "${lh}" ]; then
			MASTERED=`${HAGET} -f mastered`
			MASTERED="`echo "${MASTERED}" | egrep ${lh}`"
			if [ ! -z "${MASTERED}" ]; then
				echo "Turning off the networker HA agent..."
				${HAREG} -n networker
				echo "Unregistering the networker HA agent..."
				${HAREG} -u networker
			fi
		fi
	fi

	# SC 3.0+: Want to deregister LGTO client and server resource types.
	# All instances must already be deleted.  If they're not, it'll fail.
	if [ ${ARCH} = SOLARIS -a ${CLUTYPE} = SC30 ] ; then
		num_servers=`scrgadm -p |grep LGTO.serv | wc -l`
		num_clients=`scrgadm -p |grep LGTO.clnt | wc -l`
		num_flag=1
		if [ ${num_servers} -gt 1 ] ; then
			echo "There is still an instance of the NetWorker server"
			echo "configured with the Resource Group Manager."
			num_flag=0
		fi
		if [ ${num_clients} -gt 1 ] ; then
			echo "There is still one (or more) instances of the NetWorker"
			echo "client configured with the Resource Group Manager."
			num_flag=0
		fi
		if [ ${num_flag} -eq 0 ] ; then
			echo "All instances of the LGTO.serv and LGTO.clnt"
			echo "resource types must be removed from the RGM before the"
			echo "NetWorker configuration can be removed."
			echo "Refer to the scrgadm(1m) command."
			exit 1
		else
			echo "Removing LGTO client and server types."
			scrgadm -r -t LGTO.clnt
			scrgadm -r -t LGTO.serv
			echo "Removing ${LCMAP} file."
			rm -f ${LCMAP}
		fi
	fi

	# VCS: Want to remove NWClient agent and resource types.
	# All the resources and resource type should be deleted from the
	# VCS configuration by the user, since it requires VCS configuration
	# to be writable.
Fail otherwise if [ ${ARCH} = SOLARIS -a ${CLUTYPE} = VCS ] ; then hatype -list | grep NWClient >/dev/null 2>&1 if [ $? -eq 0 ] ; then echo "NetWorker Client (NWClient) resource type" echo "is present in the cluster configuration." echo "NetWorker configuration cannot be removed." echo "Remove NWClient and all its instances from the" echo "VCS configuration. Refer to hares and hatype" echo "commands." exit 1 else echo "Removing NWClient agent and resource type." if [ -d ${VCS_HOME}/bin/NWClient ] ; then rm -rf ${VCS_HOME}/bin/NWClient fi if [ -f ${VCS_CONF}/conf/NWClient.cf ] ; then rm -f ${VCS_CONF}/conf/NWClient.cf fi echo "Removing ${LCMAP} file." rm -f ${LCMAP} fi fi # LC on Solaris, Linux, HPUX, or AIX, remove the NW bin lcmap file process_list="ansrd nsrd" find_processes if [ ! -z "${active_processes}" ]; then echo "NetWorker Server is still running." echo "Cannot perform NetWorker cluster deconfiguration" echo "while NetWorker Service is running." echo "Take NetWorker service or Resource Group offline" echo "and re-execute the networker.cluster -r" exit 1 fi if [ \( ${ARCH} = LINUX -o ${ARCH} = SOLARIS -o ${ARCH} = HPUX -o ${ARCH} = AIX \) -a ${CLUTYPE} = LC ]; then echo "Removing ${LCMAP} file." rm -f ${LCMAP} fi # HACMP on AIX, remove the NW bin lcmap file if [ ${ARCH} = AIX -a ${CLUTYPE} = HACMP ]; then echo "Removing ${LCMAP} file." rm -f ${LCMAP} fi # TruCluster 5.0a is different if [ $ARCH != DIGITAL ]; then # 3b. Now, remove the CLUSTER TAG file. if [ -f "${NSR_BIN}/${CLU_TAG}" ]; then rm -f ${NSR_BIN}/${CLU_TAG} elif [ "X$2" != "X" ]; then if [ -f "$2/${CLU_TAG}" ]; then rm -f $2/${CLU_TAG} fi fi # 3c. Next, set the /nsr, ${NSR_GLOBAL}, path back to where it # was before if [ -d ${NSR_LOCAL} -o -h ${NSR_LOCAL} ]; then # Check first for NSR_LOCAL before deleting -- to guard # against a second "networker.cluster -r" without an # intervening "networker.cluster" rm -f ${NSR_GLOBAL} fi if [ -f "${NSR_BIN}/${MV_TAG}" ]; then # This marked the config step that simply moved ${NSR_GLOBAL} # file into ${NSR_LOCAL}. mv ${NSR_LOCAL} ${NSR_GLOBAL} rm -f "${NSR_BIN}/${MV_TAG}" elif [ -d ${NSR_GLOBAL}.saved -o -h ${NSR_GLOBAL}.saved -o \ -f ${NSR_GLOBAL}.saved ]; then # Could be a user-created file mv ${NSR_GLOBAL}.saved ${NSR_GLOBAL} fi if [ -h ${NSR_LOCAL} ]; then rm -f ${NSR_LOCAL} fi else # If needed shut down any NetWorker services process_list="nsrexecd" find_processes if [ ! -z "${active_processes}" ]; then echo "Shutting down NetWorker services..." ${NSR_BIN}/nsr_shutdown -q fi # Make the syslog.conf changes if [ -f ${SYSLOG} ]; then sed "/${LGTO_EDIT_BEGIN}/,/${LGTO_EDIT_END}/d" ${SYSLOG} > ${LGTO_TMP1_FILE} mv ${LGTO_TMP1_FILE} ${SYSLOG} restart_syslog fi if [ -d ${NSR_LOCAL} ]; then rm -rf ${NSR_LOCAL} fi if [ -h ${NSR_LINK} ]; then rm -f ${NSR_LINK} fi echo "% The cluster configuration specific to this node has" echo "% been removed. If all the nodes in the cluster have" echo "% been unconfigured, you can remove your cluster-wide" echo "% NetWorker configuration--NetWorker's cluster action" echo "% script and application resource profile, along with the" echo "% symbolic links and CDSLs for your /nsr configuration" echo "% directories." if [ -f ${NSR_BIN}/nsrd ]; then echo "% This step will NOT remove the cluster-wide /nsr" echo "% directory which stores your server's configuration." fi prompt="Do you wish to remove NetWorker's cluster-wide configuration?" 
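	# Per the message above, the cluster-wide pieces (the CAA action
	# script, the application resource profile, and the shared /nsr
	# links) should be removed only after every node has been
	# de-configured.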
default="No" yesno if [ ${result} = n ]; then myname=`basename $0` echo "${myname} exiting" exit 0 fi # cluster removal steps if [ -f ${NSR_BIN}/nsrd ]; then if [ -h /var/cluster/caa/script/networker.scr -o -f /var/cluster/caa/script/networker.scr ]; then rm -f /var/cluster/caa/script/networker.scr fi fi if [ -h ${NSR_GLOBAL} ]; then rm -f ${NSR_GLOBAL} fi if [ -f "${NSR_BIN}/${CLU_TAG}" ]; then rm -f ${NSR_BIN}/${CLU_TAG} fi if [ -f /var/cluster/caa/profile/networker.cap ]; then rm -f /var/cluster/caa/profile/networker.cap fi echo echo "% NetWorker's cluster-wide configuration has been removed." if [ -f ${NSR_BIN}/nsrd ]; then echo echo "% If you wish, you may now remove the cluster-wide /nsr" echo "% directory, /${CLUALIAS}/nsr, which stores your" echo "% server's configuration." echo "% WARNING: This will delete your indexes and your" echo "% NetWorker client resource information!" prompt="Do you wish to remove the cluster-wide /nsr directory?" default="No" yesno if [ ${result} = n ]; then myname=`basename $0` echo echo "${myname} exiting" exit 0 fi if [ -d "/${CLUALIAS}/nsr" ]; then echo echo "% Removing cluster-wide /nsr directory." rm -rf "/${CLUALIAS}/nsr" echo echo "% The cluster-wide /nsr directory has been removed." fi fi fi exit 0 fi # 4. ${NSR_LOCAL} contains the local directory which the cluster start/stop # scripts assume contain the local NetWorker databases. if [ -d ${NSR_LOCAL} -o -h ${NSR_LOCAL} ]; then echo "% ${NSR_LOCAL} exists." echo "% We require ${NSR_LOCAL} be non-existent. Please correct." echo "% (perhaps you have already configured for clustering?)" exit 1 fi # 5. Check for the ${CLU_TAG} file. In doing so, we set the ${NSR_BIN} # variable for later use. if [ ! -f "${NSR_BIN}/nsrexecd" ]; then echo "Cannot locate the nsrexecd binary." default= prompt="In what directory are the NetWorker binaries installed" while true; do prompt_and_answer; if [ -f ${result}/nsrexecd ]; then break fi echo "Still cannot locate nsrexecd..." done NSR_BIN=${result} fi # For TruCluster 5.0a this script should run also when the TAG file exits if [ $ARCH != DIGITAL ]; then if [ -f "${NSR_BIN}/${CLU_TAG}" ]; then echo "% ${NSR_BIN}/${CLU_TAG} exists." echo "% We require ${CLU_TAG} be non-existent. Please correct." echo "% (perhaps you have already configured for clustering?)" exit 1 fi fi # 6. Let's make sure this is what the user desires. if [ ${ARCH} = SOLARIS -a ${CLUTYPE} = SC ]; then echo "------------------------------------------------------------------" echo "Sun Cluster 2.x installations have a hierarchy of cluster objects" echo "which define high available data services. Each data service is" echo "associated with a logical host that encapsulates services and disk" echo "groups which may fail over from one physical host to the next," echo "depending on the availability and health of the machine it is" echo "running upon. When a failover occurs, this logical host moves," echo "taking with it its data services and disk groups". echo "" echo "To complete this procedure, one must have a logical host defined" echo "on which the ${PRODUCTNAME} server can be entered" echo "------------------------------------------------------------------" echo "" elif [ ${ARCH} = SOLARIS -a ${CLUTYPE} = SC30 ]; then echo "------------------------------------------------------------------" echo "Sun Cluster 3.x extends the Solaris operating environment into a" echo "cluster operating system. 
A cluster is a collection of loosely" echo "coupled computing nodes that provide a single client view of" echo "network services or applications." echo "" echo "To complete this procedure, one must have a logical host defined" echo "on which the ${PRODUCTNAME} server can be entered" echo "------------------------------------------------------------------" echo "" elif [ ${ARCH} = SOLARIS -a ${CLUTYPE} = VCS ]; then echo "------------------------------------------------------------------" echo "Veritas Cluster Server is a high availability product. " echo "" echo "Only cluster-aware NetWorker Client is supported in the cluster." echo "" echo "Make sure there are no user-defined resource types that start with \"IP\"" echo "" echo "in Veritas Cluster Server." echo "------------------------------------------------------------------" echo "" elif [ ${ARCH} = HPUX -a ${CLUTYPE} = MCSG ]; then echo "------------------------------------------------------------------" echo "MC/ServiceGuard is a high availability product for HP-UX " echo "which defines high available applications. Each application is" echo "organized into a package that is associated with an IP address" echo "and encapsulates services and resources which may failover from" echo "one physical host to the next, depending on the availability and" echo "health of the machine it is running upon. When a failover occurs," echo "the package moves, taking with it its data services and disk groups". echo "" echo "To complete this procedure, one must have a package defined" echo "on which the ${PRODUCTNAME} server can be entered" echo "------------------------------------------------------------------" echo "" elif [ ${ARCH} = HPUX -a ${CLUTYPE} = LM ]; then echo "------------------------------------------------------------------" echo "Lock Manager is a high availability product for HP-UX with OPS support" echo "which defines high available applications. Each application is" echo "organized into a package that is associated with an IP address" echo "and encapsulates services and resources which may failover from" echo "one physical host to the next, depending on the availability and" echo "health of the machine it is running upon. When a failover occurs," echo "the package moves, taking with it its data services and disk groups". echo "" echo "To complete this procedure, one must have a package defined" echo "on which the ${PRODUCTNAME} server can be entered" echo "------------------------------------------------------------------" echo "" elif [ \( ${ARCH} = LINUX -o ${ARCH} = SOLARIS -o ${ARCH} = HPUX -o ${ARCH} = AIX \) -a ${CLUTYPE} = LC ]; then echo "---------------------------------------------------------------------" echo "${LCNAME} ${CLUVER} installations have a hierarchy of cluster objects" echo "which define high available applications. Each application service is" echo "associated with a resource group that encapsulates services, IPs and" echo "data sources, which may fail over from one physical host to the next," echo "depending on the availability and health of the machine it is" echo "running upon. When a failover occurs, this logical host--aka" echo "resource group--moves, carrying its IP addresses and data sources". echo "" echo "One must define a NetWorker resource group with an IP and shared data" echo "source, which contains the shared /nsr directory, before using" echo "NetWorker. This can be done before or after running this script." 
	echo "The optional ftcli-import template file $FTCLI_IMPORT can be"
	echo "used for this purpose."
	echo "---------------------------------------------------------------------"
	echo ""
elif [ ${ARCH} = AIX -a ${CLUTYPE} = HACMP ]; then
	echo "-----------------------------------------------------------------"
	echo "HACMP is a high availability product for AIX."
	echo "It defines failover applications which may move from one node to"
	echo "another, depending on the availability and health of the machine"
	echo "it is running upon."
	echo "-----------------------------------------------------------------"
elif [ ${ARCH} = DIGITAL ]; then
	echo "-----------------------------------------------------------------"
	echo "TruCluster is a high availability product for Tru64 Unix."
	echo "It defines failover applications which may move from one node to"
	echo "another, depending on the availability and health of the machine"
	echo "it is running upon."
	echo "The NetWorker Client and NetWorker Server have the default cluster"
	echo "identity."
	echo "-----------------------------------------------------------------"
fi

prompt="Do you wish to continue?"
default="Yes"
yesno
if [ ${result} = n ]; then
	myname=`basename $0`
	echo "${myname} exiting"
	exit 1
fi
#
#---------------------------------------------------------------------
# NOTE: We've now figured that we intend to reconfigure the nsrd to be
# a clustered NetWorker service.
#---------------------------------------------------------------------
#
#
#---------------------------------------------------------------------
# If needed shut down any NetWorker services
#---------------------------------------------------------------------
#
process_list="ansrd nsrd nsrexecd"
find_processes
if [ ! -z "${active_processes}" ]; then
	echo "Shutting down NetWorker services..."
	${NSR_BIN}/nsr_shutdown -q
fi
#
#---------------------------------------------------------------------
# The rule here is that if there is a /nsr directory, then move it to
# be ${NSR_LOCAL}.  If the user has installed the local NSR database
# somewhere other than /nsr, then ${NSR_LOCAL} will be a link to that
# local database directory.
#---------------------------------------------------------------------
#
if [ $ARCH != DIGITAL ]; then
	# DIGITAL has a default nsr_local database
	if [ -d ${NSR_GLOBAL} -o -h ${NSR_GLOBAL} ]; then
		default=${NSR_GLOBAL}
	elif [ -d /var/nsr -o -h /var/nsr ]; then
		default="/var/nsr"
	else
		default=
	fi
	while true ; do
		prompt="Enter directory where local NetWorker database is installed"
		prompt_and_answer
		if [ -d ${result} ]; then
			NSR_DIR=${result}
			break
		fi
		echo "You have not specified a valid directory"
		default=
	done
	if [ ${NSR_DIR} = ${NSR_GLOBAL} ]; then
		# move the ${NSR_GLOBAL} to ${NSR_LOCAL}, and mark this as such
		# with an auxiliary file (for the remove procedure).
		mv ${NSR_DIR} ${NSR_LOCAL}
		touch "${NSR_BIN}/${MV_TAG}"
	else
		ln -s ${NSR_DIR} ${NSR_LOCAL}
	fi
	# Check for any ${NSR_GLOBAL}, including user-created file
	if [ -d ${NSR_GLOBAL} -o -h ${NSR_GLOBAL} -o -f ${NSR_GLOBAL} ]; then
		# Since we still have ${NSR_GLOBAL}, save it somewhere.
		mv ${NSR_GLOBAL} ${NSR_GLOBAL}.saved
	fi
	ln -s ${NSR_LOCAL} ${NSR_GLOBAL}
else
	mkdir ${NSR_LOCAL}
	ln -s ${NSR_LOCAL} ${NSR_LINK}
	if [ !
-h ${NSR_GLOBAL} ]; then if [ -d ${NSR_GLOBAL} ]; then rm -rf ${NSR_GLOBAL} fi ln -s ${NSR_LINK} ${NSR_GLOBAL} fi fi restart_syslog # #--------------------------------------------------------------------- # Detect whether ${CLU_TAG} was already touched on TruCluster 5.x; # we need to copy the .scr and .cap files once only per installation. #--------------------------------------------------------------------- # if [ ${ARCH} = DIGITAL -a -f ${NSR_BIN}/nsrd ]; then if [ ! -f ${NSR_BIN}/${CLU_TAG} ]; then # Test for symlink and remove if present if [ -h /var/cluster/caa/script/networker.scr ]; then rm /var/cluster/caa/script/networker.scr fi if [ -f /var/cluster/caa/script/networker.scr ]; then nextrev=`ls /var/cluster/caa/script/networker.scr.[0-9]* 2>/dev/null | sed "s/\/var\/cluster\/caa\/script\/networker.scr.//" | awk 'BEGIN { max = 0 } { if ($1 > max) max = $1 } END { print max + 1 }'` mv /var/cluster/caa/script/networker.scr /var/cluster/caa/script/networker.scr.$nextrev fi cp ${NSR_BIN}/networker.action /var/cluster/caa/script/networker.scr chmod 755 /var/cluster/caa/script/networker.scr # cp resource profile file if [ -f /var/cluster/caa/profile/networker.cap ]; then nextrev=`ls /var/cluster/caa/profile/networker.cap.[0-9]* 2>/dev/null | sed "s/\/var\/cluster\/caa\/profile\/networker.cap.//" | awk 'BEGIN { max = 0 } { if ($1 > max) max = $1 } END { print max + 1 }'` mv /var/cluster/caa/profile/networker.cap /var/cluster/caa/profile/networker.cap.$nextrev fi cp ${NSR_BIN}/networker.cap /var/cluster/caa/profile/networker.cap chmod 755 /var/cluster/caa/profile/networker.cap fi fi # #--------------------------------------------------------------------- # Next, touch a file next to the nsrd binary, ${CLU_TAG}. This is # very important, as it signals to the binaries that we are configured # as a clustered NetWorker. #--------------------------------------------------------------------- # touch ${NSR_BIN}/${CLU_TAG} # #--------------------------------------------------------------------- # Now, we need to get the nsrd up and running. This is a platform- # specific step which requires notifying the system cluster software # that nsrd is now available as a data service for the cluster. #--------------------------------------------------------------------- # status=1 if [ ${ARCH} = SOLARIS -a ${CLUTYPE} = SC ]; then NW_START=networker.start NW_STOP=networker.stop FM_START=networker.fm_start FM_STOP=networker.fm_stop while true; do default= prompt="Enter the logical host to register networker data service under" prompt_and_answer # Get rid of the 2nd level and top level domain names # because HAGET and HAREG expect just the hostname lh="`echo ${result} | sed 's/\..*//'`" if [ "${lh}" != "${result}" ]; then echo "The short name \"${lh}\" will be used." fi # Check if host is a valid logical host alllhosts="`${HAGET} -f all_logical_hosts`" alllhosts="`echo "${alllhosts}" | grep ${lh}`" if [ -z "${alllhosts}" ]; then echo " \"${lh}\" is not a valid logical host of this HA configuration." echo "You will have to enter another hostname." prompt="Do you wish to continue" default="Yes" yesno if [ "${result}" = "n" ]; then myname="`basename $0`" echo "${myname} exiting" exit 1 fi continue fi break done # If this cluster node masters the given logical host, register the # networker agent after the other cluster nodes have been configured MASTERED=`${HAGET} -f mastered` MASTERED="`echo "${MASTERED}" | egrep ${lh}`" if [ ! 
-z "${MASTERED}" ]; then default="No" prompt="Have all cluster nodes that can master ${lh} been configured" yesno else result="n" fi # Both server and client daemons are started by Sun Cluster software if [ ${result} = y ]; then # On the cluster node which masters the logical host, we execute # the following command to register NetWorker as a data service # within the cluster on logical host ${lh}.: echo "registering networker data service..." ${HAREG} -r networker -b ${NSR_BIN} \ -m START_NET=${NW_START},STOP_NET=${NW_STOP} \ -m FM_START=${FM_START},FM_STOP=${FM_STOP} \ -h ${lh} -p ${NWCLNT},${NWSERV} -v ${VERSION} # Need to create the following tag file. It's used by the # networker.start script to figure out if the local node is mastering # the logical host. PATHPREFIX_FS=`${HAGET} -f pathprefix -h ${lh}` if [ ! -d ${PATHPREFIX_FS}/nsr ]; then mkdir -p ${PATHPREFIX_FS}/nsr fi echo "This file is needed by HA NetWorker -- DO NOT remove!!" > ${PATHPREFIX_FS}/nsr/.sc_2.2.tag # This next command will actually start NetWorker as a data # service within the cluster, with the nsrd running only on # the mastered node and nsrexecd on all the nodes. echo "activating networker data service..." ${HAREG} -y networker echo "done." # Now to verify what we've done... status=`${HAGET} -f service_is_on -s networker` elif [ ! -z "${MASTERED}" ]; then echo "You must enter the following two commands after all the nodes" echo "on which ${lh} can be mastered have been configured:" echo "" echo "${HAREG} -r networker -b ${NSR_BIN} \\" echo " -m START_NET=${NW_START},STOP_NET=${NW_STOP} \\" echo " -m FM_START=${FM_START},FM_STOP=${FM_STOP} \\" echo " -h ${lh} -p ${NWCLNT},${NWSERV} -v ${VERSION}" echo "" echo "${HAREG} -y networker" echo "" status=0 fi elif [ ${ARCH} = SOLARIS -a ${CLUTYPE} = SC30 ] ; then LCclientonly="y" echo "" echo "Do you wish to configure for both NetWorker server and client?" default=Yes prompt="Yes or No" yesno if [ ${result} = y ]; then LCclientonly="n" echo "" echo "Do you wish to add now the site-specific values for:" echo "NSR_SHARED_DISK_DIR and NSR_SERVICE_ID in ${CONTROL_FILE}" default=Yes prompt="Yes or No" yesno fi if [ ${result} = y ]; then if [ ! -f ${LCMAPSTATIC} ]; then echo "${LCMAPSTATIC} not found - check local node's installation" myname=`basename $0` echo "${myname} exiting" exit 1 fi default= prompt="Enter the Logical Hostname to be used for NetWorker" prompt_and_answer vhostname=${result} default= echo "" echo "The shared directory of the NetWorker server (the nsr" echo "directory) must be on a globally mounted file system." prompt="In what path will this nsr directory be created/located" prompt_and_answer shared_nsr_mntpt=${result} fi echo "" echo "Creating ${LCMAP} file ..." cp ${LCMAPSTATIC} ${LCMAP} chmod 775 ${LCMAP} ls -l ${LCMAP} echo "Finished creating ${LCMAP} file." echo "" # And finally, define the LGTO client and server types to the RGM. # ...But only do it once... scrgadm -p -t LGTO.clnt | grep LGTO.clnt >/dev/null 2>&1 if [ $? -eq 1 ] ; then echo "Defining LGTO.clnt resource type with RGM." scrgadm -a -t LGTO.clnt -f ${NSR_BIN}/LGTO.clnt.rtr fi scrgadm -p -t LGTO.serv | grep LGTO.serv >/dev/null 2>&1 if [ $? -eq 1 -a ${LCclientonly} = n ] ; then echo "Defining LGTO.serv resource type with RGM." scrgadm -a -t LGTO.serv -f ${NSR_BIN}/LGTO.serv.rtr fi # Start client only on each node when configuration is done--server # must be configured first. ${NSR_BIN}/nsrexecd elif [ ${ARCH} = SOLARIS -a ${CLUTYPE} = VCS ] ; then myname=`basename $0` if [ ! 
-f ${LCMAPSTATIC} ]; then
		echo "${LCMAPSTATIC} not found - check local node's installation"
		echo "${myname} exiting"
		exit 1
	fi
	echo ""
	echo "Creating ${LCMAP} file ..."
	cp ${LCMAPSTATIC} ${LCMAP}
	chmod 775 ${LCMAP}
	ls -l ${LCMAP}
	echo "Finished creating ${LCMAP} file."
	echo ""
	# Create the NWClient agent and type definition.
	if [ -d ${VCS_HOME}/bin/NWClient -o -f ${VCS_CONF}/conf/NWClient.cf ] ; then
		echo "NWClient resource type already defined. Please correct."
		echo "${myname} exiting"
		exit 1
	else
		#Create NWClient resource type agent
		mkdir ${VCS_HOME}/bin/NWClient
		ln -s ${VCS_HOME}/bin/ScriptAgent ${VCS_HOME}/bin/NWClient/NWClientAgent
		#Create monitor script for the NWClient agent
		cat > ${VCS_HOME}/bin/NWClient/monitor << EOF
#!/bin/sh
exit 110
EOF
		chmod 744 ${VCS_HOME}/bin/NWClient/monitor
		#Create NWClient resource type declaration file
		if [ ! -f ${NSR_BIN}/NWClient.cf ]; then
			echo "${NSR_BIN}/NWClient.cf not found - check local node's installation"
			echo "Cannot find NWClient resource type declaration file"
			echo "${myname} exiting"
			exit 1
		fi
		cp ${NSR_BIN}/NWClient.cf ${VCS_CONF}/conf/NWClient.cf
		chmod 644 ${VCS_CONF}/conf/NWClient.cf
		chgrp sys ${VCS_CONF}/conf/NWClient.cf
	fi
	# Start client only on each node when configuration is done--server
	# must be configured first.
	${NSR_BIN}/nsrexecd
elif [ \( ${ARCH} = LINUX -o ${ARCH} = SOLARIS -o ${ARCH} = HPUX -o ${ARCH} = AIX \) -a ${CLUTYPE} = LC ]; then
	LCclientonly="y"
	echo ""
	echo "Do you wish to configure for both NetWorker server and client?"
	default=Yes
	prompt="Yes or No"
	yesno
	if [ ${result} = y ]; then
		LCclientonly="n"
		CONTROL_FILE="${NSR_BIN}/nw_ux.lc"
		echo ""
		echo "Do you wish to add now the site-specific values for:"
		echo "NSR_SHARED_DISK_DIR and NSR_SERVICE_ID in ${CONTROL_FILE}"
		default=Yes
		prompt="Yes or No"
		yesno
	fi
	if [ ${result} = y ]; then
		if [ ! -f ${CONTROL_FILE} ]; then
			echo "${CONTROL_FILE} not found - check local node's installation"
			myname=`basename $0`
			echo "${myname} exiting"
			exit 1
		fi
		default=
		prompt="Enter the published virtual hostname to be used for NetWorker"
		prompt_and_answer
		vhostname=${result}
		default=
		echo ""
		echo "The mountpoint of the shared NetWorker directory (the nsr"
		echo "directory) must be the same for all the nodes where NetWorker"
		echo "server can fail over to."
		prompt="In what path will this nsr directory be created/located"
		prompt_and_answer
		shared_nsr_mntpt=${result}
		mv -f ${CONTROL_FILE} ${CONTROL_FILE}.copy
		sed \
		    -e "s,^ *NSR_SHARED_DISK_DIR.*$, NSR_SHARED_DISK_DIR=${shared_nsr_mntpt}," \
		    -e "s,^ *NSR_SERVICE_ID.*$, NSR_SERVICE_ID=${vhostname}," \
		    ${CONTROL_FILE}.copy > ${CONTROL_FILE}
		chmod 755 ${CONTROL_FILE}
		echo ""
		echo "${CONTROL_FILE} has been modified"
		# for LGTpa81539
		pre_nwinstcreate
	fi
	echo ""
	echo "Creating ${LCMAP} file ..."
	cat > ${LCMAP} << EOF
#!/bin/sh
PATH=$PATH:/bin
export PATH
FT_DIR=${FT_DIR}
FT_DOMAIN=${FT_DOMAIN}
export FT_DIR FT_DOMAIN
\$FT_DIR/bin/ftPerl \$FT_DIR/bin/nwclust.pl
EOF
	chmod 775 ${LCMAP}
	cat ${LCMAP}
	echo "Finished creating ${LCMAP} file."
	echo ""
	# Start client only on each node when configuration is done--server
	# is started by the LC/AAM/FTAS manager via manual user step.
	${NSR_BIN}/nsrexecd
elif [ ${ARCH} = AIX -a ${CLUTYPE} = HACMP ]; then
	LCclientonly="y"
	echo ""
	echo "Do you wish to configure for both NetWorker server and client?"
default=Yes prompt="Yes or No" yesno if [ ${result} = y ]; then LCclientonly="n" CONTROL_FILE="${NSR_BIN}/nw_hacmp.lc" echo "" echo "Do you wish to add now the site-specific values for:" echo "NSR_SHARED_DISK_DIR and NSR_SERVERHOST in ${CONTROL_FILE}" default=Yes prompt="Yes or No" yesno fi if [ ${result} = y ]; then if [ ! -f ${CONTROL_FILE} ]; then echo "${CONTROL_FILE} not found - check local node's installation" myname=`basename $0` echo "${myname} exiting" exit 1 fi if [ ! -f ${LCMAPSTATIC} ]; then echo "${LCMAPSTATIC} not found - check local node's installation" myname=`basename $0` echo "${myname} exiting" exit 1 fi default= prompt="Enter the published Service IP Label to be used for NetWorker" prompt_and_answer vhostname=${result} default= echo "" echo "The mountpoint of the shared NetWorker directory (the nsr" echo "directory) must be the same for all the nodes where NetWorker" echo "server can failover to." prompt="In what path will this nsr directory be created/located" prompt_and_answer shared_nsr_mntpt=${result} mv -f ${CONTROL_FILE} ${CONTROL_FILE}.copy sed \ -e "s,^ *NSR_SHARED_DISK_DIR.*$, NSR_SHARED_DISK_DIR=${shared_nsr_mntpt}," \ -e "s,^ *NSR_SERVERHOST.*$, NSR_SERVERHOST=${vhostname}," \ ${CONTROL_FILE}.copy > ${CONTROL_FILE} chmod 755 ${CONTROL_FILE} echo "" echo "${CONTROL_FILE} has been modified" rm ${CONTROL_FILE}.copy fi echo "" echo "Creating ${LCMAP} file ..." cp ${LCMAPSTATIC} ${LCMAP} chmod 775 ${LCMAP} ls -l ${LCMAP} echo "Finished creating ${LCMAP} file." echo "" # Start client only on each node when configuration is done--server # is started by HACMP. (Set env. variable first.) ${NSR_BIN}/nsrexecd elif [ ${ARCH} = HPUX -a \( ${CLUTYPE} = MCSG -o ${CLUTYPE} = LM \) ]; then # Start client only on each node when configuration is done--server # is started by the Service Guard. ${NSR_BIN}/nsrexecd NSR_CLU_PATH=/etc/cmcluster/networker CONTROL_FILE="${NSR_CLU_PATH}/legato.control" PACKAGE_CONF="${NSR_CLU_PATH}/pkg.conf" default="Yes" if [ ! -d ${NSR_CLU_PATH} ]; then mkdir ${NSR_CLU_PATH} echo "Created ${NSR_CLU_PATH}." 
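	# A legato.control file left over from a previous run is kept by
	# default; the prompt below then defaults to not regenerating it.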
elif [ -f ${CONTROL_FILE} ]; then echo "${CONTROL_FILE} has been located" default="No" fi prompt="Do you wish to generate a new control file for NetWorker package" yesno if [ ${result} = y ]; then default= prompt="Enter the IP address to use to monitor this package" prompt_and_answer IP=${result} default= prompt="Enter the IP subnet to monitor for this package" prompt_and_answer SUBNET=${result} default= prompt="Enter the DNS name for NetWorker service" prompt_and_answer SVNAME=${result} MCSGVER="`swlist -l product | awk '/Service.*[Gg]uard/ {print $2}' | cut -f2-3 -d\".\"`" typeset MIN_VERSION="11.14" # Minimum version VxVM is supported by NetWorker if [[ "${MCSGVER#*.}" < "${MIN_VERSION#*.}" ]]; then # Support LVM only, using legato.control template default= prompt="Enter the volume group resource for this package" prompt_and_answer VG=${result} default= prompt="Enter the mounted logical volume name for this package" prompt_and_answer LV=${result} default= prompt="Enter the mount path for this package" prompt_and_answer MTPATH=${result} if [ -f ${CONTROL_FILE} ]; then mv ${CONTROL_FILE} ${CONTROL_FILE}.old fi sed -e "s,^NSR_BIN.*$,NSR_BIN=${NSR_BIN}," \ -e "s/^LGTOMON.*$/LGTOMON=\"legato.monitor ${SVNAME}\"/" \ -e "s,^VG\[0\]=.*$,VG[0]=${VG}," \ -e "s,^LV\[0\]=.*FS\[0\].*;,LV[0]=${LV} ; FS[0]=${MTPATH} ;," \ -e "s/^IP\[0\]=.*$/IP[0]=${IP}/" \ -e "s/^SUBNET\[0\]=.*$/SUBNET[0]=${SUBNET}/" \ ${NSR_BIN}/legato.control > ${CONTROL_FILE} else # Support both LVM and VxVM, using legato.control.1114 template VMNAME="`swlist | awk '/vxvm/ {print $0}'`" if [ ! -z "${VMNAME}" ]; then VxVM= prompt="Select which Volume Management will be used for the disk resource (1-LVM or 2-VxVM)" default=1 while true; do prompt_and_answer if [ "${result}" = "1" -o "${result}" = "2" ]; then break fi done if [ "${result}" = "2" ]; then default= prompt="Enter the VxVM disk group resource for this package" prompt_and_answer VxVM=${result} fi fi VG= if [ -z "${VxVM}" ]; then default= prompt="Enter the volume group resource for this package" prompt_and_answer VG=${result} fi default= prompt="Enter the mounted logical volume name for this package" prompt_and_answer LV=${result} default= prompt="Enter the mount path for this package" prompt_and_answer MTPATH=${result} if [ -z "${VxVM}" ]; then default=hfs prompt="Enter the type of the file system for this package" prompt_and_answer FSTYPE=${result} else default=vxfs prompt="Enter the type of the file system for this package" prompt_and_answer FSTYPE=${result} fi if [ -f ${CONTROL_FILE} ]; then mv ${CONTROL_FILE} ${CONTROL_FILE}.old fi if [ ! -z "${VG}" ]; then sed -e "s,^NSR_BIN.*$,NSR_BIN=${NSR_BIN}," \ -e "s/^LGTOMON.*$/LGTOMON=\"legato.monitor ${SVNAME}\"/" \ -e "s/^\#VGCHANGE=/VGCHANGE=/" \ -e "s/^CVM_ACTIVATION_CMD/\#CVM_ACTIVATION_CMD/" \ -e "s,^\#VG\[0\]=.*$,VG[0]=\"${VG}\"," \ -e "s,^LV\[0\]=\"\"; FS\[0\]=\"\"; FS_MOUNT_OPT\[0\]=\"-o rw\";,LV[0]=\"${LV}\" ; \ FS[0]=\"${MTPATH}\" ; FS_MOUNT_OPT[0]=\"-o rw\"; \# ," \ -e "s,^FS_TYPE\[0\]=.*,FS_TYPE[0]=\"${FSTYPE}\"," \ -e "s/^VXVOL/\#VXVOL/" \ -e "s/^IP\[0\]=.*$/IP[0]=\"${IP}\"/" \ -e "s/^SUBNET\[0\]=.*$/SUBNET[0]=\"${SUBNET}\"/" \ ${NSR_BIN}/legato.control.1114 > ${CONTROL_FILE} elif [ ! 
-z "${VxVM}" ]; then sed -e "s,^NSR_BIN.*$,NSR_BIN=${NSR_BIN}," \ -e "s/^LGTOMON.*$/LGTOMON=\"legato.monitor ${SVNAME}\"/" \ -e "s,^\#VXVM_DG\[0\]=.*$,VXVM_DG[0]=\"${VxVM}\"," \ -e "s,^LV\[0\]=\"\"; FS\[0\]=\"\";,LV[0]=\"${LV}\" ; FS[0]=\"${MTPATH}\" ;," \ -e "s/^FS_TYPE\[0\]=.*/FS_TYPE[0]=\"${FSTYPE}\"/" \ -e "s/^IP\[0\]=.*$/IP[0]=\"${IP}\"/" \ -e "s/^SUBNET\[0\]=.*$/SUBNET[0]=\"${SUBNET}\"/" \ ${NSR_BIN}/legato.control.1114 > ${CONTROL_FILE} fi fi chmod 755 ${CONTROL_FILE} echo "${CONTROL_FILE} has been created" echo "This file may need to be modified prior to starting NetWorker package." echo "Please refer to HPUX Cluster Installation Guide." fi default="Yes" if [ -f ${PACKAGE_CONF} ]; then echo "${PACKAGE_CONF} has been located" default="No" fi prompt="Do you wish to generate a new pkg.conf file for NetWorker package" yesno if [ ${result} = y ]; then default= prompt="Enter IP subnet to monitor for NetWorker package" prompt_and_answer SUBNET=${result} I=1 NODELIMIT=6 while true; do default="no more" prompt="Enter node number $I for this package" prompt_and_answer if [ "${result}" = "${default}" ] ; then break; elif [ `expr X"${result}" : ".* "` -ne 0 ]; then echo "Please enter one name at a time." continue fi echo "NODE_NAME ${result}" >> /tmp/nodenames I=`expr ${I} + 1` if [ ${I} -gt ${NODELIMIT} ]; then echo "Only ${NODELIMIT} nodes possible in an HP cluster." break fi done if [ -f ${PACKAGE_CONF} ]; then mv ${PACKAGE_CONF} ${PACKAGE_CONF}.old fi cmmakepkg -p ${PACKAGE_CONF} sed -e "s/^PACKAGE_NAME/PACKAGE_NAME networker/" \ -e "s,^RUN_SCRIPT[ | ]*$,RUN_SCRIPT ${CONTROL_FILE}," \ -e "s/^RUN_SCRIPT_TIMEOUT.*NO_TIMEOUT/RUN_SCRIPT_TIMEOUT 1200/" \ -e "s,^HALT_SCRIPT[ | ]*$,HALT_SCRIPT ${CONTROL_FILE},"\ -e "s/^HALT_SCRIPT_TIMEOUT.*NO_TIMEOUT/HALT_SCRIPT_TIMEOUT 1200/" \ -e "s/^#SERVICE_NAME.*$/SERVICE_NAME nsrd/" \ -e "s/^#SERVICE_FAIL_FAST_ENABLED.*$/SERVICE_FAIL_FAST_ENABLED NO/" \ -e "s/^#SERVICE_HALT_TIMEOUT.*$/SERVICE_HALT_TIMEOUT 120/" \ -e "s/^#SUBNET.*$/SUBNET ${SUBNET}/" \ -e "/^NODE_NAME.*/r /tmp/nodenames" \ -e "/^NODE_NAME/d" ${PACKAGE_CONF} > /tmp/pkg.conf.tmp mv /tmp/pkg.conf.tmp ${PACKAGE_CONF} rm /tmp/nodenames echo "${PACKAGE_CONF} has been created" echo "This file may need to be modified prior to creating new binary configuration file." echo "Please refer to HPUX Cluster Installation Guide." fi elif [ ${ARCH} = DIGITAL ]; then # Make the syslog.conf changes if [ -f ${SYSLOG} ]; then cp ${SYSLOG} /etc/syslog.conf_nsrsave file="${SYSLOG}" op="install" searchstr="${LGTO_EDIT_BEGIN}" appendstr=\ "# ${LGTO_EDIT_BEGIN} - DO NOT DELETE (`date +%c`) # The following lets NetWorker use the syslog facility daemon.notice /dev/console daemon.notice ${NONREL_LOGSDIR}/logs/messages daemon.notice operator local0.notice ${NONREL_LOGSDIR}/logs/summary local0.alert root,operator # ${LGTO_EDIT_END} - DO NOT DELETE" modfile restart_syslog fi # Start client only on each node ${NSR_BIN}/nsrexecd # mkdir for share /nsr database if [ -f ${NSR_BIN}/nsrd ]; then if [ ! -d "/${CLUALIAS}" ]; then mkdir "/${CLUALIAS}" fi fi fi echo "" if [ ${status} = 1 ]; then if [ "${CLUINFO_LC}" != "" -a "${LCclientonly}" = "n" ]; then echo "You can now use ftconsole to add a resource group for NetWorker" echo "using the template specifications given in $FTCLI_IMPORT." echo "Alternatively, you can modify $FTCLI_IMPORT and then" echo "run ftcli with '-c \"import $FTCLI_IMPORT\"'." echo "" echo "Note that the users who will run NetWorker on the cluster" echo "nodes must be added as secure users of ${LCNAME}." 
		# for LGTpa81539
		echo ""
		echo "Afterwards you should also execute nwinst.sh with system administrator's"
		echo "privilege before nsrexecd starts communicating with other machines."
	else
		echo "NetWorker has been successfully cluster-configured."
	fi
else
	echo "NetWorker has not been successfully cluster-configured."
fi
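#
#---------------------------------------------------------------------
# Usage summary (illustrative; the binary path shown is an example and
# depends on the platform detected above):
#
#	./networker.cluster			configure this node for the detected
#						cluster software
#	./networker.cluster -r			remove a previous cluster configuration
#	./networker.cluster -r /usr/sbin	remove, naming the directory that holds
#						the nsr binaries (optional 2nd param)
#---------------------------------------------------------------------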