#!/bin/ksh
#
# @(#)pdbconf.sh 1.76 98/02/20 SMI
#
#	Copyright (c) 1994 Sun Microsystems, Inc.
#
# scconf - SPARCcluster SC configuration editor script.
#

Myname=`basename $0`

PATH="/opt/SUNWcluster/bin:/usr/5bin:/usr/bin/X11:/usr/local/bin:\
/usr/bsd:/usr/ucb:$PATH"
export PATH

msg_prefix="SUNWcluster.scconf"
cdbpath="/etc/opt/SUNWcluster/conf"
PDBSSA=/opt/SUNWcluster/bin/scssa
PDBCCD=/opt/SUNWcluster/bin/scccd
RHOSTS=/.rhosts
RHOSTS_TMP=${RHOSTS}.scinstall

let callrcmd=0
tmppath=${Tmppath=/usr/tmp}
sedfile=${tmpfile=${tmppath}/sed.tmp$$}
touch $sedfile
ssafile=/var/tmp/scconf.$$
trap "echo \"$Myname: ***interrupt***\" 1>&2; /bin/rm -rf $sedfile; disable_rcmd; exit 1"\
	      1 2 3 15

########################################################################
#
# function enable_rcmd
#
# Create the file /.rhosts if it does not exist and enable remote root
# access for all the nodes in the cluster. If the file already exists,
# copy it to a temporary file, preserving ownership and permissions,
# before altering the original file.
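#
# For example, with hostlist "node0 node1" (hypothetical host names),
# the loop below appends entries of the form:
#
#	node0 root
#	node1 root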

function enable_rcmd
{
  typeset hostlist

  hostlist=$*
  let callrcmd=1
  let create_rhosts=0
  if [[ ! -f "${RHOSTS}" ]]; then
    touch ${RHOSTS}
    let create_rhosts=1
  else
    /usr/bin/cp -p ${RHOSTS} ${RHOSTS_TMP}
  fi
  /usr/bin/chmod u+w ${RHOSTS}

  for name in ${hostlist}; do
    /usr/bin/grep -F "${name} root" ${RHOSTS} > /dev/null 2>&1
    if [[ "$?" -ne 0 ]]; then
      print ${name} root >> ${RHOSTS}
    fi
  done
}

########################################################################
#
# function disable_rcmd
#
# Remove the .rhosts file if it was created in enable_rcmd. If it was
# copied to a temporary file, restore it back with the same permissions
# and ownership.

function disable_rcmd
{
  if (( callrcmd == 1 )); then
    /bin/rm -f ${RHOSTS}
    if (( create_rhosts == 0 )); then
      /bin/mv -f ${RHOSTS_TMP} ${RHOSTS}
    fi
  fi
}


#############################################################################
#
# function add_ccdssa
#
# This function changes the ccd.ccddevice.ssa entry in
# the ccd.database.init file to either "none" or "ccdvol".
#
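# For example (hypothetical): "add_ccdssa ccdvol" rewrites the line
#
#	ccd.ccddevice.ssa: none
# to
#	ccd.ccddevice.ssa: ccdvol
#
# and reinitializes ccdd if it is running.
#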

function add_ccdssa
{
	ccdinit_file=$cdbpath/ccd.database.init

	if [[ $1 = none ]]; then
		echo "/^ccd.ccddevice.ssa:/s|:.*|: none|" >> $sedfile
	elif [[ $1 = ccdvol ]]; then
		echo "/^ccd.ccddevice.ssa:/s|:.*|: ccdvol|" >> $sedfile
	else
		echo "Invalid argument : $1"
		exit 1
	fi

	ccdadm -p ${ccdinit_file}
	retval=$?
	if [[ $retval != 0 ]]; then
		echo "${ccdinit_file} is corrupted"
		exit 1;
	fi

	sed -f $sedfile ${ccdinit_file}.pure > /tmp/tmpccdinit

	ccdadm -x /tmp/tmpccdinit
	mv /tmp/tmpccdinit ${ccdinit_file}

	check=$(/usr/bin/ps -e | /usr/bin/grep ccdd | wc -l)
	if [[ $check -ne "0" ]]; then
		ccdctl reinit $CLUSTNAME 1
		if [[ $? != 0 ]]; then
			echo "ccdd reinit failure"
			rm ${ccdinit_file}.pure
			exit 1;
		fi
	fi

	rm ${ccdinit_file}.pure
}

##############################################################################
#
# function change_ccd_activehosts
#
# This function changes the number of active nodes in the ccd init file.
#
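# For example (hypothetical): "change_ccd_activehosts 3" rewrites the line
#
#	ccd.nservers: 2
# to
#	ccd.nservers: 3
#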

function change_ccd_activehosts
{
	ccdinit_file=$cdbpath/ccd.database.init

	ccdadm -p ${ccdinit_file}	# get "pure" ccdinit file
	retval=$?
	if [[ $retval != "0" ]]; then
		echo "${ccdinit_file} is corrupted"
		exit 1;
	fi

	echo "/^ccd.nservers:/s|:.*|: $1|" >> $sedfile
	sed -f $sedfile ${ccdinit_file}.pure > /tmp/tmpccdinit

	ccdadm -x /tmp/tmpccdinit
	mv /tmp/tmpccdinit ${ccdinit_file}

	check=$(/usr/bin/ps -e | /usr/bin/grep ccdd | wc -l)
	if [[ $check -ne "0" ]]; then
		ccdctl reinit $CLUSTNAME 1
		if [[ $? != 0 ]]; then
			echo "ccdd reinit failure"
			rm ${ccdinit_file}.pure
			exit 1;
		fi
	fi

	rm ${ccdinit_file}.pure

}

##############################################################################
#
# function get_hostid hostname
#
# This function takes the hostname as an argument and returns the unique id
# assigned to the host in the static cluster configuration.
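#
# For example (hypothetical): if the cdb file contains the entry
# "cluster.node.1.hostname: node1", then "get_hostid node1" prints 1.
# Nothing is printed for a host that is not configured.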

function get_hostid
{
  typeset i
  typeset numofnodes
  
  if [[ -n "$1" ]]; then
    numofnodes=$(cdbmatch cluster.number.nodes ${cdbfile})
    let i=0
    while (( i < ${numofnodes} )); do
      hostname=$(cdbmatch cluster.node.$i.hostname ${cdbfile})
      if [[ "${hostname}" = "$1" ]]; then
	print $i
	break
      else
	let i=i+1
      fi
    done
  fi
}

############################################################################

function get_newhost
{
if [[ -n "$newhost" ]]; then
	set `echo $newhost | sed -e "s/,/ /g"`
	echo "/^cluster.node.0.hostname/s|\(.*\):.*$|\1: $1|" >>$sedfile
	echo "/^cluster.node.1.hostname/s|\(.*\):.*$|\1: $2|" >>$sedfile
	echo "/^cluster.node.2.hostname/s|\(.*\):.*$|\1: $3|" >>$sedfile
	echo "/^cluster.node.3.hostname/s|\(.*\):.*$|\1: $4|" >>$sedfile
fi
}

#############################################################################

function validate_interfaces
{
if [[ $# -gt 2 ]]; then
	if [[ "$2" = "$4" ]]; then
		echo 2>&1 "$Myname: Interfaces specified must be unique!"
		exit 3
	fi
fi
}

##############################################################################

function validate_controllers
{
typeset error=0
for i in $*
do
	if echo $i | /bin/grep -v '^[0-9A-Fa-f]\{12\}' >/dev/null 2>&1
	then
		echo 1>&2 "$Myname: Illegal serial number: $i"
		error=1
	fi
done
return ${error}
}

##############################################################################
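#
# function create_ssa_link wwn ssafile
#
# Under the ssa.devdir directory named in the cdb file, recreate a
# symlink named after the SSA's WWN that points at the real SSA device
# path looked up (by serial number pattern) in the given ssafile.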
 
function create_ssa_link
{
wwn=$1
ssafile=$2
 
cdbfile=$cdbpath/$CLUSTNAME.cdb
ssadevdir=$(cdbmatch ssa.devdir ${cdbfile})
sn=$(print $wwn | sed 's/^\(....\)0*/SUNW,pln@[ab]0*\1,/')
realssapath=$(/bin/grep -i $sn ${ssafile} 2>/dev/null)
cd ${ssadevdir}
/bin/rm -rf ${wwn}
ln -s ${realssapath} ${wwn}
}
 
##############################################################################
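#
# function create_disk_link diskid devaddr
#
# Under the ssa.devdir directory named in the cdb file, recreate a
# symlink named after the disk's serial id that points at
# /dev/rdsk/<devaddr>.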
 
function create_disk_link
{
diskid=$1
devaddr=$2
 
cdbfile=$cdbpath/$CLUSTNAME.cdb
ssadevdir=$(cdbmatch ssa.devdir ${cdbfile})
cd ${ssadevdir}
/bin/rm -f ${diskid}
ln -s /dev/rdsk/${devaddr} ${diskid}
}

##############################################################################
#
# function construct_disk_list <cXtYdZs2>:<disk-serial-id>...
#
# This function takes a list of arguments of the form
# "<disk-address>:<serial_id>..."  The disk_address corresponds to the
# physical address of a disk in the form cXtYdZs2 while the serial_id is the
# serial id of the corresponding drive.  The purpose of the function is to
# break up the input list into two different lists - one corresponding to the
# disk addresses (disk_list) and the other to the serial ids (id_list). The
# address and id of a particular disk has corresponding positions on the two
# output lists. This correspondence between the two lists is relied upon by
# the calling function.
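#
# For example (hypothetical values):
#
#	construct_disk_list c1t0d0s2:01234567 c1t1d0s2:89ABCDEF
#
# leaves " c1t0d0s2 c1t1d0s2" in disk_list and " 01234567 89ABCDEF"
# in id_list.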
 
function construct_disk_list
{
  disk_list=""
  id_list=""
 
  for item in $*; do
    disk=${item%%:*}
    serialid=${item##*:}
    disk_list="${disk_list} ${disk}"
    id_list="${id_list} ${serialid}"
  done
 
}
 
################################################################################
#
# function construct_ssa_list <c#>:<WWN>...
#
# This function takes an argument list consisting of <c#>:<WWN>... The c# is the
# controller number on which a particular SSA is present and the WWN is the
# world-wide number of the corresponding SSA. The purpose of this function is to
# break up the input list into two different lists - one corresponding to the
# controller number list - ctlr_list - and the other corresponding to the WWN
# list - ssa_list. The controller number and the WWN have corresponding
# positions on the two output lists. This correspondence is relied upon by the
# calling function.
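#
# For example (hypothetical values):
#
#	construct_ssa_list c2:5080020000001234 c3:5080020000005678
#
# leaves " c2 c3" in ctlr_list and " 5080020000001234 5080020000005678"
# in ssa_list.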
 
function construct_ssa_list
{
  ctlr_list=""
  ssa_list=""
 
  for item in $*; do
    ctlr=${item%%:*}
    ssa=${item##*:}
    ctlr_list="${ctlr_list} ${ctlr}"
    ssa_list="${ssa_list} ${ssa}"
  done
}

###############################################################################
#
# function get_devices nodeA nodeB
#
# For the specified hosts nodeA and nodeB, get the list of SSAs and other
# disks attached to the system, and the root device of the system. 'rsh' is
# used for remote nodes.
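#
# The finddevices output format assumed here is one entry per line:
# "c#:WWN" pairs for "ssa", "cXtYdZs2:serial-id" pairs for "disks", and a
# single device name for "rootdev"; the pairs are split apart below by
# construct_ssa_list and construct_disk_list.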
 
function get_devices
{
  typeset i
  typeset rootdev
  set -A node_array $*
  set -A ctlr_array
  set -A ssa_array
  set -A disks_array
  set -A id_array
  set -A rootdev_array
  set -A rootc_array

  print
  if [[ "$1" = "$2" ]]; then
    print "Getting device information for host $1."
  else
    print "Getting device information for hosts $1 and $2."
  fi
  print -n "This may take a few seconds to a few minutes..."
 
  let i=0
  while (( $i < ${#node_array[*]} )); do
    if [[ ${node_array[i]} = "${localhostname}" ]]; then
      # local node
      
      ssadevlist=$(${FINDDEVICES} ssa ${ssafile} | tr '\012' ' ')
      diskdevlist=$(${FINDDEVICES} disks | tr '\012' ' ')
      rootdev=$(${FINDDEVICES} rootdev | tr -d '\012')
      rootdev_array[i]=$(print ${rootdev})
    else
      # remote node
      
      ssadevlist=$(rsh ${node_array[i]} "${FINDDEVICES} ssa" | tr '\012' ' ')
      diskdevlist=$(rsh ${node_array[i]} "${FINDDEVICES} disks" | tr '\012' ' ')
      rootdev=$(rsh ${node_array[i]} "${FINDDEVICES} rootdev" | tr -d '\012')
      rootdev_array[i]=$(print ${rootdev})
    fi
    
    construct_ssa_list ${ssadevlist}
    ctlr_array[i]=${ctlr_list}
    ssa_array[i]=${ssa_list}
 
    construct_disk_list ${diskdevlist}
    disks_array[i]=${disk_list}
    id_array[i]=${id_list}
    
    rootc_array[i]=${rootdev_array[i]%%t*}
    let i=i+1
  done
 
  print "done."
  print
}

################################################################################
#
# function select_common_devices node1 node2
#
# Select the set of common devices (SSA's and disks in Multipacks) between nodes
# node1 and node2. This function uses the device arrays that have been
# constructed in the get_devices function.
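#
# The result is printed as a space separated set of tokens of the form
# "SSA:<WWN>" and "DISK:<cXtYdZs2>:<serial-id>", for example
# (hypothetical): "SSA:5080020000001234 DISK:c1t0d0s2:01234567".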
 
function select_common_devices
{
  typeset devset
  typeset node1
  typeset node2
  typeset ii
  typeset jj
 
  let node1=$1
  let node2=$2
 
  # Extract the list of common SSAs between node1 and node2
 
  devset=""
  for ssa in ${ssa_array[node1]}; do
    if [[ ${ssa_array[node2]} = *${ssa}* ]]; then
      devset="${devset} SSA:${ssa}"
    fi
  done
 
  # Initialize temporary arrays

  set -A disk_array_node1 ${disks_array[node1]}
  set -A disk_array_node2 ${disks_array[node2]}
  set -A id_array_node1 ${id_array[node1]}
  set -A id_array_node2 ${id_array[node2]}
 
  let ii=0
  while (( $ii < ${#id_array_node1[*]} )); do
    let jj=0
    while (( $jj < ${#id_array_node2[*]} )); do
      if [[ ${id_array_node1[ii]} = ${id_array_node2[jj]} && \
            ${disk_array_node1[ii]} != ${rootdev_array[node1]} && \
            ${disk_array_node2[jj]} != ${rootdev_array[node2]} ]]; then
 
        if [[ ${node_array[node1]} = "${localhostname}" ]]; then
          devset="${devset} DISK:${disk_array_node1[ii]}:${id_array_node1[ii]}"
        else
          devset="${devset} DISK:${disk_array_node2[jj]}:${id_array_node2[jj]}"
        fi
      fi
      let jj=jj+1
    done
    let ii=ii+1
  done
 
  print ${devset}
}
 
###############################################################################
#
# function select_quorum_dev nodeA nodeB devset
#
# Select the quorum device for nodes node1 and node2 from among the set of
# devices in devset. The variable quorumdev is set to the selected quorum
# device. The selected quorum device comes in one of three flavors - <WWN>,
# <WWN>.<disk serial id>, or <disk address>:<disk serial id>
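#
# For example (hypothetical values):
#
#	5080020000001234		an entire SSA
#	5080020000001234.01234567	a disk within an SSA
#	c1t0d0s2:01234567		a directly addressed disk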
 
function select_quorum_dev
{
  typeset devset
  typeset node1
  typeset node2
  typeset qdev
  typeset wwn
 
  node1=0
  node2=1
  shift; shift
  devset=$*
 
  print
  print "Select quorum device for nodes ${node1} (${nodeA}) and ${node2} (${nodeB})."
  print "Type the number corresponding to the desired selection."
  print "For example: 1<CR>"
  print
 
  PS3='Quorum device: '
  select qdev in ${devset}; do
    if [[ -n "${qdev}" ]]; then
      
      if [[ "${qdev}" = SSA:* ]]; then
        wwn=${qdev##SSA:}
        select_qdisk_from_ssa ${wwn} ${node1} ${node2}
 
        if [[ -n "${quorumdiskdev}" ]]; then
          print
          print "Disk ${quorumdiskdev%%:*} with serial id ${quorumdiskdev##*:}"
          print "in SSA ${wwn} has been chosen as the quorum device."
          print
          quorumdev=${wwn}.${quorumdiskdev##*:}
        else
          print
          print "SSA with WWN ${wwn} has been chosen as the quorum device."
          print
          quorumdev=${wwn}
        fi
      else
        qdev=${qdev##DISK:}
        print
        print "Disk ${qdev%%:*} with serial id ${qdev##*:} has been chosen"
        print "as the quorum device."
        print
        quorumdev=${qdev}
      fi
      break
    else
      print
      print "Invalid Selection"
      print
    fi
  done
}
 
###############################################################################
#
# function get_ctlr_for_ssa wwn node
#
# Get the controller number through which the SSA with the specified wwn is
# accessible on the given node.
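#
# For example (hypothetical): if node 0 sees the SSA with WWN
# 5080020000001234 through controller c2, then
# "get_ctlr_for_ssa 5080020000001234 0" prints "c2".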
 
function get_ctlr_for_ssa
{
  typeset ssa
  typeset nodenum
  typeset ii
  typeset ctlrnum
 
  ssa=$1
  nodenum=$2
 
  set -A ssa_array_on_node ${ssa_array[$nodenum]}
  set -A ctlr_array_on_node ${ctlr_array[$nodenum]}
 
  let ii=0
  while (( $ii < ${#ssa_array_on_node[*]} )); do
    if [[ ${ssa_array_on_node[ii]} = ${ssa} ]]; then
      ctlrnum=${ctlr_array_on_node[ii]}
      break
    fi
    let ii=ii+1
  done
 
  print ${ctlrnum}
}
 
################################################################################
#
# function select_qdisk_from_ssa ssa node1 node2
#
# This function provides the option of selecting a quorum disk from the
# specified ssa. The two nodes that are connected to this ssa are also passed as
# input parameters to check whether the root devices of these nodes reside in
# this ssa, and if so, to exclude the corresponding disks from the selection
# process.
 
function select_qdisk_from_ssa
{
  typeset ssa
  typeset node1
  typeset node2
  typeset item
  typeset td
  typeset diskdevaddr
  typeset disksid
  typeset ctlr_node1
  typeset ctlr_node2
  typeset ssadisks
 
  ssa=$1
  node1=$2
  node2=$3
 
  ctlr_node1=$(get_ctlr_for_ssa ${ssa} ${node1})
  ctlr_node2=$(get_ctlr_for_ssa ${ssa} ${node2})
 
  if [[ ${node_array[${node1}]} = "${localhostname}" ]]; then
    ssadisks=$(${FINDDEVICES} disks ${ctlr_node1} | tr '\012' ' ')
  elif [[ ${node_array[${node2}]} = "${localhostname}" ]]; then
    ssadisks=$(${FINDDEVICES} disks ${ctlr_node2} | tr '\012' ' ')
  else
    ssadisks=$(rsh ${node_array[${node2}]} "${FINDDEVICES} disks ${ctlr_node2}" | tr '\012' ' ')
  fi
 
  construct_disk_list ${ssadisks}
 
  # set of disks in the ssa is now in the variable disk_list. First step is to
  # remove the controller number from this list to facilitate easy elimination
  # of the root devices of the two nodes from this list.
 
  disk_list=$(print ${disk_list} | sed -e 's/c[0-9]\{1,\}//g')
 
  # eliminate node1's root disk from this list
 
  if [[ "${ctlr_node1}" = ${rootc_array[${node1}]} ]]; then
    td=${rootdev_array[${node1}]##c[0-9]*([0-9])}
    disk_list=$(print ${disk_list} | sed -e "s/${td}//g")
  fi
 
  # eliminate node2's root disk from this list
 
  if [[ "${ctlr_node2}" = ${rootc_array[${node2}]} ]]; then
    td=${rootdev_array[${node2}]##c[0-9]*([0-9])}
    disk_list=$(print ${disk_list} | sed -e "s/${td}//g")
  fi
 
  # eliminate the "s2" suffix from each item in the remaining list
 
  disk_list=$(print ${disk_list} | sed -e 's/s2//g')
 
  # Force selection of a quorum disk if the ssa contains the root
  # disk of one or both nodes.
 
  if [[ "${ctlr_node1}" = ${rootc_array[${node1}]} || \
        "${ctlr_node2}" = ${rootc_array[${node2}]} ]]; then
    print
    print "The root disk of one or both of ${node_array[${node1}]} and"
    print "${node_array[${node2}]} is in SSA ${ssa}. You *will* have to"
    print "select a disk as the quorum device from this SSA. The root disk(s)"
    print "have already been excluded from the list below."
    print
  else
    disk_list="None ${disk_list}"
    print
    print "You have the option of selecting a quorum disk from SSA ${ssa}."
    print "Choose one of the following disks, identified by their target and"
    print "drive numbers as your quorum device. You may also choose"
    print "option 1 - NONE - to retain the SSA ${ssa} as your quorum device."
    print
  fi
 
  PS3='Quorum disk: '
  select quorumdiskdev in ${disk_list}; do
    if [[ -n "${quorumdiskdev}" ]]; then
      if [[ "${quorumdiskdev}" != "None" ]]; then
        
        if [[ ${node_array[${node1}]} = "${localhostname}" ]]; then
          diskdevaddr=${ctlr_node1}${quorumdiskdev}s2
          disksid=$(${PDBSSA} inquiry ${diskdevaddr})
          
        elif [[ ${node_array[${node2}]} = "${localhostname}" ]]; then
          diskdevaddr=${ctlr_node2}${quorumdiskdev}s2
          disksid=$(${PDBSSA} inquiry ${diskdevaddr})
	  
        else
          diskdevaddr=${ctlr_node2}${quorumdiskdev}s2
          disksid=$(rsh ${node_array[${node2}]} "${PDBSSA} inquiry ${diskdevaddr}")
        fi
        quorumdiskdev=${diskdevaddr}:${disksid}
      else
        quorumdiskdev=""
      fi
      break
    else
      print
      print "Invalid selection."
    fi
  done
}

###############################################################################
#
# This function is called from change_quorum_dev and takes two arguments - the
# new and the old quorum device, respectively. It releases the reservation on
# the old quorum device only if it is successfully able to reserve the new
# quorum device.
 
function reserve_release_qdev
{
typeset newqdev
typeset oldqdev
 
newqdev=$1
oldqdev=$2
 
${PDBSSA} q_reserve ${newqdev}
if [[ "$?" -ne 0 ]]; then
        print
        print "Unable to reserve new quorum device: ${newqdev}"
        print "No changes have been made to the system."
        exit 1
fi

${PDBSSA} release ${oldqdev}
}
 
##############################################################################
#
# function change_quorum_dev nodeA nodeB
#
# This is an interactive function that needs to be run on all
# statically configured nodes of the cluster to change the quorum
# device that is shared between nodeA and nodeB. 
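#
# For example (hypothetical): "scconf clust0 -q node0 node1" selects the
# new quorum device interactively, while
# "scconf clust0 -q -m node0 node1 5080020000001234" sets it manually.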

function change_quorum_dev
{
typeset i
typeset numnodes
typeset nodename
typeset nodelist
typeset nodeA
typeset nodeB
typeset manualmode
typeset nodeA_is_down
typeset nodeB_is_down
typeset nodeA_is_member
typeset nodeB_is_member

let manualmode=0
if [[ "$1" = "-m" ]]; then
  let manualmode=1
  shift
fi
nodeA=$1
nodeB=$2

if [[ "${nodeA}" = "${nodeB}" ]]; then
  print "Two different host names must be specified."
  exit 1
fi

cdbfile=${cdbpath}/${CLUSTNAME}.cdb
FINDDEVICES=/opt/SUNWcluster/bin/finddevices

# find the hostid's of nodeA and nodeB

hostidA=$(get_hostid ${nodeA})
if [[ -z "${hostidA}" ]]; then
  print "Host ${nodeA} is not part of the cluster configuration."
  exit 1
fi

hostidB=$(get_hostid ${nodeB})
if [[ -z "${hostidB}" ]]; then
  print "Host ${nodeB} is not part of the cluster configuration."
  exit 1
fi

if (( manualmode == 1 )); then
  quorumdev=$3
  ${FINDDEVICES} ssa ${ssafile} > /dev/null 2>&1
fi

# find the old quorum device associated with the two hosts

if (( ${hostidA} < ${hostidB} )); then
  oldquorumdev=$(cdbmatch quorumdev.node.${hostidA}.${hostidB} ${cdbfile})
else
  oldquorumdev=$(cdbmatch quorumdev.node.${hostidB}.${hostidA} ${cdbfile})
fi

if [[ "${curr_members}" = *${hostidA}* ]]; then
  let nodeA_is_member=1
else
  let nodeA_is_member=0
fi

if [[ "${curr_members}" = *${hostidB}* ]]; then
  let nodeB_is_member=1
else
  let nodeB_is_member=0
fi

if (( manualmode == 0 )); then

  # enable remote access for all statically configured nodes

  let numnodes=$(cdbmatch cluster.number.nodes ${cdbfile})
  let i=0
  while (( i < numnodes )); do
    nodename=$(cdbmatch cluster.node.${i}.hostname ${cdbfile})
    nodelist="${nodelist} ${nodename}"
    let i=i+1
  done
  enable_rcmd ${nodelist}

  # This is a kludgy synchronization point to give time for slow-coaches to update
  # their rhosts file.

  sleep 8

  # Check whether both hosts are alive and whether the appropriate
  # software is installed on them. If one of them is not, it is not
  # possible to obtain the intersection of the devices attached to the
  # hosts. In this case, we present the list of all devices attached to
  # the host that is up and leave it to the administrator to pick a
  # shared device as the new quorum device, in the hope that they know
  # what they are doing.

  # To avoid handling this situation as a special case in the code, we set
  # nodeA and nodeB equal to each other, so that the intersection of the
  # devices becomes equal to the set of devices attached to either node.

  let nodeA_is_down=0
  let nodeB_is_down=0

  if [[ "${nodeA}" != "${localhostname}" ]]; then
    print -n "Checking host ${nodeA} ..."
    timed_run 10 /usr/sbin/ping ${nodeA}
    if [[ "$?" -ne 0 ]]; then
      print "Not responding?"
      nodeA_is_down=1
    else
      result=$(rsh ${nodeA} "ls ${FINDDEVICES}" 2> /dev/null)
      if [[ "${result}" != "${FINDDEVICES}" ]]; then
	nodeA_is_down=1
      fi
    fi
  fi

  if [[ "${nodeB}" != "${localhostname}" ]]; then
    print -n "Checking host ${nodeB} ..."
    timed_run 10 /usr/sbin/ping ${nodeB}
    if [[ "$?" -ne 0 ]]; then
      print "Not responding?"
      nodeB_is_down=1
    else
      result=$(rsh ${nodeB} "ls ${FINDDEVICES}" 2> /dev/null)
      if [[ "${result}" != "${FINDDEVICES}" ]]; then
	nodeB_is_down=1
      fi
    fi
  fi

  if (( nodeA_is_down == 1 && nodeB_is_down == 1 )); then
    print
    print "Both the specified hosts are unreachable. It is not possible to change"
    print "the quorum device of these hosts at this time."
    disable_rcmd
    exit 1
  fi

  if (( nodeA_is_down == 1 || nodeB_is_down == 1 )); then
    print
    print "One of the specified hosts is either unreachable or permissions"
    print "are not set up or the appropriate software has not been installed"
    print "on it. In this situation, it is not possible to determine the set"
    print "of devices attached to both the hosts."
    print "Note that both private and shared devices for this host will be"
    print "displayed in this mode and the administrator must exercise extreme"
    print "caution in choosing a suitable quorum device."
    print
  fi

  if (( nodeA_is_down == 1 )); then
    get_devices ${nodeB} ${nodeB}
  elif (( nodeB_is_down == 1 )); then
    get_devices ${nodeA} ${nodeA}
  else
    get_devices ${nodeA} ${nodeB}
  fi
  
  common_devices=$(select_common_devices 0 1)

  if [[ -z "${common_devices}" ]]; then
    print "Host ${nodeA} and host ${nodeB} don't share any common devices between them."
    disable_rcmd
    exit 1
  fi

  select_quorum_dev ${nodeA} ${nodeB} ${common_devices}
  #
  # Another sleep for synchronizing between nodes.
  sleep 8
  disable_rcmd
  
fi  # manualmode == 0

# quorumdev is set to one of <WWN>, <WWN>.<DSID>, or <ADDR>:<DSID>

if [[ "${quorumdev}" = *:* ]]; then
  newquorumdev=${quorumdev##*:}
  diskaddr=${quorumdev%%:*}
  
  if [[ "${nodeA}" = "${localhostname}" || "${nodeB}" = "${localhostname}" ]]; then
    create_disk_link ${newquorumdev} ${diskaddr}
  fi
else
  newquorumdev=${quorumdev}
  if [[ "${nodeA}" = "${localhostname}" || "${nodeB}" = "${localhostname}" ]]; then

    if [[ "${newquorumdev}" = *\.* ]]; then
      wwn=${newquorumdev%%.*}
    else
      wwn=${newquorumdev}
    fi
    create_ssa_link ${wwn} ${ssafile}
  fi
fi

# If either nodeA or nodeB is not in the cluster membership, but the other is,
# reserve the new quorum device and release the reservation on the old quorum
# device. 

if (( (nodeA_is_member == 1 && nodeB_is_member == 0) ||
      (nodeA_is_member == 0 && nodeB_is_member == 1) )); then
  if [[ "${nodeA}" = "${localhostname}" || "${nodeB}" = "${localhostname}" ]]; then
    reserve_release_qdev ${newquorumdev} ${oldquorumdev}
  fi
fi

/bin/rm -f ${ssafile}
if (( ${hostidA} < ${hostidB} )); then
  echo "/^quorumdev.node.${hostidA}.${hostidB}/s|\(.*\):.*$|\1: $newquorumdev|"\
>> $sedfile
else
  echo "/^quorumdev.node.${hostidB}.${hostidA}/s|\(.*\):.*$|\1: $newquorumdev|"\
>> $sedfile
fi
}
###############################################################################
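#
# function get_interfaces 0 <if0> 1 <if1>
#
# The argument list is pairs of <interface-number> <interface-name>,
# consumed two at a time by the "shift 2" below; for example
# (hypothetical): "get_interfaces 0 hme1 1 hme2" remaps both private
# network interfaces of host ${hostid} in the cdb file.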

function get_interfaces
{
newinterfaces=$*
if [[ -n "$newinterfaces" ]]; then
	print "newinterfaces = $newinterfaces"
	validate_interfaces $newinterfaces
	set $newinterfaces
	while [ $# -gt 0 ]
	do
		cif=$(cdbmatch cluster.node.${hostid}.if.$1 $cdbfile 2>&1| cut -d" " -f1)
		hif=$(cdbmatch cluster.node.${hostid}.haiface.$1 ${cdbfile} 2>&1| cut -d" " -f1 | sed 's/:.*$//')
		pif=$(cdbmatch cluster.node.${hostid}.ifpath.$1 ${cdbfile} 2>&1|cut -d" " -f1 | sed 's|/.*/||')

		echo "/cluster.node.${hostid}.if.$1/s|${cif}|$2|" >> $sedfile
		echo "/cluster.node.${hostid}.haiface.$1/s|${hif}|$2|">> $sedfile
		echo "/cluster.node.${hostid}.ifpath.$1/s|${pif}|$2|">> $sedfile
		shift 2
	done
fi
}

###############################################################################

function check_cluster_name
{
  typeset default_name
  default_name=$(cat ${cdbpath}/default_clustername)
  if [[ "${default_name}" != "${CLUSTNAME}" ]]; then
    print "Invalid cluster name: ${CLUSTNAME}"
    print "Configured cluster is: ${default_name}"
    exit 2
  fi
  cdbfile=${cdbpath}/${CLUSTNAME}.cdb
}

###############################################################################

function get_udlm_cfile
{
if [[ -n "$newudlmfile" ]]; then
	if [[ -n "$nudlmcfile" ]]; then
		if print "${nudlmcfile}" | grep "^/" > /dev/null 2>&1; then
			echo "/^udlm.oracle.config.file/s|:.*|: $nudlmcfile|" >> $sedfile
		else
			echo 2>&1 "\"${nudlmcfile}\": filename must be an absolute pathname."
			exit 2
		fi
	else
		echo "/^udlm.oracle.config.file/s|:.*|:|" >> $sedfile
	fi
fi
}

###############################################################################

function set_newsteptimes
{
integer base_value

if [[ -n "${steptimes}" ]]; then
	base_value=$(cdbmatch loghost.update_timeout $cdbfile 2>&1| \
		cut -d" " -f1)
	if (( steptimes <= 100 )); then
		print 2>&1 "
WARNING: The timeout value specified \"($steptimes)\" might be too low 
for the cluster to operate properly. Please refer to the SunCluster
documentation for guidelines on adjusting this value based on the
number of logical hosts defined."
	fi
	if (( steptimes >= base_value )); then
		echo "/^cmm.transition.step10.timeout/s|:.*|: $steptimes|" >> $sedfile
		echo "/^cmm.transition.step11.timeout/s|:.*|: $steptimes|" >> $sedfile
	else
		print 2>&1 "
The timeout value specified \"($steptimes)\" should be at least 
equal to the value of \"$base_value\" (loghost.update_time).

The timeout value has not been modified in the cluster database."
		exit 2
	fi
fi
}

###############################################################################

function set_newloghosttimes
{
integer steptimes

if [[ -n "${loghosttimes}" ]]; then
	steptimes=$(cdbmatch cmm.transition.step10.timeout $cdbfile 2>&1| \
		cut -d" " -f1)
	if (( steptimes < loghosttimes )); then
		print 2>&1 "
WARNING: The timeout value specified \"($loghosttimes)\" is greater than
the current value of the step timeouts \"($steptimes)\". Please refer 
to the SunCluster documentation for guidelines on adjusting this 
value based on the number of logical hosts defined.

The timeout value has not been modified in the cluster database."
		exit 2
	fi
	echo "/^loghost.update_timeout/s|:.*|: $loghosttimes|" >> $sedfile
fi
}

###############################################################################

function get_newnetaddr
{
if [[ -n "${nodeid}" ]]; then
  if [[ "${nodeid}" = @(0|1|2|3) ]]; then
    echo "/^cluster.node.${nodeid}.physaddr/s|:.*|: $newnetaddr|" >> $sedfile
  else
    echo 2>&1 "${nodeid}: Illegal nodeid. Should be 0, 1, 2, or 3"
  fi
fi
}

##############################################################################
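#
# function display_config
#
# Print the static cluster configuration (hosts, private network
# interfaces, quorum devices, and timeout values) and, if this node is in
# the current cluster membership, the logical hosts configured in the CCD.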
  
function display_config
{
 typeset step10
 typeset step11
 typeset loghost

 numnodes=$(cdbmatch cluster.number.nodes $cdbfile)
 host0=$(cdbmatch cluster.node.0.hostname $cdbfile)
 host1=$(cdbmatch cluster.node.1.hostname $cdbfile)
 host2=$(cdbmatch cluster.node.2.hostname $cdbfile)
 host3=$(cdbmatch cluster.node.3.hostname $cdbfile)
 h0if0=$(cdbmatch cluster.node.0.if.0 $cdbfile | cut -d" " -f1)
 h0if1=$(cdbmatch cluster.node.0.if.1 $cdbfile | cut -d" " -f1)
 h1if0=$(cdbmatch cluster.node.1.if.0 $cdbfile | cut -d" " -f1)
 h1if1=$(cdbmatch cluster.node.1.if.1 $cdbfile | cut -d" " -f1)
 step10=$(cdbmatch cmm.transition.step10.timeout $cdbfile)
 step11=$(cdbmatch cmm.transition.step11.timeout $cdbfile)
 loghost=$(cdbmatch loghost.update_timeout $cdbfile)

 if [[ "${numnodes}" -ge 3 ]]; then
   h2if0=$(cdbmatch cluster.node.2.if.0 $cdbfile | cut -d" " -f1)
   h2if1=$(cdbmatch cluster.node.2.if.1 $cdbfile | cut -d" " -f1)
 fi
 if [[ "${numnodes}" -eq 4 ]]; then
   h3if0=$(cdbmatch cluster.node.3.if.0 $cdbfile | cut -d" " -f1)
   h3if1=$(cdbmatch cluster.node.3.if.1 $cdbfile | cut -d" " -f1)
 fi

 echo 2>&1 "
Current Configuration for Cluster $CLUSTNAME:

  Hosts in cluster: ${host0} ${host1} ${host2} ${host3}

  Private Network Interfaces for 
      ${host0}:	${h0if0} ${h0if1}
      ${host1}:	${h1if0} ${h1if1}

"
if [[ "${numnodes}" -ge 3 ]]; then
  print "      ${host2}: ${h2if0} ${h2if1}"
fi
if  [[ "${numnodes}" -eq 4 ]]; then
  print "      ${host3}: ${h3if0} ${h3if1}"
fi
print

let i=0
while (( i < ${numnodes} )); do
  let j=i+1
  while (( j < ${numnodes} )); do
    quorumdev=$(cdbmatch quorumdev.node.${i}.${j} ${cdbfile})
    if [[ -n "${quorumdev}" ]]; then
      print "  Quorum device for hosts $(eval print $`print host$i`) and $(eval print $`print host$j`): ${quorumdev}"
    fi
    let j=j+1
  done
  let i=i+1
done
print

# print out the timeout values 
print "  Logical Host Timeout Values :"
print "\tStep10			 : ${step10}"
print "\tStep11			 : ${step11}"
print "\tLogical Host 	         : ${loghost}"
print
# print information on configured logical hosts only if this node is in the
# current cluster membership.

# get the nodeid of the localhost
 
let i=0
while (( i < ${numnodes} )); do
  if [[ $(eval print $`print host$i`) = "${localhostname}" ]]; then
    localnodeid=${i}
    break
  fi
  let i=i+1
done
 
# exit if this node is not in the cluster membership
 
if [[ "${curr_members}" != *${localnodeid}* ]]; then
  exit 0
fi

loghost_rows=$(scccd ${CLUSTNAME} LOGHOST query lname "")
for lrow in ${loghost_rows}; do

  lname=${lrow%:*:*:*:*}
  lname=${lname#*:}

  nodelist=${lrow%:*:*:*}
  nodelist=${nodelist#*:*:}
  nodelist=$(print ${nodelist} | tr ',' ' ')
  set -A node_array ${nodelist}

  dglist=${lrow%:*:*}
  dglist=${dglist#*:*:*:}
  dglist=$(print ${dglist} | tr ',' ' ')

  iflist=${lrow%:*}
  iflist=${iflist#*:*:*:*:}
  iflist=$(print ${iflist} | tr ',' ' ')

  manualmode=${lrow#*:*:*:*:*:}

  print "Logical Host	: ${lname}"
  print
  print "\tNode List	: ${nodelist}"
  print "\tDisk Groups	: ${dglist}"
  print

  for i in ${iflist}; do
    iprow=$(scccd ${CLUSTNAME} LOGIP query logif ${i})

    netif_list=${iprow%:*:*}
    netif_list=${netif_list#*:*:}
    netif_list=$(print ${netif_list} | tr ',' ' ')
    set -A netif_array ${netif_list}

    ipaddr=${iprow%:*}
    ipaddr=${ipaddr#*:*:*:}

    print "\tLogical Address	: ${ipaddr}"
    print "\t\tLogical Interface	: ${i}"
    print -n "\t\tNetwork Interfaces	: "

    let n=0
    while (( n < ${#node_array[*]} )); do
      print -n "${netif_array[n]} (${node_array[n]}) "
      let n=n+1
    done
    print
  done

  print
  if [[ "${manualmode}" -eq 0 ]]; then
    print "\tAutomatic Switchover	: yes"
  else
    print "\tAutomatic Switchover	: no"
  fi
  print
done

return 0
}

######################################################################################
# create_adminfs_for_diskgroup()
#
# This is called only by configure_hads_loghost. For a given disk group,
# the function
# - creates a volume of size 2MB
# - mirrors the volume
# - makes a ufs file system on the volume
function create_adminfs_for_diskgroup
{

  typeset rawdev 
  typeset blkdev 	
  typeset mnt
  typeset dg

  dg=$1

  # The caller has already made sure that this is called from
  # only one node.
  # This function gets called only if the diskgroup is imported, so there
  # is no need to use vxdg to list the group and check for its existence.

  # Create volume and its mirror if they do not exist
  /usr/sbin/vxinfo ${dg}-stat > /dev/null 2>&1
  if [ $? -ne 0 ]; then
    /usr/sbin/vxassist -g ${dg} -U fsgen make ${dg}-stat 2m \
     > /dev/null 2> /tmp/vxassist.out
    if [ $? -ne 0 ]; then
      echo "vxassist failed to create ${dg}-stat volume on ${dg} diskgroup"
      if [ -f /tmp/vxassist.out ]; then
	/bin/grep "Cannot allocate space" /tmp/vxassist.out > /dev/null
	if [ $? -eq 0 ]; then
	  echo ""
	  echo "Unable to allocate space to create mirrored volume for administrative"
	  echo "file system in diskgroup ${dg}. 4MB of free disk space needs to be"
	  echo "present in ${dg} for this purpose to configure it for HA-NFS."

	else
	  /bin/cat /tmp/vxassist.out
	fi 
	/bin/rm -f /tmp/vxassist.out
      fi
      return 1
    else
      /usr/sbin/vxassist -g ${dg} -U fsgen mirror ${dg}-stat \
       > /dev/null 2> /tmp/vxassist.out
      if [ $? -ne 0 ]; then
	echo "Failed to create ${dg}-stat mirror on ${dg} diskgroup"
	if [ -f /tmp/vxassist.out ]; then
	  /bin/grep "Cannot allocate space" /tmp/vxassist.out > /dev/null
	  if [ $? -eq 0 ]; then
	    echo ""
	    echo "It is suggested that a mirror be created for the volume to ensure"
	    echo "the  reliability of the HA-NFS and to avoid having a single point"
	    echo "of failure. 2MB of free disk is required in  diskgroup  ${dg}  to"
	    echo "create the mirror. Run following command to create mirror:"
	    echo "    vxassist -g ${dg} -U fsgen mirror ${dg}-stat"
	    echo ""
	  else
	    /bin/cat /tmp/vxassist.out
	    echo "Run following command to create mirror:"
	    echo "vxassist -g ${dg} -U fsgen mirror ${dg}-stat"
	    echo ""
	  fi
	  /bin/rm -f /tmp/vxassist.out
	fi

      fi  # end of mirror-creation error handling

    fi
  else
    # if the volume with that name exists then we assume that
    # the file system can be created in it.
    echo "Existing ${dg}-stat volume on ${dg} disk group will be used for HA administrative file system"
  fi

  rawdev="/dev/vx/rdsk/${dg}/${dg}-stat"
  blkdev="/dev/vx/dsk/${dg}/${dg}-stat"

  # Make a ufs file system
  echo "y" > /tmp/yyy     # newfs prompts for y/n and expects "y" on stdin
  /usr/sbin/newfs ${rawdev} < /tmp/yyy > /tmp/newfs.error 2>&1
  if [ $? -ne 0 ]; then
    /bin/cat /tmp/newfs.error
    echo "newfs failed while creating file system on ${rawdev}"
    /bin/rm -f /tmp/newfs.error
    return 1
  fi
  rm -f /tmp/newfs.error
  rm -f /tmp/yyy
  
  haadmindir=$(cdbmatch cluster.haadmindir ${cdbfile})
  if [[ -z "${haadmindir}" ]]; then
    haadmindir="/"
  fi
  mnt=`echo "${haadmindir}/${loghost}" | sed -e "s/[/]\{2,\}/\//g"`
  if [[ ! -d "${mnt}" ]]; then
    /bin/mkdir -p ${mnt} > /dev/null 2>&1
    if [ $? -ne 0 ]; then
      echo "Could not create ${mnt} directory for ${dg} diskgroup"
      return 1
    fi
  fi
  
  /usr/sbin/mount -F ufs ${blkdev} ${mnt} > /dev/null 2>&1 
  if [ $? -ne 0 ]; then
    echo "mount -F ufs ${blkdev} ${mnt} failed"
    return 1
  fi
  
  return 0

}    # end of create_adminfs_for_diskgroup()

#####################################################################################
# configure_hads_loghost logical-host-name [diskgroup]
#
# logical-host-name	The name of the logical host on which the administrative
#			file system needs to be created.
# diskgroup		Optional disk group to hold the administrative file
#			system; defaults to the first disk group of the
#			logical host.
#
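# For example (hypothetical): "scconf clust0 -F hanfs-lh dg0" creates the
# administrative file system for logical host hanfs-lh on disk group dg0.
#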
function configure_hads_loghost
{
typeset dg diskgroup
integer found
typeset loghost
typeset localnodeid
typeset dglist
typeset nodelist
typeset i

loghost=$1
diskgroup=$2

localnodeid=$(get_hostid ${localhostname})
if [[ -z "${localnodeid}" ]]; then
  print "This host - ${localhostname} - is not in the cluster configuration."
  exit 1
fi

#
# We will use "haadmindir" in the CDB file to denote the prefix to the
# administrative file system.
# The administrative file system will have ${haadmindir}/logicalhost
# as the pathname.
#
haadmindir=$(cdbmatch cluster.haadmindir ${cdbfile})
if [[ -z "$haadmindir" ]]; then
  haadmindir="/"
fi

# myhanfs variable is also used by create_adminfs_for_diskgroup()
myhanfs=$(cdbmatch cluster.hanfsdir ${cdbfile})
if [[ -z "$myhanfs" ]]; then
  myhanfs="/etc/opt/SUNWcluster/conf/hanfs"
fi

vfstab_file=${myhanfs}/vfstab.${loghost}
dfstab_file=${myhanfs}/dfstab.${loghost}
mnt=`echo "${haadmindir}/${loghost}" | sed -e "s/[/]\{2,\}/\//g"`
/bin/mkdir -p ${mnt} > /dev/null 2>&1
/bin/mkdir -p ${myhanfs} > /dev/null 2>&1

#
# If the node is stopped or aborted, just create the templates for the
# vfstab and dfstab files and exit.
#

if [[ "${pdb_status}" = "stopped" || "${pdb_status}" = "aborted" ]]; then
  if [[ ! -f "${vfstab_file}" ]]; then
    touch ${vfstab_file}
  fi
  if [ ! -f ${dfstab_file} ]; then
    print  "# share -F nfs <dir name>" > ${dfstab_file}
  fi
  exit 0
fi

#
# Check if Logical Host is associated with diskgroup.
#
rows=$(scccd ${CLUSTNAME} LOGHOST query lname ${loghost})
if [ $? -ne 0 ]; then
        echo "Logical Host ${loghost} not defined"
        exit 1
fi

dglist=`echo ${rows} | /usr/bin/awk -F: ' { print $4 } '`
dglist=$(echo ${dglist} |  tr ',' ' ')

if [ "${diskgroup}" !=  "" ]; then
   found=0
   for i in ${dglist}
   do
     if [ ${i} = ${diskgroup} ]; then
    	found=1
      	break;
     fi
   done
   if [ ${found} = 0 ]; then
    echo "DiskGroup ${diskgroup} not associated with Logical Host ${loghost}"
    exit 1
   fi
   dg=${diskgroup}
fi

nodelist=`echo ${rows} | /usr/bin/awk -F: ' { print $3 } '`
nodelist=$(echo ${nodelist} |  tr ',' ' ')

if [ "${dglist}" = "" ]; then
   echo "Logical Host ${loghost} is configured with no diskgroups. No HA adminstrative filesystem will be configured"
   return 1
fi

# user has not supplied the diskgroup, so take the first one.
if [ "${dg}" == "" ]; then
	dg=`echo ${dglist} | /usr/bin/awk ' { print $1 } '`
fi

/usr/sbin/vxdg list ${dg} > /dev/null 2>&1
if [[ "$?" -eq 0 ]]; then

  if [ -f ${vfstab_file} ]; then
    pattern=`echo "${haadmindir}/${loghost}" | sed -e "s/[/]\{2,\}/\//g"`
    grep -v "^#" ${vfstab_file} | awk ' { print $3} ' | \
     grep -w ${pattern} >/dev/null 2>&1
      rc=$?
      #
      # If the pattern is found, this vfstab already has an entry for an
      # HA-admin file system on some disk group; cross-check below
      # whether the corresponding volume also exists.
      #
      if [ ${rc} -eq 0 ]; then
	/usr/sbin/vxdg list ${dg} > /dev/null 2>&1
	RC=$?
	if [ ${RC} -eq 0 ]; then
	  # the disk group is imported, so cross-check whether the volume exists
	  /usr/sbin/vxinfo ${dg}-stat > /dev/null 2>&1
	  RC=$?
	  if [ ${RC} -ne 0 ]; then
	    # The disk group is imported and vfstab entries are present,
	    # but the volume is missing; go ahead and continue with
	    # creating the volume.
	    RC=0
	  else
	    # Both the vfstab entry and the volume are present, so use
	    # them as they are.
	    RC=1
	  fi
	fi

	if [ ${RC} -ne 0 ]; then
	  echo "Logical Host ${loghost} already has an HA adminstrative file system on ${dg} diskgroup"
	fi
	
      fi
  fi

  # check for shared disk groups
  if /usr/sbin/vxdg list ${dg} 2>/dev/null | grep shared > /dev/null 2>&1; then
    print "Illegal diskgroup: ${dg} is a shared diskgroup"
    exit 1
  fi

  create_adminfs_for_diskgroup ${dg} ${loghost}
  if [ "$?" -ne 0 ]; then
    print "After fixing the cause for failure run scconf -F command again on all nodes"
    print
    exit 1
  fi
fi

rawdev="/dev/vx/rdsk/${dg}/${dg}-stat"
blkdev="/dev/vx/dsk/${dg}/${dg}-stat"

#
# Put an entry in vfstab if it does not already have an entry for the
# adminfs of this diskgroup. The format of the entry is
# blk-dev   raw-dev mnt-pt FS-type fsck mnt@boot  mount-opt
#
entry="${blkdev}    ${rawdev}       ${mnt}  ufs     1       no      -"
if [ -f ${vfstab_file} ]; then
  grep -w ${blkdev} ${vfstab_file} > /dev/null 2>&1
  if [ $? -ne 0 ]; then
    echo ${entry} >> ${vfstab_file}
    if [ $? -ne 0 ]; then
      echo "Unable to append to the file ${vfstab_file}"
      echo "Append the following line to the the ${vfstab_file} file"
      echo "${entry}"
      echo ""
    fi
  fi
else
  echo "# blk-dev   raw-dev mnt-pt FS-type fsck mnt@boot mount-opt" \
   > ${vfstab_file}
  echo ${entry} >> ${vfstab_file}
fi

if [ ! -f ${dfstab_file} ]; then
  echo "# share -F nfs <dir name>" > ${dfstab_file}
fi
}

#############################################################################
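#
# function check_node_status [option]
#
# Query the local node's cluster state, refuse to update the
# configuration while a reconfiguration is in transition (except for the
# -p display option), and record the current cluster membership in
# curr_members.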

function check_node_status
{
  typeset node_status
  typeset cmm_state

  print "Checking node status..."
  node_status=$(get_node_status)
  node_status=$(print ${node_status})
  
  cmm_state=${node_status##sc: }
  cmm_state=${cmm_state%% node id:*}
  pdb_status=${cmm_state% \(*\)}
  
  if [[ "${pdb_status}" = "in_transition reconfiguration" && "$1" != "p" ]]; then
    print "Node status: ${cmm_state}"
    print "The cluster configuration cannot be updated at this time"
    exit 1
  fi

  curr_members=${node_status##*membership: }
  curr_members=${curr_members%% interconnect*}
}

##############################################################################
#
# function get_logip_format input_params numnodes assigned_ifs logip_format
#
# input_params - The input parameters consist of a comma separated list of
#		 network interfaces (as many as the number of nodes parameter),
#		 a logical IP address, and optionally, a logical interface
#		 number.
#
# numnodes     - The number of nodes in the cluster.
#
# assigned_ifs - A space separated list of assigned logical interface numbers
#		 in the cluster. This list is modified on exit from the function
#		 to also include the logical interface number assigned for this
#		 particular invocation.
#
# logip_format - The output parameter consisting of a string in the following
#		 form: "<iflist>:<logical-ip-addr>:<logical-interface-number>"
#		 iflist is a comma separated list of network interfaces and
#		 there are as many as the number of nodes in the cluster.
#		 The logical interface number is either passed in as an optional
#		 input parameter or computed by this function as a
#		 cluster-wide unique logical interface number.
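#
# For example (hypothetical values), in a two node cluster:
#
#	get_logip_format "hme1,hme1,hanfs-ip" 2 assigned_ifs logip_format
#
# sets logip_format to "hme1,hme1:hanfs-ip:1", assuming logical interface
# number 1 has not already been assigned.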

function get_logip_format
{
typeset i_params i_numnodes
typeset io_assigned_ifs
typeset o_logip_format

typeset iflist logip_addr logintf
typeset n retval nlookup rows

# initialize variables

iflist=""
set -A i_params $(print $1 | tr ',' ' ')
i_numnodes=$2
io_assigned_ifs=$(eval print \$$3)	# indirect: the value of the variable named by $3

# get list of network interfaces

let n=0
while (( n < i_numnodes )); do
  if [[ -z "${iflist}" ]]; then
    iflist=${i_params[n]}
  else
    iflist="${iflist},${i_params[n]}"
  fi
  let n=n+1
done

if [[ -z "${i_params[n]}" ]]; then
  print "Logical IP address not specified or incorrect number of"
  print "network interfaces specified. The number of network interfaces"
  print "should match the number of nodes specified in the node list with"
  print "the n option."
  exit 1
fi

# get and validate logical IP address

logip_addr=${i_params[n]}
retval=0
nlookup=$(lookuphost ${logip_addr}) || retval=$?
if [[ -z "${nlookup}" ]]; then
  print "Invalid logical name: ${logip_addr}"
  exit 1
elif [[ -n "{nlookup}" && "${retval}" -eq 2 ]]; then
  print "Duplicate entries for ${logip_addr} are present in /etc/hosts."
  print "Please remove duplicate entries and rerun this command."
  exit 1
fi

# check for duplicates in the Cluster Configuration Database

rows=$(scccd ${CLUSTNAME} LOGIP query ipaddr "${logip_addr}")
if [[ -n "${rows}" ]]; then
  print
  print "Error: The specified logical IP address ${logip_addr} already exists in the"
  print "Cluster Configuration Database. You can verify this using the"
  print "${Myname} <clustname> -p command."
  exit 1
fi

# get the optionally specified logical interface number, or compute a
# unique cluster-wide logical interface number

let n=n+1
logintf=${i_params[n]}
if [[ -z "${logintf}" ]]; then
  let logintf=1
  while (( 1 )); do
    if [[ "${io_assigned_ifs}" != *${logintf}* ]]; then
      break
    else
      let logintf=logintf+1
    fi
  done
else
  # The logical interface number has been specified as an option. Check for
  # duplicates in the Cluster Configuration Database

  rows=$(scccd ${CLUSTNAME} LOGIP query logif "${logintf}")
  if [[ -n "${rows}" ]]; then
    print
    print "Error: The specified logical interface number already exists in the"
    print "Cluster Configuration Database. You can verify this using the"
    print "scconf <clustname> -p command."
    exit 1
  fi
  # validate the logical interface
  if [[ "${logintf}" != +([0-9]) ]]; then
    print "Logical interface (${logintf}) must be an integer."
    exit 1
  fi
fi

# set output parameters

io_assigned_ifs="${io_assigned_ifs} ${logintf}"
io_assigned_ifs=$(print ${io_assigned_ifs} | sed -e 's/ /\\ /g')
o_logip_format="${iflist}:${logip_addr}:${logintf}"

eval $3=${io_assigned_ifs}
eval $4=${o_logip_format}

}

##############################################################################
#
# function get_node_list i_nodelist o_nodelist o_numnodes
#
# i_nodelist - comma separated list of node names that are passed into the
#	       function.
# o_nodelist - comma separated list of output node names that are passed out
#	       of this function after validation of the input node list.
# o_numnodes - an integer passed out of this function that represents the
#	       number of nodes in the output node list.
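#
# For example (hypothetical): "get_node_list node0,node1,node0 out n"
# sets out to "node0,node1" (the duplicate is ignored with a warning)
# and n to 2.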

function get_node_list
{
typeset i_nodelist o_nodelist o_numnodes
typeset nodename nodeid

i_nodelist=$(print $1 | tr ',' ' ')
o_nodelist=""
let o_numnodes=0

for nodename in ${i_nodelist}; do
  
  # validate node
  nodeid=$(get_hostid ${nodename})
  if [[ -z "${nodeid}" ]]; then
    print "Host ${nodename} is not in the cluster configuration."
    exit 1
  fi
  
  if [[ "${o_nodelist}" = *${nodename}* ]]; then
    print "Duplicate entry for ${nodename}. Ignored"
    continue
  fi

  if [[ -z "${o_nodelist}" ]]; then
    o_nodelist=${nodename}
  else
    o_nodelist="${o_nodelist},${nodename}"
  fi
  let o_numnodes=o_numnodes+1
done

# set output parameters
eval $2=${o_nodelist}
eval $3=${o_numnodes}

}

##############################################################################
#
# function validate_dglist <dglist>
#
# <dglist> - A comma separated list of disk groups passed as an input argument.
# This function verifies that each of the disk groups in the input list is not
# already configured on one of the existing logical hosts in the cluster
# configuration. The function outputs an error message and exits with a
# non-zero exit code if it finds any one of the input disk groups configured
# with any other logical host in the cluster.

function validate_dglist
{
typeset i_dg i_dglist
typeset rows loghost_row lname
typeset loghost_dglist loghost_dg

i_dglist=$(print $1 | tr ',' ' ')

rows=$(scccd ${CLUSTNAME} LOGHOST query lname "")
for loghost_row in ${rows}; do

  lname=${loghost_row%:*:*:*:*}
  lname=${lname#*:}

  loghost_dglist=${loghost_row%:*:*}
  loghost_dglist=${loghost_dglist#*:*:*:}
  loghost_dglist=$(print ${loghost_dglist} | tr ',' ' ')

  for i_dg in ${i_dglist}; do
    for loghost_dg in ${loghost_dglist}; do
      if [[ "${i_dg}" = "${loghost_dg}" ]]; then
	print "Error: The disk group ${i_dg} is already configured with"
	print "Logical Host ${lname}."
	exit 1
      fi
    done
  done
done
}

##############################################################################
#
# function configure_logical_host <argument-list>
#
# <argument-list> is the list of arguments following the -L option to the
# scconf command. See usage below. This function carries out various
# validation checks and then uses getopts to parse the command line
# arguments. After parsing is complete, the Cluster Configuration Database
# is updated to add the logical host.
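#
# For example (hypothetical):
#
#	scconf clust0 -L hanfs-lh -n node0,node1 -g dg0 -i hme1,hme1,hanfs-ip
#
# defines logical host hanfs-lh over nodes node0 and node1 with disk
# group dg0 and one logical IP address.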

function configure_logical_host
{
typeset OLDOPTIND
typeset lname
typeset option options_present
typeset nodelist nodeid numnodes curr_master localnodeid
typeset dglist
typeset manualmode
typeset rows r
typeset logip_array logip_count logip_format logintf

# initialize variables
OLDOPTIND=${OPTIND}
lname=$1
shift
OPTIND=1
let manualmode=0
nodelist=""
dglist=""
set -A logip_array
let logip_count=0
let options_present=0

# check whether this node is in the cluster configuration

localnodeid=$(get_hostid ${localhostname})
if [[ -z "${localnodeid}" ]]; then
  print "This host - ${localhostname} - is not in the cluster configuration."
  exit 1
fi

# check whether this node is in the cluster membership

if [[ "${curr_members}" != *${localnodeid}* ]]; then
  print "This host - ${localhostname} - is not in the current cluster membership."
  print "The cluster configuration database cannot be updated from this node."
  exit 1
fi


# get list of already assigned logical interfaces

assigned_ifs=""
rows=$(scccd ${CLUSTNAME} LOGIP query logif "")
for r in ${rows}; do
  logintf=${r#*:*:*:*:}
  assigned_ifs="${assigned_ifs} ${logintf}"
done

# parse command line options

while getopts mrn:g:i: option $*; do
  let options_present=1
  case ${option} in
    
    n) get_node_list ${OPTARG} nodelist numnodes
       ;;
    
    g) if [[ -z "${dglist}" ]]; then
         dglist=${OPTARG}
       else
	 dglist="${dglist},${OPTARG}"
       fi
       ;;

    m) let manualmode=1
       ;;

    i) if [[ -z "${nodelist}" ]]; then
         echo "The -n option has to be specified before the -i option."
	 exit 1
       fi
       logip_format=""
       get_logip_format ${OPTARG} ${numnodes} assigned_ifs logip_format
       logip_format="${nodelist}:${logip_format}"
       logip_array[logip_count]=${logip_format}
       let logip_count=logip_count+1
       ;;

    r) if [[ -n "${nodelist}" || -n "${dglist}" || ${#logip_array[*]} -gt 0 ]]; then
         echo "The -r option cannot be used with any other option."
	 exit 1
       fi
       unconfigure_logical_host ${lname}
       exit 0
       ;;

    \?) usage
	exit 2
	;;
  esac
done

if (( options_present == 0 )); then
  usage
  exit 2
fi

# check for duplicate logical host names

rows=$(scccd ${CLUSTNAME} LOGHOST query lname ${lname})
if [[ -n "${rows}" ]]; then
  print "Definition of logical host ${lname} already exists in the cluster"
  print "configuration database."
  exit 1
fi

# check whether any of the disk groups specified belong to other logical hosts
validate_dglist ${dglist}

# get the list of assigned logical interface numbers for all the logical IP
# addresses of this logical host

assigned_ifs=""
let logip_count=0
while (( logip_count < ${#logip_array[*]} )); do
  logintf=${logip_array[logip_count]#*:*:*:}
  if [[ -z "${assigned_ifs}" ]]; then
    assigned_ifs=${logintf}
  else
    assigned_ifs="${assigned_ifs},${logintf}"
  fi
  let logip_count=logip_count+1
done

# add LOGHOST row to the CCD

r="${lname}:${nodelist}:${dglist}:${assigned_ifs}:${manualmode}"
scccd ${CLUSTNAME} LOGHOST add "lname:nodelist:dglist:iplist:mode" ${r}
if [[ "$?" -ne 0 ]]; then
  print "Unable to add logical host ${lname} to the cluster configuration database."
  print "Check system console logs for more detailed error messages."
  exit 1
fi

# add all the logical IP addresses of this logical host

let logip_count=0
while (( logip_count < ${#logip_array[*]} )); do
  scccd ${CLUSTNAME} LOGIP add "nodelist:iflist:ipaddr:logif" \
	${logip_array[logip_count]}
  if [[ "$?" -ne 0 ]]; then
    print "Unable to add logical IP information - ${logip_array[logip_count]} - "
    print "for logical host ${lname}. Check the system console logs for more"
    print "detailed error messages. The Cluster configuration database is now in"
    print "an inconsistent state. Remove the logical host with the command"
    print "'scconf <clustername> -L <logical-host> -r"
    print "and resolve the errors before retrying to add the logical host to the"
    print "cluster configuration."
    exit 1
  fi
  let logip_count=logip_count+1
done

#
# Since the logical host may be configured on any node in the cluster
# and this node may not be in the nodelist, we cannot check here whether
# the interfaces are configured in a NAFO backup group.
# Hence, for now, we just print a message telling the user that the
# logical interfaces need to be configured in a NAFO backup group.
#
USE_NAFO=0
/usr/bin/pkginfo SUNWpnm >/dev/null 2>&1
RC=$?
if [ ${RC} -eq 0 ]; then
        USE_NAFO=1
fi
 
if [ ${USE_NAFO} -eq 1 ]; then
  print "IMPORTANT: Please Make Sure that all the interfaces to the Logical Host ${lname} are configured on Network Failover Adapter(NAFO) backup group"
  sleep 4
fi



#
# Set the logical host's MSTATE to ON.
#
loghost_mstate="${lname}:1"
# remove it if present.
scccd ${CLUSTNAME} LOGHOST_MSTATE remove lname ${lname}
scccd ${CLUSTNAME} LOGHOST_MSTATE add "lname:mmode" ${loghost_mstate}
if [[ "$?" -ne 0 ]]; then
  print "Unable to add logical host ${lname} LOGHOST_MSTATE to the cluster configuration database."
  print "Check system console logs for more detailed error messages."
  exit 1
fi

# add the current master entry

nodelist=$(print ${nodelist} | tr ',' ' ')
curr_master=""
for r in ${nodelist}; do
  nodeid=$(get_hostid ${r})
  if [[ "${curr_members}" = *${nodeid}* ]]; then
    curr_master=${r}
    break
  fi
done

if [[ -n "${curr_master}" ]]; then
  # add current master row. This should bring up the logical host on the
  # current master
  scccd ${CLUSTNAME} LOGHOST_CM add "lname:curr_master" "${lname}:${curr_master}"
  if [[ "$?" -ne 0 ]]; then
    print "Could not bring up logical host ${lname} on the current master ${curr_master}"
    print "Check the system console logs for detailed error messages."
  fi
fi


#
# Display a message on the console that the logical host is configured
log_info "${msg_prefix}.1010" "Successfully configured ${lname} logical host"

}

##############################################################################
#
# function unconfigure_logical_host <lname>
#
# <lname> is the name of the logical host to be unconfigured. It will remove
# rows corresponding to the specified logical host from the cluster configuration
# database. One of the side effects of this will be to bring down the logical
# host on the current master, if one exists.
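#
# For example (hypothetical): "scconf clust0 -L hanfs-lh -r" removes
# logical host hanfs-lh, provided no data services are still attached
# to it.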

function unconfigure_logical_host
{
typeset lname dsname
typeset rows r
typeset logif_list logintf

lname=$1

# if this logical host is still attached to some data services, it should not
# be removed from the cluster configuration.

rows=$(scccd ${CLUSTNAME} LOGHOST_DS query lname ${lname})
if [[ -n "${rows}" ]]; then
  print "The following data services are configured on this logical host:"
  for r in ${rows}; do
    dsname=${r#*:*:}
    print -n "${dsname} "
  done
  print
  print "The logical host ${lname} cannot be removed from the cluster"
  print "configuration."
  exit 1
fi

# remove the row corresponding to the current master, if any

r=$(scccd ${CLUSTNAME} LOGHOST_CM query lname ${lname})
if [[ -n "${r}" ]]; then
  scccd ${CLUSTNAME} LOGHOST_CM remove lname ${lname}
  if [[ "$?" -ne 0 ]]; then
    print "Unable to shut down the logical host ${lname} from the current master"
    print "node ${r#*:*:}. Check the system console logs for detailed error messages."
    exit 1
  fi
fi

# remove the row corresponding to the logical host

r=$(scccd ${CLUSTNAME} LOGHOST query lname ${lname})
if [[ -n "${r}" ]]; then
  scccd ${CLUSTNAME} LOGHOST remove lname ${lname}
  if [[ "$?" -ne 0 ]]; then
    print "Unable to remove logical host ${lname} from the cluster configuration"
    print "database. Check the system console logs for detailed error messages."
    exit 1
  fi
else
  print "Logical host ${lname} is not configured in the cluster configuration database."
  exit 1
fi

# remove the rows corresponding to the logical IP addresses of this logical host

logif_list=${r%:*}
logif_list=${logif_list#*:*:*:*:}
logif_list=$(print ${logif_list} | tr ',' ' ')

for logintf in ${logif_list}; do
  scccd ${CLUSTNAME} LOGIP remove logif ${logintf}
  if [[ "$?" -ne 0 ]]; then
    print "Unable to remove logical interface numbered ${logintf} for"
    print "logical host ${lname}. Check system console logs for detailed error"
    print "messages."
  fi
done

#
# Remove the LOGHOST_MSTATE row, which holds the persistent status of the logical host
#
loghost_mstate=$(scccd ${CLUSTNAME} LOGHOST_MSTATE query lname ${lname})
if [[ -n "${loghost_mstate}" ]]; then
  scccd ${CLUSTNAME} LOGHOST_MSTATE remove lname ${lname}
  if [[ "$?" -ne 0 ]]; then
        print "Unable to remove logical host ${lname} LOGHOST_MSTATE from the cluster configuration"
        print "database. Check the system console logs for detailed error messages."    exit 1
  fi
fi

#
# Display a message on the console that the logical host is removed
log_info "${msg_prefix}.1020" "Successfully removed ${lname} logical host"

}

##############################################################################
#
# function attach_ds_loghost [-r] <dsname> <lname>
#
# This function establishes the link between a data service and a logical
# host. The -r option can be specified to break this link before unconfiguring
# a logical host.
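#
# For example (hypothetical): "scconf clust0 -s nfs-ds hanfs-lh"
# associates data service nfs-ds with logical host hanfs-lh.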

function attach_ds_loghost
{
typeset rflag
typeset lname dsname
typeset row lhost

if [[ "$1" = "-r" ]]; then
  rflag=1
  shift
fi
dsname=$1
lname=$2

row=$(scccd ${CLUSTNAME} LOGHOST_DS query dsname ${dsname})

if [[ "${rflag}" -eq 1 && -z "${row}" ]]; then
  print "Data service ${dsname} is not associated with any logical host in"
  print "the cluster configuration database."
  exit 1
fi

if [[ "${rflag}" -eq 1 ]]; then
  scccd ${CLUSTNAME} LOGHOST_DS remove "lname:dsname" "${lname}:${dsname}"
  if [[ "$?" -ne 0 ]]; then
    print "Unable to disassociate data service ${dsname} from logical host"
    print "${lname}."
    exit 1
  fi
else
  row=$(scccd ${CLUSTNAME} LOGHOST query lname ${lname})
  if [[ -z "${row}" ]]; then
    print "The logical host ${lname} is not configured in the cluster"
    print "configuration database."
    exit 1
  fi
  scccd ${CLUSTNAME} LOGHOST_DS add "lname:dsname" "${lname}:${dsname}"
  if [[ "$?" -ne 0 ]]; then
    print "Unable to associate data service ${dsname} with logical host"
    print "${lname}"
    exit 1
  fi
fi

}

##############################################################################
function usage
{
 echo 2>&1 "Usage: 

$Myname clustername -h <new hostname1> <new hostname2> <new hostname3> <new hostname4>

$Myname clustername -i <hostname> <if0> <if1>

$Myname clustername -F <logical-host> [<dg>]

$Myname clustername -L <logical-host> -n <nodelist> -g <dglist> \
		       -i <iplist> [-m]

$Myname clustername -L <logical-host> -r

$Myname clustername -p

$Myname clustername -s <data-service-name> <logical-host-name>

$Myname clustername -U [absolute path of the config file for Oracle Unix DLM]

$Myname clustername -N <0|1> <ethernet address of host>

$Myname clustername -q [-m] <hostname1> <hostname2> [quorum-device]

$Myname clustername -A <# of active hosts>

$Myname clustername -S <none|ccdvol>

$Myname clustername -T <step10 and step11 timeout value>

$Myname clustername -l <loghost update timeout value>
"
}

##############################################################################

   if [ $# -ge 2 ]; then
     CLUSTNAME=$1
     check_cluster_name
     print "${cdbfile}"
     shift;
   else
     usage
     exit 2
   fi
   localhostname=$(uname -n)
   let options_present=0
   while getopts pduvqUF:L:N:i:h:A:S:s:T:l: c
   do
	let options_present=1
	check_node_status ${c}
	case $c in
	   A) shift
	      change_ccd_activehosts $*
	      exit 0
	      ;;
	   F) shift
	      if [[ $# -eq 0 ]]; then
		echo 2>&1 "$Myname: Insufficient argument list.\
Should be:\$Myname clustername -F <logical-host-name> [<dg>]"
		exit 2
	      else
		configure_hads_loghost $*
	      fi
	      ;;
	   S) shift
	      add_ccdssa $*
	      ;;
	   L) shift
	      configure_logical_host $*
	      exit 0
	      ;;
	   U) shift
              newudlmfile=NUDLM
              nudlmcfile=$1 
	      ;;
	   p) shift
	      display_config
	      exit 0
	      ;;
	   s) shift
	      if [ $# -lt 2 ]; then
		echo 2>&1 "$Myname: Insufficient argument list.\
Should be:\$Myname clustername -s <data-service-name> <logical-host-name>"
		exit 2
	      else
		attach_ds_loghost $*
	      fi
	      exit 0
	      ;;
	  h) shift; 		# get rid of the -h parameter!
	      if [ $# -ne 4 ]; then
		echo 2>&1 "$Myname: Illegal Hostname format. \
Should be:\n$Myname clustername -h newhost1 newhost2 newhost3 newhost4"
	        exit 2
	      else
		newhost=$1,$2,$3,$4
		shift 2
	      fi
	      ;;

           N) shift;
	      if [ $# -ne 2 ]; then
		echo 2>&1 "$Myname: Illegal ethernet address format. \
Should be:\n$Myname clustername -N nodeid new-ethernet-address"
                exit 2
              else
                newnetaddr=$2
                nodeid=$1
                shift 2
              fi
              ;;
	   i) shift
	      if [ $# -ne 3 ]; then
		echo 2>&1 "$Myname: Illegal interface format. \
Should be:\n$Myname clustername -i hostname ifX ifY"
		exit 2
	      else
		host=$1
		newinterfaces="0 "$2" 1 "$3
		shift 3
	      fi
		;;
	   q) shift
	      if [ $# -lt 2 ]; then
		echo 2>&1 "$Myname: Illegal number of arguments. \
Should be:\n$Myname clustername -q [-m] nodeA nodeB [quorum-device]"
		exit 2
	      else
		change_quorum_dev $*
		shift 2
	      fi
		;;
	   l) shift
		loghosttimes=$(echo "$1" | tr -d '0-9')
		if [[ -n ${loghosttimes} ]]; then
		 print 1>&2 "$Myname: Illegal value = $1. \
Should be numeric."
		 exit 2
		else
	         if (( $1 <= 0 )); then
	           print 1>&2 "$Myname: Illegal value = $1. \
Timeout value needs to be greater than 0."
	           exit 2
	         else
               	   loghosttimes=$1
	           shift
		 fi
		fi
	        ;;
	   T) shift
		steptimes=$(echo "$1" | tr -d '0-9')
		if [[ -n ${steptimes} ]]; then
		 print 1>&2 "$Myname: Illegal value = $1. \
Should be numeric."
		 exit 2
		else
	         if (( $1 <= 0 )); then
	           print 1>&2 "$Myname: Illegal value = $1. \
Timeout value needs to be greater than 0."
	           exit 2
	         else
               	   steptimes=$1
	           shift
		 fi
		fi
	        ;;
	  \? ) 
	      usage
	      exit 2
	      ;;
	  * ) print "Invalid option: ${c}"
	      usage
	      exit 2
		;;
	esac
   done

if (( options_present == 0 )); then
  print "No options specified!"
  usage
  exit 2
fi

hostid=$(get_hostid ${host})

get_newhost 

get_udlm_cfile $newudlmfile $nudlmcfile

get_newnetaddr

get_interfaces $newinterfaces

set_newsteptimes

set_newloghosttimes

cp $cdbfile ${cdbfile}.o
sed -f $sedfile ${cdbfile}.o > $cdbfile

# Kill mond and signal inetd to re-read its configuration:
mond_pid=`/usr/bin/ps -ed | /usr/bin/grep mond | /usr/bin/awk '{print $1}'`
if [ -n "$mond_pid" ]; then
	kill $mond_pid
fi

inet_pid=`/usr/bin/ps -ed | /usr/bin/grep inetd | /usr/bin/awk '{print $1}'`
if [ -n "$inet_pid" ]; then
	kill -HUP $inet_pid
else
	echo 2>&1 "inetd is not running! Something is wrong!!"
fi
