#!/bin/ksh -p
#
# ident   "@(#)scconf.sh 1.117	01/03/28 SMI"
#
#	Copyright (c) 1994-1998 Sun Microsystems, Inc.
#
# scconf - SPARCcluster SC configuration editor script.
#

# defined for i18n
TEXTDOMAIN=scconf; export TEXTDOMAIN
TEXTDOMAINDIR=/opt/SUNWcluster/locale; export TEXTDOMAINDIR

# basename of this script, used to prefix diagnostic messages
Myname=$(basename $0)

PATH=/usr/sbin:/usr/bin:/opt/SUNWcluster/bin:/usr/ccs/bin:/bin:/sbin
export PATH

msg_prefix="SUNWcluster.scconf"
# directory holding the cluster configuration database (cdb) files
cdbpath="/etc/opt/SUNWcluster/conf"
PDBSSA=/opt/SUNWcluster/bin/scssa
PDBCCD=/opt/SUNWcluster/bin/scccd
RHOSTS=/.rhosts
# pristine copy of /.rhosts saved by enable_rcmd, restored by disable_rcmd
RHOSTS_TMP=${RHOSTS}.scinstall

# set to 1 by enable_rcmd once /.rhosts has been modified, so the trap
# below and disable_rcmd know whether there is anything to undo
let callrcmd=0
# scratch directory; Tmppath may be preset in the environment
tmppath=${Tmppath=/var/opt/SUNWcluster/run}
# scratch file that accumulates sed commands built by the edit functions
sedfile=${tmpfile=${tmppath}/sed.tmp$$}
touch $sedfile
# per-invocation scratch file for SSA device paths (passed to finddevices)
ssafile=/var/opt/SUNWcluster/run/scconf.$$
# on interrupt: remove the sed scratch file, undo any /.rhosts edits, exit
trap "echo \"$Myname: ***interrupt***\" 1>&2; /bin/rm -rf $sedfile; disable_rcmd; exit 1"\
	      1 2 3 15

########################################################################
#
# function remove_all_ds_entries
#
#
# Remove every CCD (cluster configuration database) entry for each data
# service named in the argument list. May only run on a node that is in
# both the static configuration and the current cluster membership;
# exits non-zero otherwise, or when any scccd removal fails.
#
# Globals read: localhostname, curr_members, CLUSTNAME.
# Calls: get_hostid, validate_no_ds_dependencies_list, scccd.
#
function remove_all_ds_entries
{
typeset row
typeset dsnames
typeset ds
typeset ccd_rows
typeset r


# check whether this node is in the cluster configuration

localnodeid=$(get_hostid ${localhostname})
if [[ -z "${localnodeid}" ]]; then
  lmsg="`gettext \
'This host - %s - is not in the cluster configuration.'`"
  printf "${lmsg}\n" "${localhostname}"
  exit 1
fi

# check whether this node is in the cluster membership
# (substring pattern match of the node id against the membership list)

if [[ "${curr_members}" != *${localnodeid}* ]]; then
  lmsg="`gettext \
'This host - %s - is not in the current cluster membership.\n\
The cluster configuration database cannot be updated from this node.'`"
  printf "${lmsg}\n" "${localhostname}"
  exit 1
fi

dsnames=$*

# on each logical host in the cluster,
# check that the data service to be removed is in the "off" state
# check that there are no data services that depend on the current ds
# unless they are also in the list to be removed
validate_no_ds_dependencies_list ${dsnames}

# CCD row types that may carry per-data-service state; each is scrubbed
# of the data service below
ccd_rows="DS_STATE DS_INFO DS_METHOD_PATH DS_NM_PATH DS_FM_PATH DS_DEPSVC_LIST DS_PKG_LIST"

for ds in ${dsnames}; do
	# remove the top-level LOGHOST_DS row for this data service, if any
	row=$(scccd ${CLUSTNAME} LOGHOST_DS query dsname ${ds})
	if [[ -n "${row}" ]]; then
		scccd ${CLUSTNAME} LOGHOST_DS remove dsname "${ds}"
		if [[ $? -ne 0 ]]; then
			lmsg="`gettext \
'Unable to remove data service %s from LOGHOST_DS CCD row'`"
			printf "${lmsg}\n" "${ds}"
    		exit 1
		fi
	fi

	# then scrub the data service from each auxiliary row type
	for r in ${ccd_rows}; do
    	row=$(scccd ${CLUSTNAME} ${r} query ds_name ${ds})
    	if [[ -n "${row}" ]]; then
        	scccd ${CLUSTNAME} ${r} remove ds_name "${ds}"
        	if [[ $? -ne 0 ]]; then
		lmsg="`gettext \
'Unable to remove data service %s from %s CCD row'`"
		printf "${lmsg}\n" "${ds}" "${r}"
            	exit 1
        	fi
		fi
	done
done

}

########################################################################
#
# function enable_rcmd
#
# Create file /.rhosts if it does not exist and enable remote root
# access for all the nodes in the cluster. If the file already exists,
# then copy it to a temporary file preserving ownership and permissions,
# and alter the original file. 

function enable_rcmd
{
  typeset hostlist

  hostlist=$*
  let callrcmd=1
  let create_rhosts=0
  if [[ ! -f "${RHOSTS}" ]]; then
    touch ${RHOSTS}
    let create_rhosts=1
  else
    /usr/bin/cp -p ${RHOSTS} ${RHOSTS_TMP}
  fi
  /usr/bin/chmod u+w ${RHOSTS}

  for name in ${hostlist}; do
    /usr/bin/grep -F "${name} root" ${RHOSTS} > /dev/null 2>&1
    if [[ $? -ne 0 ]]; then
      print ${name} root >> ${RHOSTS}
    fi
  done
}

########################################################################
#
# function disable_rcmd
#
# Remove the .rhosts file if it was created in enable_rcmd. If it was
# copied to a temporary file, restore it back with the same permissions
# and ownership.

function disable_rcmd
{
  # Nothing to undo unless enable_rcmd ran earlier in this session.
  if (( callrcmd != 1 )); then
    return
  fi

  # Drop the edited /.rhosts; when enable_rcmd saved a pristine copy
  # (i.e. the file pre-existed), move that copy back into place with
  # its original ownership and permissions.
  /bin/rm -f ${RHOSTS}
  if (( create_rhosts == 0 )); then
    /bin/mv -f ${RHOSTS_TMP} ${RHOSTS}
  fi
}
################################################################################
#
# function get_static_node_index live_node_index
#
# node_array is a list of live node names, while NodeNames is a list of statically
# configured nodes. Given the index of a live node in node_array, this function
# returns the index of the corresponding node in NodeNames.

function get_static_node_index
{
  typeset total
  typeset cfgname
  typeset livename
  typeset idx

  # Name of the live node whose live-array index was passed in $1;
  # node_array is the list of live nodes built by get_devices.
  livename=${node_array[$1]}

  # Scan the statically configured nodes in the cdb file and print the
  # index of the entry whose hostname matches. Prints nothing when no
  # configured node matches.
  let total=$(cdbmatch cluster.number.nodes ${cdbfile})
  let idx=0
  while (( idx < total )); do
    cfgname=$(cdbmatch cluster.node.${idx}.hostname ${cdbfile})
    if [[ "${cfgname}" = "${livename}" ]]; then
      print ${idx}
      return
    fi
    let idx=idx+1
  done

}


#############################################################################
#
# function add_ccdssa
#
# This function changes the ccddevice.ccdssa entry in
# the ccd.database.init file.
#

# Rewrite the ccd.ccddevice.ssa entry in the ccd.database.init file to
# either "none" or "ccdvol" ($1), rebuild the file, and - when a ccdd
# daemon appears in the process list - ask it to re-read the database.
# Exits non-zero on an invalid argument, a corrupted init file, or a
# failed ccdd reinit.
function add_ccdssa
{
	ccdinit_file=$cdbpath/ccd.database.init

	# queue a sed substitution for the requested ccd device type
	if [[ $1 = none ]]; then
		echo "/^ccd.ccddevice.ssa:/s|:.*|: none|" >> $sedfile
	elif [[ $1 = ccdvol ]]; then
		echo "/^ccd.ccddevice.ssa:/s|:.*|: ccdvol|" >> $sedfile
	else
		lmsg=`gettext "Invalid argument : %s"`
		printf "${lmsg}\n" "$1"
		exit 1
	fi

	# ccdadm -p validates the init file and produces the "pure" copy
	# ${ccdinit_file}.pure (see change_ccd_activehosts); a non-zero
	# status means the file is corrupted
	ccdadm -p ${ccdinit_file}
	retval=$?
	if [[ $retval != 0 ]]; then
		lmsg=`gettext "%s is corrupted"`
		printf "${lmsg}\n" "${ccdinit_file}"
		exit 1;
	fi

	# apply the queued substitution(s) to the pure copy
	sed -f $sedfile ${ccdinit_file}.pure > ${tmppath}/tmpccdinit

	# ccdadm -x presumably re-seals the edited copy before it
	# replaces the original init file - confirm against ccdadm docs
	ccdadm -x ${tmppath}/tmpccdinit
	mv ${tmppath}/tmpccdinit ${ccdinit_file}

	# if a ccdd daemon appears to be running, make it pick up the new
	# database; NOTE(review): any process name containing "ccdd"
	# also triggers this path
	check=$(/usr/bin/ps -e | /usr/bin/grep ccdd | wc -l)
	if [[ $check -ne "0" ]]; then
		ccdctl reinit $CLUSTNAME 1
		if [[ $? != 0 ]]; then
			lmsg=`gettext "ccdd reinit failure"`
			printf "${lmsg}\n"
			rm ${ccdinit_file}.pure
			exit 1;
		fi
	fi

	rm ${ccdinit_file}.pure
}

##############################################################################
#
# function change_ccd_activehosts
#
# This function changes the number of active nodes in the ccd init file.
#

# Rewrite the ccd.nservers entry in the ccd.database.init file to the
# active-node count given in $1, rebuild the file, and - when a ccdd
# daemon appears in the process list - ask it to re-read the database.
# Exits non-zero if the init file is corrupted or the reinit fails.
function change_ccd_activehosts
{
	ccdinit_file=$cdbpath/ccd.database.init

	ccdadm -p ${ccdinit_file}	# get "pure" ccdinit file
	retval=$?
	if [[ $retval != "0" ]]; then
		lmsg=`gettext "%s is corrupted"`
		printf "${lmsg}\n" "${ccdinit_file}"
		exit 1;
	fi

	# queue the server-count substitution and apply it to the pure copy
	echo "/^ccd.nservers:/s|:.*|: $1|" >> $sedfile
	sed -f $sedfile ${ccdinit_file}.pure > ${tmppath}/tmpccdinit

	# ccdadm -x presumably re-seals the edited copy before it
	# replaces the original init file - confirm against ccdadm docs
	ccdadm -x ${tmppath}/tmpccdinit
	mv ${tmppath}/tmpccdinit ${ccdinit_file}

	# if a ccdd daemon appears to be running, make it pick up the new
	# database; NOTE(review): any process name containing "ccdd"
	# also triggers this path
	check=$(/usr/bin/ps -e | /usr/bin/grep ccdd | wc -l)
	if [[ $check -ne "0" ]]; then
		ccdctl reinit $CLUSTNAME 1
		if [[ $? != 0 ]]; then
			lmsg=`gettext "ccdd reinit failure"`
			printf "${lmsg}\n"
			rm ${ccdinit_file}.pure
			exit 1;
		fi
	fi

	rm ${ccdinit_file}.pure

}

##############################################################################
#
# function get_hostid hostname
#
# This function takes the hostname as an argument and returns the unique id
# assigned to the host in the static cluster configuration.

# Look up the hostname given in $1 among the statically configured
# cluster nodes (cdbfile) and print its numeric node id. Prints
# nothing when the host is not configured or no argument was given.
function get_hostid
{
  typeset i
  typeset numofnodes
  typeset hostname	# was not typeset before: clobbered any global "hostname"

  if [[ -n "$1" ]]; then
    numofnodes=$(cdbmatch cluster.number.nodes ${cdbfile})
    let i=0
    while (( i < ${numofnodes} )); do
      hostname=$(cdbmatch cluster.node.$i.hostname ${cdbfile})
      if [[ "${hostname}" = "$1" ]]; then
	print $i
	break
      else
	let i=i+1
      fi
    done
  fi
}

############################################################################
# function fill_array arrayName size
#
# arrayName is the name of the array to fill
# size is the number of entries to fill
#
# fill_array() will fill the given array with the rest of the hostnames
#	of the nodes that were not initially given.  
#

# Fill the array whose *name* is given in $1 up to $2 entries, taking
# the missing hostnames from the static cluster configuration
# (cluster.node.<n>.hostname in ${cdbfile}). Entries already present
# are left untouched.
function fill_array 
{
	typeset arrayName=$1
	integer size=$2

	# current number of entries in the named array; eval is required
	# because the array name is held in a variable (no nameref
	# support in this ksh dialect)
	integer cnt=$(eval echo \$\{#${arrayName}\[*]\})

	# append configured hostnames until the array holds ${size} entries
	while (( cnt < size )); do
		eval ${arrayName}[${cnt}]=$(cdbmatch cluster.node.${cnt}.hostname ${cdbfile})
		(( cnt=cnt+1 ))
	done
}

############################################################################
# function check_array_duplicates arrayName
#
# arrayName is the name of the array to check for duplicates
#
# returns the index for the first duplicate found, or a value greater than 
# 	number of entries in the array if there are no duplicate entries.

# Check the array whose *name* is given in $1 for duplicate entries.
# Returns (as exit status) the index of the first duplicate found, or
# a value equal to the number of entries when there are no duplicates.
# NOTE: return statuses are truncated to 0-255 by the shell, so arrays
# larger than 255 entries cannot be distinguished reliably - unchanged
# from the original interface.
function check_array_duplicates
{
typeset arrayName=$1

# typeset -i is the expansion of the ksh "integer" alias; same semantics
typeset -i arraySize
typeset -i i
typeset -i j
typeset iString
typeset jString

# eval-assignments (rather than eval echo) keep values with spaces or
# empty entries intact
eval "arraySize=\${#${arrayName}[*]}"

let i=0
while (( i < arraySize )); do
	# compare each entry only against the entries after it; the first
	# i with a duplicate partner anywhere always has one at a larger
	# index (a smaller partner would have been reported first)
	eval "iString=\${${arrayName}[${i}]}"
	let j=i+1
	while (( j < arraySize )); do
		eval "jString=\${${arrayName}[${j}]}"
		# quoted comparison: empty entries no longer break the test
		if [ "${iString}" = "${jString}" ]; then
			return ${i}
		fi
		(( j=j+1 ))
	done
	(( i=i+1 ))
done
return ${i}
}

############################################################################

function get_newhost
{
typeset idx

# Emit one sed substitution per replacement hostname in the global
# newhost array. Each command rewrites the matching
# cluster.node.<n>.hostname line, keeping the key part and replacing
# the value. The commands are appended to the global ${sedfile}.
let idx=0
while (( idx < ${#newhost[*]} )); do
	echo "/^cluster.node.${idx}.hostname/s|\(.*\):.*$|\1: ${newhost[$idx]}|" >>$sedfile
	(( idx=idx+1 ))
done
}

#############################################################################

# Verify that two interface specifications do not name the same
# interface. With more than two arguments, $2 and $4 are the two
# interface names and must differ; exits 3 on a duplicate.
function validate_interfaces
{
if [[ $# -gt 2 ]]; then
	if [[ "$2" = "$4" ]]; then
		lmsg=`gettext "%s: Interface specified must be unique!"`
		# 1>&2: the original "2>&1" was a no-op that left this
		# error message on stdout
		printf "${lmsg}\n" "$Myname" 1>&2
		exit 3
	fi
fi
}

##############################################################################

# Validate each argument as a controller serial number: it must begin
# with 12 hexadecimal digits. Prints a message to stderr for each bad
# value; returns 0 when all are valid, 1 otherwise.
function validate_controllers
{
typeset error=0
typeset i
for i in $*
do
	# Direct status check. The original wrapped this pipeline in
	# "if $( ... )", which only worked through the obscure rule that
	# an empty command takes the status of its last command
	# substitution.
	echo $i | /bin/grep '^[0-9A-Fa-f]\{12\}' >/dev/null 2>&1
	if [[ $? -ne 0 ]]; then
		lmsg=`gettext "%s: Illegal serial number: %s"`
		# 1>&2: the original "2>&1" was a no-op that left this
		# error message on stdout
		printf "${lmsg}\n" "$Myname" "$i" 1>&2
		error=1
	fi
done
return ${error}
}

##############################################################################
 
# Recreate the symlink for an SSA world-wide-number under the ssa.devdir
# directory, so that <devdir>/<WWN> points at the real device path.
#   $1 - WWN of the SSA
#   $2 - file of candidate device paths, searched for this WWN
# NOTE(review): wwn, ssafile, cdbfile, ssadevdir are assigned as
# globals (this clobbers the script-level ssafile), and the current
# directory is left changed to ssa.devdir - confirm callers expect this.
function create_ssa_link
{
wwn=$1
ssafile=$2
 
cdbfile=$cdbpath/$CLUSTNAME.cdb
ssadevdir=$(cdbmatch ssa.devdir ${cdbfile})
# derive a case-insensitive grep key for the pln device node from the
# leading digits of the WWN
sn=$(print $wwn | sed 's/^\(....\)0*/SUNW,pln@[ab]0*\1,/')
realssapath=$(/bin/grep -i $sn ${ssafile} 2>/dev/null)
# NOTE(review): if no path matched, realssapath is empty and the ln
# below misbehaves - confirm callers guarantee a match.
cd ${ssadevdir}
/bin/rm -rf ${wwn}
ln -s ${realssapath} ${wwn}
}
 
##############################################################################
 
# Recreate the symlink <ssa.devdir>/<diskid> -> /dev/rdsk/<devaddr>.
#   $1 - disk id (name of the link)
#   $2 - disk device address (cXtYdZsN)
# NOTE(review): diskid, devaddr, cdbfile, ssadevdir are assigned as
# globals and the current directory is left changed to ssa.devdir.
function create_disk_link
{
diskid=$1
devaddr=$2
 
cdbfile=$cdbpath/$CLUSTNAME.cdb
ssadevdir=$(cdbmatch ssa.devdir ${cdbfile})
cd ${ssadevdir}
/bin/rm -f ${diskid}
ln -s /dev/rdsk/${devaddr} ${diskid}
}

##############################################################################
#
# function construct_disk_list <cXtYdZs2>:<disk-serial-id>...
#
# This function takes a list of arguments of the form
# "<disk-address>:<serial_id>..."  The disk_address corresponds to the
# physical address of a disk in the form cXtYdZs2 while the serial_id is the
# serial id of the corresponding drive.  The purpose of the function is to
# break up the input list into two different lists - one corresponding to the
# disk addresses (disk_list) and the other to the serial ids (id_list). The
# address and id of a particular disk has corresponding positions on the two
# output lists. This correspondence between the two lists is relied upon by
# the calling function.
 
function construct_disk_list
{
  # Split each "<cXtYdZs2>:<serial-id>" argument into two parallel
  # global lists: disk_list collects the device addresses, id_list the
  # matching drive serial ids. Entry K of one list corresponds to
  # entry K of the other - a pairing the callers rely on.
  disk_list=""
  id_list=""
 
  for item in $*; do
    disk_list="${disk_list} ${item%%:*}"
    id_list="${id_list} ${item##*:}"
  done
 
}
 
################################################################################
#
# function construct_ssa_list <c#>.<WWN>...
#
# This function takes an argument list consisting of <c#>:<WWN>... The c# is the
# controller number on which a particular SSA is present and the WWN is the
# world-wide number of the corresponding SSA. The purpose of this function is to
# break up the input list into two different lists - one corresponding to the
# controller number list - ctlr_list - and the other corresponding to the WWN
# list - ssa_list. The controller number and the WWN have corresponding
# positions on the two output lists. This correspondence is relied upon by the
# calling function.
 
function construct_ssa_list
{
  # Split each "<c#>:<WWN>" argument into two parallel global lists:
  # ctlr_list collects the controller numbers, ssa_list the matching
  # SSA world-wide numbers. Entry K of one list corresponds to entry K
  # of the other - a pairing the callers rely on.
  ctlr_list=""
  ssa_list=""
 
  for item in $*; do
    ctlr_list="${ctlr_list} ${item%%:*}"
    ssa_list="${ssa_list} ${item##*:}"
  done
}

###############################################################################
#
# function get_devices nodeA nodeB
#
# For the specified hosts nodeA and nodeB, get the list of SSAs and other
# disks attached to the system, and the root device of the system. 'rsh' is
# used for remote nodes.
 
# Collect device information for the one or two hosts given as
# arguments and publish it in global arrays indexed by position in
# node_array:
#   ctlr_array[i]    - controller numbers of SSAs on node i
#   ssa_array[i]     - WWNs of SSAs on node i (parallel to ctlr_array)
#   disks_array[i]   - disk addresses (cXtYdZs2) on node i
#   id_array[i]      - disk serial ids (parallel to disks_array)
#   rootdev_array[i] - root device of node i
#   rootc_array[i]   - controller portion (cX) of node i's root device
# The local node is probed directly with ${FINDDEVICES}; remote nodes
# are probed over rsh.
function get_devices
{
  typeset i
  typeset rootdev
  set -A node_array $*
  set -A ctlr_array
  set -A ssa_array
  set -A disks_array
  set -A id_array
  set -A rootdev_array
  set -A rootc_array

  print
  if [[ "$1" = "$2" ]]; then
    lmsg=`gettext 'Getting device information for host %s'`
    printf "${lmsg}\n" "$1"
  else
    lmsg=`gettext 'Getting device information for hosts %s and %s'`
    printf "${lmsg}\n" "${1}" "${2}"
  fi
  lmsg="`gettext 'This may take a few seconds to a few minutes...'`"
  printf "${lmsg}"


  let i=0
  while (( $i < ${#node_array[*]} )); do
    if [[ ${node_array[i]} = "${localhostname}" ]]; then
      # local node

      ssadevlist=$(${FINDDEVICES} ssa ${ssafile} | tr '\012' ' ')
      diskdevlist=$(${FINDDEVICES} disks | tr '\012' ' ')
      # NOTE(review): tr '\012' '' has an empty replacement set, which
      # most tr implementations reject; 'tr -d \012' was probably
      # intended - confirm on the target platform.
      rootdev=$(${FINDDEVICES} rootdev | tr '\012' '')
      rootdev_array[i]=$(print ${rootdev})
    else
      # remote node

      ssadevlist=$(rsh ${node_array[i]} "${FINDDEVICES} ssa" | tr '\012' ' ')
      diskdevlist=$(rsh ${node_array[i]} "${FINDDEVICES} disks" | tr '\012' ' ')
      rootdev=$(rsh ${node_array[i]} "${FINDDEVICES} rootdev" | tr '\012' '')
      rootdev_array[i]=$(print ${rootdev})
    fi

    # split the "<ctlr>:<wwn>" and "<disk>:<serial>" pairs into the
    # parallel global lists, then save them per node
    construct_ssa_list ${ssadevlist}
    ctlr_array[i]=${ctlr_list}
    ssa_array[i]=${ssa_list}
 
    construct_disk_list ${diskdevlist}
    disks_array[i]=${disk_list}
    id_array[i]=${id_list}

    # controller portion (cX) of the root device
    rootc_array[i]=${rootdev_array[i]%%t*}
    let i=i+1
  done
 
  lmsg="`gettext 'done'`"
  printf "${lmsg}\n"
}


###############################################################################
#
# function get_direct_devices nodeA ... nodeN
#
# For the specified hosts nodeA thru nodeN, get the list of disks 
# attached to the system, and the root device of the system. 'rsh' is
# used for remote nodes.
 
# Collect device information for all reachable nodes given as
# arguments and publish it in the same per-node global arrays as
# get_devices (node_array, ctlr_array, ssa_array, disks_array,
# id_array, rootdev_array, rootc_array). The local node is probed
# directly with ${FINDDEVICES}; remote nodes are probed over rsh.
function get_direct_devices
{
  typeset i
  typeset rootdev
  set -A node_array $*
  set -A ctlr_array
  set -A ssa_array
  set -A disks_array
  set -A id_array
  set -A rootdev_array
  set -A rootc_array

  lmsg="`gettext \
'\n\
Getting device information for reachable nodes in the cluster.\n\
This may take a few seconds to a few minutes...'`"
   printf "${lmsg}"

  let i=0
  while (( $i < ${#node_array[*]} )); do
    if [[ ${node_array[i]} = "${localhostname}" ]]; then
      # local node

      ssadevlist=$(${FINDDEVICES} ssa ${ssafile} | tr '\012' ' ')
      diskdevlist=$(${FINDDEVICES} disks | tr '\012' ' ')
      # NOTE(review): tr '\012' '' has an empty replacement set, which
      # most tr implementations reject; 'tr -d \012' was probably
      # intended - confirm on the target platform.
      rootdev=$(${FINDDEVICES} rootdev | tr '\012' '')
      rootdev_array[i]=$(print ${rootdev})
    else
      # remote node

      ssadevlist=$(rsh ${node_array[i]} "${FINDDEVICES} ssa" | tr '\012' ' ')
      diskdevlist=$(rsh ${node_array[i]} "${FINDDEVICES} disks" | tr '\012' ' ')
      rootdev=$(rsh ${node_array[i]} "${FINDDEVICES} rootdev" | tr '\012' '')
      rootdev_array[i]=$(print ${rootdev})
    fi

    # split the "<ctlr>:<wwn>" and "<disk>:<serial>" pairs into the
    # parallel global lists, then save them per node
    construct_ssa_list ${ssadevlist}
    ctlr_array[i]=${ctlr_list}
    ssa_array[i]=${ssa_list}
 
    construct_disk_list ${diskdevlist}
    disks_array[i]=${disk_list}
    id_array[i]=${id_list}

    # controller portion (cX) of the root device
    rootc_array[i]=${rootdev_array[i]%%t*}
    let i=i+1
  done
 
  lmsg="`gettext 'done\n'`"
  printf "${lmsg}\n"

}

################################################################################
#
# function select_common_devices node1 ... nodeN
#
# Select the set of common devices (disks in photon) between nodes
# node1 thru nodeN. This function uses the device arrays that have been
# constructed in the get_devices function.
 
# Given two indices into the live-node arrays built by get_devices,
# print the set of devices shared by the two nodes: each common SSA as
# "SSA:<WWN>" and each common disk as "DISK:<addr>:<serial>". Disk
# addresses are taken from the local node's view when node1 is local,
# otherwise from node2's view. Root disks of either node are excluded.
function select_common_devices
{
  typeset devset
  typeset node1
  typeset node2
  typeset ii
  typeset jj
 
  let node1=$1
  let node2=$2
 
  # Extract the list of common SSAs between node1 and node2
  # (substring pattern match of each of node1's WWNs against node2's
  # WWN list)
 
  devset=""
  for ssa in ${ssa_array[node1]}; do
    if [[ ${ssa_array[node2]} = *${ssa}* ]]; then
      devset="${devset} SSA:${ssa}"
    fi
  done
 
  # Initialize temporary arrays

  set -A disk_array_node1 ${disks_array[node1]}
  set -A disk_array_node2 ${disks_array[node2]}
  set -A id_array_node1 ${id_array[node1]}
  set -A id_array_node2 ${id_array[node2]}
 
  # A disk is common when the same serial id is seen by both nodes and
  # it is the root device of neither node.
  let ii=0
  while (( $ii < ${#id_array_node1[*]} )); do
    let jj=0
    while (( $jj < ${#id_array_node2[*]} )); do
      if [[ ${id_array_node1[ii]} = ${id_array_node2[jj]} && \
            ${disk_array_node1[ii]} != ${rootdev_array[node1]} && \
            ${disk_array_node2[jj]} != ${rootdev_array[node2]} ]]; then
 
        # report the disk under the address valid on the local node
        # when possible, otherwise under node2's address
        if [[ ${node_array[node1]} = "${localhostname}" ]]; then
          devset="${devset} DISK:${disk_array_node1[ii]}:${id_array_node1[ii]}"
        else
          devset="${devset} DISK:${disk_array_node2[jj]}:${id_array_node2[jj]}"
        fi
	break;
      fi
      let jj=jj+1
    done
    let ii=ii+1
  done
 
  print ${devset}
}

################################################################################
#
# function select_direct_common_devices node1 ... nodeN
#
# Select the set of common devices between nodes
# node1 thru nodeN. This function uses the device arrays that have been
# constructed in the get_direct_devices function.
# Here is what's going on here:
# 
# - the disk and id arrays for the first
# active node (first_node) are loaded into working arrays. ii holds this node
# index.
# - Starting at node 1, for each serial
# number in the id array it will search (using grep) for that serial
# number in the devices for the current node. 
# - If it finds one then it will figure out what
# entry in the current id array that is and will then store that index along
# with the index for node ii into an index array. 
# - The same search will be
# done for all remaining nodes; each time a match is found the index
# for that specific node will be stored in the index array. 
# - If at any
# time one of the nodes does not contain a matching serial number, the
# next serial number for node ii is used and the index array re-init
# since that disk is not common to all.
#
# After all the nodes are checked and we have a valid common disk we
# then load it into the devset variable, with each node getting their
# proper disk id (cX...).
#
# Once we have the list of common devices we call the new function
# select_direct_node_quorum_dev which presents to the user the list of
# common devices for each node and accepts input. This is just a
# modified version of the current select_node_quorum_dev function for
# dual attached storage.
 
# Print the set of disks common to live nodes 0 .. $1-1 as
# "DISK:<addr>:<serial>" entries, using the arrays built by
# get_direct_devices. A disk qualifies when its serial id is present
# on every live node (num_live_nodes of them, a global) and it is not
# any node's root device; the address reported is the local node's
# view (see the algorithm description in the comment block above).
function select_direct_common_devices
{
  typeset devset
  typeset first_node
  typeset node_index
  typeset ii
  typeset jj

  let first_node=0
  # number of live nodes to check; NOTE(review): highest_node and
  # node/index_array are left as globals
  let highest_node=$1

  # Initialize temporary arrays

  set -A disk_array1 ${disks_array[${first_node}]}
  set -A id_array1 ${id_array[${first_node}]}


  # walk every disk of the first live node ...
  let ii=0
  while (( ii < ${#id_array1[*]} )); do
    let node=1
    unset index_array
    # ... and require its serial id to be present on every other node
    while (( node < highest_node )); do
      if print ${id_array[node]} | /usr/bin/tr ' ' '\n' | /bin/grep -w ${id_array1[ii]} >/dev/null 2>&1
      then
	set -A id_array2 ${id_array[node]}
	set -A disk_array2 ${disks_array[node]}
	# locate the matching entry on this node, skipping root disks
	let jj=0
	while (( jj < ${#id_array2[*]} )); do
	  if [[ ${id_array1[ii]} = ${id_array2[jj]} && \
	        ${disk_array1[ii]} != ${rootdev_array[first_node]} && \
	        ${disk_array2[jj]} != ${rootdev_array[node]} ]]; then
	    index_array[node]=${jj}
	    index_array[first_node]=${ii}
	    break                 # next node
	   fi
	   let jj=jj+1
	done
      else
	break                     # next ii or next disk
      fi
      let node=node+1
    done

    # the disk is common only if every live node contributed an index
    let jj=0
    if (( ${#index_array[*]} == num_live_nodes )); then
      while (( jj < ${#index_array[*]} )); do
	set -A disk_array_tmp ${disks_array[jj]}
	set -A id_array_tmp ${id_array[jj]}
	# only the local node's device address is added to the output
	if [[ ${node_array[jj]} = ${localhostname} ]]; then
	  devset="${devset} DISK:${disk_array_tmp[${index_array[jj]}]}:${id_array_tmp[${index_array[jj]}]}"
	fi
	let jj=jj+1
      done
    fi

    let ii=ii+1
  done

  print ${devset}

}
 
###############################################################################
#
# function select_quorum_dev nodeA nodeB devset
#
# Select the quorum device for nodes node1 and node2 from among the set of
# devices in devset. The variable quorumdev is set to the selected quorum
# device. The selected quorum device comes in one of three flavors - <WWN>,
# <WWN>.<disk serial id>, or <disk address>:<disk serial id>
 
# Interactively choose the quorum device shared by the first two live
# nodes from the device set passed after the first two arguments.
# Sets the global quorumdev to one of:
#   <WWN>                      - a whole SSA
#   <WWN>.<disk-serial-id>     - a specific disk inside an SSA
#   <disk-address>:<serial-id> - a directly attached disk
# NOTE(review): the first two arguments are discarded (shift; shift)
# and node1/node2 are hard-coded to live-node indices 0 and 1; the
# prompt text uses the globals nodeA/nodeB - confirm callers set them.
function select_quorum_dev
{
  typeset devset
  typeset node1
  typeset node2
  typeset qdev
  typeset wwn
 
  node1=0
  node2=1
  shift; shift
  devset=$*
 
  lmsg=`gettext \
'Select quorum device for %s and %s.\n\
Type the number corresponding to the desired selection.\n\
For example: 1\n'`
  printf "${lmsg}\n" "${nodeA}" "${nodeB}"

  PS3='Quorum device: '
  select qdev in ${devset}; do
    if [[ -n "${qdev}" ]]; then

      if [[ "${qdev}" = SSA:* ]]; then
        # an SSA was picked; offer to narrow it down to a single disk
        wwn=${qdev##SSA:}
        select_qdisk_from_ssa ${wwn} ${node1} ${node2}
 
        # quorumdiskdev non-empty means a specific disk was chosen
        if [[ -n "${quorumdiskdev}" ]]; then
	  lmsg="`gettext \
'\nDisk %s with serial id %s \n\
in SSA %s has been chosen as the quorum device.\n\n'`"
	  printf "${lmsg}\n" "${quorumdiskdev%%:*}" "${quorumdiskdev##*:}" \
		"${wwn}"
          quorumdev=${wwn}.${quorumdiskdev##*:}
        else
	  lmsg="`gettext \
'\nSSA with WWN %s has been chosen as the quorum device.'`"
	  printf "${lmsg}\n" "${wwn}"
          quorumdev=${wwn}
        fi
      else
        # a plain disk entry of the form DISK:<addr>:<serial>
        qdev=${qdev##DISK:}
	lmsg="`gettext \
'Disk %s with serial id %s has been chosen\n\
as the quorum device.\n'`"
	printf "${lmsg}\n" "${qdev%%:*}" "${qdev##*:}"
        quorumdev=${qdev}
      fi
      break
    else
      lmsg="`gettext '\nInvalid Selection\n'`"
      printf "${lmsg}\n"

    fi
  done
}
################################################################################
#
# function select_direct_node_quorum_dev numofnodes devset
#
# Select the quorum device for nodes node0 thru nodeN, where N is
# passed in.
# The device is selected from among the set of devices in the variable
# devset. The variable quorumdev is set to the selected quorum device. 
# The selected quorum device comes in the form 
# <disk address>:<disk serial id>

# Interactively choose the quorum device shared by all live nodes
# (count in $1) from the device set in the remaining arguments. Sets
# the global quorumdev to "<disk-address>:<serial-id>".
function select_direct_node_quorum_dev
{
  typeset devset
  typeset num_of_nodes
  typeset qdev
  typeset wwn

  num_of_nodes=$1
  shift
  devset=$*

  # list each live node as "<static-id> (<name>)", the last one after
  # a localized 'and'
  lmsg="`gettext 'Select quorum device for the following nodes:'`"
  printf "${lmsg}\n"
  let i=0
  while (( i < ${num_of_nodes}-1 )); do
    node_static_index=$(get_static_node_index ${i})
    print " ${node_static_index} (${node_array[${i}]})"
    let i=i+1
  done
  lmsg="`gettext 'and'`"
  printf "${lmsg}\n"
  node_static_index=$(get_static_node_index ${i})
  print " ${node_static_index} (${node_array[${i}]})"
  # NOTE(review): this here-document is fed to 'print', which does not
  # read stdin, so the usage text below is never displayed; 'cat <<!EOM'
  # was probably intended - confirm before changing the output.
  print <<!EOM
"
Type the number corresponding to the desired selection.
For example: 1\<CR\>
"
!EOM

  PS3='Quorum device: '
  select qdev in ${devset}; do
    if [[ -n "${qdev}" ]]; then

      # strip the DISK: prefix, leaving <addr>:<serial>
      qdev=${qdev##DISK:}
      lmsg="`gettext \
'Disk %s with serial id %s has been chosen\n\
as the quorum device.\n'`"
      printf "${lmsg}\n" "${qdev%%:*}" "${qdev##*:}"
      quorumdev=${qdev}
      break
    else
      lmsg="`gettext '\nInvalid Selection\n'`"
      printf "${lmsg}\n"
    fi
  done
}
 
###############################################################################
#
# function get_ctlr_for_ssa wwn node
#
# Get the controller number through which the SSA with the specified wwn is
# accessible on the given node.
 
function get_ctlr_for_ssa
{
  typeset target_ssa
  typeset nodenum
  typeset idx
  typeset ctlrnum

  target_ssa=$1
  nodenum=$2

  # Unpack the node's parallel WWN/controller lists (built by
  # get_devices) into temporary arrays for positional lookup.
  set -A ssa_array_on_node ${ssa_array[$nodenum]}
  set -A ctlr_array_on_node ${ctlr_array[$nodenum]}

  # Linear scan: the controller at the same position as the matching
  # WWN is the one through which this node reaches the SSA. Prints an
  # empty line when the WWN is not found on the node.
  let idx=0
  while (( $idx < ${#ssa_array_on_node[*]} )); do
    if [[ ${ssa_array_on_node[idx]} = ${target_ssa} ]]; then
      ctlrnum=${ctlr_array_on_node[idx]}
      break
    fi
    let idx=idx+1
  done

  print ${ctlrnum}
}
 
################################################################################
#
# function select_qdisk_from_ssa ssa node1 node2
#
# This function provides the option of selecting a quorum disk from the
# specified ssa. The two nodes that are connected to this ssa are also passed as
# input parameters to check whether the root devices of these nodes reside in
# this ssa, and if so, to exclude the corresponding disks from the selection
# process.
 
# Offer the disks of SSA $1 (a WWN) as quorum-disk candidates for the
# live nodes with indices $2 and $3. Sets the global quorumdiskdev to
# "<cXtYdZs2>:<serial-id>" for the chosen disk, or to "" when the user
# picks "None" to keep the whole SSA as the quorum device. Root disks
# of either node are excluded from the list, and picking a disk is
# forced (no "None" option) when either root disk lives in this SSA.
function select_qdisk_from_ssa
{
  typeset ssa
  typeset node1
  typeset node2
  typeset item
  typeset td
  typeset diskdevaddr
  typeset disksid
  typeset ctlr_node1
  typeset ctlr_node2
  typeset ssadisks
 
  ssa=$1
  node1=$2
  node2=$3
 
  ctlr_node1=$(get_ctlr_for_ssa ${ssa} ${node1})
  ctlr_node2=$(get_ctlr_for_ssa ${ssa} ${node2})
 
  # enumerate the SSA's disks from whichever node is local, else over rsh
  if [[ ${node_array[${node1}]} = "${localhostname}" ]]; then
    ssadisks=$(${FINDDEVICES} disks ${ctlr_node1} | tr '\012' ' ')
  elif [[ ${node_array[${node2}]} = "${localhostname}" ]]; then
    ssadisks=$(${FINDDEVICES} disks ${ctlr_node2} | tr '\012' ' ')
  else
    ssadisks=$(rsh ${node_array[${node2}]} "${FINDDEVICES} disks ${ctlr_node2}" | tr '\012' ' ')
  fi
 
  construct_disk_list ${ssadisks}
 
  # set of disks in the ssa is now in the variable disk_list. First step is to
  # remove the controller number from this list to facilitate easy elimination
  # of the root devices of the two nodes from this list.
 
  disk_list=$(print ${disk_list} | sed -e 's/c[0-9]\{1,\}//g')
 
  # eliminate node1's root disk from this list
 
  if [[ "${ctlr_node1}" = ${rootc_array[${node1}]} ]]; then
    td=${rootdev_array[${node1}]##c[0-9]*([0-9])}
    disk_list=$(print ${disk_list} | sed -e "s/${td}//g")
  fi
 
  # eliminate node2's root disk from this list
 
  if [[ "${ctlr_node2}" = ${rootc_array[${node2}]} ]]; then
    td=${rootdev_array[${node2}]##c[0-9]*([0-9])}
    disk_list=$(print ${disk_list} | sed -e "s/${td}//g")
  fi
 
  # eliminate the "s2" suffix from each item in the remaining list
 
  disk_list=$(print ${disk_list} | sed -e 's/s2//g')
 
  # Force selection of a quorum disk if the ssa contains the root
  # disk of one or both nodes.
 
  if [[ "${ctlr_node1}" = ${rootc_array[${node1}]} || \
        "${ctlr_node2}" = ${rootc_array[${node2}]} ]]; then
    lmsg="`gettext \
'\nThe root disk of one or both of %s and \n\
%s is in SSA %s. You *will* have to\n\
select a disk as the quorum device from this SSA. The root disk(s)\n\
have already been excluded from the list below.'`"
    printf "${lmsg}\n" "${node_array[${node1}]}" "${node_array[${node2}]}" \
	   "${ssa}"
  else
    disk_list="None ${disk_list}"
    lmsg="`gettext \
'\nYou have the option of selecting a quorum disk from SSA %s.\n\
Choose one of the following disks, identified by their target and\n\
drive numbers as your quorum device. You may also choose\n\
option 1 - NONE - to retain the SSA %s as your quorum device.'`"
    printf "${lmsg}\n" "${ssa}" "${ssa}"
  fi
 
  PS3='Quorum disk: '
  select quorumdiskdev in ${disk_list}; do
    if [[ -n "${quorumdiskdev}" ]]; then
      if [[ "${quorumdiskdev}" != "None" ]]; then

        # rebuild the full device address (controller + tXdY + s2) and
        # ask the owning node for the disk's serial id
        if [[ ${node_array[${node1}]} = "${localhostname}" ]]; then
          diskdevaddr=${ctlr_node1}${quorumdiskdev}s2
          disksid=$(${PDBSSA} inquiry ${diskdevaddr})

        elif [[ ${node_array[${node2}]} = "${localhostname}" ]]; then
          diskdevaddr=${ctlr_node2}${quorumdiskdev}s2
          disksid=$(${PDBSSA} inquiry ${diskdevaddr})

        else
          diskdevaddr=${ctlr_node2}${quorumdiskdev}s2
          disksid=$(rsh ${node_array[${node2}]} "${PDBSSA} inquiry ${diskdevaddr}")
        fi
        quorumdiskdev=${diskdevaddr}:${disksid}
      else
        quorumdiskdev=""
      fi
      break
    else
      lmsg="`gettext '\nInvalid selection'`"
      printf "${lmsg}\n"
    fi
  done
}

###############################################################################
#
# This function is called from change_quorum_dev and takes two arguments - the
# new and the old quorum device, respectively. It releases the reservation on
# the old quorum device only if it is successfully able to reserve the new
# quorum device.
 
function reserve_release_qdev
{
typeset new_dev
typeset old_dev
 
new_dev=$1
old_dev=$2
 
# Reserve the new quorum device first; the old reservation is only
# released once the new one succeeds, so a failure here leaves the
# system unchanged.
${PDBSSA} q_reserve ${new_dev}
if (( $? != 0 )); then
	lmsg="`gettext \
'\nUnable to reserve new quorum device: %s\n\
No changes have been made to the system.'`"
	printf "${lmsg}\n" "${new_dev}"
	exit 1
fi

${PDBSSA} release ${old_dev}
}
 
##############################################################################
#
# function change_quorum_dev nodeA nodeB
#
# This is an interactive function that needs to be run on all
# statically configured nodes of the cluster to change the quorum
# device that is shared between nodeA and nodeB. 

function change_quorum_dev
{
# Change the quorum device shared between two nodes.  Must be run on all
# statically configured nodes of the cluster.
#
# Sub-options (of -q): -m dev  manual mode; use "dev", skip probing/prompting
#                      -D      direct-connect cluster; delegates to
#                              change_direct_quorum_dev and returns
# Remaining args:      nodeA nodeB - the two hosts sharing the device
#
# Side effects: appends CDB update commands to ${sedfile}; may create
# device links locally; may move SCSI reservations; in interactive mode
# probes remote nodes over rsh (enable_rcmd/disable_rcmd manage /.rhosts).
typeset i
typeset numnodes
typeset nodename
typeset nodelist
typeset nodeA
typeset nodeB
typeset manualmode
typeset nodeA_is_down
typeset nodeB_is_down
typeset nodeA_is_member
typeset nodeB_is_member
typeset flags
typeset direct_connect

let manualmode=0
let direct_connect=0

# Parse the sub-options of -q.
while getopts :Dm: c
do
  case $c in
    m) let manualmode=1
       quorumdev=$OPTARG
       ;;
    D) let direct_connect=1
       ;;
    :) lmsg=`gettext "%s: Missing parameter to %s"` 
       # NOTE(review): "2>&1" redirects stderr onto stdout here (and at the
       # similar messages below); "1>&2" was probably the intent.
       printf "${lmsg}\n" "$Myname" "$OPTARG" 2>&1	
       ;;
    \?) lmsg=`gettext "%s: %s is not a valid sub option to -q."`
	printf "${lmsg}\n" "$Myname" "$OPTARG" 2>&1
	usage
	exit 2
	;;
    esac
done

(( shift_amount = OPTIND - 1 ))
shift $shift_amount

# Direct-attached clusters are handled by a separate routine; quorumdev
# (from -m) is passed along when manual mode was requested.
if (( direct_connect == 1 )); then
  change_direct_quorum_dev $manualmode $quorumdev
  return 0
fi

if (( $# < 2 )); then
  lmsg=`gettext "%s: Illegal number of arguments. \
Should be:\n%s clustername -q [-m quorum-device] nodeA nodeB"`
  printf "${lmsg}\n" "${Myname}" "${Myname}" 2>&1
  exit 2
fi

nodeA=$1
nodeB=$2

if [[ "${nodeA}" = "${nodeB}" ]]; then
  lmsg=`gettext 'Two different host names must be specified.'`
  printf "${lmsg}\n"
  exit 1
fi

cdbfile=${cdbpath}/${CLUSTNAME}.cdb
FINDDEVICES=/opt/SUNWcluster/bin/finddevices

# find the hostid's of nodeA and nodeB

hostidA=$(get_hostid ${nodeA})
if [[ -z "${hostidA}" ]]; then
  lmsg=`gettext 'Host %s is not part of the cluster configuration.'`
  printf "${lmsg}\n" "${nodeA}" 
  exit 1
fi

hostidB=$(get_hostid ${nodeB})
if [[ -z "${hostidB}" ]]; then
  lmsg="`gettext 'Host %s is not part of the cluster configuration.'`"
  printf "${lmsg}\n" "${nodeB}"
  exit 1
fi

# In manual mode the SSA inventory is still collected (into ${ssafile})
# so that create_ssa_link can resolve a WWN later on.
if (( manualmode == 1 )); then
  ${FINDDEVICES} ssa ${ssafile} > /dev/null 2>&1
fi

# find the old quorum device associated with the two hosts
# (the quorumdev CDB key always lists the lower hostid first)

if (( ${hostidA} < ${hostidB} )); then
  oldquorumdev=$(cdbmatch quorumdev.node.${hostidA}.${hostidB} ${cdbfile})
else
  oldquorumdev=$(cdbmatch quorumdev.node.${hostidB}.${hostidA} ${cdbfile})
fi

# Record current cluster membership of both nodes; used at the end to
# decide whether reservations must be moved.
if [[ "${curr_members}" = *${hostidA}* ]]; then
  let nodeA_is_member=1
else
  let nodeA_is_member=0
fi

if [[ "${curr_members}" = *${hostidB}* ]]; then
  let nodeB_is_member=1
else
  let nodeB_is_member=0
fi

if (( manualmode == 0 )); then

  # enable remote access for all statically configured nodes

  let numnodes=$(cdbmatch cluster.number.nodes ${cdbfile})
  let i=0
  while (( i < numnodes )); do
    nodename=$(cdbmatch cluster.node.${i}.hostname ${cdbfile})
    nodelist="${nodelist} ${nodename}"
    let i=i+1
  done
  enable_rcmd ${nodelist}

  # This is a kludgy synchronization point to give time for slow-coaches to update
  # their rhosts file.

  sleep 8

  # check whether both hosts are alive and whether the appropriate
  # software is installed on the hosts  If one of them is not, it is not possible
  # to obtain the intersection of the devices attached to the hosts. In this case,
  # we present the list of all devices attached to the host that is up and leave it
  # to the administrator to pick a shared device as the new quorum device in the hope
  # that they know what they are doing.

  # To avoid handling this situation as a special case in the code, we set nodeA
  # and nodeB equal to each other, so that the interesection of the devices
  # becomes equal to the set of devices attached to either nodeA or nodeB.

  let nodeA_is_down=0
  let nodeB_is_down=0

  if [[ "${nodeA}" != "${localhostname}" ]]; then
    lmsg="`gettext 'Checking host %s ...'`"
    printf "${lmsg}\n" "${nodeA}"
    timed_run 10 /usr/sbin/ping ${nodeA}
    if [[ $? -ne 0 ]]; then
      lmsg="`gettext 'Not responding?'`"
      printf "${lmsg}\n"
      nodeA_is_down=1
    else
      # Reachable but without finddevices installed counts as "down" too.
      result=$(rsh ${nodeA} "/bin/ls ${FINDDEVICES}" 2> /dev/null)
      if [[ "${result}" != "${FINDDEVICES}" ]]; then
	nodeA_is_down=1
      fi
    fi
  fi

  if [[ "${nodeB}" != "${localhostname}" ]]; then
    lmsg="`gettext 'Checking host %s ...'`"
    # NOTE(review): unlike the nodeA message above, this printf omits the
    # trailing \n.
    printf "${lmsg}" "${nodeB}"
    timed_run 10 /usr/sbin/ping ${nodeB}
    if [[ $? -ne 0 ]]; then
      lmsg="`gettext 'Not responding?'`" 
      printf "${lmsg}\n"
      nodeB_is_down=1
    else
      result=$(rsh ${nodeB} "/bin/ls ${FINDDEVICES}" 2> /dev/null)
      if [[ "${result}" != "${FINDDEVICES}" ]]; then
	nodeB_is_down=1
      fi
    fi
  fi

  if (( nodeA_is_down == 1 && nodeB_is_down == 1 )); then
    lmsg="`gettext \
'\nBoth the specified hosts are unreachable. It is not possible to change\n\
the quorum device of these hosts at this time.'`"
    printf "${lmsg}\n" 
    disable_rcmd
    exit 1
  fi

  if (( nodeA_is_down == 1 || nodeB_is_down == 1 )); then
    lmsg="`gettext \
'\nOne of the specified hosts is either unreachable or permissions\n\
are not set up or the appropriate software has not been installed\n\
on it. In this situation, it is not possible to determine the set\n\
of devices attached to both the hosts.\n\
Note that both private and shared devices for this host will be\n\
displayed in this node and the administrator must exercise extreme\n\
caution in choosing a suitable quorum device.\n'`"
    printf "${lmsg}\n"  
  fi

  # When one side is down, probe the live node against itself so the
  # "intersection" degenerates to that node's full device list.
  if (( nodeA_is_down == 1 )); then
    get_devices ${nodeB} ${nodeB}
  elif (( nodeB_is_down == 1 )); then
    get_devices ${nodeA} ${nodeA}
  else
    get_devices ${nodeA} ${nodeB}
  fi
  
  common_devices=$(select_common_devices 0 1)

  if [[ -z "${common_devices}" ]]; then
    lmsg="`gettext \
'Host %s and host %s do not share any common devices between them.'`"
    printf "${lmsg}\n" "${nodeA}" "${nodeB}"
    disable_rcmd
    exit 1
  fi

  # Interactive selection; sets the quorumdev global.
  select_quorum_dev ${nodeA} ${nodeB} ${common_devices}
  #
  # Another sleep for synchronizing between nodes.
  sleep 8
  disable_rcmd
  
fi  # manualmode == 0

# quorumdev is set to one of <WWN>, <WWN>.<DSID>, or <ADDR>:<DSID>

if [[ "${quorumdev}" = *:* ]]; then
  # <ADDR>:<DSID> form - split into device id and disk address.
  newquorumdev=${quorumdev##*:}
  diskaddr=${quorumdev%%:*}
  
  if [[ "${nodeA}" = "${localhostname}" || "${nodeB}" = "${localhostname}" ]]; then
    create_disk_link ${newquorumdev} ${diskaddr}
  fi
else
  # <WWN> or <WWN>.<DSID> form - extract the WWN for the SSA link.
  newquorumdev=${quorumdev}
  if [[ "${nodeA}" = "${localhostname}" || "${nodeB}" = "${localhostname}" ]]; then

    if [[ "${newquorumdev}" = *\.* ]]; then
      wwn=${newquorumdev%%.*}
    else
      wwn=${newquorumdev}
    fi
    create_ssa_link ${wwn} ${ssafile}
  fi
fi

# If either nodeA or nodeB is not in the cluster membership, but the other is,
# reserve the new quorum device and release the reservation on the old quorum
# device. 

if (( (nodeA_is_member == 1 && nodeB_is_member == 0) ||
      (nodeA_is_member == 0 && nodeB_is_member == 1) )); then
  if [[ "${nodeA}" = "${localhostname}" || "${nodeB}" = "${localhostname}" ]]; then
    reserve_release_qdev ${newquorumdev} ${oldquorumdev}
  fi
fi

# Queue the CDB edit; the quorumdev key always lists the lower-numbered
# hostid first.  ${sedfile} is applied to the CDB by the caller.
/bin/rm -f ${ssafile}
if (( ${hostidA} < ${hostidB} )); then
  echo "/^quorumdev.node.${hostidA}.${hostidB}/s|\(.*\):.*$|\1: $newquorumdev|"\
>> $sedfile
else
  echo "/^quorumdev.node.${hostidB}.${hostidA}/s|\(.*\):.*$|\1: $newquorumdev|"\
>> $sedfile
fi
}
##############################################################################
#
# function change_direct_quorum_dev
#
# This is an interactive function that needs to be run on all
# statically configured nodes of the cluster to change the quorum
# device that is shared between all the nodes defined in this cluster.

function change_direct_quorum_dev
{
# Change the quorum device of a direct-attached-device cluster (one
# device shared by all nodes).  Must be run on every statically
# configured node.
#
#   $1 - manualmode flag: 1 if the device was supplied on the command line
#   $2 - the quorum device, only when manualmode is 1
#
# Side effects: appends the CDB update commands for every node pair to
# ${sedfile}; may create a local disk link; may move SCSI reservations
# when this node is the sole cluster member.
typeset -i i
typeset nodename
typeset nodelist
typeset manualmode
typeset -i directc
typeset num_of_nodes
typeset quorumdev
typeset myhostid
typeset live_nodes
typeset oldquorumdev
typeset newquorumdev
typeset -i i_am_a_member
typeset node
typeset common_devices
typeset num_live_nodes
typeset diskaddr

let manualmode=$1
shift

# Establish the CDB path and helper path before the first cdbmatch
# lookup.  (These assignments previously appeared only after cdbmatch
# had already been called with ${cdbfile}, which worked only when a
# caller happened to have set the same globals beforehand.)
cdbfile=${cdbpath}/${CLUSTNAME}.cdb
FINDDEVICES=/opt/SUNWcluster/bin/finddevices

# We are in the direct connect module. If the direct connect
# sc flag is not set then we need to abort.
directc=$(cdbmatch cmm.directattacheddevice ${cdbfile})
if (( directc != 1 )); then
    lmsg=`gettext "%s: Illegal option specified. Direct connected quorum\n\
device has been selected. However the cluster configuration database\n\
does not indicate that this is valid for this cluster."`
    printf "${lmsg}\n" "${Myname}" 2>&1
    exit 2
fi


# get the number of nodes in this cluster, if not greater than 2, abort.
num_of_nodes=$(cdbmatch cluster.number.nodes ${cdbfile})
if (( num_of_nodes <= 2 )); then
  lmsg="`gettext '%s: Illegal number nodes configured in cluster for this option.'`"
  printf "${lmsg}\n" "${Myname}" 2>&1
  exit 2
fi

if (( manualmode == 1 )); then
  if (( $# != 1 )); then
    lmsg="`gettext '%s: Illegal number of arguments.\n\
Should be:\n%s clustername -q -D [-m quorum-device]'`"	
    printf "${lmsg}\n" "${Myname}" "${Myname}" 2>&1
    exit 2
  else
    quorumdev=$1
  fi
fi

# What is my host id?
myhostid=$(get_hostid ${localhostname})

# find the old quorum device (get for 0.1 - just as good as any!)

oldquorumdev=$(cdbmatch quorumdev.node.0.1 ${cdbfile})

# Am I a cluster member? If so I'm going to do something specific
# later.
if [[ "${curr_members}" = *${myhostid}* ]]; then
  let i_am_a_member=1
else
  let i_am_a_member=0
fi

# Collect the hostnames of all statically configured nodes.
let i=0
while (( i < num_of_nodes )); do
    nodename=$(cdbmatch cluster.node.${i}.hostname ${cdbfile})
    nodelist="${nodelist} ${nodename}"
    let i=i+1
done

if (( manualmode == 0 )); then

  # enable remote access for all statically configured nodes

  enable_rcmd ${nodelist}

  # This is a kludgy synchronization point to give time for 
  # slow-coaches to update their rhosts file.

  sleep 8

  # check whether all the hosts are alive and whether the appropriate
  # software is installed on the hosts  If one or more of them are not, 
  # it is not possible to obtain the intersection of the devices 
  # attached to the hosts. In this case, we present the list of all 
  # devices attached to the host that is up and leave it to the 
  # administrator to pick a shared device as the new quorum device in the 
  # hopes that they know what they are doing.

  let i=0
  live_nodes=""
  let num_live_nodes=0
  set -A node_array ${nodelist}
  
  while (( i < num_of_nodes )); do
    node=${node_array[i]}
    lmsg="`gettext 'Checking host %s ...'`"
    printf "${lmsg}\n" "${node}"
    /opt/SUNWcluster/bin/timed_run 10 /usr/sbin/ping ${node} > /dev/null 2>&1
    if [[ $? -ne 0 ]]; then
      lmsg="`gettext 'Not responding?'`"
      printf "${lmsg}\n"
    else
      # A node counts as live only if finddevices is installed on it.
      result=$(rsh ${node} "/bin/ls ${FINDDEVICES}" 2> /dev/null)
      if [[ "${result}" = "${FINDDEVICES}" ]]; then
	live_nodes="${live_nodes} ${node}"
	let num_live_nodes=num_live_nodes+1
      fi
    fi
    let i=i+1
  done

  if (( num_live_nodes == 0 )); then
    lmsg="`gettext \
'\nAll of the specified hosts are unreachable. It is not possible to change\n\
the quorum device of these hosts at this time.'`"
    printf "${lmsg}\n" 
    disable_rcmd
    exit 1
  fi

  if (( num_live_nodes > 0 && num_live_nodes < num_of_nodes )); then
    lmsg="`gettext \
'\nOne or more of the specified hosts are either unreachable or permissions\n\
are not set up or the appropriate software has not been installed\n\
on it. In this situation, it is not possible to determine the set \n\
of devices attached to all the hosts.\n\
Note that both private and shared devices for this host will be \n\
displayed in this mode and the administrator must exercise extreme\n\
caution in choosing a suitable quorum device.\n'`"
    printf "${lmsg}\n"

  fi

  get_direct_devices ${live_nodes}
  
  common_devices=$(select_direct_common_devices ${num_live_nodes})

  if [[ -z "${common_devices}" ]]; then
    if (( num_live_nodes > 1 )); then
        lmsg="`gettext 'Hosts do not share any common devices between them:\n%s'`"
    elif (( num_live_nodes == 1 )); then
        lmsg="`gettext 'Host does not share any common devices with any other node:\n%s'`"
    fi
    printf "${lmsg}\n" "${live_nodes}"
    disable_rcmd
    exit 1
  fi

  # Interactive selection; sets quorumdev (dynamic scope).
  select_direct_node_quorum_dev ${num_live_nodes} ${common_devices}
  #
  # Another sleep for synchronizing between nodes.
  sleep 8
  disable_rcmd
  
fi  # manualmode == 0

# quorumdev is set to <ADDR>:<DSID>

newquorumdev=${quorumdev##*:}
diskaddr=${quorumdev%%:*}
  
create_disk_link ${newquorumdev} ${diskaddr}

# Am I a cluster member? If not then do nothing, if yes then am I the only
# member? If yes call reserve_release_qdev to reserve the new quorum device 
# and release the reservation on the old quorum device, if not do nothing.

if (( i_am_a_member == 1 )); then
  set -- ${curr_members}
  if (( $# == 1 )); then
    reserve_release_qdev ${newquorumdev} ${oldquorumdev}
  fi
fi

# Queue a CDB edit for every (lower,higher) hostid pair so that all
# quorumdev keys point at the new device.
/bin/rm -f ${ssafile}
set -A node_array ${nodelist}
let i=0
while (( i < ${#node_array[*]} )); do
  host1=$(get_hostid ${node_array[${i}]})
  let j=i+1
  while (( j < num_of_nodes )); do
    host2=$(get_hostid ${node_array[${j}]})
    echo "/^quorumdev.node.${host1}.${host2}/s|\(.*\):.*$|\1: $newquorumdev|"\
     >>$sedfile
    let j=j+1
  done
  let i=i+1
done

}
###############################################################################

function get_interfaces
{
# Queue sed edits replacing private network interfaces in the CDB.
# Arguments are pairs: <interface-index> <new-interface-name> ...
# Reads the current if/haiface/ifpath values for node ${hostid} from
# ${cdbfile} and appends one substitution per key to ${sedfile}.
newinterfaces=$*
if [[ -n "$newinterfaces" ]]; then
	lmsg="`gettext 'newinterfaces = %s'`"
	printf "${lmsg}\n" "${newinterfaces}"
	validate_interfaces $newinterfaces
	set $newinterfaces
	while (( $# > 0 ))
	do
		# Current values, first field only; haiface drops a trailing
		# ":<n>" suffix and ifpath drops its directory prefix.
		cif=$(cdbmatch cluster.node.${hostid}.if.$1 ${cdbfile} 2>&1 | cut -d" " -f1)
		hif=$(cdbmatch cluster.node.${hostid}.haiface.$1 ${cdbfile} 2>&1 | cut -d" " -f1 | sed 's/:.*$//')
		pif=$(cdbmatch cluster.node.${hostid}.ifpath.$1 ${cdbfile} 2>&1 | cut -d" " -f1 | sed 's|/.*/||')

		echo "/cluster.node.${hostid}.if.$1/s|${cif}|$2|" >> ${sedfile}
		echo "/cluster.node.${hostid}.haiface.$1/s|${hif}|$2|" >> ${sedfile}
		echo "/cluster.node.${hostid}.ifpath.$1/s|${pif}|$2|" >> ${sedfile}

		# consume one <index, name> pair
		shift 2
	done
fi
}

###############################################################################

function check_cluster_name
{
  # Verify that ${CLUSTNAME} matches the configured cluster name stored
  # in ${cdbpath}/default_clustername; exit 2 on a mismatch.  On success
  # the global ${cdbfile} is pointed at this cluster's CDB file.
  typeset default_name

  default_name=$(< ${cdbpath}/default_clustername)
  if [[ "${CLUSTNAME}" != "${default_name}" ]]; then
    lmsg="`gettext 'Invalid cluster name: %s\n\
Configured cluster is: %s'`"
    printf "${lmsg}\n" "${CLUSTNAME}" "${default_name}"  
    exit 2
  fi

  cdbfile=${cdbpath}/${CLUSTNAME}.cdb
}

###############################################################################

function get_udlm_cfile
{
# Queue a CDB edit for the Oracle UDLM configuration file setting.
# Driven by two globals: ${newudlmfile} (non-empty means "update the
# setting") and ${nudlmcfile} (the new value; empty clears the setting).
# The pathname, when given, must be absolute.
if [[ -n "$newudlmfile" ]]; then
	if [[ -z "$nudlmcfile" ]]; then
		# no value supplied - clear the config file entry
		echo "/^udlm.oracle.config.file/s|:.*|:|" >> $sedfile
	elif [[ "${nudlmcfile}" = /* ]]; then
		echo "/^udlm.oracle.config.file/s|:.*|: $nudlmcfile|" >> $sedfile
	else
		lmsg=`gettext "\"%s\": filename must be an absolute pathname."`
		printf "${lmsg}" "${nudlmcfile}" 2>&1
		exit 2
	fi
fi
}

###############################################################################

function set_newsteptimes
{
# Queue CDB edits that set the step10/step11 reconfiguration timeouts
# to ${steptimes}.  The new value must be at least as large as the
# loghost.update_timeout value already in ${cdbfile}; values of 100 or
# below draw a warning but are still accepted.
typeset -i base_value

if [[ -n "${steptimes}" ]]; then
	base_value=$(cdbmatch loghost.update_timeout $cdbfile 2>&1 | \
		cut -d" " -f1)

	if (( steptimes <= 100 )); then
		lmsg=`gettext \
"WARNING: The timeout value specified \"%s\" might be too low\n\
for the cluster to operate properly. Please refer to the Sun Cluster\n\
documentation for guidelines on adjusting this value based on the \n\
number of logical hosts defined."`
		printf "${lmsg}\n" "${steptimes}" 2>&1 
	fi

	# Reject values below the logical host timeout.
	if (( steptimes < base_value )); then
		lmsg=`gettext \
"The timeout value specified \"%s\" should be at least \n\
equal to the value of \"%s\" (loghost.update_time).\n\n\
The timeout value has not been modified in the cluster database."`
		printf "${lmsg}\n" "${steptimes}" "${base_value}" 2>&1
		exit 2
	fi

	echo "/^cmm.transition.step10.timeout/s|:.*|: $steptimes|" >> $sedfile
	echo "/^cmm.transition.step11.timeout/s|:.*|: $steptimes|" >> $sedfile
fi
}

###############################################################################

function set_newloghosttimes
{
# Queue CDB edits that set the logical host update timeout to
# ${loghosttimes}.  The value must be strictly less than the current
# step10 timeout; the cmm return timeout is derived from it as
# 2 * loghosttimes + 50.
typeset -i steptimes

if [[ -n "${loghosttimes}" ]]; then
	steptimes=$(cdbmatch cmm.transition.step10.timeout $cdbfile 2>&1 | \
		cut -d" " -f1)

	# A value at or above the step timeouts is rejected outright.
	if (( loghosttimes >= steptimes )); then
		lmsg=`gettext \
"WARNING: The timeout value specified \"%s\" is greater than \n\
the current value of the step timeouts \"%s\". Please refer\n\
to the SunCluster documentation for guidelines on adjusting this\n\
value based on the number of logical hosts defined.\n\
\nThe timeout value has not been modified in the cluster database."`
		printf "${lmsg}\n" "${loghosttimes}" "${steptimes}" 2>&1
		exit 2
	fi

	new_returntime=$(( 2 * loghosttimes + 50 ))
	echo "/^cmm.transition.return.timeout/s|:.*|: ${new_returntime}|" >> $sedfile
	echo "/^loghost.update_timeout/s|:.*|: $loghosttimes|" >> $sedfile
fi
}

###############################################################################

function get_newnetaddr
{
# Queue a CDB edit replacing the physical network address of node
# ${nodeid} with ${newnetaddr}.  Only the four statically supported
# node ids (0-3) are accepted; anything else draws an error message.
if [[ -n "${nodeid}" ]]; then
  case "${nodeid}" in
  0|1|2|3)
    echo "/^cluster.node.${nodeid}.physaddr/s|:.*|: $newnetaddr|" >> $sedfile
    ;;
  *)
    lmsg="`gettext '%s: Illegal nodeid. Should be 0, 1, 2, or 3'`"
    printf "${lmsg}\n" "${nodeid}" 2>&1	
    ;;
  esac
fi
}

##############################################################################
  
function display_config
{
 # Print the current cluster configuration: member hosts, private network
 # interfaces, quorum devices, timeout values, TC/SSP info (direct-attach
 # clusters only) and - when this node is a cluster member - the logical
 # host configuration from the CCD.  Reads ${cdbfile} via cdbmatch.
 typeset step10
 typeset step11
 typeset loghost

 typeset directc

 directc=$(cdbmatch cmm.directattacheddevice $cdbfile)
 numnodes=$(cdbmatch cluster.number.nodes $cdbfile)
 host0=$(cdbmatch cluster.node.0.hostname $cdbfile)
 host1=$(cdbmatch cluster.node.1.hostname $cdbfile)
 host2=$(cdbmatch cluster.node.2.hostname $cdbfile)
 host3=$(cdbmatch cluster.node.3.hostname $cdbfile)
 h0if0=$(cdbmatch cluster.node.0.if.0 $cdbfile | cut -d" " -f1)
 h0if1=$(cdbmatch cluster.node.0.if.1 $cdbfile | cut -d" " -f1)
 h1if0=$(cdbmatch cluster.node.1.if.0 $cdbfile | cut -d" " -f1)
 h1if1=$(cdbmatch cluster.node.1.if.1 $cdbfile | cut -d" " -f1)
 step10=$(cdbmatch cmm.transition.step10.timeout $cdbfile)
 step11=$(cdbmatch cmm.transition.step11.timeout $cdbfile)
 loghost=$(cdbmatch loghost.update_timeout $cdbfile)

 # Interfaces of nodes 2 and 3 exist only for 3- and 4-node clusters.
 if [[ "${numnodes}" -ge 3 ]]; then
   h2if0=$(cdbmatch cluster.node.2.if.0 $cdbfile | cut -d" " -f1)
   h2if1=$(cdbmatch cluster.node.2.if.1 $cdbfile | cut -d" " -f1)
 fi
 if [[ "${numnodes}" -eq 4 ]]; then
   h3if0=$(cdbmatch cluster.node.3.if.0 $cdbfile | cut -d" " -f1)
   h3if1=$(cdbmatch cluster.node.3.if.1 $cdbfile | cut -d" " -f1)
 fi

 lmsg="`gettext 'Current Configuration for Cluster %s\n\n\
  Hosts in cluster:\n\t%s %s %s %s\n\n\
  Private Network Interfaces for\n\t%s:\t%s %s\n\t%s:\t%s %s\n'`"
 printf "${lmsg}\n" "${CLUSTNAME}" "${host0}" "${host1}" "${host2}" \
	 "${host3}" "${host0}" "${h0if0}" "${h0if1}" "${host1}" \
	 "${h1if0}" "${h1if1}" 2>&1

if [[ "${numnodes}" -ge 3 ]]; then
  print "      ${host2}: ${h2if0} ${h2if1}"
fi
if  [[ "${numnodes}" -eq 4 ]]; then
  print "      ${host3}: ${h3if0} ${h3if1}"
fi
lmsg="`gettext \
'\n  Quorum Device Information\n'`"
printf "${lmsg}\n"
# Walk all (i,j) node pairs; in a direct-attach cluster all pairs share
# one device, so the first hit prints a single line and leaves the loop.
let i=0
while (( i < ${numnodes} )); do
  let j=i+1
  while (( j < ${numnodes} )); do
    quorumdev=$(cdbmatch quorumdev.node.${i}.${j} ${cdbfile})
    if [[ -n "${quorumdev}" ]]; then
      if (( directc == 1 )); then
	lmsg="`gettext 'Quorum device for cluster %s is %s'`"
	printf "${lmsg}\n" "${CLUSTNAME}" "${quorumdev}"
	break 2
      else
        # Indirect expansion: h1/h2 get the value of host$i / host$j.
        h1=$(eval print $`print host$i`)
        h2=$(eval print $`print host$j`)
        lmsg=`gettext "Quorum device for hosts %s and %s: %s"`
        printf "${lmsg}\n" "${h1}" "${h2}" "${quorumdev}"
      fi
    fi
    let j=j+1
  done
  let i=i+1
done
print

# print out the timeout values 
lmsg="`gettext \
'  Logical Host Timeout Value :\n\
\tStep10		  :%s\n\
\tStep11		  :%s\n\
\tLogical Host		  :%s'`"
printf "${lmsg}\n" "${step10}" "${step11}" "${loghost}"
 # Terminal concentrator / SSP info applies to direct-attach clusters only.
 directatt=$(cdbmatch cmm.directattacheddevice $cdbfile)
 if [[ ${directatt} = 1 ]]; then
	lmsg="`gettext '\nCluster TC/SSP Information\n'`"
	printf "${lmsg}\n"	
	host0tcp=$(cdbmatch cluster.node.0.tc_ssp.port $cdbfile)
	host1tcp=$(cdbmatch cluster.node.1.tc_ssp.port $cdbfile)
	host2tcp=$(cdbmatch cluster.node.2.tc_ssp.port $cdbfile)
	host3tcp=$(cdbmatch cluster.node.3.tc_ssp.port $cdbfile)
	tc_ssp_lock=$(cdbmatch cluster.tc_ssp.lock $cdbfile)

	# Each port value looks like "<tc-name>.<port>"; the name part keys
	# the TC's IP address in the CDB.
	host0tc=$(cdbmatch cluster.tc_ssp.${host0tcp%.*}.ip $cdbfile)
	host1tc=$(cdbmatch cluster.tc_ssp.${host1tcp%.*}.ip $cdbfile)
	host2tc=$(cdbmatch cluster.tc_ssp.${host2tcp%.*}.ip $cdbfile)
	host3tc=$(cdbmatch cluster.tc_ssp.${host3tcp%.*}.ip $cdbfile)
	locktc=${tc_ssp_lock%.*}
	tcssp_locktc=$(cdbmatch cluster.tc_ssp.${locktc}.ip $cdbfile)

	lmsg="`gettext \
'%s TC/SSP, port   : %s, %s\n\
%s TC/SSP, port   : %s, %s\n\
%s TC/SSP, port   : %s, %s\n\
%s TC/SSP, port   : %s, %s\n\
%s Locking TC/SSP, port : %s, %s'`"
	printf "${lmsg}\n" "${host0}" "${host0tc}" "${host0tcp#*.}"\
			   "${host1}" "${host1tc}" "${host1tcp#*.}"\
			   "${host2}" "${host2tc}" "${host2tcp#*.}"\
			   "${host3}" "${host3tc}" "${host3tcp#*.}"\
			   "$CLUSTNAME" "${tcssp_locktc}" "${tc_ssp_lock#*.}"
 fi
	
print ""
# print information on configured logical hosts only if this node is in the
# current cluster membership.

# get the nodeid of the localhost
 
let i=0
while (( i < ${numnodes} )); do
  if [[ $(eval print $`print host$i`) = "${localhostname}" ]]; then
    localnodeid=${i}
    break
  fi
  let i=i+1
done
 
# exit if this node is not in the cluster membership
# NOTE(review): if localhostname was not found above, localnodeid is unset
# and the pattern degenerates to "**", which always matches - confirm
# whether that fall-through is intended.
 
if [[ "${curr_members}" != *${localnodeid}* ]]; then
  exit 0
fi

# Each CCD LOGHOST row is colon-separated:
#   <key>:<lname>:<nodelist>:<dglist>:<iflist>:<manualmode>
loghost_rows=$(scccd ${CLUSTNAME} LOGHOST query lname "")
for lrow in ${loghost_rows}; do

  lname=${lrow%:*:*:*:*}
  lname=${lname#*:}

  nodelist=${lrow%:*:*:*}
  nodelist=${nodelist#*:*:}
  nodelist=$(print ${nodelist} | tr ',' ' ')
  set -A node_array ${nodelist}

  dglist=${lrow%:*:*}
  dglist=${dglist#*:*:*:}
  dglist=$(print ${dglist} | tr ',' ' ')

  iflist=${lrow%:*}
  iflist=${iflist#*:*:*:*:}
  iflist=$(print ${iflist} | tr ',' ' ')

  manualmode=${lrow#*:*:*:*:*:}

  lmsg="`gettext \
'Logical Host 	: %s\n\
\n\tNode List	: %s\n\
\tDisk Groups	: %s\n'`"
  printf "${lmsg}\n" "${lname}" "${nodelist}" "${dglist}"

  # For every logical interface, look up its LOGIP row and print the
  # per-node network interfaces alongside the logical IP address.
  for i in ${iflist}; do
    iprow=$(scccd ${CLUSTNAME} LOGIP query logif ${i})

    netif_list=${iprow%:*:*}
    netif_list=${netif_list#*:*:}
    netif_list=$(print ${netif_list} | tr ',' ' ')
    set -A netif_array ${netif_list}

    ipaddr=${iprow%:*}
    ipaddr=${ipaddr#*:*:*:}

    lmsg="`gettext \
'\tLogical Address	: %s\n\
\t\tLogical Interface 	: %s\n\
\t\tNetwork Interface 	: '`"
    printf "${lmsg}" "${ipaddr}" "${i}"

    let n=0
    while (( n < ${#node_array[*]} )); do
      lmsg="`gettext '%s (%s)'`"
      printf "${lmsg} " "${netif_array[n]}" "${node_array[n]}"
      let n=n+1
    done
    print
  done

  print
  # manualmode 0 means the logical host fails over automatically.
  if [[ "${manualmode}" -eq 0 ]]; then
    lmsg="`gettext '\tAutomatic Switchover	: yes'`"
    printf "${lmsg}\n"
  else
    lmsg="`gettext '\tAutomatic Switchover	: no'`"
    printf "${lmsg}\n"
  fi
  print
done

return 0
}

################################################################################
# create_adminfs_for_diskgroup()
#
# This is called by configure_hads_loghost. For a given disk group, 
# the function 
# - creates volume of size 2MB
# - mirrors the volume
# - makes ufs file system on the volume
function create_adminfs_for_diskgroup
{
  # Create the HA administrative file system for one disk group:
  # a mirrored 2MB "<dg>-stat" volume with a ufs file system, mounted
  # under ${haadmindir}/${loghost}.
  #
  #   $1 - the disk group (must already be imported on this node)
  #   $2 - passed by the caller but unused here; ${loghost} is read from
  #        the caller's scope instead (ksh dynamic scoping)
  #
  # Returns 0 on success, 1 on any failure.

  typeset rawdev 
  typeset blkdev 	
  typeset mnt
  typeset dg

  dg=$1

  # The caller has already made sure that this is called from 
  # only one node.
  # This function gets called only if diskgroup is imported. So no
  # need to use vxdg to list the group and check for its existence.

  # Create volume and its mirror if they do not exist
  /usr/sbin/vxinfo ${dg}-stat > /dev/null 2>&1
  if [ $? -ne 0 ]; then
    LC_ALL=C /usr/sbin/vxassist -g ${dg} -U fsgen make ${dg}-stat 2m \
     > /dev/null 2> ${tmppath}/vxassist.out
    if [ $? -ne 0 ]; then
      # NOTE(review): this bare assignment sets LC_ALL=C for the rest of
      # the script run - it looks like a stray leftover of a command
      # prefix rather than an intentional global change; confirm.
      LC_ALL=C
      lmsg="`gettext \
	'vxassist failed to create %s-stat volume on %s diskgroup'`"
      printf "${lmsg}\n" "${dg}" "${dg}"
      # A "Cannot allocate space" failure gets a tailored hint; any other
      # vxassist error is shown verbatim.
      if [ -f ${tmppath}/vxassist.out ]; then
	/bin/grep "Cannot allocate space" ${tmppath}/vxassist.out > /dev/null
	if [ $? -eq 0 ]; then
	  lmsg="`gettext '\n\
	Unable to allocate space to create mirrored volume for administrative\n\
	file system in diskgroup %s. 4MB of free disk space needs to be\n\
	present in %s for this purpose to configure it for HA-NFS.'`"
	  printf "${lmsg}\n" "${dg}" "${dg}"
	else
	  /bin/cat ${tmppath}/vxassist.out
	fi 
	/bin/rm -f ${tmppath}/vxassist.out
      fi
      return 1
    else
      # Volume created; now attach a mirror.  A mirror failure is only a
      # warning - the function continues with the unmirrored volume.
      LC_ALL=C /usr/sbin/vxassist -g ${dg} -U fsgen mirror ${dg}-stat \
       > /dev/null 2> ${tmppath}/vxassist.out
      if [ $? -ne 0 ]; then
	lmsg="`gettext 'Failed to create %s-stat mirror on %s diskgroup'`"
	printf "${lmsg}\n" "${dg}" "${dg}"
	if [ -f ${tmppath}/vxassist.out ]; then
	  /bin/grep "Cannot allocate space" ${tmppath}/vxassist.out > /dev/null
	  if [ $? -eq 0 ]; then
	    lmsg="`gettext '\n\
	    It is suggested that a mirror be created for the volume to ensure\n\
	    the reliability of the HA-NFS and to avoid having a single point\n\
	    of failutre. 2MB of free disk is required in diskgroup %s to\n\
	    create the mirror. Run following command to create mirror:\n\
	          vxassist -g %s -U fsgen mirror %s-stat\n\n'`"
	    printf "${lmsg}\n" "${dg}" "${dg}" "${dg}"
	  else
	    /bin/cat ${tmppath}/vxassist.out
	    lmsg="`gettext 'Run following command to create mirror:\n\
vxassist -g %s -U fsgen mirror %s-stat\n\n'`"
	    printf "${lmsg}\n" "${dg}" "${dg}"
	  fi
	  /bin/rm -f ${tmppath}/vxassist.out
	fi

      fi  # vxassist succeeded

    fi
  else
    # if the volume with that name exists then we assume that
    # the file system can be created in it.
    lmsg="`gettext 'Existing %s-stat volume on %s disk group will be used \n\
for HA administrative file system'`" 
    printf "${lmsg}\n" "${dg}" "${dg}" 
  fi

  rawdev="/dev/vx/rdsk/${dg}/${dg}-stat"
  blkdev="/dev/vx/dsk/${dg}/${dg}-stat"

  # Make an ufs file system 
  # NOTE(review): this exec permanently redirects the script's stdin to
  # the "y" answer file for the remainder of the run, not just for newfs.
  echo "y" > ${tmppath}/yyy     # newfs command expects "yes" from stdin
  exec < ${tmppath}/yyy         # newfs prompts for y/n
  /usr/sbin/newfs ${rawdev} > ${tmppath}/newfs.error  2>&1
  if [ $? -ne 0 ]; then
    /bin/cat ${tmppath}/newfs.error
    lmsg="`gettext 'newfs failed while creating file system on %s'`"
    printf "${lmsg}\n" "${rawdev}"
      /bin/rm -f ${tmppath}/newfs.error
      return 1
  fi
  rm -f ${tmppath}/newfs.error
  rm -f ${tmppath}/yyy
  
  # Mount point is ${haadmindir}/${loghost} with duplicate slashes
  # collapsed; haadmindir defaults to "/" when not configured.
  haadmindir=$(cdbmatch cluster.haadmindir ${cdbfile})
  if [[ -z "${haadmindir}" ]]; then
    haadmindir="/"
  fi
  mnt=$(echo "${haadmindir}/${loghost}" | sed -e "s/[/]\{2,\}/\//g")
  if [[ ! -d "${mnt}" ]]; then
    /bin/mkdir -p ${mnt} > /dev/null 2>&1
    if [ $? -ne 0 ]; then
      lmsg="`gettext 'Could not create %s directory for %s diskgroup'`"
      printf "${lmsg}\n" "${mnt}" "${dg}"
	return 1
    fi
  fi
  
  /usr/sbin/mount -F ufs ${blkdev} ${mnt} > /dev/null 2>&1 
  if [ $? -ne 0 ]; then
    lmsg="`gettext 'mount -F ufs %s %s failed'`"
    printf "${lmsg}\n" "${blkdev}" "${mnt}"
    return 1
  fi
  
  return 0

}    # end of create_adminfs_for_diskgroup()

################################################################################
# configure_hads_loghost logical-host-name
#
# logical-host-name	The name of the logical host on which the administrative
#			file system needs to be created.
#
function configure_hads_loghost
{
# Configure the HA administrative file system for a logical host.
#
#   $1 - logical host name
#   $2 - optional disk group; when empty the first disk group associated
#        with the logical host is used
#
# Creates/updates the vfstab.<loghost> and dfstab.<loghost> templates
# under ${myhanfs} and, when the node is an active cluster member,
# creates the <dg>-stat admin volume/file system via
# create_adminfs_for_diskgroup.
typeset d diskgroups
integer found
typeset loghost
typeset localnodeid
typeset dglist
typeset nodelist
typeset i

loghost=$1
diskgroup=$2

localnodeid=$(get_hostid ${localhostname})
if [[ -z "${localnodeid}" ]]; then
  lmsg="`gettext \
'This host - %s - is not in the cluster configuration.'`" 
  printf "${lmsg}\n" "${localhostname}"
  exit 1
fi

#
# We will use "haadmindir" in CDB file to denote the prefix to the
# adminstrative file system.
# The Adminstartive file system will have ${haadmindir}/logicalhost
# as the pathname.
#
haadmindir=$(cdbmatch cluster.haadmindir ${cdbfile})
if [[ -z "$haadmindir" ]]; then
  haadmindir="/"
fi

# myhanfs variable is also used by create_adminfs_for_diskgroup()
myhanfs=$(cdbmatch cluster.hanfsdir ${cdbfile})
if [[ -z "$myhanfs" ]]; then
  myhanfs="/etc/opt/SUNWcluster/conf/hanfs"
fi

vfstab_file=${myhanfs}/vfstab.${loghost}
dfstab_file=${myhanfs}/dfstab.${loghost}
# Collapse any duplicate slashes in the mount point path.
mnt=$(echo "${haadmindir}/${loghost}" | sed -e "s/[/]\{2,\}/\//g")
/bin/mkdir -p ${mnt} > /dev/null 2>&1
/bin/mkdir -p ${myhanfs} > /dev/null 2>&1

#
# if the node is stopped or aborted, just create the templates for the
# vfstab and dfstab files and exit.
#

if [[ "${pdb_status}" = "stopped" || "${pdb_status}" = "aborted" ]]; then
  if [[ ! -f "${vfstab_file}" ]]; then
    touch ${vfstab_file}
  fi
  if [ ! -f ${dfstab_file} ]; then
    print  "# share -F nfs <dir name>" > ${dfstab_file}
  fi
  exit 0
fi

#
# Check if Logical Host is associated with diskgroup.
# CCD LOGHOST row format: <key>:<lname>:<nodelist>:<dglist>:...
#
rows=$(scccd ${CLUSTNAME} LOGHOST query lname ${loghost})
if [ $? -ne 0 ]; then
	lmsg="`gettext 'Logical Host %s not defined'`"
	printf "${lmsg}\n" "${loghost}"
        exit 1
fi

dglist=$(echo ${rows} | /usr/bin/awk -F: ' { print $4 } ')
dglist=$(echo ${dglist} |  tr ',' ' ')

# When a disk group was named explicitly, it must belong to this
# logical host.
if [ "${diskgroup}" !=  "" ]; then
   found=0
   for i in ${dglist}
   do
     if [ ${i} = ${diskgroup} ]; then
    	found=1
      	break;
     fi
   done
   if [ ${found} = 0 ]; then
    lmsg="`gettext 'DiskGroup %s not associated with Logical Host %s'`"
    printf "${lmsg}\n" "${diskgroup}" "${loghost}"
    exit 1
   fi
   dg=${diskgroup}
fi

nodelist=$(echo ${rows} | /usr/bin/awk -F: ' { print $3 } ')
nodelist=$(echo ${nodelist} |  tr ',' ' ')

if [ "${dglist}" = "" ]; then
   lmsg="`gettext 'Logical Host %s is configured with no diskgroups. No HA administive filesystem will be configured'`"
   printf "${lmsg}\n" "${loghost}"
   return 1
fi

# user has not supplied the diskgroup, so take the first one.
# NOTE(review): "==" inside [ ] is a ksh/bash extension, not POSIX test;
# also dg is unset (empty) here whenever no diskgroup argument was given.
if [ "${dg}" == "" ]; then
	dg=$(echo ${dglist} | /usr/bin/awk ' { print $1 } ')
fi

# Proceed with volume creation only if the disk group is imported here.
/usr/sbin/vxdg list ${dg} > /dev/null 2>&1
if [[ $? -eq 0 ]]; then

  if [ -f ${vfstab_file} ]; then
    pattern=$(echo "${haadmindir}/${loghost}" | sed -e "s/[/]\{2,\}/\//g")
    # Does the vfstab already carry a mount entry for the admin fs path
    # (field 3 is the mount point)?
    grep -v "^#" ${vfstab_file} | awk ' { print $3} ' | \
     grep -w ${pattern} >/dev/null 2>&1
      rc=$?
      #
      # Then We already Have an existing disk group having the
      # ha-admin filesystem. so print an Error and return.
      #
      if [ ${rc} -eq 0 ]; then
	# NOTE(review): the command substitution discards all output to
	# /dev/null, so ${result} is always empty here - see the "shared"
	# check below, which can therefore never match.
	result=$(LC_ALL=C /usr/sbin/vxdg list ${dg} > /dev/null 2>&1)
	RC=$?
	if [ ${RC} -eq 0 ]; then
	  # disk group is imported then cross check.
	  /usr/sbin/vxinfo ${dg}-stat > /dev/null 2>&1
	  RC=$?
	  if [ ${RC} -ne 0 ]; then
	    # disk group is imported but we do not have the volume
	    # but vfstab entries are present.
	    # hence we have to go ahead and continue with 
	    #
	    RC=0
	  else
	    # Both vfstab and volume are present, hence use it as it 
	    # is
	    RC=1
	  fi
	fi

	if [ ${RC} -ne 0 ]; then
     	  lmsg="`gettext 'Logical Host %s already has an HA administrative file system on %s diskgroup'`"
	  printf "${lmsg}\n" "${loghost}" "${dg}"
	fi
	
      fi
  fi

  # check for shared disk groups
  # NOTE(review): ${result} is always empty (see above), so this guard is
  # effectively dead code - shared disk groups are never rejected here.
  if print ${result} | grep shared > /dev/null 2>&1; then
    lmsg="`gettext \
'Illegal diskgroup: %s is a shared diskgroup'`"
    printf "${lmsg}\n" "${dg}"
    exit 1
  fi

  create_adminfs_for_diskgroup ${dg} ${loghost}
  if [ $? -ne 0 ]; then
    lmsg="`gettext \
'After fixing the cause for failure run scconf -F command again on all nodes'`"
    printf "${lmsg}\n\n"
    exit 1
  fi
fi

rawdev="/dev/vx/rdsk/${dg}/${dg}-stat"
blkdev="/dev/vx/dsk/${dg}/${dg}-stat"

#
# put an entry in vfstab if does not have an entry for
# adminfs of this diskgroup. The format of the entry is
# blk-dev   raw-dev mnt-pt FS-type fsck mnt@boot  mount-opt
#
entry="${blkdev}    ${rawdev}       ${mnt}  ufs     1       no      -"
if [ -f ${vfstab_file} ]; then
  grep -w ${blkdev} ${vfstab_file} > /dev/null 2>&1
  if [ $? -ne 0 ]; then
    echo ${entry} >> ${vfstab_file}
    if [ $? -ne 0 ]; then
      lmsg="`gettext 'Unable to append to the file %s.\n\
Append the following line to the %s file:\n%s\n'`"
      printf "${lmsg}\n" "${vfstab_file}" "${vfstab_file}" "${entry}"   
    fi
  fi
else
  # NOTE(review): the header line is immediately overwritten by the
  # second ">" redirection, so a fresh vfstab contains only the entry.
  if [ ! -f  ${vfstab_file} ]; then
    echo "# blk-dev   raw-dev mnt-pt FS-type fsck mnt@boot mount-opt" \
     > ${vfstab_file}
    echo ${entry} > ${vfstab_file}
  fi
fi

if [ ! -f ${dfstab_file} ]; then
  echo "# share -F nfs <dir name>" > ${dfstab_file}
fi
}

#############################################################################

function check_node_status
{
  typeset status
  typeset state

  # Announce progress to the operator.
  lmsg="`gettext 'Checking node status...'`"
  printf "${lmsg}\n"

  # Fetch the node status in the C locale and squeeze whitespace by
  # re-expanding it through print.
  status=$(LC_ALL=C get_node_status)
  status=$(print ${status})

  # Pull the CMM state out of a line of the form:
  #   sc: <state> (<detail>) node id: ... membership: ... interconnect ...
  # pdb_status (global) is the state with any trailing "(...)" stripped.
  state=${status##sc: }
  state=${state%% node id:*}
  pdb_status=${state% \(*\)}

  # Unless invoked with "p" (print-only), refuse to update the
  # configuration while a reconfiguration is in transition.
  if [[ "$1" != "p" && "${pdb_status}" = "in_transition reconfiguration" ]]; then
    lmsg="`gettext \
'Node status: %s\n\
The cluster configuration cannot be updated at this time'`"
    printf "${lmsg}\n" "${state}"
    exit 1
  fi

  # Export the current cluster membership list (global curr_members).
  curr_members=${status##*membership: }
  curr_members=${curr_members%% interconnect*}
}

##############################################################################
#
# function get_logip_format input_params numnodes assigned_ifs logip_format
#
# input_params - The input parameters consist of a comma separated list of
#		 network interfaces (as many as the number of nodes parameter),
#		 a logical IP address, and optionally, a logical interface
#		 number.
#
# numnodes     - The number of nodes in the cluster.
#
# assigned_ifs - A space separated list of assigned logical interface numbers
#		 in the cluster. This list is modified on exit from the function
#		 to also include the logical interface number assigned for this
#		 particular invocation.
#
# logip_format - The output parameter consisting of a string in the following
#		 form: "<iflist>:<logical-ip-addr>:<logical-interface-number>"
#		 iflist is a comma separated list of network interfaces and
#		 there are as many as the number of nodes in the cluster.
#		 The logical interface number is either passed in as an optional
#		 input parameter or computed by this function as a
#		 cluster-wide unique logical interface number.

function get_logip_format
{
typeset i_params i_numnodes i_nodes
typeset io_assigned_ifs
typeset o_logip_format

typeset iflist logip_addr logintf
typeset n retval nlookup rows err

# initialize variables

iflist=""
set -A i_params $(print $1 | tr ',' ' ')
set -A i_nodes $(print $nodelist | tr ',' ' ')
i_numnodes=$2
io_assigned_ifs=$(eval print $"$3")
let err=0

# get the list of network interfaces, one per node, verifying that each
# interface is registered with PNM on its host

let n=0
while (( n < i_numnodes )); do
  if [[ -z "${iflist}" ]]; then
    iflist=${i_params[n]}
  else
    iflist="${iflist},${i_params[n]}"
  fi

  #
  # Use the -h option without the -s option.  This is because logical
  # hosts can be established when the cluster is not running on the other
  # node(s).  Without the -s option, the rpc traffic will go over the
  # public network.  (With the -s option, the rpc traffic goes over the
  # private network and will not work if the remote node is not a cluster
  # member.)
  #
  /opt/SUNWpnm/bin/pnmrtop -h ${i_nodes[n]} ${i_params[n]} \
	> /dev/null 2>&1 || err=$?
  if [[ "${err}" -eq 3 ]]; then
	print "Interface (${i_params[n]}) on host (${i_nodes[n]}) is"\
		"not registered with PNM."
	print "Logical Host Configuration failed!"
	exit 1
  elif [[ "${err}" -ne 0 ]]; then
	print "pnmrtop(1M) returned error code - ${err}"
	print "Check if PNM service is functioning correctly.  Also check"
	print "/var/adm/messages for error messages from pnmd(1M)."
	print "Logical Host Configuration failed!"
	exit 1
  fi

  let n=n+1
done

# the next parameter after the interfaces must be the logical IP address

if [[ -z "${i_params[n]}" ]]; then
  lmsg="`gettext \
'Logical IP address not specified or incorrect number of\n\
network interfaces specifed. The number of network interfaces\n\
should match the number of nodes specified in the node list with\n\
the n option'`"
  printf "${lmsg}\n"
  exit 1
fi

# get and validate logical IP address

logip_addr=${i_params[n]}
retval=0
nlookup=$(lookuphost ${logip_addr}) || retval=$?
if [[ -z "${nlookup}" ]]; then
  lmsg="`gettext 'Invalid logical name: %s'`"
  printf "${lmsg}\n" "${logip_addr}"
  exit 1
elif [[ -n "${nlookup}" && "${retval}" -eq 2 ]]; then
  # Fix: was "{nlookup}" (literal string, always non-empty); the intent
  # is to test the lookup result together with retval 2 (duplicates).
  lmsg="`gettext \
'Duplicate entries for %s are present in /etc/hosts.\n\
Please remove duplicate entries and rerun this command.'`"
  printf "${lmsg}\n" "${logip_addr}"
  exit 1
fi

# check for duplicates in the Cluster Configuration Database

rows=$(scccd ${CLUSTNAME} LOGIP query ipaddr "${logip_addr}")
if [[ -n "${rows}" ]]; then
  lmsg="`gettext \
'\nError: The specified logical IP address %s already exists in the \n\
Cluster Configuration Database. You can verify this using the\n\
%s <clustname> -p command.'`"
  printf "${lmsg}\n" "${logip_addr}" "${Myname}"
  exit 1
fi

# get the optionally specified logical interface number, or compute a unique
# cluster-wide logical interface number

let n=n+1
logintf=${i_params[n]}
if [[ -z "${logintf}" ]]; then
  # no logical interface given: pick the lowest number not already assigned
  let logintf=1
  while (( 1 )); do
    if [[ "${io_assigned_ifs}" != *${logintf}* ]]; then
      break
    else
      let logintf=logintf+1
    fi
  done
else
  # The logical interface number has been specified as an option. Check for
  # duplicates in the Cluster Configuration Database

  rows=$(scccd ${CLUSTNAME} LOGIP query logif "${logintf}")
  if [[ -n "${rows}" ]]; then
    lmsg="`gettext \
'\nError: The specified logical interface number already exists in the\n\
Cluster Configuration Database. You can verify this using the\n\
scconf <clustname> -p command.'`"
    printf "${lmsg}\n"
    exit 1
  fi
  # validate the logical interface
  if [[ "${logintf}" != +([0-9]) ]]; then
    lmsg="`gettext \
'Logical interface (%s) must be an integer.'`"
    printf "${lmsg}\n" "${logintf}"
    exit 1
  fi
fi

# set output parameters: append the chosen interface to the caller's
# assigned-interfaces list ($3) and store the formatted result in $4

io_assigned_ifs="${io_assigned_ifs} ${logintf}"
io_assigned_ifs=$(print ${io_assigned_ifs} | sed -e 's/ /\\ /g')
o_logip_format="${iflist}:${logip_addr}:${logintf}"

eval $3=${io_assigned_ifs}
eval $4=${o_logip_format}

}

##############################################################################
#
# function get_node_list i_nodelist o_nodelist o_numnodes
#
# i_nodelist - comma separated list of node names that are passed into the
#	       function.
# o_nodelist - comma separated list of output node names that are passed out
#	       of this function after validation of the input node list.
# o_numnodes - an integer passed out of this function that represents the
#	       number of nodes in the output node list.
function get_node_list
{
typeset i_nodelist o_nodelist o_numnodes
typeset nodename nodeid
typeset n

i_nodelist=$(printf "%s" "$1" | tr ',' ' ')
o_nodelist=""
let o_numnodes=0

for nodename in ${i_nodelist}; do

  # validate node: each name must exist in the cluster configuration
  nodeid=$(get_hostid ${nodename})
  if [[ -z "${nodeid}" ]]; then
    lmsg="`gettext \
'Host %s is not in the cluster configuration.'`"
    printf "${lmsg}\n" "${nodename}"
    exit 1
  fi

  # reject duplicate node names in the input list
  # (n is now typeset local so it no longer clobbers the global n)
  for n in ${o_nodelist}; do
	if [[ "${n}" = "${nodename}" ]]
	then
		lmsg="`gettext 'Duplicate entry for %s. Aborting'`"
		printf "${lmsg}\n" "${nodename}"
		exit 1 
	fi
  done

  # accumulate the validated node into the output list
  if [[ -z "${o_nodelist}" ]]; then
    o_nodelist=${nodename}
  else
    o_nodelist="${o_nodelist} ${nodename}"
  fi
  let o_numnodes=o_numnodes+1
done

o_nodelist=$(print ${o_nodelist} | tr ' ' ',')

# set output parameters: $2 <- comma-separated node list, $3 <- node count
eval $2=${o_nodelist}
eval $3=${o_numnodes}

}

##############################################################################
#
# function validate_dglist <dglist>
#
# <dglist> - A comma separated list of disk groups passed as an input argument.
# This function verifies that each of the disk groups in the input list is not
# configured on one of the existing logical hosts in the cluster configuration.
# The functions outputs an error message and exits with a non-zero exit code if
# it finds any one of the input disk group configured with any other logical
# host in the cluster.

function validate_dglist
{
typeset i_dg i_dglist
typeset rows loghost_row lname
typeset loghost_dglist loghost_dg
typeset rstatus
typeset vxdgtmp="/var/opt/SUNWcluster/run/vxdg.$$"

i_dglist=$(print $1 | tr ',' ' ')

# first make sure none of the input disk groups is already configured
# on an existing logical host

rows=$(scccd ${CLUSTNAME} LOGHOST query lname "")
for loghost_row in ${rows}; do

  # logical host name is the second colon-separated field of the row
  lname=${loghost_row%:*:*:*:*}
  lname=${lname#*:}

  # disk group list is the fourth colon-separated field of the row
  loghost_dglist=${loghost_row%:*:*}
  loghost_dglist=${loghost_dglist#*:*:*:}
  loghost_dglist=$(print ${loghost_dglist} | tr ',' ' ')

  for i_dg in ${i_dglist}; do
    for loghost_dg in ${loghost_dglist}; do
      if [[ "${i_dg}" = "${loghost_dg}" ]]; then
  	lmsg="`gettext \
'Error: The disk group %s is already configured with\n\
Logical Host %s.'`"
	printf "${lmsg}\n" "${i_dg}" "${lname}"
	exit 1
      fi
    done
  done
done

# We're finished if this is an SDS configuration.
if [[ "${vm}" = "sds" ]]; then
	return 0
fi

for i_dg in ${i_dglist}; do
# ok we know that this dg is not a component of an existing logical
# host, but it could be imported by another node. This could be
# a case in which dg name supplied is simply a mistake, or the
# dg might have been created on another node and has not been
# deported or....?
#
#	vxdg import <dg>
#	$? = 12 (disk group exists and is imported by me)
#	$? = 20 with message:
# ERROR: Disk group lh1: import failed: Disk is in use by another host
#	means that this dg exists but is imported by someone else.
#	$? = 20 with message:
# ERROR: Disk group lh3: import failed: No valid disk found containing dg
#	means that this dg does not exist.
#
# So...
#	if i get a status of 12 on the import its ok because its ok
#	if i already have the dg imported.
#	if i get a 20 then tell the user that the dg is imported by
#	someone else.
#
# In any event if we cannot import it, tell the user and quit.
	LC_ALL=C /usr/sbin/vxdg import ${i_dg} > ${vxdgtmp} 2>&1
	rstatus=$?
	if (( rstatus == 20 )); then
		if /bin/egrep 'Disk is in use by another host' ${vxdgtmp} \
			> /dev/null 2>&1
		then
			lmsg="`gettext \
'Error: The disk group %s already exists and\n\
is imported by another host'`"
			printf "${lmsg}\n" "${i_dg}" 
			/bin/rm -rf ${vxdgtmp}	
			exit 1
		elif /bin/egrep 'No valid disk found containing disk group' \
			${vxdgtmp} >/dev/null 2>&1
		then
			lmsg="`gettext \
'Error: The disk group %s does not exist.'`"
			printf "${lmsg}\n" "${i_dg}"
			/bin/rm -rf ${vxdgtmp}	
			exit 1
		fi
	elif (( rstatus == 0 || rstatus == 12 )); then
# a status of 0 means we were able to import the dg, 
# a status of 12 means we already had it imported.
# in either of these cases lets deport it now.
		/usr/sbin/vxdg deport ${i_dg}
	else
		# Fix: was "{i_dg}" (missing $), which printed the literal
		# string "{i_dg}" instead of the disk group name.
		lmsg="`gettext \
'Error: An undetermined error was encountered while \
attempting\nto import disk group %s. Repair \
this before proceeding.'`"
		printf "${lmsg}\n" "${i_dg}"
		/bin/rm -rf ${vxdgtmp}	
		exit 1
	fi
	/bin/rm -rf ${vxdgtmp}	
done
}


#######################################################################
#
# function validate_no_ds_dependencies <dsname> <lname>
#
# This function verifies that the data service <dsname> does not depend 
# on any other data services on logical host <lname>;
# if it finds that it does, it prints an error message and it exits.
# It also checks that the data service to be removed is in the "off" state.

function validate_no_ds_dependencies
{
typeset dsname
typeset lname
typeset dsdep_row dsdep_list dsdep
typeset ds_rows row ds
typeset dsstate_row dsstate

dsname=$1
lname=$2

# check that the data service to be removed is in the "off" state
# get the state row from the ccd
dsstate_row=$(scccd ${CLUSTNAME} DS_STATE query ds_name ${dsname})
dsstate=${dsstate_row#*:*:}
if [[ "${dsstate}" != "0" ]]; then
  lmsg="`gettext \
'\nError: Data service %s is on. Cannot remove a service that is on.'`"
  printf "${lmsg}\n" "${dsname}"
  exit 1
fi

# get list of data services on this lhost
# (ds_rows is now properly typeset; it was previously declared as
# "dsrows" and leaked into the global namespace)
ds_rows=$(scccd ${CLUSTNAME} LOGHOST_DS query lname ${lname})

# for each row get name of data service
for row in ${ds_rows}; do
  ds=${row#*:*:}

  # get row that contains list of data services dependent on <ds>
  dsdep_row=$(scccd ${CLUSTNAME} DS_DEPSVC_LIST query ds_name ${ds})

  # parse row and get list of dependent data services
  dsdep_list=${dsdep_row#*:*:}
  dsdep_list=$(print ${dsdep_list} | tr ',' ' ')

  # check if <dsname> is in the dependency list
  # if yes, print error and exit
  for dsdep in ${dsdep_list}; do
    if [[ "${dsname}" = "${dsdep}" ]]; then
      lmsg="`gettext \
'\nError: Data service %s\ncannot be removed from logical host %s\n\
because data service %s,\nwhich depends on it, is still present.'`"
      printf "${lmsg}\n" "${dsname}" "${lname}" "${ds}"
      exit 1
    fi
  done
done

}


########################################################################
#
# function validate_no_ds_dependencies_list <dslist> 
#
# This function verifies that the data services in <dslist> do not depend
# on any other data services, unless they also are in the list to be
# removed. If it finds otherwise, it prints an error message and it exits.
# This is verified on each lhost independently.
# It also checks that each data service to be removed is in the "off" state.

function validate_no_ds_dependencies_list
{
typeset dsdep_row dsdep_list dsdep
typeset lh_rows lh_row lname
typeset ds_rows row ds
typeset ds_list dsname ds_rem
typeset isin_flag
typeset dsstate_row dsstate

ds_list=$1

# for each service in list
for dsname in ${ds_list}; do

  # check that the data service to be removed is in the "off" state
  # get the state row from the ccd
  dsstate_row=$(scccd ${CLUSTNAME} DS_STATE query ds_name ${dsname})
  dsstate=${dsstate_row#*:*:}
  if [[ "${dsstate}" != "0" ]]; then
    lmsg="`gettext \
'\nError: Data service %s is on. Cannot remove a service that is on.'`"
    printf "${lmsg}\n" "${dsname}"
    exit 1
  fi

  # get list of rows that show lhosts associated with this ds
  lh_rows=$(scccd ${CLUSTNAME} LOGHOST_DS query dsname ${dsname})

  # parse each row and get name of lhost
  for lh_row in ${lh_rows}; do
    lname=${lh_row%:*}
    lname=${lname#*:}

    # get rows that contain list of data services on this lhost
    # (ds_rows is now properly typeset; it was previously declared as
    # "dsrows" and leaked into the global namespace)
    ds_rows=$(scccd ${CLUSTNAME} LOGHOST_DS query lname ${lname})

    # for each row parse row and get name of data service
    for row in ${ds_rows}; do

      ds=${row#*:*:}

      # get row that contains list of data services dependent on <ds>
      dsdep_row=$(scccd ${CLUSTNAME} DS_DEPSVC_LIST query ds_name ${ds})

      # parse row and get list of dependent data services
      dsdep_list=${dsdep_row#*:*:}
      dsdep_list=$(print ${dsdep_list} | tr ',' ' ')

      # for each service in list of dependent data services
      # check if <dsname> is in the dependency list
      for dsdep in ${dsdep_list}; do
        if [[ "${dsname}" = "${dsdep}" ]]; then
          # check if the dependent service is also being removed;
 	  isin_flag=0
	  for ds_rem in ${ds_list}; do
            if [[ "${ds_rem}" = "${ds}" ]]; then
	      isin_flag=1
	    fi
	  done
          # if dependent service is not in removal list, 
	  # print error and exit
	  if [[ "${isin_flag}" -eq 0 ]]; then
  	    lmsg="`gettext \
'\nError: Data service %s\ncannot be removed from logical host %s\n\
because data service %s,\nwhich depends on it, is not being removed.'`"
	    printf "${lmsg}\n" "${dsname}" "${lname}" "${ds}"
	    exit 1
	  fi	# if flag is zero (dependent service is not being removed)
        fi	# if names are equal
      done	# for dsdep - each ds dependent on ds on lhost
    done	# for row - each ds on lhost
  done		# for lhrow - each lhost
done 		# for dsname - each ds in list to be removed

}



#####################################################################
#
# function validate_all_ds_dependencies <dsname> <lname>
#
# This function verifies that all the data services on which
# data service <dsname> depends are also registered with lhost <lname>
# If not it prints an error message and it exits.

function validate_all_ds_dependencies
{
typeset dsname
typeset lname
typeset dsdep_row dsdep_list dsdep
typeset dslh_row dslh_list dslh

dsname=$1
lname=$2

# get row which contains list of data services on which <dsname> depends
# (dsdep_row and dslh_row are now typeset; previously they leaked into
# the global namespace)
dsdep_row=$(scccd ${CLUSTNAME} DS_DEPSVC_LIST query ds_name ${dsname})

# parse row to get list of data services on which <dsname> depends
dsdep_list=${dsdep_row#*:*:}
dsdep_list=$(print ${dsdep_list} | tr ',' ' ')

# for each data service <dsdep> in list of dependent data services
#   check if <dsdep> is one of <dslh> rows
#   if no, print error and exit
for dsdep in ${dsdep_list}; do
  # get row that contains data service <dsdep> on <lhost>
  dslh_row=$(scccd ${CLUSTNAME} LOGHOST_DS query "lname:dsname" \
	 "${lname}:${dsdep}")

  # if row was not found, this data service 
  # is not associated with <lhost>; print error message and exit
  if [[ -z "${dslh_row}" ]]; then
    lmsg="`gettext \
'\nError: Data service %s\n\
cannot be added to logical host %s\n\
because data service %s,\n\
on which this data service depends, is not added.'`"
    printf "${lmsg}\n" "${dsname}" "${lname}" "${dsdep}"
    exit 1
  fi
done

}


########################################################################
# get_passwd
#
# read in the password for the TC/SSP
# This routine should change so that it will make a call to a C-program
# binary, and encrypt the password.
#
# $1 = devType: is either TC or SSP

get_passwd() {
	# Read the password for the given device type via tcak(1M).
	# $1 = devType: either TC or SSP.
	# Sets the global variable 'passwd'.
	typeset devType=$1
	typeset akfile=${tmppath}/tcak.$$

	# tcak writes the (encrypted) password into a scratch file;
	# slurp it back into the global and remove the file.
	tcak ${devType} ${akfile}
	passwd=$(/bin/cat ${akfile})
	/bin/rm -rf ${akfile}
}

# Determine, if possible, whether the device being configured is a
# Terminal Concentrator (TC) or a System Service Processor (SSP).
# Sets the global 'devArch' to "E10000" (SSP) or "other" (TC); leaves it
# unchanged if no determination can be made.
# NOTE(review): this function reads several globals set by its caller
# configure_host_tc_ssp: 'lockport', 'cdbfile', and the TC index loop
# variable 'i' — it is not safe to call outside that context.
determine_TC_SSP() {

	typeset nodeInfo
	typeset dev
	typeset pt
	integer num_of_nodes
	integer n

	# if the lockport number is available 
	if [ ${lockport} ]; then
		# if the lockport is "-1" then it is an SSP
		if [ ${lockport} -eq -1 ]; then
			devArch="E10000"
		else
		#it is a TC
			devArch="other"
		fi
	else
		let num_of_nodes=$(cdbmatch cluster.number.nodes ${cdbfile})
		# if the device already exist in the CDB
		# NOTE(review): 'i' here is the caller's TC slot index being
		# compared against the node count — presumably intended to
		# skip the scan for new TC slots; confirm against caller.
		if (( $i < $num_of_nodes )); then
		#find an attached device
			let n=0
			while (( $n < 4 )); do
				# tc_ssp.port entries have the form <tc#>.<port>
				nodeInfo=$(cdbmatch cluster.node.$n.tc_ssp.port ${cdbfile})
				if [ -n "${nodeInfo}" ]; then
				#Check if connected to TC/SSP
					dev=`echo "${nodeInfo}" \
					   | sed -e "s|\..*||"`
					# if device is attached to node
					if [ ${dev} -eq ${i} ]; then
						pt=`echo "${nodeInfo}" \
					   	  | sed -e "s|.*\.||"`
						# Check if port is SSP
						# (port -1 denotes an SSP)
						if [ ${pt} -eq -1 ]; 
						then
							devArch="E10000"
						else
							devArch="other"
						fi
						break
					fi
				fi
				((n=n+1))
			done
		fi
	fi
	# end of determine_TC_SSP
}


##############################################################################
#
# function configure_host_tc_ssp
#
# -H <hostname> [-dpt]
#  This option allows changing architecture of "hostname" as well as
#  it's TC/SSP (specified by IP address) and it's port on the TC/SSP.
#
# -t <old-ip-address | name> [-Pil]
#  This option allows adding a new TC/SSP or changing the ip address
#  or password of an existing (configured) TC/SSP.
#

function configure_host_tc_ssp
{
cdbfile=$cdbpath/$CLUSTNAME.cdb

# This option is only legal while the cluster is down.
if [[ "${pdb_status}" != "aborted" ]]; then
   if [[ "${pdb_status}" != "stopped" ]]; then
	lmsg="`gettext 'This option can only be used when the cluster is down\n\
Please ensure all nodes are down and retry.'`"
	printf "${lmsg}\n"
	exit 2
   fi
fi

type=$1
shift

typeset	lockport newip ip getpasswd
typeset TC tc_ip first_empty
typeset TC_num port arch tc chk chktype
typeset	nodeid TC_port

if [[ "${type}" = "tc" ]]; then
	#
	# This is the -t option. With suboptions -l, -i, -P.
	#
	TC=$1
	shift
while getopts Pi:l: option $*; do
  case ${option} in
  l)	lockport=${OPTARG}
	;;
  i)	newip=${OPTARG}
	;;
  P)	getpasswd=1
	;;
 \?)	usage
	exit 2
	;;
  esac
done
	#
	# Check the type of the TC argument, hostname or ip address.
	#
	chktype=$(echo "${TC}" | /bin/tr -d '[0-9].')
	if [[ -n ${chktype} ]]; then
		# TC is a name; resolve to an IP address via arp
		ip=$(/usr/sbin/arp ${TC} | /bin/awk '{print $2}' \
			| /bin/sed 's/(//' | /bin/sed 's/)//')
	else
		# TC is an IP address
		ip=${TC}
	fi

	#
	# find matching ip address in CDB file (slots 0-3); remember the
	# first empty slot in case this is a brand new TC
	#
	let i=0
	while (( $i < 4 )); do
		tc_ip=$(cdbmatch cluster.tc_ssp.$i.ip ${cdbfile})

		if [[ -z ${tc_ip} ]]; then
			if [[ -z ${first_empty} ]]; then
				first_empty=$i
			fi
		fi

		if [[ "${tc_ip}" = "${ip}" ]]; then
			break
		fi
		let i=i+1
	done

	#
	# If no matching ip address was found in the CDB file
	# Then this must be a whole new TC, be sure to get it's
	# password and fill into the first empty slot.
	# If none exists bail out.
	#
	if (( $i == 4 )); then
		if [[ -z ${getpasswd} ]]; then
			lmsg="`gettext 'No such TC currently defined : %s\n\
Need to specify a passwd'`"
			printf "${lmsg}\n" "${TC}"
			getpasswd=1
		fi
		if [[ -z ${first_empty} ]]; then
			lmsg="`gettext 'No such TC current defined\n\
Must provide the address of an\n\
existing TC if you want to change it.'`"
			printf "${lmsg}\n" 
			exit 2
		fi
		TC_num=${first_empty}
		newip=${ip}
	else
		TC_num=$i
		lmsg="`gettext 'Found TC/SSP: %s'`"
		printf "${lmsg}\n" "${tc_ip}"
	fi

	#
	# If the locking port is specified on this TC, change it.
	#
	if [ ${lockport} ]; then
		echo "/^cluster.tc_ssp.lock/s|:.*|: ${TC_num}.${lockport}|" >> $sedfile
	fi

	#
	# If a new ip address is specified for this TC then use it
	# else get it from arp or user.
	#
	if [ ${newip} ]; then
		chktype=$(echo "${newip}" | /bin/tr -d '[0-9].')
		if [ ${chktype} ]; then
			lmsg="`gettext 'Invalid IP address : %s.'`"	
			printf "${lmsg}\n" "${newip}"
			exit 2
		fi
		echo "/^cluster.tc_ssp.${TC_num}.ip/s|:.*|: ${newip}|" >> $sedfile
		# Change TC name to this ip address
		TC=${newip}
	else
		newip=$(/usr/sbin/arp ${TC} | /bin/awk '{print $2}' \
			| /bin/sed 's/(//' | /bin/sed 's/)//')
		if [[ -z ${newip} ]]; then
			lmsg="`gettext \
'Cannot determine IP address for host : %s\n\
Provide manually.'`"
			printf "${lmsg}\n" "${TC}" 
			exit 2
		fi
		echo "/^cluster.tc_ssp.${TC_num}.ip/s|:.*|: ${newip}|" >> $sedfile
	fi

	#
	# If a new password is required get it from the user.
	#
	if [ ${getpasswd} ]; then
		devArch="unknown"
		# determine_TC_SSP will set the devArch variable
		determine_TC_SSP
		if [ "${devArch}" == "unknown" ]; then
			echo ""
			lmsg=$(gettext \
"Cannot determine if this device is a Terminal\n\
Concentrator or a System Service Processor.\n\
Assuming TC device.\n\
If the device is an SSP device, then please\n\
enter the password for user ssp instead of root.")
			printf "${lmsg}\n" 2>&1
		fi
		# get_passwd sets the variable 'passwd'
		# (echo is disabled while the password is typed)
		stty -echo
		if [ "${devArch}" == "E10000" ]; then
			get_passwd SSP
		else
			get_passwd TC
		fi
		stty echo
		echo "/^cluster.tc_ssp.${TC_num}.ak/c\\" >> $sedfile
		echo "cluster.tc_ssp.${TC_num}.ak	: \c" >> $sedfile
		# double up any backslashes in the password before it is
		# written into the sed script
		sed -e "s/[\]\{1,\}/&&/g" >> $sedfile <<!EOM
${passwd}
!EOM
	fi
	return 0
fi

if [[ ${type} = "host" ]]; then
	#
	# This is the -H option with suboptions -d, -t and -p.
	#
	host=$1
	shift
while getopts t:d:p: option $*; do
  case ${option} in
  t)	tc=${OPTARG}
	;;
  p)	port=${OPTARG}
	;;
  d)	arch=${OPTARG}
	;;
  esac
done
	#
	# First find the nodeid corresponding to this hostname.
	#
	let i=0
	while (( $i < 4 )); do
		hostn=$(cdbmatch cluster.node.$i.hostname $cdbfile)
		if [[ "${hostn}" = "${host}" ]]; then
			break
		fi
		let i=i+1
	done

	if (( $i == 4 )); then
		lmsg="`gettext 'No such host exists : %s'`"
		printf "${lmsg}\n" "${host}"
		exit 2
	else
		nodeid=$i
	fi

	#
	# If a new architecture is being specified for this host, then
	# make sure the TC/SSP ip address is also provided.
	#
	if [ ${arch} ]; then
		if [[ "${arch}" != "E10000" ]]; then
			if [[ "${arch}" != "other" ]]; then
				lmsg="`gettext \
'Only architecture type of \"E10000\" or \"other\" is allowed'`"
				printf "${lmsg}\n"
				exit 2
			fi
		fi
		if [[ -z ${tc} ]]; then
			# Fix: the gettext command substitution below was
			# missing its closing backquote, which made the open
			# backquote swallow all following script text up to
			# the next backquote in the file.
			lmsg="`gettext \
'A new TC/SSP must be specified if the host architecture changes'`"
			printf "${lmsg}\n"
			exit 2
		fi
		echo "/^cluster.node.${nodeid}.arch/s|:.*|: ${arch}|" >> $sedfile
	else
		arch=$(cdbmatch cluster.node.$i.arch ${cdbfile})
	fi

	#
	# If a new TC/SSP ip address is specified, then make sure it
	# is already defined in the CDB file. Then find the it's index
	# number and use it.
	#
	if [ ${tc} ]; then
		#
		# make sure a valid IP address is specified
		#
		chk=$(echo "${tc}" | /bin/tr -d '[0-9].')
		if [ ${chk} ]; then
			# TC may be a name
			ip=$(/usr/sbin/arp ${tc} | /bin/awk '{print $2}' \
				| /bin/sed 's/(//' | /bin/sed 's/)//')
			if [[ -z ${ip} ]]; then
				lmsg="`gettext \
'Cannot determine IP address for TC/SSP: %s.\n\
Provide the correct TC/SSP name'`"
				printf "${lmsg}\n" "${tc}"
				exit 2
			fi
			tc=${ip}
		fi

		#
		# find the TC index corresponding to the specified ip addr.
		#
		let i=0
		while (( $i < 4 )); do
			tc_ip=$(cdbmatch cluster.tc_ssp.$i.ip ${cdbfile})
			if [[ "${tc_ip}" = "${tc}" ]]; then
				break
			fi
			let i=i+1
		done
		if (( $i == 4 )); then
			lmsg="`gettext 'No such TC/SSP currently defined : %s'`"
			printf "${lmsg}\n" "${tc}"  
			exit 2
		else
			TC_num=$i
		fi
		#
		# Now we know nodeid and tc#, so see if port is
		# specified to complete the entry
		#
		if [ ${port} ]; then
			# TC ports must be numeric; SSP ports must be -1
			if [ "${arch}" == "other" ]; then
				chk=$(echo "${port}" | /bin/tr -d '[0-9]')
			elif [ "${arch}" == "E10000" ]; then
				chk=$(echo "${port}" | sed -e "s|^-1$||")
			else
				lmsg="`gettext 'Illegal architecture: %s'`"
				printf "${lmsg}\n" "${arch}"
				exit 2
			fi
			if [ ${chk} ]; then
				lmsg="`gettext 'Illegal port value: %s'`"
				printf "${lmsg}\n" "${port}"
				exit 2
			fi
			echo "/^cluster.node.${nodeid}.tc_ssp.port/s|:.*|: ${TC_num}.${port}|" >> $sedfile
		else
			lmsg="`gettext \
'No port specified on the new TC/SSP for this host'`" 
			printf "${lmsg}\n"
			exit 2
		fi
		return 0
	fi
	if [ ${port} ]; then
		# port change only: keep the existing TC number
		if [ "${arch}" == "other" ]; then
			chk=$(echo "${port}" | /bin/tr -d '[0-9]')
		elif [ "${arch}" == "E10000" ]; then
			chk=$(echo "${port}" | sed -e "s|^-1$||")
		else
			lmsg="`gettext 'Illegal architecture: %s'`"
			printf "${lmsg}\n" "${arch}"
			exit 2
		fi
		if [ ${chk} ]; then
			lmsg="`gettext 'Illegal port value: %s'`"
			printf "${lmsg}\n" "${port}"
			exit 2
		fi
		TC_port=$(cdbmatch cluster.node.${nodeid}.tc_ssp.port ${cdbfile})
		TC_num=${TC_port%.*}
		echo "/^cluster.node.${nodeid}.tc_ssp.port/s|:.*|: ${TC_num}.${port}|" >> $sedfile
	fi
	return 0
fi

}

##############################################################################
#
# function configure_logical_host <argument-list>
#
# <argument-list> is the list of arguments following the -L option to the
# pdbconf command. See usage below. This function carries out various validation
# checks and then uses getopts to parse the command line arguments. After
# parsing is complete, the Cluster Configuration Database is updated to add
# the logical host

function configure_logical_host
{
typeset OLDOPTIND
typeset lname
typeset option options_present
typeset nodelist nodeid numnodes curr_master localnodeid
typeset dglist
typeset manualmode
typeset rows r
typeset logip_array logip_count logip_format logintf

# initialize variables
OLDOPTIND=${OPTIND}
lname=$1
shift
OPTIND=1
let manualmode=0
nodelist=""
dglist=""
set -A logip_array
let logip_count=0
let options_present=0

# check whether this node is in the cluster configuration

localnodeid=$(get_hostid ${localhostname})
if [[ -z "${localnodeid}" ]]; then
  # Fix: was "{localhostname}" (missing $), which printed the literal
  # string "{localhostname}" instead of the host name.
  lmsg="`gettext \
'This host - %s - is not in the cluster configuration.'`"
  printf "${lmsg}\n" "${localhostname}"
  exit 1
fi

# check whether this node is in the cluster membership

if [[ "${curr_members}" != *${localnodeid}* ]]; then
  lmsg="`gettext \
'This host - %s - is not in the current cluster membership\n\
The cluster configuration database cannot be updated from this node.'`"
  printf "${lmsg}\n" "${localhostname}"
  
  exit 1
fi


# get list of already assigned logical interfaces

assigned_ifs=""
rows=$(scccd ${CLUSTNAME} LOGIP query logif "")
for r in ${rows}; do
  logintf=${r#*:*:*:*:}
  assigned_ifs="${assigned_ifs} ${logintf}"
done

# parse command line options; normalize whitespace around commas first
input_args=$(echo $* | sed -e 's/,  */,/g' -e 's/  *,/,/g')
while getopts mrn:g:i: option ${input_args}; do
  let options_present=1
  case ${option} in
    
    n) get_node_list ${OPTARG} nodelist numnodes
       ;;
    
    g) if [[ -z "${dglist}" ]]; then
         dglist=${OPTARG}
       else
	 dglist="${dglist},${OPTARG}"
       fi
       ;;

    m) let manualmode=1
       ;;

    i) if [[ -z "${nodelist}" ]]; then
	 lmsg="`gettext  \
'The -n option has to be specified before the -i option.'`"
	 printf "${lmsg}\n"
	 exit 1
       fi
       logip_format=""
       get_logip_format ${OPTARG} ${numnodes} assigned_ifs logip_format
       logip_format="${nodelist}:${logip_format}"
       logip_array[logip_count]=${logip_format}
       let logip_count=logip_count+1
       ;;

    r) if [[ -n "${nodelist}" || -n "${dglist}" || ${#logip_array[*]} -gt 0 ]]; then
	 lmsg="`gettext \
'The -r option cannot be used with any other option.'`"
	 printf "${lmsg}\n"
	 exit 1
       fi
       unconfigure_logical_host ${lname}
       exit 0
       ;;

    \?) usage
	exit 2
	;;
  esac
done

if (( options_present == 0 )); then
  usage
  exit 2
fi

# check for duplicate logical host names

rows=$(scccd ${CLUSTNAME} LOGHOST query lname ${lname})
if [[ -n "${rows}" ]]; then
  lmsg="`gettext \
'Definition of logical host %s already exists in the cluster\n\
configuration database.'`"
  printf "${lmsg}\n" "${lname}"
  exit 1
fi

# check whether any of the disk groups specified belong to other logical hosts
validate_dglist ${dglist}

# get the list of assigned logical interface numbers for all the logical IP
# addresses of this logical host

assigned_ifs=""
let logip_count=0
while (( logip_count < ${#logip_array[*]} )); do
  logintf=${logip_array[logip_count]#*:*:*:}
  if [[ -z "${assigned_ifs}" ]]; then
    assigned_ifs=${logintf}
  else
    assigned_ifs="${assigned_ifs},${logintf}"
  fi
  let logip_count=logip_count+1
done

# add LOGHOST row to the CCD

r="${lname}:${nodelist}:${dglist}:${assigned_ifs}:${manualmode}"
scccd ${CLUSTNAME} LOGHOST add "lname:nodelist:dglist:iplist:mode" ${r}
if [[ $? -ne 0 ]]; then
  lmsg="`gettext \
'Unable to add logical host %s to the cluster configuration database.\n\
Check system console logs for more detailed error messages.'`"
  printf "${lmsg}\n" "${lname}"
  exit 1
fi

# add all the logical IP addresses of this logical host

let logip_count=0
while (( logip_count < ${#logip_array[*]} )); do
  scccd ${CLUSTNAME} LOGIP add "nodelist:iflist:ipaddr:logif" \
	${logip_array[logip_count]}
  if [[ $? -ne 0 ]]; then
    lmsg="`gettext \
'Unable to add logical IP information - %s -\n\
for logical host %s. Check the system console logs for more\n\
detailed error messages. The Cluster configuration database is now in\n\
an inconsistent state. Remove the logical host with the command\n\
scconf <clustername> -L <logical-host> -r\n\
and resolve the errors before retrying to add the logical host to the\n\
cluster configuration.'`" 
    printf "${lmsg}\n" "${logip_array[logip_count]}" "${lname}"
    exit 1
  fi
  let logip_count=logip_count+1
done

#
# Put the logical Host in MSTATE to ON.
#
loghost_mstate="${lname}:1"
# remove it if present.
scccd ${CLUSTNAME} LOGHOST_MSTATE remove lname ${lname}
scccd ${CLUSTNAME} LOGHOST_MSTATE add "lname:mmode" ${loghost_mstate}
if [[ $? -ne 0 ]]; then
  lmsg="`gettext \
'Unable to add logical host %s LOGHOST_MSTATE to the cluster \
configuration database\n\
Check system console logs for more detailed error messages.'`"
  printf "${lmsg}\n" "${lname}"
  exit 1
fi

# add the current master entry: the first node in the node list that is a
# member of the current cluster membership becomes the master

nodelist=$(print ${nodelist} | tr ',' ' ')
curr_master=""
for r in ${nodelist}; do
  nodeid=$(get_hostid ${r})
  if [[ "${curr_members}" = *${nodeid}* ]]; then
    curr_master=${r}
    break
  fi
done

if [[ -n "${curr_master}" ]]; then
  # add current master row. This should bring up the logical host on the
  # current master
  scccd ${CLUSTNAME} LOGHOST_CM add "lname:curr_master" "${lname}:${curr_master}"
  if [[ $? -ne 0 ]]; then
    lmsg="`gettext \
'Could not bring up logical host %s on the current master %s\n\
Check the system console logs for detailed error messages.'`"
    printf "${lmsg}\n" "${lname}" "${curr_master}"
  fi
fi


#
# Display a message on the console that the logical host is configured
log_info "${msg_prefix}.1010" "Successfully configured ${lname} logical host"

}

##############################################################################
#
# function unconfigure_logical_host <lname>
#
# <lname> is the name of the logical host to be unconfigured. It will remove
# rows corresponding to the specified logical host from the cluster configuration
# database. One of the side effects of this will be to bring down the logical
# host on the current master, if one exists.

function unconfigure_logical_host
{
typeset lname dsname
typeset rows r
typeset logif_list logintf

lname=$1

# if this logical host is still attached to some data services, it should not
# be removed from the cluster configuration.

rows=$(scccd ${CLUSTNAME} LOGHOST_DS query lname ${lname})
if [[ -n "${rows}" ]]; then
  lmsg="`gettext \
'The following data services are configured on this logical host:'`"
  printf "${lmsg}\n" 
  for r in ${rows}; do
    # Strip the two leading ':'-separated fields of each LOGHOST_DS row to
    # obtain the data service name (row assumed to be lname:key:dsname --
    # confirm against the CCD schema).
    dsname=${r#*:*:}
    print -n "${dsname} "
  done
  lmsg="`gettext \
'\nThe logical host %s cannot be removed from the cluster\n\
configuration.'`"
  printf "${lmsg}\n" "${lname}" 
  exit 1
fi

# remove the row corresponding to the current master, if any.
# Removing the LOGHOST_CM row is what brings the logical host down on its
# current master node.

r=$(scccd ${CLUSTNAME} LOGHOST_CM query lname ${lname})
if [[ -n "${r}" ]]; then
  scccd ${CLUSTNAME} LOGHOST_CM remove lname ${lname}
  if [[ $? -ne 0 ]]; then
    lmsg="`gettext \
'Unable to shut down the logical host %s from the current master\n\
node %s. Check the system console logs for detailed error messages'`"
    printf "${lmsg}\n" "${lname}"  "${r#*:*:}"
    exit 1
  fi
fi

# remove the row corresponding to the logical host.
# The queried row is kept in ${r}; its fields are reused below to find the
# logical interfaces belonging to this host.

r=$(scccd ${CLUSTNAME} LOGHOST query lname ${lname})
if [[ -n "${r}" ]]; then
  scccd ${CLUSTNAME} LOGHOST remove lname ${lname}
  if [[ $? -ne 0 ]]; then
    lmsg="`gettext \
'Unable to remove logical host %s from the cluster configuration\n\
database. Check the system console logs for detailed error messages.'`"
    printf "${lmsg}\n" "${lname}" 
    exit 1
  fi
else
  lmsg="`gettext \
'Logical host %s is not configured in the cluster configuration database.'`"
  printf "${lmsg}\n" "${lname}"
  exit 1
fi

# remove the rows corresponding to the logical IP addresses of this logical host.
# Extract the comma-separated logical interface list from the saved LOGHOST
# row: drop the trailing ':'-field, then the four leading ':'-fields
# (field layout assumed from the expansions -- verify against the CCD schema),
# and convert commas to spaces for word-splitting in the loop below.

logif_list=${r%:*}
logif_list=${logif_list#*:*:*:*:}
logif_list=$(print ${logif_list} | tr ',' ' ')

for logintf in ${logif_list}; do
  scccd ${CLUSTNAME} LOGIP remove logif ${logintf}
  if [[ $? -ne 0 ]]; then
    # non-fatal: continue removing the remaining interfaces
    lmsg="`gettext \
'Unable to remove logical interface numbered %s for\n\
logical host %s. Check system console logs for detailed error\n\
messages.'`"
    printf "${lmsg}\n" "${logintf}" "${lname}" 
  fi
done

#
# Remove the LOGHOST_MSTATE row, which holds the persistent status of the
# logical host. Failure here is reported but not fatal.
#
loghost_mstate=$(scccd ${CLUSTNAME} LOGHOST_MSTATE query lname ${lname})
if [[ -n "${loghost_mstate}" ]]; then
  scccd ${CLUSTNAME} LOGHOST_MSTATE remove lname ${lname}
  if [[ $? -ne 0 ]]; then
	lmsg="`gettext \
'Unable to remove logical host %s LOGHOST_MSTATE from the \
cluster configuration\n\
database. Check the system console logs for detailed error messages'`"
	printf "${lmsg}\n" "${lname}"
  fi
fi

#
# Display a message on the console that the logical host is removed
log_info "${msg_prefix}.1020" "Successfully removed ${lname} logical host"

}

##############################################################################
#
# function attach_ds_loghost [-r] <dsname> <lname>
#
# This function establishes the link between a data service and a logical
# host. The -r option can be specified to break this link before unconfiguring
# a logical host.

# attach_ds_loghost [-r] <dsname> <lname>
# Associate data service <dsname> with logical host <lname> in the CCD,
# or (-r) break an existing association. Exits non-zero on any error.
function attach_ds_loghost
{
typeset rflag
typeset lname dsname
typeset row lhost
typeset ds_row
typeset dsstate_row dsstate

# Default to "add" mode; -r selects "remove" mode. Explicit initialization
# so the arithmetic test below never relies on an unset variable.
rflag=0
if [[ "$1" = "-r" ]]; then
  rflag=1
  shift
fi
dsname=$1
lname=$2

# get all rows where <dsname> is associated with any lhost
row=$(scccd ${CLUSTNAME} LOGHOST_DS query dsname ${dsname})

# case of -s -r flag: remove ds from this lhost
if [[ "${rflag}" -eq 1 ]]; then

  # check that the data service is now associated with a lhost
  if [[ -z "${row}" ]]; then
    lmsg="`gettext \
'Data service %s\nis not associated with any logical host\n\
in the cluster configuration database.'`"
    printf "${lmsg}\n" "${dsname}"
    exit 1
  # this ds is associated with at least one lhost
  # check that the data service is now associated with this logical host
  # get row that contains data service <dsname> on <lhost>
  else	
    ds_row=$(scccd ${CLUSTNAME} LOGHOST_DS query "lname:dsname" \
	"${lname}:${dsname}")

    # if row was not found, this data service is not associated with this
    # lhost and cannot be removed; print error message and exit
    if [[ -z "${ds_row}" ]]; then
      lmsg="`gettext \
'Data service %s\nis not currently associated with logical host\n%s.'`"
      printf "${lmsg}\n" "${dsname}" "${lname}"
      exit 1
    fi
  fi	# end of checks for -s -r case

  # check that the data service to be removed is in the "off" state;
  # check that there are no data services dependent on <dsname>;
  # it exits if there are errors
  validate_no_ds_dependencies ${dsname} ${lname}

  # update CCD: remove data service from lhost
  scccd ${CLUSTNAME} LOGHOST_DS remove "lname:dsname" "${lname}:${dsname}"
  if [[ $? -ne 0 ]]; then
    lmsg="`gettext \
'Unable to disassociate data service %s\n from logical host\n%s. '`"
    printf "${lmsg}\n" "${dsname}" "${lname}"
    exit 1
  fi
# end case of -s -r flag: remove ds from this lh
# case of -s flag: add ds to this lh
else

   # First check that if data service is in the "on" state, if it is
   # then it must be turned "off" before using -s option - bugid 4207922.
   # get the state row from the ccd; state is the third ':'-field ("0" = off)
   dsstate_row=$(scccd ${CLUSTNAME} DS_STATE query ds_name ${dsname})
   dsstate=${dsstate_row#*:*:}
   if [[ "${dsstate}" != "0" ]]; then
     lmsg="`gettext \
   '\nError: Data service %s is on. Cannot associate a service that is on.'`"
     printf "${lmsg}\n" "${dsname}"
     exit 1
   fi

  row=$(scccd ${CLUSTNAME} LOGHOST query lname ${lname})
  if [[ -z "${row}" ]]; then
    lmsg="`gettext \
'The logical host %s\n\
is not configured in the cluster configuration database.'`"
    printf "${lmsg}\n" "${lname}"
    exit 1

  # check that the data service is not already associated 
  # with this logical host
  else
    # get row that shows that <dsname> is associated with <lname>
    ds_row=$(scccd ${CLUSTNAME} LOGHOST_DS query "lname:dsname" \
	"${lname}:${dsname}")

    # if row was found, this data service is already associated with this
    # lhost and cannot be added; print error message and exit
    if [[ -n "${ds_row}" ]]; then
        lmsg="`gettext \
'Data service %s\nis already associated with logical host\n%s.'`"
        printf "${lmsg}\n" "${dsname}" "${lname}"
        exit 1
    fi
  # end of checks for -s case
  fi

  # check that all ds's that this ds is dependent on are also registered
  # with this lhost
  validate_all_ds_dependencies ${dsname} ${lname}
  
  # update CCD: add data service to lhost
  scccd ${CLUSTNAME} LOGHOST_DS add "lname:dsname" "${lname}:${dsname}"
  if [[ $? -ne 0 ]]; then
    # fixed error message: was "Unable to associated"
    lmsg="`gettext \
'Unable to associate data service %s\nwith logical host\n %s.'`"
    printf "${lmsg}\n" "${dsname}" "${lname}"
    exit 1
  fi
fi	# end case of -s flag: add ds to this lh

}

##############################################################################
# set_directattach_token <0|1>
# Announce the new value of the direct-attached-devices flag on stdout and
# queue a sed edit (appended to the global $sedfile) that rewrites the
# cmm.directattacheddevice token in the cluster database file.
function set_directattach_token
{
 typeset -i directconnect

 let directconnect=$1

 lmsg="`gettext 'Setting the direct attached flag to:'`" 
 printf "${lmsg}\n"

 case $directconnect in
   0) lmsg="`gettext \
'\toff-clusters greater than two nodes will not use directly'`"
      printf "${lmsg}\n"
	;;
   1) # fixed message text: was "will user directly"
      lmsg="`gettext \
'\t on-clusters greater than two nodes will use directly'`" 
      printf "${lmsg}\n" 
	;;
 esac 
 lmsg="`gettext '\t 	attached devices'`"
 printf "${lmsg}\n" 
 # Queue the edit; it is applied later by 'sed -f $sedfile' over $cdbfile.
 echo "/^cmm.directattacheddevice/s|:.*|: ${directconnect}|" \
                >> $sedfile
}

##############################################################################
# usage
# Print the full command synopsis. Every line of the message consumes one
# %s, all of which are the program name, so the argument list is built in
# a loop instead of being hand-counted.
function usage
{
typeset -i i=0

# One "${Myname}" per %s in the usage string below (18 of them).
while (( i < 18 )); do
	uargs[i]="${Myname}"
	let i=i+1
done

lmsg="`gettext 'Usage:\n\
%s clustername -h <new hostname1> [... <new hostname4>]\n\
%s clustername -i <hostname> <if0> <if1>\n\
%s clustername -F <logical-host> [<dg>]\n\
%s clustername -L <logical-host> -n <nodelist> -g <dglist> \n\
    		-i <iplist> [-m]\n\
%s clustername -L <logical-host> -r \n\
%s clustername -p \n\
%s clustername -s [-r] <data-service-name> <logical-host-name>\n\
%s clustername -U [absolute path of the config file for Oracle Unix DLM]\n\
%s clustername -N <0|1> <ethernet address of host>\n\
%s clustername -q [-m quorum-device] <hostname1> <hostname2>\n\
%s clustername -q -D [-m quorum-device]\n\
%s clustername -A <# of active hosts>\n\
%s clustername -S <none|ccdvol>\n\
%s clustername -T <step10 and step11 timeout value>\n\
%s clustername -H <hostname> [-dpt]\n\
%s clustername -t <old-ip-addr|name> [-Pil]\n\
%s clustername -l <loghost update timeout value>\n\
%s clustername -R <data-service-name> [<data-service-name>...]'`"
printf "${lmsg}\n" "${uargs[@]}"

}


##############################################################################
# look up a value in the configuration file
# enmatch <key>...
# Look a key up in the configuration database file ${cdbfile} via cdbmatch.
# The matched value is whatever cdbmatch writes to stdout; on lookup failure
# a diagnostic goes to stderr and 1 is returned.
function enmatch
{
	if ! cdbmatch $* ${cdbfile}; then
		lmsg=`gettext "cdbmatch %s %s failed"`
		printf "${lmsg}\n" $* ${cdbfile} 1>&2
		return 1
	fi
	return 0
}
##############################################################################

   # Cluster Application Bit Assignment for cluster.pdbapps vector in cdb file.
   CVM=3
   VxVM=4
   SDS=5


   # First argument must be the cluster name, followed by at least one
   # option; anything less prints the usage and exits.
   if [ $# -ge 2 ]; then
     CLUSTNAME=$1
     check_cluster_name
     shift;
   else
     usage
     exit 2
   fi
   localhostname=$(uname -n)
   # options_present is set to 1 inside the getopts loop; checked afterwards
   # to reject an option-less invocation.
   let options_present=0

   # Bit 5 in pdbapps vector designates DiskSuite as
   # configured volume manager
   pdbapps=$(enmatch cluster.pdbapps)
   if [ $? -ne 0 ]; then
	exit 1
   fi
   vm=$(appmatch ${pdbapps} ${SDS})
   if [[ "${vm}" = "1" ]]; then
	vm=sds
   else
	# don't care
	vm=xxx
   fi

   # Parse and dispatch the command-line options. Most options perform their
   # work and exit directly from their case arm.
   # Fix: several error messages used '2>&1' (redirect stderr to stdout)
   # where the intent -- as elsewhere in this script -- was '1>&2' (send the
   # message to stderr).
   while getopts pduvqDUF:L:N:i:h:A:S:s:t:H:T:l:R: c
   do
	let options_present=1
	check_node_status ${c}
	case $c in
	   A) shift
	      change_ccd_activehosts $*
	      exit 0
	      ;;
	   F) shift
	      if [[ $# -eq 0 ]]; then
		lmsg="`gettext '%s: Insufficient argument list.\n\
Should be: \n%s clustername -F <logical-host-name> [<dg>]'`"
		printf "${lmsg}\n" "${Myname}" "${Myname}"
		exit 2
	      else
		if [[ "${vm}" = "sds" ]]; then
			lmsg=$(gettext \
"The '-F' option is not meaningful in DiskSuite configurations")
			printf "${lmsg}\n"
			exit 0
		fi
		configure_hads_loghost $*
	      fi
	      ;;
	   S) if [[ "${vm}" = "sds" ]]; then
		  lmsg=$(gettext \
"The '-S' option is not meaningful in DiskSuite configurations")
		  printf "${lmsg}\n"
		  exit 0
	      fi
	      shift
	      add_ccdssa $*
	      ;;
	   L) shift
	      configure_logical_host $*
	      exit 0
	      ;;
	   t) shift
	      configure_host_tc_ssp tc $*
              break
	      ;;
	   H) shift
	      configure_host_tc_ssp host $*
              break
	      ;;
	   U) shift
              newudlmfile=NUDLM
              nudlmcfile=$1 
	      ;;
	   D) directconnect=0
	      set_directattach_token ${directconnect}
	      ;;
	  +D) directconnect=1
	      set_directattach_token ${directconnect}
	      ;;
	   p) shift
	      display_config
	      exit 0
	      ;;
	   s) shift
	      if [ $# -lt 2 ]; then
		lmsg="`gettext '%s: Insufficient argument list.\n\
Should be:\n%s clustername -s [-r] <data-service-name> <logical-host-name>'`"	
		printf "${lmsg}\n" "${Myname}" "${Myname}"
		exit 2
	      else
		attach_ds_loghost $*
	      fi
	      exit 0
	      ;;
	  h) shift; 		# get rid of the -h parameter!
	      if [ $# -gt 4 ]; then
		lmsg="`gettext '%s: Illegal Hostname format. \n\ 
Should be:\n\
	%s clustername -h newhost1 [... newhost4]'`"	
		printf "${lmsg}\n" "${Myname}" "${Myname}"
	        exit 2
	      else
		set -A newhost $*
		shift ${#newhost[*]}
		numofnodes=$(cdbmatch cluster.number.nodes ${cdbfile})
		if [ ${numofnodes} -lt ${#newhost[*]} ]; then
			lmsg="`gettext '%s: Illegal Hostname format. \n\
More Hostnames than number of Nodes.'`"
			printf "${lmsg}\n" "${Myname}" 1>&2
			exit 2
		elif [ ${numofnodes} -gt ${#newhost[*]} ]; then
			# fewer new names than nodes: pad with current names
			fill_array newhost ${numofnodes}
		fi
		check_array_duplicates newhost
		if [ $? -lt ${#newhost[*]} ]; then
			lmsg="`gettext '%s: Illegal Hostname format.\n\
Duplicate Hostnames used.\n'`"
			printf "${lmsg}\n" "${Myname}"
			exit 2
		fi
	      fi
	      ;;

           N) shift;
	      if [ $# -ne 2 ]; then
		lmsg="`gettext '%s: Illegal ethernet address format.\n\
Should be:\n\
	%s clustername -N nodeid new-ethernet-address'`"
	        printf "${lmsg}\n" "${Myname}" "${Myname}"
                exit 2
              else
                newnetaddr=$2
                nodeid=$1
                shift 2
              fi
              ;;
	   i) shift
	      if [ $# -ne 3 ]; then
		lmsg="`gettext '%s: Illegal interface format.\n\
Should be:\n\
	%s clustername -i hostname ifX ifY'`"
		printf "${lmsg}\n" "${Myname}" "${Myname}"
		exit 2
	      else
		host=$1
		newinterfaces="0 "$2" 1 "$3
		shift 3
	      fi
		;;
	   q) if [[ "${vm}" = "sds" ]]; then
		  lmsg=$(gettext \
"The '-q' option is not meaningful in DiskSuite configurations")
		  printf "${lmsg}\n" 
		  exit 0
	      fi
	      shift
	      change_quorum_dev $*
	      shift $#
	      ;;
	   l) shift
		# strip digits; anything left means the value was non-numeric
		loghosttimes=$(echo "$1" | tr -d '[0-9]')
		if [[ -n ${loghosttimes} ]]; then
		 lmsg="`gettext '%s: Illegal value = %s. Should be numeric'`"
		 printf "${lmsg}\n" "$Myname" "$1" 1>&2

		 exit 2
		else
	         if (( $1 <= 0 )); then
		   lmsg="`gettext \
'%s: Illegal value = %s. Timeout value needs to be greater than 0'`"
		   printf "${lmsg}\n" "${Myname}" "$1" 1>&2 
	           exit 2
	         else
               	   loghosttimes=$1
	           shift
		 fi
		fi
	        ;;
	   T) shift
		# strip digits; anything left means the value was non-numeric
		steptimes=$(echo "$1" | tr -d '[0-9]')
		if [[ -n ${steptimes} ]]; then
		 lmsg="`gettext '%s: Illegal value = %s. Should be numeric.'`"
		 printf "${lmsg}\n" "${Myname}" "$1" 1>&2
		 exit 2
		else
	         if (( $1 <= 0 )); then
		   lmsg="`gettext \
'%s: Illegal value = %s. Timeout value needs to be greater than 0.'`"
		   printf "${lmsg}\n" "$Myname" "$1" 1>&2
	           exit 2
	         else
               	   steptimes=$1
	           shift
		 fi
		fi
	        ;;
       R) shift
          if [[ $# -eq 0 ]]; then
	lmsg="`gettext '%s: Insufficient argument list.\n\
Should be:\n\
	%s clustername -R <data-service-name> [<data-service-name>...]'`"
	printf "${lmsg}\n" "$Myname" "$Myname"
        exit 2
          else
        remove_all_ds_entries $*
          fi
          ;;
	  \? ) 
	      usage
	      exit 2
	      ;;
	  * ) lmsg="`gettext 'Invalid option: %s'`"
	      printf "${lmsg}\n" "${c}"
	      usage
	      exit 2
		;;
	esac
   done

# Reject an invocation that supplied a cluster name but no options.
if (( options_present == 0 )); then
  lmsg="`gettext 'No options specified!'`"
  printf "${lmsg}\n" 
  usage
  exit 2
fi

hostid=$(get_hostid ${host})

# Each of the following helpers appends its pending change (if any) to the
# sed script in $sedfile; they are no-ops when their option was not given.
get_newhost 

get_udlm_cfile $newudlmfile $nudlmcfile

get_newnetaddr

get_interfaces $newinterfaces

set_newsteptimes

set_newloghosttimes

# Apply all queued edits to the cluster database file, keeping the previous
# version as ${cdbfile}.o.
cp $cdbfile ${cdbfile}.o
sed -f $sedfile ${cdbfile}.o > $cdbfile

# Find and restart the inetd:
# NOTE(review): 'ps | grep' can match unrelated processes whose names merely
# contain "mond"; left as-is to preserve existing behavior.
mond_pid=$(/usr/bin/ps -ed | /usr/bin/grep mond | /usr/bin/awk '{print $1}')
if [ -n "$mond_pid" ]; then
	kill $mond_pid
fi

inet_pid=$(/usr/bin/ps -ed | /usr/bin/grep -w inetd | /usr/bin/awk '{print $1}')
if [ -n "$inet_pid" ]; then
	kill -HUP $inet_pid
else
	# fixed: was '2>&1', which sent this diagnostic to stdout
	lmsg="`gettext 'inetd is not running! Something is wrong!!'`"
	printf "${lmsg}\n" 1>&2
fi
