#!/bin/ksh
#
#pragma ident   "@(#)loghost_sync.m4 1.50     03/05/14 SMI"
#
# Copyright 1997-2003 Sun Microsystems, Inc.  All rights reserved.
# Use is subject to license terms.
#
# loghost_sync - Logical Host synchronization command
#
# The purpose of this command is to execute on all nodes whenever the
# row corresponding to the LOGHOST_CM format is updated in the
# CCD. Whenever a row of this format is removed, the corresponding
# logical host along with its associated data services is shut down on
# the master node of the logical host. When a row of this format is
# added to the CCD, the corresponding logical host and its associated
# data services are brought up on the current master.
#
# The arguments obtained by this command are:
#
# <keyword> - can be one of CCDSYNC_PRE_ADDU, CCDSYNC_POST_ADDU,
#			    CCDSYNC_PRE_REMOVE, CCDSYNC_POST_REMOVE,
#			    CCDSYNC_PRE_RESTORE, CCDSYNC_POST_RESTORE,
#			    CCDSYNC_PRE_RESTOREKEY, CCDSYNC_POST_RESTOREKEY
#
# <row>     - the row being added/removed
#
# <ccdfile> - the full pathname of the dynamic CCD file. This is
#	      required because the dynamic CCD cannot be queried in
#	      the conventional manner from this command. It has to be
#	      queried as a static database.
#
# <numnodes> - integer identifying the number of nodes in the static
#	       cluster configuration.
#
# <curr_members> - list of node ids identifying the current cluster
#		   membership.
#
# <localnodeid> - node id of this node.
# Paths to the Sun StorEdge (Instant Image / SNDR) helper commands.  They
# are only invoked when volumes are still open at deport/release time,
# and their existence is tested (-x) before each use.
svadm=/usr/opt/SUNWesm/sbin/svadm
sc_stop=/usr/opt/SUNWesm/sbin/sc.stop.sh

keyword=$1

# The RESTORE/RESTOREKEY invocations carry no <row> argument, so for
# those keywords the remaining positional parameters shift up by one.
case ${keyword} in
	CCDSYNC_PRE_RESTORE | CCDSYNC_PRE_RESTOREKEY | \
		CCDSYNC_POST_RESTORE | CCDSYNC_POST_RESTOREKEY)
			row=""
			CCDFILE=$2
			numnodes=$3
			curr_members=$4
			localnodeid=$5 ;;
	*)
			row=$2
			CCDFILE=$3
			numnodes=$4
			curr_members=$5
			localnodeid=$6 ;;
esac

# This should be removed later
export CCDFILE

# Message-catalog prefix used by every log_info call in this script.
pre="SUNWcluster.loghost"
# Source the shared reconfiguration helper script.  NOTE(review): this
# presumably provides log_info and related helpers used throughout this
# file -- the sourced file is not visible here, confirm.
INCLUDE=.
RECONF_SCRIPTS=/opt/SUNWcluster/etc/reconf/scripts/
${INCLUDE} ${RECONF_SCRIPTS}/reconf_ener.disks


###########################################################################
#
# function init
#
# The init function just initializes a lot of global variables. It also
# computes the dependency of the data services sitting on top of this
# logical host, and initializes arrays that corresponds to the data
# services's state, the logical hosts of the data service mastered on this
# node, not mastered on this node, etc.
#
# Globals read:  row, keyword, CCDFILE, numnodes, localnodeid, pre
# Globals set:   CVM, VxVM, SDS, CLUSTERBIN, CLUSTERVAR, CLUSTERETC,
#                SSACLI, MDBIN, CLUSTERNAME, cdbfile, localhostname,
#                clusterstate, vm, allnodes, lname, curr_master, dglist,
#                nodelist, in_nodelist, iplist, nodename, and the arrays
#                ordered_ds_array, all_loghosts_array, lm_loghosts_array,
#                nm_loghosts_array, ds_state_array; may overwrite row and
#                set abort_lname (see note at the bottom).
#
# Exits 1 if no volume manager is configured, if SDS is configured but
# DiskSuite is not installed, or if the logical host named in ${row} has
# no LOGHOST entry in ${CCDFILE}.

function init
{
typeset loghost_row
typeset r rows
typeset ds_list dsname
typeset ordered_ds_list
typeset all_loghosts lm_loghosts nm_loghosts
typeset n
typeset pdbapps


# Cluster Application Bit Assignment. Refer to the cdb file for all
# assignments

CVM=3
VxVM=4
SDS=5

CLUSTERBIN=/opt/SUNWcluster/bin
CLUSTERVAR=/var/opt/SUNWcluster
CLUSTERETC=/etc/opt/SUNWcluster/conf
# SSACLI is needed by the programs called by loghost_sync.
SSACLI=${CLUSTERBIN}/scssa
MDBIN=/usr/sbin

CLUSTERNAME=$(/bin/cat ${CLUSTERETC}/default_clustername)
cdbfile=${CLUSTERETC}/${CLUSTERNAME}.cdb
localhostname=$(${CLUSTERBIN}/cdbmatch cluster.node.${localnodeid}.hostname \
			${cdbfile})
clusterstate=$(${CLUSTERBIN}/clustm getstate ${CLUSTERNAME})

# Determine which volume manager is configured by testing the pdbapps
# bitmask against the application bits above, in the order CVM, VxVM,
# SDS; ${vm} ends up as "cvm", "vxvm" or "sds".

pdbapps=$(${CLUSTERBIN}/cdbmatch cluster.pdbapps ${cdbfile})
vm=$(${CLUSTERBIN}/appmatch ${pdbapps} ${CVM})
if [[ "${vm}" = "1" ]]; then
  vm=cvm
else
  vm=$(${CLUSTERBIN}/appmatch ${pdbapps} ${VxVM})
  if [[ "${vm}" = "1" ]]; then
    vm=vxvm
  else
      vm=$(${CLUSTERBIN}/appmatch ${pdbapps} ${SDS})
      if [[ "${vm}" = "1" ]]; then
	vm=sds
	# SDS is configured: the metaset/metastat commands are required
	# by the sds_* functions below, so fail early if they are missing.
	if [[ ! -x ${MDBIN}/metaset || ! -x ${MDBIN}/metastat ]]; then
	  log_info "${pre}.4342" "DiskSuite is not properly installed."
	  exit 1
	fi
      else
	log_info "${pre}.4340" "Volume manager is not properly configured."
	  exit 1
      fi
  fi
fi

# Build the space-separated list of all node ids 0 .. numnodes-1.

let n=0
allnodes=""
while (( n < ${numnodes} )); do
  allnodes="${allnodes} ${n}"
  let n=n+1
done

# retrieve name of logical host and current master
# ${row} is a LOGHOST_CM row of the form <fmt>:<lname>:<curr_master>;
# the expansions select the 2nd and 3rd colon-separated fields.

lname=${row%:*}
lname=${lname#*:}
curr_master=${row#*:*:}

loghost_row=$(${CLUSTERBIN}/scccd -f ${CCDFILE} ${CLUSTERNAME} LOGHOST query \
			lname ${lname})
if [[ -z "${loghost_row}" ]]; then
  log_info "${pre}.4010" "$0 ${keyword} ${row}: The definition of logical host ${lname} does not exist in ${CCDFILE}"
  exit 1
fi

# split the information in loghost_row
# Judging by the expansions below, a LOGHOST row has six colon-separated
# fields with the node list in field 3, disk group list in field 4 and
# logical IP list in field 5; each sub-list is comma-separated and is
# converted to a space-separated word list here.

# dglist: 4th field (strip the last two fields, then the first three)
dglist=${loghost_row%:*:*}
dglist=${dglist#*:*:*:}
dglist=$(print ${dglist} | tr ',' ' ')

# nodelist: 3rd field
nodelist=${loghost_row%:*:*:*}
nodelist=${nodelist#*:*:}
nodelist=$(print ${nodelist} | tr ',' ' ')

# in_nodelist records whether this physical node appears in the logical
# host's node list.
let in_nodelist=0
for nodename in ${nodelist}; do
  if [[ "${nodename}" = "${localhostname}" ]]; then
    let in_nodelist=1
    break
  fi
done

# iplist: 5th field
iplist=${loghost_row%:*}
iplist=${iplist#*:*:*:*:}
iplist=$(print ${iplist} | tr ',' ' ')

# get list of all data services on this logical host
# LOGHOST_DS rows have the form <fmt>:<lname>:<dsname>.

rows=$(${CLUSTERBIN}/scccd -f ${CCDFILE} ${CLUSTERNAME} LOGHOST_DS query \
		lname ${lname})
ds_list=""
for r in ${rows}; do
  dsname=${r#*:*:}
  ds_list="${ds_list} ${dsname}"
done

# Order the data services by their declared dependencies so that a
# service appears after every service it depends on.

ordered_ds_list=""
get_ordered_ds_list "${ds_list}" ordered_ds_list
ordered_ds_list=$(print ${ordered_ds_list} | tr ',' ' ')

# initialize arrays
# For each index n: ordered_ds_array[n] is a data service name;
# all/lm/nm_loghosts_array[n] hold the logical hosts offering it (all,
# locally mastered, not locally mastered, as comma-separated lists);
# ds_state_array[n] holds its state from the DS_STATE table.

set -A ordered_ds_array ${ordered_ds_list}
set -A all_loghosts_array
set -A lm_loghosts_array
set -A nm_loghosts_array
set -A ds_state_array

let n=0
while (( n < ${#ordered_ds_array[*]} )); do
  all_loghosts=""
  lm_loghosts=""
  nm_loghosts=""
  construct_method_args_list ${ordered_ds_array[n]} all_loghosts lm_loghosts \
		nm_loghosts
  all_loghosts_array[n]=${all_loghosts}
  lm_loghosts_array[n]=${lm_loghosts}
  nm_loghosts_array[n]=${nm_loghosts}

  # retrieve the state of the data service

  rows=$(${CLUSTERBIN}/scccd -f ${CCDFILE} ${CLUSTERNAME} DS_STATE query \
			ds_name ${ordered_ds_array[n]})
  ds_state_array[n]=${rows#*:*:}
  let n=n+1
done

# Changes for bugid_4411476
#
# Now get some data in case we are in the abort phase
#
# NOTE(review): this overwrites the global ${row} argument with the saved
# contents of the loghost.dg file whenever that file exists; any later
# reader of ${row} sees the saved row, not the command-line one -- confirm
# this is intended.
if [ -f ${CLUSTERVAR}/loghost.dg ]; then
	row=$(<${CLUSTERVAR}/loghost.dg)
	abort_lname=${row%:*:*:*}
fi
}

###########################################################################
#
# function cvm_deport dglist
#
# dglist - The list of disk groups to be deported
#
# Deports each disk group in dglist that is currently imported on this
# node.  If a group still has open volumes, they are released first: by
# the Instant Image/SNDR sc.stop script (when installed), and then by
# killing any remaining processes holding references (kill_proc).
#
# Returns 0 on success; 1 if a disk group was busy (vxdg exit 31), after
# logging the processes keeping it open; or the vxdg exit status for any
# other deport failure.
#
function cvm_deport
{
typeset g retval retval2
typeset vol volumes raw_device pid_list
typeset volumes_para

if [ $# -gt 0 ]; then
  for g in $*; do
    retval=0
    log_info "${pre}.5015" "Obtaining list of diskgroups to deport"
    # vxdg list succeeds only if the group is imported on this node;
    # skip groups that are not.
    /usr/sbin/vxdg list "${g}" > /dev/null 2>&1 || retval=$?

    if [[ ${retval} -eq 0 ]]; then
      log_info "${pre}.5017" "Successfully obtained diskgroup deport list"
      volumes=$(/usr/sbin/vxprint -qv -g "${g}" -e open -F "vol:%{name}")
      if [[ ${volumes} != "" && -x $svadm && -r /dev/sv && -x $sc_stop ]] ; then
                #
                # Open volumes may be caused by Instant Image, so call the
                # Instant Image sc_stop script, which umounts file systems
                # for II volumes, kills applications accessing II volumes,
                # disables II volume pairs and disables sv.
                #
                log_info "${pre}.1175" \
                         "calling ${sc_stop} to disable Instant Image or SNDR"
                volumes_para=$( for vol in ${volumes} ; do
                                   print "/dev/vx/rdsk/${g}/${vol}"
                               done )
                ${sc_stop} "${volumes_para}"
                # re-check which volumes are still open after sc_stop
                volumes=$(/usr/sbin/vxprint -qv -g "${g}" -e open \
                                            -F "vol:%{name}")
      fi

      if [[ ${volumes} != "" ]] ; then
                #
                # This may be due to some processes still holding references
                # to these volumes. Gather and kill such processes.
                #
                volumes_para=$( for vol in ${volumes} ; do
                                   print "/dev/vx/rdsk/${g}/${vol}"
                               done )
                kill_proc ${volumes_para}
      fi

      log_info "${pre}.1180" "deporting ${g}"

      retval2=0
      /usr/sbin/vxdg deport "${g}" || retval2=$?

      if [[ ${retval2} -eq 31 ]]; then
            log_info "${pre}.5020" "Disk group ${g} busy. Deport failed"
            # Obtain the open volumes that keep the dg busy
            volumes=$(/usr/sbin/vxprint -qv -g "${g}" -e open -F "vol:%{name}")
            for vol in ${volumes}; do
		raw_device="/dev/vx/rdsk/${g}/${vol}"
		pid_list=$(/usr/sbin/fuser ${raw_device} 2>/dev/null |
						tr -s ' ' ' ')
		if [[ -n "${pid_list}" ]]; then
		    log_info "${pre}.5030" "Process(es) ${pid_list} has(have) volume ${vol} open, thus preventing the deport of the ${g} disk group."
		fi
	    done
	    return 1
      elif [[ ${retval2} -ne 0 ]]; then
	    # Fix: a deport failure with any exit code other than 31 used
	    # to fall through and be logged as a successful deport while
	    # the function returned 0.  Report it and propagate the vxdg
	    # exit status instead (mirrors cvm_import's error handling).
	    log_info "${pre}.5020" "Deport of disk group ${g} failed"
	    return ${retval2}
      fi
	# added following line by bugid_4411476
	#
	log_info "$pre.5020" "deported ${g} successfully"
    fi
  done
fi

return 0
}

###########################################################################
#
# function cvm_import dglist
#
# dglist - list of diskgroups to be imported
#
# Imports each diskgroup in dglist that is not already imported on this
# node (forced, cleared host-id, temporary import), then starts its
# volumes with vxrecover when every defined volume can be assembled.
#
# Returns 0 on success; the vxdg exit status if an import fails; or 255
# if an imported group has volumes that cannot be started (the group is
# deported again before returning).
#
function cvm_import
{
typeset g retval retval2

if [ $# -gt 0 ]; then
  for g in $*; do
    retval=0
    log_info "${pre}.5015" "Obtaining list of diskgroups to import"
    # vxdg list succeeds only if the group is already imported here
    /usr/sbin/vxdg list "${g}" > /dev/null 2>&1 || retval=$?
    
    if [[ ${retval} -ne 0 ]]; then
      log_info "${pre}.1170" "importing ${g}"
      retval2=0
      # -f force, -C clear the host-id lock, -t temporary (non-persistent)
      /usr/sbin/vxdg -fCt import "${g}" || retval2=$?
      
      if [[ ${retval2} -ne 0 ]]; then
	# exit code 20 presumably indicates failed disks or missing
	# components (see the NOTICE text) -- TODO confirm against vxdg
	if [[ ${retval2} -eq 20 ]]; then
              log_info "${pre}.4035" "NOTICE: Check disk group ${g} for failed disks \
or other missing components"
	fi
	log_info "${pre}.4030" "Import of disk group ${g} failed"
	return ${retval2}
      else
	# added following line by bugid_4411476
	#
	log_info "$pre.1170" "imported ${g} successfully"

        # now we want to check to see if this dg is actually ok do work with.
        # NOTE(review): startable_vols, defined_vols and vols_with_sub_vols
        # are not typeset here and leak into the global scope.
        startable_vols=$(/usr/sbin/vxrecover -g "${g}" -np | wc -l)
        # The awk handles two vxprint -S -v output formats: with a leading
        # "VOLUMES" header line the wanted field is $1, otherwise $3.
        # NOTE(review): the result is used below in an arithmetic compare,
        # so the selected field is presumably a numeric summary value --
        # confirm against actual vxprint -S output.
        defined_vols=$(/usr/sbin/vxprint -g "${g}" -S -v | \
                awk 'BEGIN { old=0 }
                   { if(NR == 1 && $1 == "VOLUMES") old=1;
                     if(NR != 1) { if(old==1) print $1; else print $3 }}')
        # layered volumes (%nvollayer > 1) are counted separately since
        # they are not reported by vxrecover -np above
        vols_with_sub_vols=$(/usr/sbin/vxprint -v -g "${g}" -F '%name %nvollayer' \
                | awk '$2 > 1' | wc -l)
	if (( defined_vols == vols_with_sub_vols + startable_vols )) ; then
                # we are ok, ie we have at least one plex in each volume that is ok
                # and thus we are startable.
		/usr/sbin/vxrecover -g "${g}" -sb  
	else
                # the dg has at least some plexes that cannot even be assembled. stop
                # what we are doing and abort this logical host.
	        log_info "${pre}.4030" "Import of disk group ${g} failed"
		log_info "${pre}.4035" "NOTICE: Check disk group ${g} for \
			failed disks or other missing components"
                # just to be safe, deport the dg that is in question.
		log_info "${pre}.1180" "deporting diskgroup ${g}"
		/usr/sbin/vxdg deport "${g}" >/dev/null 2>&1
		return 255
	fi
      fi
    fi
  done
fi

log_info "${pre}.4025" "Diskgroup import stage complete"
return 0
}


###########################################################################
#
# function sds_tkown dslist
#
# dslist - list of disksets to take
#
# Takes read-write ownership of each diskset in dslist.  The forced take
# (metaset -f -t) is attempted up to three times per diskset, since a
# dead mdd daemon is restarted by inetd and a retry may then succeed.  A
# take that succeeds only read-only (stale database, tagged data, or only
# half of replicas/mediators available) is released again and reported.
# If this node already owns a diskset (taken outside of Sun Cluster),
# read-only ownership is detected via "metastat -S" and handled the same
# way.
#
# returns 0 on success; 77 if a diskset was read-only; 1 for other failures
# 
function sds_tkown
{
typeset TMPERR
typeset READONLY
typeset DS
typeset TKO_EXITSTATUS
typeset TKO_MAXATTEMPTS
typeset TKO_ATTEMPTS
typeset TKO_OK
typeset TKO_RC

TKO_EXITSTATUS=0
TMPERR=/var/opt/SUNWcluster/run/metaset.err.$$
READONLY=false
for DS in $*; do

    # We will not get into PRE_ADDU to call this routine if we already
    # own the diskset by normal means.  However, we could already own the
    # diskset if the system administrator took the diskset outside of Sun
    # Cluster.  Check for this condition.

    log_info "${pre}.6310" "Check diskset ownership for ${DS}"
    ${MDBIN}/metaset -s ${DS} -o > ${TMPERR} 2>&1
    if [[ $? -ne 0 ]]; then
	log_info "${pre}.6315" "Diskset ${DS} not owned by this node"
	# We don't own the diskset.
	# We try up to three times to take advantage of the fact that if
	# an mdd daemon dies, inetd will restart it.
	TKO_MAXATTEMPTS=3
	TKO_ATTEMPTS=1
	TKO_OK=0

	while true; do
	    # The disk probe interval currently is not configurable.
	    # The value of 2000 milliseconds has been used historically
	    # with success.
	    log_info "${pre}.6320" "Taking ownership of diskset ${DS}"
	    ${MDBIN}/metaset -s ${DS} -f -t 2000 2> $TMPERR
	    TKO_RC=$?

	    case ${TKO_RC} in
	    0)	# read-write take ownership
		# HAVM_OK
		TKO_OK=1
		if [[ ${TKO_ATTEMPTS} -ne 1 ]]; then
		    # Log fact that operation succeeded this time
		    # because it failed on a previous iteration
		    # which issued an error message.
		    log_info "${pre}.6340" "metaset -s ${DS} -f -t succeeded"
		fi
		# break out of while loop
		break
		;;
	    66|2|3) # read-only take ownership
		READONLY=true
		if [[ -s ${TMPERR} ]]; then
		    log_info "${pre}.3340" "$(cat ${TMPERR})"
		fi
		case ${TKO_RC} in
		66) # stale database
		    log_info "${pre}.3341" "Stale database for diskset ${DS}"
		    ;;
		2)  # tagged data
		    log_info "${pre}.3342" \
			"Tagged data encountered for diskset ${DS}"
		    ;;
		3)  # half mediators and half replicas
		    log_info "${pre}.3343" \
"Only 50% replicas and 50% mediator hosts available for diskset ${DS}"
		    ;;
		esac

		# A read-only take is of no use to the data services, so
		# release the diskset again immediately.
		${MDBIN}/metaset -s ${DS} -r >${TMPERR} 2>&1
		if [[ $? -eq 0 ]]; then
		    log_info "${pre}.2340" "Diskset ${DS} released"
		else
		    log_info "${pre}.3344" "Error releasing diskset ${DS}"
		    if [[ -s ${TMPERR} ]]; then
			log_info "${pre}.3345" \
			    "metaset -s ${DS} -r: $(cat ${TMPERR})"
		    fi
		fi
		# break out of while loop
		break
		;;
	    *)  # other non-zero exit code
		log_info "${pre}.3346" \
"metaset -s ${DS} -f -t failed with exit code ${TKO_RC}: $(cat ${TMPERR})"
		;;
	    esac

	    if [[ ${TKO_ATTEMPTS} -ge ${TKO_MAXATTEMPTS} ]]; then
		# break out of while loop
		break
	    fi
	    # was expr; 'let' matches the arithmetic style used elsewhere
	    # in this file and avoids forking a process per retry
	    let TKO_ATTEMPTS=TKO_ATTEMPTS+1
	    log_info "${pre}.2341" "Will try metaset -s ${DS} -f -t again"
	# end while loop
	done

	if [[ ${TKO_OK} -eq 1 ]]; then
	    # Took ownership okay, we're finished with this diskset.
	    continue
	fi

	# Failed on this diskset, so whole script will exit failure.
	# Caller will do error recovery.
	TKO_EXITSTATUS=1
    else
	# We do own the diskset.  So we need to verify that
	# we do not own the diskset on a read-only basis.
	# We got into this code because the system administrator took
	# ownership of diskset outside of Sun Cluster.

	# To determine whether we own the diskset read-only,
	# verify that ownership of the diskset is established,
	# then use this private interface:
	#
	#   # MD_DEBUG=SETINFO; export MD_DEBUG
	#   # metastat -s <setname> -S
	#   Status for set 2 = 0x1a<SNARFED,STALE,NM_LOADED>
	#   #
	#    
	# Look for either "STALE" or "TAGDATA" in a line formatted
	# as above.

	MD_DEBUG=SETINFO; export MD_DEBUG
	${MDBIN}/metastat -s ${DS} -S > ${TMPERR} 2>&1
	if [[ $? -ne 0 ]]; then
	    log_info "${pre}.3347" \
"metastat command failed; unable to determine read-only status of diskset ${DS}"
	    unset MD_DEBUG
	    continue
	fi
	unset MD_DEBUG

	# Output looks like:
	#     "Status for set 2 = 0x1a<SNARFED,STALE,NM_LOADED>"
	# STALE | TAGDATA => readonly
	egrep 'STALE|TAGDATA' ${TMPERR} > /dev/null 2>&1
	if [[ $? -ne 0 ]]; then
	    # Output of metastat does not indicate STALE or TAGDATA
	    continue
	fi

	# Output of metastat indicates we DO have ro ownership
	log_info "${pre}.3348" \
	    "Could not get read-write ownership of diskset ${DS}"
	READONLY=true

	# release diskset
	${MDBIN}/metaset -s ${DS} -r >${TMPERR} 2>&1
	if [[ $? -eq 0 ]]; then
	    log_info "${pre}.2342" "Diskset ${DS} released"
	else
	    log_info "${pre}.3349" "Error releasing diskset ${DS}"
	    if [[ -s ${TMPERR} ]]; then
		log_info "${pre}.3350" "metaset -s ${DS} -r: $(cat ${TMPERR})"
	    fi
	fi

	# Caller will do error recovery.
	TKO_EXITSTATUS=1
    fi
# end for loop
done

# Fix: this stage-completion message used to be logged inside the for
# loop (once per diskset); log it exactly once after all disksets are
# processed, matching sds_release's handling of its stage message.
log_info "${pre}.4026" "Diskset ownership stage complete"

rm -f ${TMPERR}
if [ ${READONLY} = true ]; then
	return 77
fi
return $TKO_EXITSTATUS
}


###########################################################################
#
# function grep_for_resync diskset
#
# diskset - name of the diskset whose release just failed
#
# Determine whether a resync is causing diskset release to fail: a
# release can fail while a mirror resync is in flight, and metastat
# reports this in its status output.  If so, log a warning message.
#
function grep_for_resync
{
	typeset setname

	setname="$1"
	${MDBIN}/metastat -s ${setname} 2>&1 | fgrep -i "resync" >/dev/null
	if [ $? -eq 0 ]; then
		log_info "${pre}.2343" \
"According to metastat command, there is a meta resync active in the diskset ${setname}"
	fi
}


###########################################################################
#
# function sds_release dslist
#
# dslist - list of disksets to release
#
# Releases ownership of each diskset this node currently owns.  If the
# plain release fails, two recovery attempts are made: first the Instant
# Image/SNDR sc.stop script is run against the diskset's metadevices
# (when installed), then any processes still holding references are
# killed via kill_proc; the release is retried after each attempt.
#
# NOTE(review): TMPERR, real_fail and volumes_para are not typeset here
# and leak into the global scope.
#
# Returns 0 on success; 1 if release of any diskset failed.
#
function sds_release
{
typeset DS
typeset retval
retval=0
for DS in $*; do
	TMPERR=/var/opt/SUNWcluster/run/rel.err.$$
	log_info "${pre}.6310" "Check diskset ownership for ${DS}"
	${MDBIN}/metaset -s ${DS} -o 2> /dev/null
	if [[ $? -ne 0 ]]; then
		log_info "${pre}.6315" "Diskset ${DS} not owned by this node"
		# We do not own the diskset; do nothing.
		continue
	fi
	# only release if we currently own
	log_info "${pre}.6330" "Relinquishing ownership of diskset ${DS}" 
	${MDBIN}/metaset -s ${DS} -r > ${TMPERR} 2>&1
	if [[ $? -ne 0 ]]; then
                #
                # failing to release diskset may due to configuring
                # Instant Image. So check and call $sc_stop
                # Or else, this may be due to some processes still holding
                # references to the metadevices. Gather and kill such processes.
                #
                real_fail=1
                if [[ -x $svadm && -r /dev/sv && -x $sc_stop ]] ; then
                    #
                    # Collect the disksets to pass to $sc_stop. Exclude 
                    # SDS trans metadevice, but include the underlying
                    # metadevices of a trans device. See bug 4323547.
                    #
                    # we use metastat to gather the metadevices in a
                    # given diskset ${DS}. Example output is like,
                    #
                    #  DS/d0 -t DS/d1 DS/d4          # metadevice d0
                    #  DS/d1 -m DS/d2 DS/d3 1
                    #  DS/d2 1 1 /dev/did/dsk/d2s3
                    #  DS/d3 1 1 /dev/did/dsk/d3s3
                    #  DS/d4 -m DS/d5 DS/d6 1
                    #  DS/d5 1 1 /dev/did/dsk/d2s4
                    #  DS/d6 1 1 /dev/did/dsk/d3s4 
                    #  DS/d10 -m DS/d12 DS/d13 1     # metadevice d10
                    #  DS/d12 1 1 /dev/did/dsk/d2s0
                    #  DS/d13 1 1 /dev/did/dsk/d3s0
                    #  DS/d20 1 1 /dev/did/dsk/d2s5  # metadevice d20
                    #
                    # We are looking for "top level" metadevices except
                    # in the case of trans metadevices, in which case, we
                    # want the underlying metadevices.
                    # 
                    # A "top level" metadevice is a metadevice which is not
                    # a underlying component of any other metadevice.
                    #
                    # The "leaves[]" associative array is used to invalidate
                    # metadevices which belong to a "top level" metadevice,
                    # or in the case of trans metadevices, invalidate the
                    # trans metadevice itself.
                    # 
                    # We eventually print out the metadevices which have not
                    # been invalidated, using split() to extract the fields
                    # we want to construct /dev/md/%s/rdsk/%s output.
                    # 
                    # the final output for above example is,
                    #
                    # /dev/md/DS/d1 /dev/md/DS/d4 /dev/md/DS/d10 /dev/md/DS/d20
                    #
                    volumes_para=$(${MDBIN}/metastat -s ${DS} -p | nawk '{
                       if($2 ~ "-t") leaves[$1] = 1;
                       for(i=2; i<=NF; i++)
                           if ( $2 !~ "-t" ) leaves[$i] = 1;
                       if($1 ~ /\// && leaves[$1] != 1) {
                           split($1,A,"/");
                           printf("/dev/md/%s/rdsk/%s\n",A[1],A[2]);
                       } }')
                    log_info "${pre}.1175" \
                        "calling ${sc_stop} to disable Instant Image or SNDR"
                    #
                    # calling Instant Image sc_stop script, which umount file
                    # systems for II volumes, kill applications which accessing
                    # II volumes, disable II volume pairs and disable sv.
                    #
                    ${sc_stop} "${volumes_para}"

                    # retry the release after sc_stop has cleaned up
                    ${MDBIN}/metaset -s ${DS} -r > ${TMPERR} 2>&1
                    if [[ $? -eq 0 ]] ; then 
                       real_fail=0
                    fi
                fi
                if [[ ${real_fail} -eq 1 ]] ; then
                    #
                    # Gather all the top level metadevices to pass to kill_proc
                    # (same nawk as above minus the trans-device exclusion)
                    #
                    volumes_para=$(${MDBIN}/metastat -s ${DS} -p | nawk '{
                       for(i=2; i<=NF; i++)
                           if ( $2 !~ "-t" ) leaves[$i] = 1;
                       if($1 ~ /\// && leaves[$1] != 1) {
                           split($1,A,"/");
                           printf("/dev/md/%s/rdsk/%s\n",A[1],A[2]);
                       } }')
                    kill_proc ${volumes_para}
                    # final retry after killing the holders
                    ${MDBIN}/metaset -s ${DS} -r > ${TMPERR} 2>&1
                    if [[ $? -ne 0 ]] ; then
                       log_info "${pre}.3351" "release of diskset failed"
                       if grep -i -w busy ${TMPERR} >/dev/null 2>&1 ; then
                           log_info "${pre}.3352" \
                               "Some disk partition(s) in the metaset are 'busy'"
                           # diagnose a possible in-flight resync in the
                           # background; result is log-only
                           grep_for_resync ${DS} &
                       fi
                       retval=1
                    fi
                fi
	fi
	rm -f ${TMPERR}
done
log_info "${pre}.4027" "Diskset relinquish stage complete"
return ${retval}
}

###########################################################################
#
# function kill_proc volumes_para
#
# volumes_para - The list of volumes/metadevices
#
# This function kills any procs that have open references to the passed
# list of volumes/metadevices.  fuser -k delivers SIGKILL and reports the
# pids; survivors are polled for up to three one-second intervals and
# re-signalled each time, and the whole sequence is attempted twice.

function kill_proc
{
typeset -i attempt poll
typeset victims survivors p

if [ $# -eq 0 ]; then
    return
fi

attempt=0
while (( attempt <= 1 )); do
    # fuser -k sends SIGKILL to every process holding a device open and
    # prints their pids on stdout
    victims=$(/usr/sbin/fuser -k $* 2>/dev/null)
    if [ -z "${victims}" ]; then
        break
    fi
    #
    # Check whether the processes have gone away or not
    #
    poll=0
    while (( poll < 3 )); do
        survivors=
        for p in ${victims}; do
            #
            # return value of zero indicates process still alive
            #
            if /usr/bin/kill -s 0 ${p} 2>/dev/null; then
                survivors="${survivors} ${p}"
            fi
        done
        if [ -z "${survivors}" ]; then
            break
        fi
        #
        # Send one more signal in case the previous one is missed
        #
        /usr/bin/kill -9 ${survivors} 2>/dev/null
        victims=${survivors}
        /usr/bin/sleep 1
        let poll=poll+1
    done
    let attempt=attempt+1
done
}

###########################################################################
#
# function construct_method_args_list i_dsname o_all_loghosts o_lm_loghosts
#				      o_nm_loghosts
#
# i_dsname - The data service name forms the input parameter
#
# o_all_loghosts - An output parameter that is a list of all the logical
#		   hosts in the cluster configuration offering this data
#		   service.
#
# o_lm_loghosts - An output parameter that is a list of all logical hosts
#		  that this physical node is supposed to master that is
#		  offering this data service.
#
# o_nm_loghosts - An output parameter that is a list of all logical hosts
#		  that this physical node is not supposed to master that is
#		  offering this data service.
#
# The three output parameters name caller variables, which are assigned
# (via eval) comma-separated lists.  Reads the globals CLUSTERBIN,
# CCDFILE, CLUSTERNAME, clusterstate, localhostname, lname, curr_master
# and keyword.

function construct_method_args_list
{
typeset i_dsname
typeset o_all_loghosts
typeset o_lm_loghosts
typeset o_nm_loghosts
typeset cm_row cm
typeset r rows
typeset loghost_name
typeset tmp_list1 tmp_list2
# Fix: the loop variable "l" (used in the PRE_REMOVE pruning below) was
# previously not typeset and leaked into the global scope.
typeset l
i_dsname=$1

# get list of all logical hosts offering this data service
# (LOGHOST_DS rows are <fmt>:<lname>:<dsname>)

rows=$(${CLUSTERBIN}/scccd -f ${CCDFILE} ${CLUSTERNAME} LOGHOST_DS query \
			dsname ${i_dsname})

o_all_loghosts=""
o_lm_loghosts=""
o_nm_loghosts=""

for r in ${rows}; do
  loghost_name=${r%:*}
  loghost_name=${loghost_name#*:}
  if [[ -z "${o_all_loghosts}" ]]; then
    o_all_loghosts=${loghost_name}
  else
    o_all_loghosts="${o_all_loghosts},${loghost_name}"
  fi

  # special handling is required for the abort case, because the LOGHOST_CM
  # entries are not removed from the CCD and no decisions about mastered and
  # not-mastered lists should be made based on these entries. After the abort
  # transition, we know that this node will not own anything, so we stick
  # everything into the not-mastered list.

  if [[ "${clusterstate}" = "abort" ]]; then
    if [[ -z "${o_nm_loghosts}" ]]; then
      o_nm_loghosts=${loghost_name}
    else
      o_nm_loghosts="${o_nm_loghosts},${loghost_name}"
    fi
    continue
  fi

  # retrieve the current master of the logical host
  # (LOGHOST_CM rows are <fmt>:<lname>:<curr_master>)

  cm_row=$(${CLUSTERBIN}/scccd -f ${CCDFILE} ${CLUSTERNAME} LOGHOST_CM query \
			lname ${loghost_name})
  cm=${cm_row#*:*:}

  # the logical host is locally mastered if the CCD says this node is its
  # current master, or if it is the logical host named on the command
  # line and the row being added/removed names this node as master
  if [[ ( -n "${cm}" && "${cm}" = "${localhostname}" ) || \
        ( "${loghost_name}" = "${lname}"  && "${curr_master}" = "${localhostname}" ) \
     ]]; then
    if [[ -z "${o_lm_loghosts}" ]]; then
      o_lm_loghosts=${loghost_name}
    else
      o_lm_loghosts="${o_lm_loghosts},${loghost_name}"
    fi
  else
    if [[ -z "${o_nm_loghosts}" ]]; then
      o_nm_loghosts=${loghost_name}
    else
      o_nm_loghosts="${o_nm_loghosts},${loghost_name}"
    fi
  fi
done

# if we are removing the LOGHOST_CM row corresponding to lname, remove
# lname from the o_lm_loghosts list, and add it to the o_nm_loghosts list
 
if [[ "${keyword}" = "CCDSYNC_PRE_REMOVE" ]]; then
  tmp_list1=$(print ${o_lm_loghosts} | tr ',' ' ')
  tmp_list2=""
  for l in ${tmp_list1}; do
    if [[ "${l}" != "${lname}" ]]; then
      if [[ -z "${tmp_list2}" ]]; then
	tmp_list2=${l}
      else
	tmp_list2="${tmp_list2},${l}"
      fi
    else
      if [[ -z "${o_nm_loghosts}" ]]; then
	o_nm_loghosts=${l}
      else
	o_nm_loghosts="${o_nm_loghosts},${l}"
      fi
    fi
  done
  o_lm_loghosts=${tmp_list2}
fi

# assign the computed lists into the caller's variables

eval $2=${o_all_loghosts}
eval $3=${o_lm_loghosts}
eval $4=${o_nm_loghosts}
}

###########################################################################
#
# function ifconfig_ip mode logif_list
#
# mode - The mode of the logical interface - can be "up", "down" or
#	 "config_but_dn" (plumb and configure the address without
#	 marking the interface up)
#
# logif_list - The list of logical interfaces that has to be "ifconfig'ed"
#	       according to the specified mode for the logical host
#	       identified by the global variable ${lname}
#
# For each logical interface the LOGIP row supplies, per node, the
# physical interface to use plus the logical IP address.  When the PNM
# package is installed, the physical interface is mapped to the active
# adapter of its NAFO backup group first.  In "up" mode a gratuitous-ARP
# retransmitter (sc_retrans_arp) is started in the background; in "down"
# mode any such retransmitter for ${lname} is killed.
#
# NOTE(review): USE_NAFO and RC are not typeset and leak into the global
# scope; the pkginfo SUNWpnm check is also repeated once per interface
# and could be hoisted out of the loop.
#
# Returns 0 on success, 1 on any ifconfig/PNM failure.

function ifconfig_ip
{
typeset mode
typeset logif_list l iprow
typeset ip_nodelist nodearray
typeset iflist ifarray
typeset ipaddr
typeset localnode_index n found
typeset retval retval2
typeset arp_list
typeset nafogroup
typeset nafoif


mode=$1
shift
logif_list=$*
arp_list=""

for l in ${logif_list}; do

  # retrieve the logical IP information for this logical interface
  iprow=$(${CLUSTERBIN}/scccd -f ${CCDFILE} ${CLUSTERNAME} LOGIP query \
		logif ${l})
  if [[ -z "${iprow}" ]]; then
    continue
  fi

  # Judging by the expansions below, a LOGIP row has five colon-separated
  # fields: node list in field 2, per-node interface list in field 3 and
  # the logical IP address in field 4; the two sub-lists are parallel,
  # comma-separated lists.

  # ip_nodelist: 2nd field
  ip_nodelist=${iprow%:*:*:*}
  ip_nodelist=${ip_nodelist#*:}
  ip_nodelist=$(print ${ip_nodelist} | tr ',' ' ')

  # iflist: 3rd field (interface to use on the node at the same index)
  iflist=${iprow%:*:*}
  iflist=${iflist#*:*:}
  iflist=$(print ${iflist} | tr ',' ' ')

  # ipaddr: 4th field
  ipaddr=${iprow%:*}
  ipaddr=${ipaddr#*:*:*:}

  set -A nodearray ${ip_nodelist}
  set -A ifarray ${iflist}

  # search for my node index
  # (ifarray[localnode_index] is then the interface to use on this node;
  # interfaces not hosted on this node are skipped)

  let n=0
  let found=0
  while (( n < ${#nodearray[*]} )); do
    if [[ "${nodearray[n]}" = "${localhostname}" ]]; then
      let found=1
      let localnode_index=${n}
      break
    fi
    let n=n+1
  done
  if (( found == 0 )); then
    continue
  fi

  #
  # Usage of PNM.
  # To the interface which is of real significance , get the active
  # interface for the backup group which the interface belongs
  # XXX: For temporary stuff , check if PNM package exists,if
  # assuming that people will use PNM.
  #
  USE_NAFO=0
  /usr/bin/pkginfo SUNWpnm >/dev/null 2>&1
  RC=$?
  if [ ${RC} -eq 0 ]; then
        USE_NAFO=1
  fi
  if [ ${USE_NAFO} -eq 1 ]; then
        # map the configured interface to its NAFO backup group
        nafogroup=$(/opt/SUNWpnm/bin/pnmrtop ${ifarray[localnode_index]})
        RC=$?
        if [ ${RC} -ne 0 ]; then
                if [ "${mode}" = "up" ]; then
                  #
                  # pnm should have been up and configured.
                  # hence this is an error.
                  #
                  log_info "${pre}.4900" "error in getting the PNM backup group for ${ifarray[localnode_index]}. Please Check if interface is configured on a PNM backup group  OR check console for errors messages from PNM"
                  return 1
                fi
                # 
                # PNM failed but it is possible that loghost/pnm could be
                # shutdown before calling the logical host.(reconfiguration)
                # to have an idempotent operation, if we supposed to shutdown
                # (ifconfig down) then ignore the error and return.
                # Hence we do nothing and assume the real adapter is the
                # one under consideration.

        else
          # 
          # now that pnm group is found, we get the active interface.
          #
          nafoif=$(/opt/SUNWpnm/bin/pnmptor ${nafogroup})
          RC=$?
          if [ ${RC} -ne 0 ]; then
                #
                # we cannot get the pnmptor and this means that we do
                # do not have any active interface on the PNM
                #
                if [ "${mode}" = "up" ]; then
                  log_info "${pre}.4901" "There seems to be no active interface for PNM backup group ${nafogroup}. Please Check if interface is configured on a PNM backup group  OR check console for errors messages from PNM"
                  return 1
                fi
         else
                # use the backup group's active adapter from here on
                ifarray[localnode_index]=${nafoif}
         fi
      fi
  fi

# Fix for
# 4151335 Logical interfaces are not configured at the time of calling START methods
# If mode is "up" Do ifconfig irrespective of whether it is already there
# because it might be there without having been marked up

if [[ "${mode}" = "up" ]]; then
   retval2=0
   /usr/sbin/ifconfig ${ifarray[localnode_index]}:${l} ${ipaddr} netmask + \
       broadcast + -trailers up || retval2=$?
   if [[ ${retval2} -ne 0 ]]; then
       log_info "${pre}.4010" "ifconfig ${ifarray[localnode_index]}:${l} ${ipaddr} up failed"
       return 1
   fi
   # remember addr@interface so sc_retrans_arp can advertise the move
   arp_list="${arp_list} ${ipaddr}@${ifarray[localnode_index]}"

 else  # Mode is NOT "up"

 # check whether the logical interface is already up

   retval=0
   /usr/sbin/ifconfig -au | /bin/grep -w ${ifarray[localnode_index]}:${l} \
       > /dev/null 2>&1 || retval=$?
   if [[ ${retval} -eq 0 ]]; then

     # interface is up. ifconfig it down if mode is down
     # (in "config_but_dn" mode an already-up interface is left alone)
     if [[ "${mode}" = "down" ]]; then
       retval2=0
       /usr/sbin/ifconfig ${ifarray[localnode_index]}:${l}  down || \
               retval2=$?
       if [[ ${retval2} -ne 0 ]]; then
         log_info "${pre}.4020" "ifconfig ${ifarray[localnode_index]}:${l} down failed"
         return 1
       fi
     fi

   elif [[ ${retval} -eq 1 ]]; then

     # Bug fix for
     # 4151335 Logical interfaces are not configured at the time of
     # calling START methods
     # interface is down. ifconfig it without marking it up if mode is config_but_dn

     if [[ "${mode}" = "config_but_dn" ]]; then
       retval2=0
       /usr/sbin/ifconfig ${ifarray[localnode_index]}:${l} plumb
       /usr/sbin/ifconfig ${ifarray[localnode_index]}:${l} ${ipaddr} netmask + \
               broadcast + -trailers || retval2=$?
       if [[ ${retval2} -ne 0 ]]; then
         log_info "${pre}.4010" "ifconfig ${ifarray[localnode_index]}:${l} ${ipaddr} up failed"
         return 1
       fi
       #arp_list="${arp_list} ${ipaddr}@${ifarray[localnode_index]}"
     fi
   fi # if retval -eq 1, i.e. the interface is NOT there
 fi # If mode is "up"
done # For all logical interfaces

# Start (or stop) the background gratuitous-ARP retransmitter for this
# logical host; killarp reads the pid file sc_retrans_arp writes.
if [[ "${mode}" = "up" && -n "${arp_list}" ]]; then
  ${CLUSTERBIN}/sc_retrans_arp ${CLUSTERNAME} 5 retransarp.${lname} ${arp_list} &
elif [[ "${mode}" = "down" ]]; then
  killarp ${lname}
fi

return 0
}

###########################################################################
#
# function killarp i_lname
#
# i_lname - input logical host name
#
# This function kills any remnants of the arp program running on this node
# for the input logical host.

function killarp
{
# Kill any leftover retransmit-arp process recorded for logical host $1
# on this node, then remove its pid file.
typeset i_lname arpfile
typeset dummy

i_lname=$1
arpfile=${CLUSTERVAR}/retransarp.${i_lname}
if [[ -f "${arpfile}" ]]; then
  # The pid file holds the pid written by sc_retrans_arp; failures of
  # kill (e.g. process already gone) are deliberately ignored.
  kill -9 $(/bin/cat ${arpfile}) || dummy=$?
  if [[ -f "${arpfile}" ]]; then
    /bin/rm -f ${arpfile}
  fi
fi
}

###########################################################################
#
# function get_ordered_ds_list i_ds_list o_ds_list
#
# i_ds_list - The input list of data services.
#
# o_ds_list - The output list of data services: the input data services
#	      ordered according to the dependencies between the
#	      services.
#
function get_ordered_ds_list
{
# Topologically sort the input data service list so that every service
# appears after all services it depends on.  Output is a comma separated
# list assigned to the variable named by $2.
typeset i_ds_list
typeset o_ds_list tmp_ds_list
typeset rescan_list dependency_list
typeset found progress
typeset i j k

i_ds_list=$1
rescan_list=""
o_ds_list=""

while (( 1 )); do
  let progress=0

  for i in ${i_ds_list}; do

    # The dependency list is the third field of the DS_DEPSVC_LIST row;
    # it is comma separated, so convert it to a space separated list.
    dependency_list=$(${CLUSTERBIN}/scccd -f ${CCDFILE} ${CLUSTERNAME} \
		DS_DEPSVC_LIST query ds_name ${i})
    dependency_list=${dependency_list#*:*:}
    dependency_list=$(echo ${dependency_list} | tr ',' ' ')

    # If the data service has no dependencies, put it in the output list.
    # Otherwise, emit it only when every one of its dependencies is
    # already in the output list; anything not yet ready is rescanned on
    # the next pass.

    if [[ -z "${dependency_list}" ]]; then
      if [[ -z "${o_ds_list}" ]]; then
	o_ds_list=${i}
      else
	o_ds_list="${o_ds_list},${i}"
      fi
      let progress=1
    else
      let found=1
      for j in ${dependency_list}; do
	# Reset the flag for every dependency.  Previously it was set
	# only once, so any dependency after the first satisfied one was
	# incorrectly treated as satisfied and services could be emitted
	# before their dependencies.
	let found=0
	tmp_ds_list=$(echo ${o_ds_list} | tr ',' ' ')
	for k in ${tmp_ds_list}; do
	  if [[ "${j}" = "${k}" ]]; then
	    let found=1
	    break
	  fi
	done
	if (( found == 0 )); then
	  break
	fi
      done

      if (( found == 0 )); then
	rescan_list="${rescan_list} ${i}"
      else
	if [[ -z "${o_ds_list}" ]]; then
	  o_ds_list=${i}
	else
	  o_ds_list="${o_ds_list},${i}"
	fi
	let progress=1
      fi
    fi
  done

  i_ds_list=${rescan_list}
  rescan_list=""
  if [[ -z "${i_ds_list}" ]]; then
    break
  fi

  # Guard against circular or unsatisfiable dependencies: if a whole
  # pass resolved nothing, further passes cannot make progress either
  # and the loop would previously spin forever.  Append the unresolved
  # services in their remaining order so that none of them is lost.
  if (( progress == 0 )); then
    for i in ${i_ds_list}; do
      if [[ -z "${o_ds_list}" ]]; then
	o_ds_list=${i}
      else
	o_ds_list="${o_ds_list},${i}"
      fi
    done
    break
  fi
done

eval $2=${o_ds_list}
}

###########################################################################
#
# function exec_ds_method method_type
#
# method_type - input keyword which has one of the following values: "start",
#		"start_net", "stop", "stop_net", "abort", "abort_net",
#		"fm_init", "fm_start", "fm_stop", "fm_check".
#
# This function executes the method identified by the method_type parameter
# for all the data services in the ordered_ds_list array according to their
# dependency order.

function exec_ds_method
{
# Run the ${method_type} method of every data service in the global
# ordered_ds_array.
# Start-type methods (start, start_net, fm_init, fm_start) walk the
# array first-to-last, so dependencies start before their dependents;
# stop-type methods (stop, stop_net, abort, abort_net, fm_stop) walk it
# last-to-first.
# Each method's path and timeout are single fields of a colon-separated
# CCD row; the paired %/# pattern strips below remove a fixed number of
# trailing and leading fields to isolate one field.
# NOTE(review): the field positions implied below (8 fields for
# DS_METHOD_PATH/DS_NM_PATH, more for DS_FM_PATH) are inferred from the
# strip patterns -- confirm against the CCD row format.
typeset method_type
typeset dsname dsrow
typeset method_path method_to
typeset n
method_type=$1
if [[ "${method_type}" = "start" || "${method_type}" = "start_net" || \
      "${method_type}" = "fm_init" || "${method_type}" = "fm_start" ]]; then
  
  let n=0
  while (( n < ${#ordered_ds_array[*]} )); do
    dsname=${ordered_ds_array[n]}
    if [[ "${method_type}" = "start" ]]; then

      # start: strip last 5 / first 2 fields -> field 3 (path);
      # strip last 4 / first 3 fields -> field 4 (timeout).
      dsrow=$(${CLUSTERBIN}/scccd -f ${CCDFILE} ${CLUSTERNAME} DS_METHOD_PATH \
		query ds_name ${dsname})
      method_path=${dsrow%:*:*:*:*:*}
      method_path=${method_path#*:*:}

      method_to=${dsrow%:*:*:*:*}
      method_to=${method_to#*:*:*:}

    elif [[ "${method_type}" = "start_net" ]]; then

      # start_net: same field positions as "start" but taken from the
      # DS_NM_PATH (network method) row.
      dsrow=$(${CLUSTERBIN}/scccd -f ${CCDFILE} ${CLUSTERNAME} DS_NM_PATH \
		query ds_name ${dsname})
      method_path=${dsrow%:*:*:*:*:*}
      method_path=${method_path#*:*:}

      method_to=${dsrow%:*:*:*:*}
      method_to=${method_to#*:*:*:}

    elif [[ "${method_type}" = "fm_init" || "${method_type}" = "fm_start" ]]; then

      # Fault monitor methods come from the DS_FM_PATH row.
      dsrow=$(${CLUSTERBIN}/scccd -f ${CCDFILE} ${CLUSTERNAME} DS_FM_PATH \
		query ds_name ${dsname})
      
      if [[ "${method_type}" = "fm_init" ]]; then
	# fm_init: field 3 (path) and field 4 (timeout).
	method_path=${dsrow%:*:*:*:*:*:*:*}
	method_path=${method_path#*:*:}

	method_to=${dsrow%:*:*:*:*:*:*}
	method_to=${method_to#*:*:*:}
	
      elif [[ "${method_type}" = "fm_start" ]]; then
	# fm_start: field 5 (path) and field 6 (timeout).
	method_path=${dsrow%:*:*:*:*:*}
	method_path=${method_path#*:*:*:*:}

	method_to=${dsrow%:*:*:*:*}
	method_to=${method_to#*:*:*:*:*:}
      fi
    fi

    # Skip services that registered no method of this type.
    if [[ -n "${method_path}" ]]; then
      call_method ${method_type} ${n} ${method_path} ${method_to}
    fi

    let n=n+1
  done

elif [[ "${method_type}" = "stop" || "${method_type}" = "stop_net" || \
	"${method_type}" = "abort" || "${method_type}" = "abort_net" || \
	"${method_type}" = "fm_stop" ]]; then

  # Shutdown runs in reverse dependency order.
  let n=${#ordered_ds_array[*]}-1
  while (( n >= 0 )); do
    dsname=${ordered_ds_array[n]}

    if [[ "${method_type}" = "stop" || "${method_type}" = "abort" ]]; then
      dsrow=$(${CLUSTERBIN}/scccd -f ${CCDFILE} ${CLUSTERNAME} DS_METHOD_PATH \
		query ds_name ${dsname})

      if [[ "${method_type}" = "stop" ]]; then
	# stop: field 5 (path) and field 6 (timeout).
	method_path=${dsrow%:*:*:*}
	method_path=${method_path#*:*:*:*:}

	method_to=${dsrow%:*:*}
	method_to=${method_to#*:*:*:*:*:}
	
      elif [[ "${method_type}" = "abort" ]]; then
	# abort: field 7 (path) and field 8 (timeout, the last field).
	method_path=${dsrow%:*}
	method_path=${method_path#*:*:*:*:*:*:}

	method_to=${dsrow#*:*:*:*:*:*:*:}
      fi

    elif [[ "${method_type}" = "stop_net" || "${method_type}" = "abort_net" ]]; then
      dsrow=$(${CLUSTERBIN}/scccd -f ${CCDFILE} ${CLUSTERNAME} DS_NM_PATH \
		query ds_name ${dsname})

      if [[ "${method_type}" = "stop_net" ]]; then
	# stop_net: field 5 (path) and field 6 (timeout).
	method_path=${dsrow%:*:*:*}
	method_path=${method_path#*:*:*:*:}

	method_to=${dsrow%:*:*}
	method_to=${method_to#*:*:*:*:*:}

      elif [[ "${method_type}" = "abort_net" ]]; then
	# abort_net: field 7 (path) and field 8 (timeout, the last field).
	method_path=${dsrow%:*}
	method_path=${method_path#*:*:*:*:*:*:}

	method_to=${dsrow#*:*:*:*:*:*:*:}
      fi

    elif [[ "${method_type}" = "fm_stop" ]]; then
      dsrow=$(${CLUSTERBIN}/scccd -f ${CCDFILE} ${CLUSTERNAME} DS_FM_PATH \
		query ds_name ${dsname})

      # fm_stop: field 7 (path) and field 8 (timeout).
      method_path=${dsrow%:*:*:*}
      method_path=${method_path#*:*:*:*:*:*:}

      method_to=${dsrow%:*:*}
      method_to=${method_to#*:*:*:*:*:*:*:}
    fi

    # Skip services that registered no method of this type.
    if [[ -n "${method_path}" ]]; then
      call_method ${method_type} ${n} ${method_path} ${method_to}
    fi
    let n=n-1
  done
fi
}

###########################################################################
#
# function call_method method_type ds_index method_path method_to
#
# method_type - the type of the method to be called. Same as the first
#		argument to exec_ds_method
#
# ds_index - the index of the data service in the ordered_ds_array.
#
# method_path - the full path of the method to be called.
#
# method_to - the timeout registered with the method

function call_method
{
typeset method_type
typeset ds_index
typeset method_path
typeset method_to
typeset retval method_called
method_type=$1
ds_index=$2
method_path=$3
method_to=$4

# Scheduling class used for start-type methods, and for stop-type
# methods on uniprocessor machines.  Defaults to TS (timeshare); can be
# overridden via the conf file below.
# NOTE(review): CONF_FILE, DFLT_CLASS and cpu_count are not typeset and
# therefore leak into the caller's scope.
CONF_FILE=/etc/opt/SUNWcluster/conf/dflt_sched_class

DFLT_CLASS=TS
if [ -f ${CONF_FILE} ] ; then
        DFLT_CLASS=`cat ${CONF_FILE}`
fi



# Count CPUs via psrinfo; per bug 4321549 methods run in the RT class
# only when there is more than one CPU.
cpu_count=$(/usr/sbin/psrinfo 2> /dev/null | awk 'END {print NR}' )

retval=0
let method_called=0

# ds_state_array[i] = "1" appears to mean this node is in the data
# service's node list -- TODO confirm.  In that state both start- and
# stop-type methods run, with the lm/nm loghost lists as arguments.
if [[ "${ds_state_array[ds_index]}" = "1" && -f "${method_path}" ]]; then
  
  if [[ "${method_type}" = "stop" || "${method_type}" = "stop_net" || \
        "${method_type}" = "abort" || "${method_type}" = "abort_net" || \
        "${method_type}" = "fm_stop" ]]; then

	  # for stop/stopnet let us run as REAL time
	  # changing the real time priority to the minimum (100) instead of
	  # 114 which is the priority of the spawning ccdd.
          # bug 4321549: only execute method in RT when there are more than 1 CPUs
          # Note: ${method_to} is passed twice -- once as timed_run's own
          # timeout argument and once as the method's trailing argument.
          if (( cpu_count > 1)); then
 	  	/usr/bin/priocntl -c RT -p 0 -e ${CLUSTERBIN}/timed_run ${method_to} ${method_path} \
		"${lm_loghosts_array[ds_index]}" "${nm_loghosts_array[ds_index]}" \
			${method_to} || retval=$?
          else
                /usr/bin/priocntl -c ${DFLT_CLASS} -e ${CLUSTERBIN}/timed_run ${method_to} ${method_path} \
                "${lm_loghosts_array[ds_index]}" "${nm_loghosts_array[ds_index]}" \
                        ${method_to} || retval=$?
          fi

  else
	 # run start/start_net as TIME SHARED.

	  /usr/bin/priocntl -c ${DFLT_CLASS} -e ${CLUSTERBIN}/timed_run \
		${method_to} ${method_path} "${lm_loghosts_array[ds_index]}" \
			 "${nm_loghosts_array[ds_index]}"  ${method_to} 
	  retval=$?
  fi
  let method_called=1

# ds_state_array[i] = "0": only stop-type methods run, with an empty
# first list and all_loghosts_array as the second -- presumably a
# cleanup on nodes outside the service's node list; verify.
elif [[ "${ds_state_array[ds_index]}" = "0" && -f "${method_path}" ]]; then
  if [[ "${method_type}" = "stop" || "${method_type}" = "stop_net" || \
        "${method_type}" = "abort" || "${method_type}" = "abort_net" || \
        "${method_type}" = "fm_stop" ]]; then
      # bug 4321549: only execute method in RT when there are more than 1 CPUs
      if (( cpu_count > 1 )) ; then
    	/usr/bin/priocntl -c RT -p 0 -e ${CLUSTERBIN}/timed_run ${method_to} ${method_path} "" \
		"${all_loghosts_array[ds_index]}" ${method_to} || retval=$?
      else
          /usr/bin/priocntl -c ${DFLT_CLASS} -e ${CLUSTERBIN}/timed_run ${method_to} ${method_path} ""\
                "${all_loghosts_array[ds_index]}" ${method_to} || retval=$?
      fi
      let method_called=1
  fi
fi

# Log the outcome only if a method was actually invoked.
if (( method_called == 1 )); then
  if [[ ${retval} -eq 0 ]]; then
    log_info "${pre}.1050" "${method_type} method of data service ${ordered_ds_array[ds_index]} completed successfully."
  else
    log_info "${pre}.3010" "${method_type} method of data service ${ordered_ds_array[ds_index]} failed"
  fi
fi

}


# Main dispatch: perform the phase of the logical host transition that
# the CCD synchronization keyword selects.
case ${keyword} in
  
  CCDSYNC_PRE_ADDU)
	# Bring the logical host up: take ownership of its disk groups,
	# mount its file systems, configure its logical IP addresses and
	# run the data service start methods in dependency order.
	init

	export CLUSTERNAME

	if [[ "${curr_master}" = "${localhostname}" ]]; then
	  log_info "${pre}.1030" "Taking over logical host ${lname}"

	  # remove (leftover) file that indicates SDS tkown was read-only
	  rm -f ${CLUSTERVAR}/readonly.${lname}
	  
	  # Create an error file.  If all PRE_ADDU operations succeed,
	  # remove the error file at the end.  Later, POST_ADDU will
	  # exit 1 if the error file exists.
	  touch ${CLUSTERVAR}/loghost.${lname}

	# next line added by bugid_4411476
	# (loghost.dg checkpoints how far takeover got so the abort
	# sequence knows how much to undo)
	echo "${lname}:${vm}:import:${dglist}" > ${CLUSTERVAR}/loghost.dg

	  if [[ "${vm}" != "sds" ]]; then
	    cvm_import ${dglist} 
	    if [[ $? -ne 0 ]]; then
	      # The ${CLUSTERVAR}/loghost.${lname} created above records
	      # the failure.
	      exit 0
	    fi
	  else
	    sds_tkown ${dglist}
	    result=$?
	    if [[ ${result} -ne 0 ]]; then
	      # A message was logged in sds_tkown.
	      # The ${CLUSTERVAR}/loghost.${lname} created above records
	      # the failure.
	      # loghostreconfig will put the logical host into maintenance
	      # mode if it detects this file.
	      if [[ ${result} -eq 77 ]]; then
		touch ${CLUSTERVAR}/readonly.${lname}
		# Create file for scadmin.  Of course if scadmin is
		# running on another node, it cannot use this information.
		touch ${CLUSTERVAR}/readonly
	      fi
	      exit 0
	    fi
	  fi

	  # Translate the current member node ids into host names.
	  curr_nodes=""

	  for n in ${curr_members}; do
	    nodename=$(${CLUSTERBIN}/cdbmatch cluster.node.${n}.hostname \
				${cdbfile})
	    curr_nodes="${curr_nodes} ${nodename}"
	  done

	  if [[ "${vm}" != "sds" ]]; then
	    if anyone_else_present "${nodelist}" "${curr_nodes}" ${localhostname} ; then
	      failure_fencing release private ${dglist}
	    else
	      failure_fencing reserve private ${dglist}
	    fi
	  fi
	# ---- added following lines by bugid_4411476
	#
	# Do the echo here, because the step could timeout during
	# the scnfs mount. This means the abort sequence will try
	# to umount the filesystems prior to the deport
	#
	echo "${lname}:${vm}:mount:${dglist}" > ${CLUSTERVAR}/loghost.dg
	# ---- end of added lines

	  ${CLUSTERBIN}/scnfs ${CLUSTERNAME} mount ${lname}
	  if [[ $? -ne 0 ]]; then
		# The ${CLUSTERVAR}/loghost.${lname} created above records
		# the failure.
		exit 0
	  fi

	fi

	if [[ "${curr_master}" = "${localhostname}" ]]; then

       # Bug fix for
       # 4151335 Logical interfaces are not configured at the time of
       # calling START methods

       ifconfig_ip config_but_dn ${iplist}
       if [ $? -ne 0 ]; then
             # This should not happen
             # Something is wrong
             exit 0
       fi
     fi
	# added next line by bugid_4411476
	#
	echo "${lname}:${vm}:start:${dglist}" > ${CLUSTERVAR}/loghost.dg

	if (( in_nodelist == 1 )); then
	  exec_ds_method start
	fi

	if [[ "${curr_master}" = "${localhostname}" ]]; then


	# added next line by bugid_4411476
	#
	echo "${lname}:${vm}:ifconfig:${dglist}" > ${CLUSTERVAR}/loghost.dg

	  ifconfig_ip up ${iplist} 
	  if [ $? -ne 0 ]; then
		# The ${CLUSTERVAR}/loghost.${lname} created above records
		# the failure.
		exit 0
	  fi
	fi

	# added following by bugid_4411476
	#
	# Record state of the dataservice
	#
	echo "${lname}:${vm}:start_net:${dglist}" > ${CLUSTERVAR}/loghost.dg

	if (( in_nodelist == 1 )); then
	  exec_ds_method start_net
	  exec_ds_method fm_init
	fi

	# removing this file means that all PRE_ADDU operations
	# succeeded.
	rm -rf ${CLUSTERVAR}/loghost.${lname}
	# added next line by bugid_4411476
	#
	rm -f ${CLUSTERVAR}/loghost.dg
	;;

  CCDSYNC_POST_ADDU)
	init

	# If the file created by PRE_ADDU exists, there was a
	# problem and we exit with an error.
	if [ -f ${CLUSTERVAR}/loghost.${lname} ]; then
		rm -rf ${CLUSTERVAR}/loghost.${lname}
		exit 1
	fi

	export CLUSTERNAME

	if [[ "${curr_master}" = "${localhostname}" ]]; then
	  log_info "${pre}.1040" "Take over of logical host ${lname} succeeded"
	fi

	# Fault monitors are started only after the takeover committed.
	exec_ds_method fm_start
	;;

  CCDSYNC_PRE_REMOVE)
	# Shut the logical host down: stop fault monitors and data
	# services (reverse dependency order), unconfigure logical IP
	# addresses, unmount file systems and give up the disk groups.
	integer failed
	init

	export CLUSTERNAME

	if [[ "${curr_master}" = "${localhostname}" ]]; then
	  log_info "${pre}.1010" "Giving up logical host ${lname}"
	fi

	# Fix for bug# 4286147: Creation of the error file moved up here. 
	# We need to do this before calling any of data service methods 
	# in CCDSYNC_PRE_REMOVE, in case we time out during these methods.

	# Create an error file.  If all PRE_REMOVE operations succeed,
	# remove the error file at the end.  Later, POST_REMOVE will
	# exit 1 if the error file exists.
	touch ${CLUSTERVAR}/loghost.${lname}

	#
	# since fm_stop is already called by loghostreconfig
	# for abort cases, there is no need to do it here.
	#
	if [[ "${clusterstate}" != "abort" ]]; then
		exec_ds_method fm_stop
	fi

	if (( in_nodelist == 1 )); then
	  if [[ "${clusterstate}" = "abort" ]]; then
	    exec_ds_method abort_net
	  else
	    exec_ds_method stop_net
	  fi
	fi

	# If this is not set at the end of this routine, we may safely
	# remove the ${CLUSTERVAR}/loghost.${lname} file we are about
	# to create.  If it's set, it means we failed to ifconfig down
	# some IP addresses.
	failed=0

	if [[ "${curr_master}" = "${localhostname}" ]]; then

	  ifconfig_ip down ${iplist} 
	  if [[ $? -ne 0 ]]; then

		#	
		# no exiting here because methods may
		# complain if i exit here.

		# This flag indicates we should NOT remove the
		# ${CLUSTERVAR}/loghost.${lname} file (created above)
		# at the end of this procedure.
		failed=1
	  fi 

	fi 

	if (( in_nodelist == 1 )); then 
		if [[ "${clusterstate}" = "abort" ]]; then 
			exec_ds_method abort 
		else
	    		exec_ds_method stop
	  	fi
	fi

	# remove (leftover) file that indicates SDS release failure
	rm -f ${CLUSTERVAR}/release_failed.${lname}

	if [[ "${curr_master}" = "${localhostname}" ]]; then

	  ${CLUSTERBIN}/scnfs ${CLUSTERNAME} unmount ${lname}
	  
	  if [[ "${vm}" != "sds" ]]; then
	    cvm_deport ${dglist}
	    if [[ $? -ne 0 ]]; then

	    # the deport operation has failed. We will make one last attempt
	    # by calling the abort methods to close the open volumes and
	    # retrying the deport. If this fails too, we return an error and
	    # let the higher layers of software handle the error. Failure
	    # fencing will be removed in the abort transition.

	      exec_ds_method abort
	      # Record failures in file.
	      cvm_deport ${dglist}
	      if [[ $? -ne 0 ]]; then
		# The ${CLUSTERVAR}/loghost.${lname} created above records
		# the failure.
		exit 0
	      fi
	    fi
	      
	    failure_fencing release private ${dglist}
	  else
	    sds_release ${dglist}
	    if [[ $? -ne 0 ]]; then

	      # A release can fail because devices or volumes are busy due
	      # to data services that didn't shut down properly, or due to
	      # resyncs in progress.
	      # Record the failure in a file so that POST_REMOVE knows.
	      touch ${CLUSTERVAR}/release_failed.${lname}
	      # Create file for scadmin.  Of course if scadmin is
	      # running on another node, it cannot use this information.
	      touch ${CLUSTERVAR}/release_failed
	    fi
	  fi
	fi

	# We may safely remove this file if the 'failed' flag is not
	# set.  The failed flag indicates 'ifconfig down' of some
	# IP addresses failed.  If flag not set, all PRE_REMOVE operations
	# succeeded.
	if [[ "${failed}" -eq 0 ]]; then
		rm -rf ${CLUSTERVAR}/loghost.${lname}
	fi
	
	# following added by bugid_4411476
	#
	# Now see if the loghost.dg file should be removed
	if [[ "${abort_lname}" = "${lname}" ]]; then
		rm -f ${CLUSTERVAR}/loghost.dg
	fi
	;;

  CCDSYNC_POST_REMOVE)
	init

	# If the file created by PRE_REMOVE exists, there was a
	# problem and we exit with an error.
	if [ -f ${CLUSTERVAR}/loghost.${lname} ]; then
		rm -rf ${CLUSTERVAR}/loghost.${lname}
		exit 1
	fi

	# Special handling for failure of SDS diskset release while
	# going into maintenance mode:
	if [[ "${vm}" = "sds" ]]; then
		# If the release of the diskset failed, and
		# we're putting logical host into maintenance mode,
		# do nothing here but exit 0.
		# scadmin will properly handle.
		if [[ -f ${CLUSTERVAR}/release_failed.${lname} ]]; then
			# NOTE(review): ${clustname} is used here while
			# every other scccd call uses ${CLUSTERNAME} --
			# verify init sets clustname.
			mstate=$(${CLUSTERBIN}/scccd -f ${CCDFILE} \
			${clustname} LOGHOST_MSTATE query lname ${lname})
			mode=${mstate#*:*:}
			# NOTE(review): -eq is an arithmetic comparison,
			# so mode is expected to be an integer here --
			# confirm the LOGHOST_MSTATE field is numeric.
			if [[ "${mode}" -eq "0" ]]; then
				# This lhost is going to maintenance mode
				# No work required here,
				# but don't print give up success message.
				exit 0
			fi
			if [[ -f ${DOINGSTOPFLAG} ]]; then
				# We're in the middle of stopping this node.
				log_info "${pre}.4341" \
"Failure to release diskset in ${lname} will result in reboot of this node."
				exit 1
			fi
			# The lhost is switching from one node to another,
			# so let scadmin/loghostreconfig back out
			# of the switchover.
			log_info "${pre}.3353" \
"Backing out switchover of logical host ${lname} because diskset release failed"
			exit 1
		fi
	fi
 
	# added by bugid_4411476
	#
	if [ -f ${CLUSTERVAR}/loghost.dg ]; then
		rm -f ${CLUSTERVAR}/loghost.dg
	fi

	# Everything went as planned.

	if [[ "${curr_master}" = "${localhostname}" ]]; then
	  log_info "${pre}.1020" "Give up of logical host ${lname} succeeded"
	fi
	;;

  CCDSYNC_PRE_RESTORE)
	# doesn't call init
	#
	# first check if there are logical hosts mastered
	# if any exit with 1.
	#
	CLUSTERBIN=/opt/SUNWcluster/bin
	CLUSTERVAR=/var/opt/SUNWcluster
	CLUSTERETC=/etc/opt/SUNWcluster/conf
	# SSACLI is needed by the programs called by loghost_sync.
	SSACLI=${CLUSTERBIN}/scssa

	CLUSTERNAME=$(/bin/cat ${CLUSTERETC}/default_clustername)
	cdbfile=${CLUSTERETC}/${CLUSTERNAME}.cdb
	localhostname=$(${CLUSTERBIN}/cdbmatch \
		cluster.node.${localnodeid}.hostname ${cdbfile})
  	cm_row=$(${CLUSTERBIN}/scccd -f ${CCDFILE} ${CLUSTERNAME} \
		LOGHOST_CM query curr_master ${localhostname})

	# A non-empty row means this node masters at least one logical
	# host; restoring the CCD underneath it would be unsafe.
	if [ "${cm_row}" !=  "" ]; then
	log_info "${pre}.4090" "Cannot Restore CCD database when Logical Hosts are mastered"
		exit 1;
	fi
	;;

  CCDSYNC_PRE_RESTOREKEY | CCDSYNC_POST_RESTORE | CCDSYNC_POST_RESTOREKEY)
		# No per-node work is needed for these phases.
		;;

esac

exit 0
