#! /bin/sh
#
#	Copyright 03/11/97 Sun Microsystems, Inc.  All Rights Reserved.
#
#ident "@(#)tivoli_svc_stop_net.shi		1.2	97/03/11 SMI"
#
# tivoli_svc_stop_net -- stop the Tivoli oserv client instance
#
# Usage: tivoli_svc_stop_net <mastered list> <not mastered list>

#
#pragma ident "@(#)ds_boiler	1.3	98/09/15 SMI"
#
# common boiler for HA data services
#
#


# Name of this script, for use in log/usage messages.
ARGV0=`basename $0`
# Command used to write messages to syslog.
LOGGER=logger
# Syslog facility configured for the HA framework (queried via haget).
HA_SLOGFACILITY=`haget -f syslog_facility`
# Syslog tag under which HA data-service messages are logged.
HA_SLOGTAG=hadf
# Directory this script was invoked from; added to PATH below.
prog_path=`dirname $0`

# source in ha-services common utilities
# (provides the helpers used below: logerr, lognotice, source_env_file,
# is_member, ha_svc_not_running, generic_svc, ...)
# NOTE(review): prog_path is appended to PATH only AFTER this point, so
# hads_utilities must already be reachable via the caller's PATH -- verify.
. hads_utilities

# add the ha-service directory to the path
PATH=${prog_path}:${PATH}

#
# for use by subsequent hactl command, get hostnames of local and remote hosts
#
LOCALHOST=`uname -n`

#! /bin/sh 
#
#	Copyright 04/30/99 Sun Microsystems, Inc.  All Rights Reserved.
#
#pragma ident       "@(#)do_service 1.12     00/08/25 SMI"
#
#

# Prefix for the message IDs this data service logs to syslog.
SYSLOG_PREFIX="SUNWcluster.ha.tivoli"

# Per-instance Tivoli configuration (the _INST_* variables read below).
# This file is replicated on both servers.
HATIV_CONFIG_FILE=/etc/opt/SUNWsctiv/hadsconf

#
# Call the parser to handle the config file.
#
if [ ! -f $HATIV_CONFIG_FILE ]; then
	logerr "${SYSLOG_PREFIX}.4027" `gettext "$HATIV_CONFIG_FILE doesn't exist"`
	exit 1
fi

# Pull the instance variables from the config file into the environment.
source_env_file $HATIV_CONFIG_FILE
if [ $? -ne 0 ]; then
	# source_env_file logs error message if it fails.
	# No need to log another; just exit.
	exit 1
fi

#
# Time (seconds, passed to pmfadm -w) to wait for SIGTERM to stop a
# process before escalating to SIGKILL.
# This should be in the config file.
#
STOP_TIMEOUT=15
#
# bundle_do_svc <action>
#
# is called for each instance
#
# We must start oserv clients after the oserv server has started,
# and we must stop oserv clients before the oserv server is stopped.
# Therefore we start/stop the server and start_net/stop_net the clients.
#
bundle_do_svc ()
{
	# $1 - cluster method being run for the current instance:
	#      start | start_net | stop | abort | stop_net | abort_net |
	#      fm_start | fm_stop | fm_check
	# Relies on the _INST_* variables loaded from the hadsconf file
	# for the instance being processed.  Exits the script on fatal
	# errors (and on the various no-op paths), so no value is returned.
	action=$1
	# Message-ID prefix for this method's syslog entries.
	prefix="$SYSLOG_PREFIX.$action"

	# oserv and CLI commands are influenced by the WLOCALHOST
	# environment variable.  Set this variable to the Managed Node
	# name of this instance of the oserv daemon, i.e. the instance's
	# database directory name with its literal ".db" suffix stripped
	# (dot escaped so only a real ".db" extension is removed).
	WLOCALHOST=`basename $_INST_CONF_DIR | sed 's/\.db$//'`
	export WLOCALHOST

	# Put the instance's private Tivoli libraries ahead of the system
	# library directories, preserving any inherited LD_LIBRARY_PATH.
	LD_LIBRARY_PATH="${_INST_PRIV_TIV_LIB}/solaris2:/usr/openwin/lib:/usr/lib:/usr/ucblib${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH}"
	export LD_LIBRARY_PATH

	case $action in

	'start' | 'start_net')

		# start the oserv server in start method first
		# start the oserv clients in start_net method

		if [ "${_INST_PRIV_TIV_OSERV_TYPE}" = "server" -a \
		     "$action" = "start_net" ]; then
			# if this is an oserv server in start_net script, no-op
			exit 0
		fi

		if [ "${_INST_PRIV_TIV_OSERV_TYPE}" = "client" -a \
		     "$action" = "start" ]; then
			# if this is an oserv client in start script, no-op
			exit 0
		fi

		# The start script is specified in the hadsconf file.

		if [ ! -x ${_INST_START} ]; then
			logerr "$prefix.4000" \
				`gettext "<${_INST_START}> is not executable."`
			exit 1
		fi

		# All three instance directories must exist before we can
		# launch the daemon.
		if [ ! -d "$_INST_PRIV_TIV_BIN" -o \
		     ! -d "$_INST_PRIV_TIV_LIB" -o \
		     ! -d "$_INST_CONF_DIR" ] ; then
			logerr "$prefix.4028" `gettext "Instance ${_INST_NAME} is not configured properly.  Check the path for CONF_DIR, PRIV_TIV_BIN and PRIV_TIV_LIB"`
			exit 1
		fi

		# The process monitor facility calls the start program,
		# passing to it the instance-specific information it needs.
		# Note that we're using pmf to start/stop, but not to probe.

		pmfadm -c ${_INST_NAME} \
		    /bin/sh -c "${_INST_START} ${_INST_PRIV_TIV_BIN} ${_INST_PRIV_TIV_LIB} ${_INST_LOGICAL_HOST} ${_INST_PORT} ${_INST_CONF_DIR} >/dev/null 2>&1"

		if [ $? -ne 0 ]; then
			logerr "$prefix.4001" \
	`gettext "pmfadm failed to start Tivoli instance ${_INST_NAME}"`
			exit 1
		else
			lognotice "$prefix.2000" \
			`gettext "Started Tivoli instance ${_INST_NAME}"`
		fi

	;;

	'stop' | 'abort')

		# if this is an oserv server, stop/abort it
		# if this is an oserv client, no-op

		if [ "${_INST_PRIV_TIV_OSERV_TYPE}" = "client" ]; then
			# no-op
			exit 0
		fi

		# remove oserv server from pmfd's queue and then kill it:
		# try SIGTERM with a timeout first, escalate to SIGKILL.
		# ($? below reflects the KILL fallback when TERM failed.)
		pmfadm -s ${_INST_NAME} -w ${STOP_TIMEOUT} TERM || \
		     pmfadm -s ${_INST_NAME} KILL
		if [ $? -ne 0 ]; then
			logerr "$prefix.4002" \
		`gettext "pmfadm failed to stop oserv instance ${_INST_NAME}"`
			exit 1
		else
			lognotice "$prefix.2001" \
			    `gettext "Stopped oserv instance ${_INST_NAME}"`
		fi

		# XXX Do we need to kill the probe oserv in abort method?
	;;

	'stop_net' | 'abort_net')

		# if this is an oserv client, stop/abort it
		# if this is an oserv server, no-op

		if [ "${_INST_PRIV_TIV_OSERV_TYPE}" = "server" ]; then
			# no-op
			exit 0
		fi

		# delete from queue, but don't kill -- we want a clean
		# odadmin shutdown first
		pmfadm -s ${_INST_NAME}
		if [ $? -ne 0 ]; then
			logerr "$prefix.4003" \
		`gettext "pmfadm failed to delete ${_INST_NAME} from queue"`
			exit 1
		fi

		# WLOCALHOST must be set to the Managed Node name of this
		# instance of the oserv daemon.

		${_INST_PRIV_TIV_BIN}/solaris2/bin/odadmin shutdown
		res=$?
		# If normal shutdown fails try killing by alternate methods.
		# NOTE(review): the fallback uses -k here (signal only) where
		# the stop branch uses -s; confirm -k still works on a nametag
		# already removed from pmfd's queue above.
		if [ $res -ne 0 ]; then
			pmfadm -k ${_INST_NAME} -w ${STOP_TIMEOUT} TERM || \
					pmfadm -s ${_INST_NAME} KILL
			res=$?
		fi

		if [ $res -ne 0 ]; then
			logerr "$prefix.4004" \
		`gettext "odadmin failed to stop instance ${_INST_NAME}."`
			exit 1
		else
			lognotice "$prefix.2002" \
			    `gettext "Stopped Tivoli instance ${_INST_NAME}"`
		fi
	;;

	'fm_start')

		# pmf starts tivoli_probe
		# tivoli_probe runs until tivoli_fm_stop kills it.
		# Don't start probe if diskset is in maintenance mode.

		# If this Tivoli instance's diskset is in maint mode, exit now.
		MAINT=`haget -f is_maint -h ${_INST_LOGICAL_HOST}`
		if [ "$MAINT" = "1" ]; then
			exit 0
		fi

		# start the probe oserv if REMOTE=y and it is not running
		if [ "$_INST_PROBE_REMOTE_1" = "y" ] ; then
			if [ -z "${_INST_PRIV_TIV_PROBE_DIR}" ] ; then
				logerr "$prefix.4005" \
     `gettext "the probe information is not correct for instance ${_INST_NAME}"`
			else
				# The probe oserv's database directory names
				# the Managed Node (trailing ".db" stripped).
				TIV_DB=`ls -d ${_INST_PRIV_TIV_PROBE_DIR}/*.db`
				# Only start it if no oserv is already running
				# against that database.
				if [ -z "`ps -ef | grep -w oserv | grep \" $TIV_DB$\"`" ] ; then
					WLOCALHOST=`basename $TIV_DB | sed 's/\.db$//'`
					export WLOCALHOST
					${_INST_START} \
					${_INST_PRIV_TIV_PROBE_DIR}/bin \
					${_INST_PRIV_TIV_PROBE_DIR}/lib \
					"`uname -n`" ${_INST_PORT} \
					${TIV_DB}
					if [ $? -ne 0 ]; then
						logerr "$prefix.4006" \
   `gettext "Failed to start the probe oserv for Tivoli instance ${_INST_NAME}"`
					else
						lognotice "$prefix.2003" \
	`gettext "Started the probe oserv for Tivoli instance ${_INST_NAME}"`
					fi
				fi
			fi
		fi

		# Start probe program only if a) instance is running
		# locally and local probes are configured on, or if
		# b) instance is running remotely and remote probes are
		# configured on.  Otherwise just exit.
		# If instance is running remotely, 2nd arg to probe
		# program is 'y'; otherwise it's 'n'.  Probe program
		# needs to know this so it can figure out where to get
		# the wping binary.

		INST_RUNS_REMOTELY=n
		is_member "${_INST_LOGICAL_HOST}" "$MASTERED_LOGICAL_HOSTS"
		if [ $? -ne 0 ]; then
			# Server is not running locally.
			INST_RUNS_REMOTELY=y
		fi

		if [ "$INST_RUNS_REMOTELY" = "n"  -a  \
		     "$_INST_PROBE_LOCAL_1" != "y" ]; then
			# no work to do
			exit 0
		fi
		if [ "$INST_RUNS_REMOTELY" = "y"  -a  \
		     "$_INST_PROBE_REMOTE_1" != "y" ]; then
			# no work to do
			exit 0
		fi

		# Run the probe under pmf with its own ".probe" nametag so
		# fm_stop can find it independently of the service itself.
		pmfadm -c ${_INST_NAME}.probe \
			/bin/sh -c "${_INST_PROBE_PROG_1} ${_INST_NAME} $INST_RUNS_REMOTELY >/dev/null 2>&1"

		if [ $? -ne 0 ]; then
			logerr "$prefix.4007" \
`gettext "pmfadm failed to start Tivoli probe for instance ${_INST_NAME}"`
			exit 1
		else
			lognotice "$prefix.2004" \
		`gettext "Started Tivoli probe instance ${_INST_NAME}.probe"`
		fi
	;;

	'fm_stop')

		# If probe not running, do nothing
		ha_svc_not_running ${_INST_NAME}.probe && exit 0

		# pmf kills tivoli_probe: SIGTERM with timeout, then SIGKILL.
		pmfadm -s ${_INST_NAME}.probe -w ${STOP_TIMEOUT} TERM || \
			pmfadm -s ${_INST_NAME}.probe KILL
		if [ $? -ne 0 ]; then
			logerr "$prefix.4008" \
`gettext "pmfadm failed to stop Tivoli probe instance ${_INST_NAME}.probe"`
			exit 1
		else
			lognotice "$prefix.2005" \
		`gettext "Stopped Tivoli probe instance ${_INST_NAME}.probe"`
		fi
	;;

	'fm_check')

		# If the oserv server is running on this machine and
		# we're taking over a logical host with an oserv client,
		# then we could do a quick wping here of the server to
		# make sure it's OK.
		#
		# However if we're taking over a logical host with the
		# oserv server, there's not a whole lot we can check here.
		#
		# Note: Our current probing mechanism (wping) depends on
		# the oserv server being up.
		#
		# Just exit 0.

		exit 0
	;;

	esac

	exit 0
}
#include_boiler

# Entry point: generic_svc (presumably provided by the sourced
# hads_utilities -- verify) drives bundle_do_svc above with the
# "stop_net" method; $1 and $2 are the mastered / not-mastered logical
# host lists named in the Usage header, $3 is passed through as given
# by the cluster framework.
generic_svc stop_net "$1" "$2" "$3"