[SCM] CTDB repository - branch master updated -
ctdb-1.0.84-11-g5d188af
Ronnie Sahlberg
sahlberg at samba.org
Wed Jun 17 23:08:10 GMT 2009
The branch, master has been updated
via 5d188af387a2a1d68d66f47edb7a9ca546ed357c (commit)
via be3e23c9fcb9c716e492af102830a4f6ad8bda7b (commit)
from 46e8c3737e6ff54fc80de8e962e922924c27bc35 (commit)
http://gitweb.samba.org/?p=sahlberg/ctdb.git;a=shortlog;h=master
- Log -----------------------------------------------------------------
commit 5d188af387a2a1d68d66f47edb7a9ca546ed357c
Author: Martin Schwenke <martin at meltin.net>
Date: Tue Jun 16 12:47:59 2009 +1000
New tests for NFS and CIFS tickles.
New tests/complex/ subdirectory contains 2 new tests to ensure that
NFS and CIFS connections are tracked by CTDB and that tickle resets
are sent when a node is disabled.
Changes to ctdb_test_functions.bash to support these tests.
Signed-off-by: Martin Schwenke <martin at meltin.net>
commit be3e23c9fcb9c716e492af102830a4f6ad8bda7b
Author: Martin Schwenke <martin at meltin.net>
Date: Tue Jun 16 12:42:29 2009 +1000
Increase threshold in 51_ctdb_bench from 2% to 5%.
The threshold for the difference in the number of messages sent in either
direction around the ring of nodes was set to 2%. Something
environmental is causing this difference to sometimes be as high as 3%.
We're confident it isn't a CTDB issue so we're increasing the
threshold to 5%.
Signed-off-by: Martin Schwenke <martin at meltin.net>
-----------------------------------------------------------------------
Summary of changes:
tests/complex/31_nfs_tickle.sh | 115 ++++++++++++++++++++++++++++++++
tests/complex/32_cifs_tickle.sh | 112 +++++++++++++++++++++++++++++++
tests/scripts/ctdb_test_functions.bash | 72 ++++++++++++++++++++-
tests/simple/51_ctdb_bench.sh | 2 +-
4 files changed, 299 insertions(+), 2 deletions(-)
create mode 100755 tests/complex/31_nfs_tickle.sh
create mode 100755 tests/complex/32_cifs_tickle.sh
Changeset truncated at 500 lines:
diff --git a/tests/complex/31_nfs_tickle.sh b/tests/complex/31_nfs_tickle.sh
new file mode 100755
index 0000000..4961b7d
--- /dev/null
+++ b/tests/complex/31_nfs_tickle.sh
@@ -0,0 +1,115 @@
+#!/bin/bash
+
+test_info()
+{
+ cat <<EOF
+Verify that NFS connections are monitored and that NFS tickles are sent.
+
+We create a connection to the NFS server on a node and confirm that
+this connection is registered in the nfs-tickles/ subdirectory in
+shared storage. Then disable the relevant NFS server node and ensure
+that it sends an appropriate reset packet.
+
+Prerequisites:
+
+* An active CTDB cluster with at least 2 nodes with public addresses.
+
+* Test must be run on a real or virtual cluster rather than against
+ local daemons.
+
+* Test must not be run from a cluster node.
+
+* Cluster nodes must be listening on the NFS TCP port (2049).
+
+Steps:
+
+1. Verify that the cluster is healthy.
+2. Connect from the current host (test client) to TCP port 2049 using
+ the public address of a cluster node.
+3. Determine the source socket used for the connection.
+4. Ensure that CTDB records the source socket details in the nfs-tickles
+ directory on shared storage.
+5. Disable the node that the connection has been made to.
+6. Verify that a TCP tickle (a reset packet) is sent to the test client.
+
+Expected results:
+
+* CTDB should correctly record the socket in the nfs-tickles directory
+ and should send a reset packet when the node is disabled.
+EOF
+}
+
+. ctdb_test_functions.bash
+
+set -e
+
+ctdb_test_init "$@"
+
+ctdb_test_check_real_cluster
+
+onnode 0 $CTDB_TEST_WRAPPER cluster_is_healthy
+
+# We need this for later, so we know how long to sleep.
+try_command_on_node 0 $CTDB getvar MonitorInterval
+monitor_interval="${out#*= }"
+#echo "Monitor interval on node $test_node is $monitor_interval seconds."
+
+echo "Getting list of public IPs..."
+try_command_on_node 0 "$CTDB ip -n all | sed -e '1d'"
+
+# When selecting test_node we just want a node that has public IPs.
+# This will work and is economical and semi-random. :-)
+read x test_node <<<"$out"
+
+ips=""
+while read ip pnn ; do
+ if [ "$pnn" = "$test_node" ] ; then
+ ips="${ips}${ips:+ }${ip}"
+ fi
+done <<<"$out" # bashism to avoid problem setting variable in pipeline.
+
+echo "Selected node ${test_node} with IPs: $ips"
+
+test_ip="${ips%% *}"
+test_port=2049
+
+echo "Connecting to node ${test_node} on IP ${test_ip}:${test_port} with netcat..."
+
+nc -d -w $(($monitor_interval * 4)) $test_ip $test_port &
+nc_pid=$!
+ctdb_test_exit_hook_add "kill $nc_pid >/dev/null 2>&1"
+
+wait_until_get_src_socket "tcp" "${test_ip}:${test_port}" $nc_pid "nc"
+src_socket="$out"
+echo "Source socket is $src_socket"
+
+echo "Sleeping for MonitorInterval..."
+sleep_for $monitor_interval
+
+try_command_on_node $test_node hostname
+test_hostname=$out
+
+try_command_on_node -v 0 cat /gpfs/.ctdb/nfs-tickles/$test_hostname/$test_ip
+
+if [ "${out/${src_socket}/}" != "$out" ] ; then
+ echo "GOOD: NFS connection tracked OK in tickles file."
+else
+ echo "BAD: Socket not tracked in NFS tickles file:"
+ testfailures=1
+fi
+
+filter="src host $test_ip and tcp src port $test_port and dst host ${src_socket%:*} and tcp dst port ${src_socket##*:} and tcp[tcpflags] & tcp-rst != 0"
+tcpdump_start "$filter"
+
+echo "Disabling node $test_node"
+try_command_on_node 1 $CTDB disable -n $test_node
+onnode 0 $CTDB_TEST_WRAPPER wait_until_node_has_status $test_node disabled
+
+tcpdump_wait
+
+echo "GOOD: here's the tickle reset:"
+tcpdump -n -r $tcpdump_filename 2>/dev/null
+
+echo "Expect a restart..."
+
+ctdb_test_exit
diff --git a/tests/complex/32_cifs_tickle.sh b/tests/complex/32_cifs_tickle.sh
new file mode 100755
index 0000000..40230c1
--- /dev/null
+++ b/tests/complex/32_cifs_tickle.sh
@@ -0,0 +1,112 @@
+#!/bin/bash
+
+test_info()
+{
+ cat <<EOF
+Verify that CIFS connections are monitored and that CIFS tickles are sent.
+
+We create a connection to the CIFS server on a node and confirm that
+this connection is registered by CTDB. Then disable the relevant CIFS
+server node and ensure that it sends an appropriate reset packet.
+
+Prerequisites:
+
+* An active CTDB cluster with at least 2 nodes with public addresses.
+
+* Test must be run on a real or virtual cluster rather than against
+ local daemons.
+
+* Test must not be run from a cluster node.
+
+* Clustered Samba must be listening on TCP port 445.
+
+Steps:
+
+1. Verify that the cluster is healthy.
+2. Connect from the current host (test client) to TCP port 445 using
+ the public address of a cluster node.
+3. Determine the source socket used for the connection.
+4. Using the "ctdb gettickle" command, ensure that CTDB records the
+ connection details.
+5. Disable the node that the connection has been made to.
+6. Verify that a TCP tickle (a reset packet) is sent to the test client.
+
+Expected results:
+
+* CTDB should correctly record the connection and should send a reset
+ packet when the node is disabled.
+EOF
+}
+
+. ctdb_test_functions.bash
+
+set -e
+
+ctdb_test_init "$@"
+
+ctdb_test_check_real_cluster
+
+onnode 0 $CTDB_TEST_WRAPPER cluster_is_healthy
+
+# We need this for later, so we know how long to sleep.
+try_command_on_node 0 $CTDB getvar MonitorInterval
+monitor_interval="${out#*= }"
+#echo "Monitor interval on node $test_node is $monitor_interval seconds."
+
+echo "Getting list of public IPs..."
+try_command_on_node 0 "$CTDB ip -n all | sed -e '1d'"
+
+# When selecting test_node we just want a node that has public IPs.
+# This will work and is economical and semi-random. :-)
+read x test_node <<<"$out"
+
+ips=""
+while read ip pnn ; do
+ if [ "$pnn" = "$test_node" ] ; then
+ ips="${ips}${ips:+ }${ip}"
+ fi
+done <<<"$out" # bashism to avoid problem setting variable in pipeline.
+
+echo "Selected node ${test_node} with IPs: $ips"
+
+test_ip="${ips%% *}"
+test_port=445
+
+echo "Connecting to node ${test_node} on IP ${test_ip}:${test_port} with netcat..."
+
+nc -d -w $(($monitor_interval * 4)) $test_ip $test_port &
+nc_pid=$!
+ctdb_test_exit_hook_add "kill $nc_pid >/dev/null 2>&1"
+
+wait_until_get_src_socket "tcp" "${test_ip}:${test_port}" $nc_pid "nc"
+src_socket="$out"
+echo "Source socket is $src_socket"
+
+# Right here we assume that Samba is able to register the tickle with
+# CTDB faster than it takes us to wait for netstat to register the
+# connection and then use onnode below to ask CTDB about it.
+
+try_command_on_node -v 0 ctdb gettickles $test_ip
+
+if [ "${out/SRC: ${src_socket} /}" != "$out" ] ; then
+ echo "GOOD: CIFS connection tracked OK by CTDB."
+else
+ echo "BAD: Socket not tracked by CTDB."
+ testfailures=1
+fi
+
+filter="src host $test_ip and tcp src port $test_port and dst host ${src_socket%:*} and tcp dst port ${src_socket##*:} and tcp[tcpflags] & tcp-rst != 0"
+tcpdump_start "$filter"
+
+echo "Disabling node $test_node"
+try_command_on_node 1 $CTDB disable -n $test_node
+onnode 0 $CTDB_TEST_WRAPPER wait_until_node_has_status $test_node disabled
+
+tcpdump_wait
+
+echo "GOOD: here's the tickle reset:"
+tcpdump -n -r $tcpdump_filename 2>/dev/null
+
+echo "Expect a restart..."
+
+ctdb_test_exit
diff --git a/tests/scripts/ctdb_test_functions.bash b/tests/scripts/ctdb_test_functions.bash
index f1e1417..1930ae1 100644
--- a/tests/scripts/ctdb_test_functions.bash
+++ b/tests/scripts/ctdb_test_functions.bash
@@ -61,7 +61,7 @@ ctdb_test_exit ()
[ $(($testfailures+0)) -eq 0 -a $status -ne 0 ] && testfailures=$status
- eval "$ctdb_test_exit_hook"
+ eval "$ctdb_test_exit_hook" || true
unset ctdb_test_exit_hook
if ! onnode 0 $CTDB_TEST_WRAPPER cluster_is_healthy ; then
@@ -79,6 +79,11 @@ ctdb_test_exit ()
test_exit
}
+ctdb_test_exit_hook_add ()
+{
+ ctdb_test_exit_hook="${ctdb_test_exit_hook}${ctdb_test_exit_hook:+ ; }$*"
+}
+
ctdb_test_run ()
{
local name="$1" ; shift
@@ -150,6 +155,14 @@ ctdb_test_init ()
trap "ctdb_test_exit" 0
}
+ctdb_test_check_real_cluster ()
+{
+ [ -n "$CTDB_TEST_REAL_CLUSTER" ] && return 0
+
+ echo "ERROR: This test must be run on a real/virtual cluster, not local daemons."
+ return 1
+}
+
########################################
# Sets: $out
@@ -401,6 +414,63 @@ wait_until_ips_are_on_nodeglob ()
wait_until 60 ips_are_on_nodeglob "$@"
}
+get_src_socket ()
+{
+ local proto="$1"
+ local dst_socket="$2"
+ local pid="$3"
+ local prog="$4"
+
+ local pat="^${proto}[[:space:]]+[[:digit:]]+[[:space:]]+[[:digit:]]+[[:space:]]+[^[:space:]]+[[:space:]]+${dst_socket//./\\.}[[:space:]]+ESTABLISHED[[:space:]]+${pid}/${prog}[[:space:]]*\$"
+ out=$(netstat -tanp |
+ egrep "$pat" |
+ awk '{ print $4 }')
+
+ [ -n "$out" ]
+}
+
+wait_until_get_src_socket ()
+{
+ local proto="$1"
+ local dst_socket="$2"
+ local pid="$3"
+ local prog="$4"
+
+ echo "Waiting for ${prog} to establish connection to ${dst_socket}..."
+
+ wait_until 5 get_src_socket "$@"
+}
+
+# filename will be in $tcpdump_filename, pid in $tcpdump_pid
+# By default, wait for 1 matching packet on any interface.
+tcpdump_start ()
+{
+ local filter="$1"
+ local count="${2:-1}"
+ local iface="${3:-any}"
+
+ echo "Running tcpdump to capture ${count} packet(s) on interface ${iface}."
+ tcpdump_filename=$(mktemp)
+ ctdb_test_exit_hook_add "rm -f $tcpdump_filename"
+ tcpdump -s 1500 -w $tcpdump_filename -c "$count" -i "$iface" "$filter" &
+ tcpdump_pid=$!
+ ctdb_test_exit_hook_add "kill $tcpdump_pid >/dev/null 2>&1"
+ echo "Waiting for tcpdump output file to be initialised..."
+ wait_until 10 test -f $tcpdump_filename
+ sleep_for 1
+}
+
+not ()
+{
+ ! "$@"
+}
+
+tcpdump_wait ()
+{
+ echo "Waiting for tcpdump to complete..."
+ wait_until 5 not kill -0 $tcpdump_pid >/dev/null 2>&1
+}
+
#######################################
daemons_stop ()
diff --git a/tests/simple/51_ctdb_bench.sh b/tests/simple/51_ctdb_bench.sh
index 6caae05..0c9f5c9 100755
--- a/tests/simple/51_ctdb_bench.sh
+++ b/tests/simple/51_ctdb_bench.sh
@@ -83,7 +83,7 @@ fi
perc_diff=$(( ($positive - $negative) * 100 / $positive ))
perc_diff=${perc_diff#-}
-check_percent=2
+check_percent=5
if [ $perc_diff -le $check_percent ] ; then
echo "OK: percentage difference between +ive and -ive ($perc_diff%) <= $check_percent%"
else
--
CTDB repository
More information about the samba-cvs
mailing list