[SCM] CTDB repository - branch 2.5 updated - ctdb-2.5.2-128-gd9e26bc

Amitay Isaacs amitay at samba.org
Sun Mar 30 22:46:05 MDT 2014


The branch, 2.5 has been updated
       via  d9e26bc91acddb8c8c94367febd9fbf22b7cf8c0 (commit)
       via  dc49ae82872782af565b69a490005534a1cafb3f (commit)
       via  21dc1075c37b505ed82275b572c01387509f4148 (commit)
       via  fae66ccd1c5c6ea59027f40a1c835731da42a512 (commit)
       via  12002bc837b7e3e2dc9c347988e021cedb6286e9 (commit)
       via  9560f2e3750d9230cce94b7c6a691e8f23888448 (commit)
       via  1ded2ab992aaa0783d54ab98b86d09526a3664b5 (commit)
       via  6be4f552c22e1abf170257004bd9336ffe091c03 (commit)
       via  6269de052b99af0c621f8e5f754c7dfced64c207 (commit)
       via  3dcf912122e4f66ac6c6dcef05e5e85710820f4f (commit)
       via  5eb9b8dae8ae0d2196a4851ac3219ec017cf8173 (commit)
      from  9246d3f56d3ed7ffa78fcf8952082cd38507da19 (commit)

http://gitweb.samba.org/?p=ctdb.git;a=shortlog;h=2.5


- Log -----------------------------------------------------------------
commit d9e26bc91acddb8c8c94367febd9fbf22b7cf8c0
Author: Amitay Isaacs <amitay at gmail.com>
Date:   Thu Mar 27 15:07:58 2014 +1100

    tests: Do not mix bool and int data types
    
    Signed-off-by: Amitay Isaacs <amitay at gmail.com>
    Reviewed-by: Martin Schwenke <martin at meltin.net>
    
    Autobuild-User(master): Amitay Isaacs <amitay at samba.org>
    Autobuild-Date(master): Fri Mar 28 07:56:18 CET 2014 on sn-devel-104
    
    (Imported from commit 533ad246443bbea5116c06fb6478453ddf004080)

commit dc49ae82872782af565b69a490005534a1cafb3f
Author: Amitay Isaacs <amitay at gmail.com>
Date:   Thu Mar 27 15:11:09 2014 +1100

    tests: Use ctdb_fetch_lock instead of ctdb_fetch_lock_readonly
    
    This will test that ctdb_fetch_lock correctly revokes readonly
    delegations.
    
    Signed-off-by: Amitay Isaacs <amitay at gmail.com>
    Reviewed-by: Martin Schwenke <martin at meltin.net>
    
    (Imported from commit 30f7d7db8c0bf370464238133f75b5e0bdce37c6)

commit 21dc1075c37b505ed82275b572c01387509f4148
Author: Amitay Isaacs <amitay at gmail.com>
Date:   Thu Mar 27 15:06:58 2014 +1100

    client: ctdb_fetch_lock should check for readonly delegations
    
    When read-only delegations were added, the ctdb_fetch_lock() code
    should have been modified to also check the read-only flags.
    
    Signed-off-by: Amitay Isaacs <amitay at gmail.com>
    Reviewed-by: Martin Schwenke <martin at meltin.net>
    
    (Imported from commit 78015320b60b0fd0d8c3dc65fbbe3e38e4a02993)
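
Taken together, the two commits above mean that a read-write
ctdb_fetch_lock() on the dmaster now revokes any outstanding read-only
delegations before returning.  A minimal sketch of the flow, assuming
read-only support is enabled with "ctdb setdbreadonly" and that the
in-tree helpers ctdb_update_record (which does a read-write fetch) and
ctdb_fetch_readonly_once behave as in 75_readonly_records_basic.sh:

    # Create a test database and a record on all nodes
    try_command_on_node 0 $CTDB attach test.tdb
    try_command_on_node all $CTDB_TEST_WRAPPER ctdb_update_record

    # Enable read-only support and hand out read-only copies,
    # creating delegations
    try_command_on_node 0 $CTDB setdbreadonly test.tdb
    try_command_on_node 1,2 $CTDB_TEST_WRAPPER "ctdb_fetch_readonly_once </dev/null"

    # A read-write fetch must now revoke all delegations first
    try_command_on_node 0 $CTDB_TEST_WRAPPER ctdb_update_record

    # No node should be left holding a read-only copy
    try_command_on_node all $CTDB cattdb test.tdb
    numreadonly=$(grep -c READONLY <<<"$out") || true
    [ "$numreadonly" -eq 0 ] || exit 1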

commit fae66ccd1c5c6ea59027f40a1c835731da42a512
Author: Martin Schwenke <martin at meltin.net>
Date:   Thu Mar 27 16:26:21 2014 +1100

    tests: Fix and extend read-only records test
    
    This test currently counts the number of read-only-enabled databases
    and expects there to only be 1.  It fails when there are existing
    databases with read-only already enabled.  Instead, check just the
    test database.
    
    Clean up the test by adding some functions to check for precisely the
    read-only flags that should be set on a node after each operation.
    
    Signed-off-by: Martin Schwenke <martin at meltin.net>
    Reviewed-by: Amitay Isaacs <amitay at gmail.com>
    
    (Imported from commit aa7cd51bbc438555552584a7d71f5bae6909603f)
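
The key invariant being checked: after a read-only fetch, the dmaster
should be flagged RO_HAVE_DELEGATIONS (it knows copies exist elsewhere)
while each delegated node should be flagged RO_HAVE_READONLY (it holds
a copy).  A condensed sketch of the per-node check, following the
check_readonly() helper visible in the diff below (the loop over the
non-dmaster nodes falls past the 500-line truncation):

    # Hypothetical condensed check; $dmaster and $others as in
    # check_readonly() below
    try_command_on_node $dmaster $CTDB cattdb $testdb
    count=$(grep -c "RO_HAVE_DELEGATIONS" <<<"$out") || true
    [ "$count" -eq 1 ] || exit 1

    for node in $others ; do
        try_command_on_node $node $CTDB cattdb $testdb
        count=$(grep -c "RO_HAVE_READONLY" <<<"$out") || true
        [ "$count" -eq 1 ] || exit 1
    done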

commit 12002bc837b7e3e2dc9c347988e021cedb6286e9
Author: Martin Schwenke <martin at meltin.net>
Date:   Fri Feb 28 15:54:54 2014 +1100

    tests: Add a new NFS tickle test
    
    This one ensures that a newly started node gets an up-to-date tickle
    list.  Tweak some of the integration test functions to accommodate
    this.
    
    Signed-off-by: Martin Schwenke <martin at meltin.net>
    Reviewed-by: Amitay Isaacs <amitay at gmail.com>
    
    (Imported from commit 234f8eb5712c38872444c5dd7a258903b389b062)
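
The pass condition is that every node, including the restarted one,
ends up listing the client's connection among its tickles for the test
IP.  The test relies on the existing check_tickles*() helpers, which
are not part of this changeset; a rough sketch of what such a check
amounts to, assuming "ctdb gettickles" reports the tracked connections
for a public address:

    # Hypothetical condensed check: each node must know about the
    # client's source socket for $test_ip
    for node in $all_nodes ; do
        try_command_on_node $node "$CTDB -Y gettickles $test_ip"
        grep -F -q "$src_socket" <<<"$out" || exit 1
    done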

commit 9560f2e3750d9230cce94b7c6a691e8f23888448
Author: Martin Schwenke <martin at meltin.net>
Date:   Fri Nov 29 14:07:43 2013 +1100

    tests/integration: Decentralise the daemon restart code
    
    Signed-off-by: Martin Schwenke <martin at meltin.net>
    Reviewed-by: Michael Adam <obnox at samba.org>
    
    (Imported from commit f67a1043b381f623ec9c97ac137568ca7ab6cd8a)

commit 1ded2ab992aaa0783d54ab98b86d09526a3664b5
Author: Martin Schwenke <martin at meltin.net>
Date:   Fri Nov 29 14:05:49 2013 +1100

    tests/integration: Update daemons shutdown pseudo-test
    
    Don't scatter the TEST_LOCAL_DAEMONS logic around the code.  Limit it
    to the local daemons file.
    
    Signed-off-by: Martin Schwenke <martin at meltin.net>
    Reviewed-by: Michael Adam <obnox at samba.org>
    
    (Imported from commit f05db5edc434da61bc7cd7233c5b9cb61cd888bf)

commit 6be4f552c22e1abf170257004bd9336ffe091c03
Author: Martin Schwenke <martin at meltin.net>
Date:   Thu Nov 28 17:27:06 2013 +1100

    tests/simple: Local daemons version of setup_ctdb() overrides
    
    setup_ctdb() doesn't need to do anything on a cluster.  To avoid a
    conditional, just override it for local daemons.
    
    Signed-off-by: Martin Schwenke <martin at meltin.net>
    Reviewed-by: Michael Adam <obnox at samba.org>
    
    (Imported from commit 6b15fe247c4c026064a8f412011d484d3b74bfc7)
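
The mechanism used throughout these refactoring commits is plain bash
function override: integration.bash defines a cluster-appropriate
default (here just ":"), and the local daemons file, sourced only when
TEST_LOCAL_DAEMONS is set, redefines it.  A sketch of the pattern; the
local_daemons.bash body shown is an assumption, since that file falls
outside the truncated changeset:

    # tests/scripts/integration.bash: nothing needed on a real cluster
    setup_ctdb ()
    {
        :
    }

    # tests/simple/scripts/local_daemons.bash: override for local
    # daemons (assumed body)
    setup_ctdb ()
    {
        daemons_setup
    }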

commit 6269de052b99af0c621f8e5f754c7dfced64c207
Author: Martin Schwenke <martin at meltin.net>
Date:   Thu Nov 28 17:26:09 2013 +1100

    tests/integration: Remove some unused functions
    
    This was the start of some refactoring that was never completed.
    
    Signed-off-by: Martin Schwenke <martin at meltin.net>
    Reviewed-by: Michael Adam <obnox at samba.org>
    
    (Imported from commit 85a711f5a8e9aaea42595cc53998a250b0ff6469)

commit 3dcf912122e4f66ac6c6dcef05e5e85710820f4f
Author: Martin Schwenke <martin at meltin.net>
Date:   Thu Nov 28 17:08:43 2013 +1100

    tests/simple: Move the local daemons code to its own file
    
    This is just a straight move.  The clever stuff will follow.  :-)
    
    Signed-off-by: Martin Schwenke <martin at meltin.net>
    Reviewed-by: Michael Adam <obnox at samba.org>
    
    (Imported from commit 28acce28626d17c27af831240a5ea593e0410616)

commit 5eb9b8dae8ae0d2196a4851ac3219ec017cf8173
Author: Martin Schwenke <martin at meltin.net>
Date:   Thu Nov 14 20:36:52 2013 +1100

    tests/integration: Be more careful when killing ctdbd
    
    Also match $TEST_VAR_DIR in the socket name.  This means that we'll
    only ever kill ctdbd processes belonging to our own test run.
    
    Signed-off-by: Martin Schwenke <martin at meltin.net>
    Reviewed-by: Michael Adam <obnox at samba.org>
    
    (Imported from commit 0ae3d009f36066c794e6f39c6dd45735e706df34)
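
With the socket path anchored under $TEST_VAR_DIR, the kill pattern can
no longer match ctdbd processes started by a concurrent test run on the
same machine.  A sketch of the tightened pattern, extrapolated from the
old one visible in the removed daemons_stop() below; the exact new
regex is an assumption:

    # Old pattern matched any test ctdbd on the machine:
    #   pat="ctdbd --socket=.* --nlist .* --nopublicipcheck"
    # Matching our own socket directory scopes it to this test run:
    pat="ctdbd --socket=${TEST_VAR_DIR}/sock\..* --nlist .* --nopublicipcheck"
    pkill -f "$pat"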

-----------------------------------------------------------------------

Summary of changes:
 client/ctdb_client.c                      |   15 ++
 tests/complex/34_nfs_tickle_restart.sh    |   98 +++++++++++++
 tests/scripts/integration.bash            |  194 ++------------------------
 tests/simple/75_readonly_records_basic.sh |  216 ++++++++++++++++------------
 tests/simple/99_daemons_shutdown.sh       |    6 +-
 tests/simple/scripts/local.bash           |    3 +
 tests/simple/scripts/local_daemons.bash   |  136 ++++++++++++++++++
 tests/src/ctdb_fetch_readonly_loop.c      |    2 +-
 tests/src/ctdb_update_record.c            |    2 +-
 9 files changed, 396 insertions(+), 276 deletions(-)
 create mode 100755 tests/complex/34_nfs_tickle_restart.sh
 create mode 100644 tests/simple/scripts/local.bash
 create mode 100644 tests/simple/scripts/local_daemons.bash


Changeset truncated at 500 lines:

diff --git a/client/ctdb_client.c b/client/ctdb_client.c
index 885dbfd..7560115 100644
--- a/client/ctdb_client.c
+++ b/client/ctdb_client.c
@@ -709,6 +709,21 @@ again:
 		goto again;
 	}
 
+	/* if this is a request for read/write and we have delegations
+	   we have to revoke all delegations first
+	*/
+	if ((h->header.dmaster == ctdb_db->ctdb->pnn) &&
+	    (h->header.flags & CTDB_REC_RO_HAVE_DELEGATIONS)) {
+		ctdb_ltdb_unlock(ctdb_db, key);
+		ret = ctdb_client_force_migration(ctdb_db, key);
+		if (ret != 0) {
+			DEBUG(DEBUG_DEBUG,("ctdb_fetch_readonly_lock: force_migration failed\n"));
+			talloc_free(h);
+			return NULL;
+		}
+		goto again;
+	}
+
 	DEBUG(DEBUG_DEBUG,("ctdb_fetch_lock: we are dmaster - done\n"));
 	return h;
 }
diff --git a/tests/complex/34_nfs_tickle_restart.sh b/tests/complex/34_nfs_tickle_restart.sh
new file mode 100755
index 0000000..93587e2
--- /dev/null
+++ b/tests/complex/34_nfs_tickle_restart.sh
@@ -0,0 +1,98 @@
+#!/bin/bash
+
+test_info()
+{
+    cat <<EOF
+Verify that a newly started CTDB node gets updated tickle details
+
+Prerequisites:
+
+* An active CTDB cluster with at least 2 nodes with public addresses.
+
+* Test must be run on a real or virtual cluster rather than against
+  local daemons.
+
+* Cluster nodes must be listening on the NFS TCP port (2049).
+
+Steps:
+
+As with 31_nfs_tickle.sh but restart a node after the tickle is
+registered.
+
+Expected results:
+
+* CTDB should correctly communicate tickles to new CTDB instances as
+  they join the cluster.
+EOF
+}
+
+. "${TEST_SCRIPTS_DIR}/integration.bash"
+
+set -e
+
+ctdb_test_init "$@"
+
+ctdb_test_check_real_cluster
+
+cluster_is_healthy
+
+# Reset configuration
+ctdb_restart_when_done
+
+# We need this for later, so we know how long to run nc for.
+try_command_on_node any $CTDB getvar MonitorInterval
+monitor_interval="${out#*= }"
+#echo "Monitor interval on node $test_node is $monitor_interval seconds."
+
+select_test_node_and_ips
+try_command_on_node $test_node "$CTDB listnodes -Y"
+listnodes_output="$out"
+numnodes=$(wc -l <<<"$listnodes_output")
+
+test_port=2049
+
+echo "Connecting to node ${test_node} on IP ${test_ip}:${test_port} with netcat..."
+
+nc -d -w 600 $test_ip $test_port &
+nc_pid=$!
+ctdb_test_exit_hook_add "kill $nc_pid >/dev/null 2>&1"
+
+wait_until_get_src_socket "tcp" "${test_ip}:${test_port}" $nc_pid "nc"
+src_socket="$out"
+echo "Source socket is $src_socket"
+
+wait_for_monitor_event $test_node
+
+echo "Wait until NFS connection is tracked by CTDB on test node ..."
+wait_until 10 check_tickles $test_node $test_ip $test_port $src_socket
+
+echo "Select a node to restart ctdbd"
+rn=$(awk -F: -v test_node=$test_node \
+    '$2 != test_node { print $2 ; exit }' <<<"$listnodes_output")
+
+echo "Restarting CTDB on node ${rn}"
+try_command_on_node $rn $CTDB_TEST_WRAPPER restart_ctdb_1
+
+# In some theoretical world this is racy.  In practice, the node will
+# take quite a while to become healthy, so this will beat any
+# assignment of IPs to the node.
+echo "Setting NoIPTakeover on node ${rn}"
+try_command_on_node $rn $CTDB setvar NoIPTakeover 1
+
+wait_until_healthy
+
+echo "Getting TickleUpdateInterval..."
+try_command_on_node $test_node $CTDB getvar TickleUpdateInterval
+update_interval="$out"
+
+echo "Wait until NFS connection is tracked by CTDB on all nodes..."
+if ! wait_until $(($update_interval * 2)) \
+    check_tickles_all $numnodes $test_ip $test_port $src_socket ; then
+    echo "BAD: connection not tracked on all nodes:"
+    echo "$out"
+    exit 1
+fi
+
+# We could go on to test whether the tickle ACK gets sent.  However,
+# this is tested in previous tests and the use of NoIPTakeover
+# complicates things on a 2 node cluster.
diff --git a/tests/scripts/integration.bash b/tests/scripts/integration.bash
index c014a11..4a1f091 100644
--- a/tests/scripts/integration.bash
+++ b/tests/scripts/integration.bash
@@ -2,25 +2,6 @@
 
 . "${TEST_SCRIPTS_DIR}/common.sh"
 
-# If we're not running on a real cluster then we need a local copy of
-# ctdb (and other stuff) in $PATH and we will use local daemons.
-if [ -n "$TEST_LOCAL_DAEMONS" ] ; then
-    export CTDB_NODES_SOCKETS=""
-    for i in $(seq 0 $(($TEST_LOCAL_DAEMONS - 1))) ; do
-	CTDB_NODES_SOCKETS="${CTDB_NODES_SOCKETS}${CTDB_NODES_SOCKETS:+ }${TEST_VAR_DIR}/sock.${i}"
-    done
-
-    # Use in-tree binaries if running against local daemons.
-    # Otherwise CTDB need to be installed on all nodes.
-    if [ -n "$ctdb_dir" -a -d "${ctdb_dir}/bin" ] ; then
-	PATH="${ctdb_dir}/bin:${PATH}"
-        export CTDB_LOCK_HELPER="${ctdb_dir}/bin/ctdb_lock_helper"
-        export CTDB_EVENT_HELPER="${ctdb_dir}/bin/ctdb_event_helper"
-    fi
-
-    export CTDB_NODES="${TEST_VAR_DIR}/nodes.txt"
-fi
-
 ######################################################################
 
 export CTDB_TIMEOUT=60
@@ -350,7 +331,7 @@ wait_until_healthy ()
 
     echo "Waiting for cluster to become healthy..."
 
-    wait_until 120 _cluster_is_healthy
+    wait_until $timeout onnode -q any $CTDB_TEST_WRAPPER _cluster_is_healthy
 }
 
 # This function is becoming nicely overloaded.  Soon it will collapse!  :-)
@@ -500,112 +481,6 @@ wait_until_node_has_some_ips ()
 
 #######################################
 
-daemons_stop ()
-{
-    echo "Attempting to politely shutdown daemons..."
-    onnode 1 $CTDB shutdown -n all || true
-
-    echo "Sleeping for a while..."
-    sleep_for 1
-
-    local pat="ctdbd --socket=.* --nlist .* --nopublicipcheck"
-    if pgrep -f "$pat" >/dev/null ; then
-	echo "Killing remaining daemons..."
-	pkill -f "$pat"
-
-	if pgrep -f "$pat" >/dev/null ; then
-	    echo "Once more with feeling.."
-	    pkill -9 -f "$pat"
-	fi
-    fi
-
-    rm -rf "${TEST_VAR_DIR}/test.db"
-}
-
-daemons_setup ()
-{
-    mkdir -p "${TEST_VAR_DIR}/test.db/persistent"
-
-    local public_addresses_all="${TEST_VAR_DIR}/public_addresses_all"
-    local no_public_addresses="${TEST_VAR_DIR}/no_public_addresses.txt"
-    rm -f $CTDB_NODES $public_addresses_all $no_public_addresses
-
-    # If there are (strictly) greater than 2 nodes then we'll randomly
-    # choose a node to have no public addresses.
-    local no_public_ips=-1
-    [ $TEST_LOCAL_DAEMONS -gt 2 ] && no_public_ips=$(($RANDOM % $TEST_LOCAL_DAEMONS))
-    echo "$no_public_ips" >$no_public_addresses
-
-    # When running certain tests we add and remove eventscripts, so we
-    # need to be able to modify the events.d/ directory.  Therefore,
-    # we use a temporary events.d/ directory under $TEST_VAR_DIR.  We
-    # copy the actual test eventscript(s) in there from the original
-    # events.d/ directory that sits alongside $TEST_SCRIPT_DIR.
-    local top=$(dirname "$TEST_SCRIPTS_DIR")
-    local events_d="${top}/events.d"
-    mkdir -p "${TEST_VAR_DIR}/events.d"
-    cp -p "${events_d}/"* "${TEST_VAR_DIR}/events.d/"
-
-    local i
-    for i in $(seq 1 $TEST_LOCAL_DAEMONS) ; do
-	if [ "${CTDB_USE_IPV6}x" != "x" ]; then
-	    echo ::$i >>"$CTDB_NODES"
-	    ip addr add ::$i/128 dev lo
-	else
-	    echo 127.0.0.$i >>"$CTDB_NODES"
-	    # 2 public addresses on most nodes, just to make things interesting.
-	    if [ $(($i - 1)) -ne $no_public_ips ] ; then
-		echo "192.168.234.$i/24 lo" >>"$public_addresses_all"
-		echo "192.168.234.$(($i + $TEST_LOCAL_DAEMONS))/24 lo" >>"$public_addresses_all"
-	    fi
-	fi
-    done
-}
-
-daemons_start_1 ()
-{
-    local pnn="$1"
-    shift # "$@" gets passed to ctdbd
-
-    local public_addresses_all="${TEST_VAR_DIR}/public_addresses_all"
-    local public_addresses_mine="${TEST_VAR_DIR}/public_addresses.${pnn}"
-    local no_public_addresses="${TEST_VAR_DIR}/no_public_addresses.txt"
-
-    local no_public_ips=-1
-    [ -r $no_public_addresses ] && read no_public_ips <$no_public_addresses
-
-    if  [ "$no_public_ips" = $pnn ] ; then
-	echo "Node $no_public_ips will have no public IPs."
-    fi
-
-    local node_ip=$(sed -n -e "$(($pnn + 1))p" "$CTDB_NODES")
-    local ctdb_options="--sloppy-start --reclock=${TEST_VAR_DIR}/rec.lock --nlist $CTDB_NODES --nopublicipcheck --listen=${node_ip} --event-script-dir=${TEST_VAR_DIR}/events.d --logfile=${TEST_VAR_DIR}/daemon.${pnn}.log -d 3 --log-ringbuf-size=10000 --dbdir=${TEST_VAR_DIR}/test.db --dbdir-persistent=${TEST_VAR_DIR}/test.db/persistent --dbdir-state=${TEST_VAR_DIR}/test.db/state"
-
-    if [ $pnn -eq $no_public_ips ] ; then
-	ctdb_options="$ctdb_options --public-addresses=/dev/null"
-    else
-	cp "$public_addresses_all" "$public_addresses_mine"
-	ctdb_options="$ctdb_options --public-addresses=$public_addresses_mine"
-    fi
-
-    # We'll use "pkill -f" to kill the daemons with
-    # "--socket=.* --nlist .* --nopublicipcheck" as context.
-    $VALGRIND ctdbd --socket="${TEST_VAR_DIR}/sock.$pnn" $ctdb_options "$@" ||return 1
-}
-
-daemons_start ()
-{
-    # "$@" gets passed to ctdbd
-
-    echo "Starting $TEST_LOCAL_DAEMONS ctdb daemons..."
-
-    for i in $(seq 0 $(($TEST_LOCAL_DAEMONS - 1))) ; do
-	daemons_start_1 $i "$@"
-    done
-}
-
-#######################################
-
 _ctdb_hack_options ()
 {
     local ctdb_options="$*"
@@ -616,7 +491,7 @@ _ctdb_hack_options ()
     esac
 }
 
-_restart_ctdb ()
+restart_ctdb_1 ()
 {
     _ctdb_hack_options "$@"
 
@@ -627,59 +502,21 @@ _restart_ctdb ()
     fi
 }
 
-_ctdb_start ()
+# Restart CTDB on all nodes.  Override for local daemons.
+_restart_ctdb_all ()
 {
-    _ctdb_hack_options "$@"
-
-    /etc/init.d/ctdb start
+    onnode -p all $CTDB_TEST_WRAPPER restart_ctdb_1 "$@"
 }
 
+# Nothing needed for a cluster.  Override for local daemons.
 setup_ctdb ()
 {
-    if [ -n "$CTDB_NODES_SOCKETS" ] ; then
-	daemons_setup
-    fi
-}
-
-# Common things to do after starting one or more nodes.
-_ctdb_start_post ()
-{
-    onnode -q 1  $CTDB_TEST_WRAPPER wait_until_healthy || return 1
-
-    echo "Setting RerecoveryTimeout to 1"
-    onnode -pq all "$CTDB setvar RerecoveryTimeout 1"
-
-    # In recent versions of CTDB, forcing a recovery like this blocks
-    # until the recovery is complete.  Hopefully this will help the
-    # cluster to stabilise before a subsequent test.
-    echo "Forcing a recovery..."
-    onnode -q 0 $CTDB recover
-    sleep_for 1
-
-    echo "ctdb is ready"
-}
-
-# This assumes that ctdbd is not running on the given node.
-ctdb_start_1 ()
-{
-    local pnn="$1"
-    shift # "$@" is passed to ctdbd start.
-
-    echo -n "Starting CTDB on node ${pnn}..."
-
-    if [ -n "$CTDB_NODES_SOCKETS" ] ; then
-	daemons_start_1 $pnn "$@"
-    else
-	onnode $pnn $CTDB_TEST_WRAPPER _ctdb_start "$@"
-    fi
-
-    # If we're starting only 1 node then we're doing something weird.
-    ctdb_restart_when_done
+    :
 }
 
 restart_ctdb ()
 {
-    # "$@" is passed to ctdbd start.
+    # "$@" is passed to restart_ctdb_all.
 
     echo -n "Restarting CTDB"
     if $ctdb_test_restart_scheduled ; then
@@ -689,18 +526,13 @@ restart_ctdb ()
 
     local i
     for i in $(seq 1 5) ; do
-	if [ -n "$CTDB_NODES_SOCKETS" ] ; then
-	    daemons_stop
-	    daemons_start "$@"
-	else
-	    onnode -p all $CTDB_TEST_WRAPPER _restart_ctdb "$@"
-	fi || {
+	_restart_ctdb_all "$@" || {
 	    echo "Restart failed.  Trying again in a few seconds..."
 	    sleep_for 5
 	    continue
 	}
 
-	onnode -q 1  $CTDB_TEST_WRAPPER wait_until_healthy || {
+	wait_until_healthy || {
 	    echo "Cluster didn't become healthy.  Restarting..."
 	    continue
 	}
@@ -740,6 +572,12 @@ restart_ctdb ()
     return 1
 }
 
+# Does nothing on cluster and should be overridden for local daemons
+maybe_stop_ctdb ()
+{
+    :
+}
+
 ctdb_restart_when_done ()
 {
     ctdb_test_restart_scheduled=true
diff --git a/tests/simple/75_readonly_records_basic.sh b/tests/simple/75_readonly_records_basic.sh
index 80a6adc..6cd2cce 100755
--- a/tests/simple/75_readonly_records_basic.sh
+++ b/tests/simple/75_readonly_records_basic.sh
@@ -3,11 +3,11 @@
 test_info()
 {
     cat <<EOF
-Readonly records can be activated at runtime using a ctdb command.
-If readonly records are not activated, then any attempt to fetch a readonly
+Read-only records can be activated at runtime using a ctdb command.
+If read-only records are not activated, then any attempt to fetch a read-only
 copy should be automatically upgraded to a read-write fetch_lock().
 
-If readonly delegations are present, then any attempt to aquire a read-write
+If read-only delegations are present, then any attempt to acquire a read-write
 fetch_lock will trigger all delegations to be revoked before the fetch lock
 completes.
 
@@ -20,21 +20,16 @@ Steps:
 
 1. Verify that the status on all of the ctdb nodes is 'OK'.
 2. create a test database and some records
-3. try to fetch readonly records, this should not result in any delegations
-4. activate readonly support
-5. try to fetch readonly records, this should result in delegations
+3. try to fetch read-only records, this should not result in any delegations
+4. activate read-only support
+5. try to fetch read-only records, this should result in delegations
 6. do a fetchlock  and the delegations should be revoked
-7. try to fetch readonly records, this should result in delegations
+7. try to fetch read-only records, this should result in delegations
 8. do a recovery  and the delegations should be revoked
 
 Expected results:
 
-3. No delegations created when db is not in readonly mode
-4. It is possible to activate readonly support for a database
-5. Delegations should be created
-6. Delegations should be revoked
-8. Delegations should be revoked
-
+Delegations should be created and revoked as above
 
 EOF
 }
@@ -50,114 +45,149 @@ cluster_is_healthy
 # Reset configuration
 ctdb_restart_when_done
 
-try_command_on_node 0 "$CTDB listnodes"
-num_nodes=$(echo "$out" | wc -l)
-
-
-# create a temporary database to test with
-echo create test database test.tdb
-try_command_on_node 0 $CTDB attach test.tdb
-
-
-# create some records
-try_command_on_node all $CTDB_TEST_WRAPPER ctdb_update_record
-
-#
-# 3
-# try readonly requests
-echo Try some readonly fetches, these should all be upgraded to full fetchlocks
-try_command_on_node 0,1,2 $CTDB_TEST_WRAPPER "ctdb_fetch_readonly_once </dev/null"
+######################################################################
 
-# no delegations should have been created
-numreadonly=`try_command_on_node -v all $CTDB cattdb test.tdb | grep READONLY | wc -l`
-[ "$numreadonly" != "0" ] && {
-    echo "BAD: readonly delegations were created, but the feature is not activated on the database"
-    exit 1
+# Confirm that no nodes have databases with read-only delegations
+check_no_readonly ()
+{
+    try_command_on_node all $CTDB cattdb $testdb
+    local ro_flags="RO_HAVE_READONLY|RO_HAVE_DELEGATIONS"
+    local numreadonly=$(grep -c -E "$ro_flags" <<<"$out") || true
+    if [ $numreadonly -eq 0 ] ; then
+	echo "GOOD: no read-only delegations"
+    else
+	echo "BAD: there are read-only delegations"
+	echo "$out"
+	exit 1
+    fi
 }
 
+# Check that the test record has the correct read-only flags on the
+# given nodes.  The first node is the dmaster, which should know there
+# are delegations but should not be flagged as having a read-only
+# copy.  Subsequent nodes should have a read-only copy but not know
+# about any (other) delegations.
+check_readonly ()
+{
+    local dmaster="$1" ; shift
+    local others="$*"
+
+    local count
+
+    try_command_on_node $dmaster $CTDB cattdb $testdb
+    count=$(grep -c -E "RO_HAVE_DELEGATIONS" <<<"$out") || true
+    if [ $count -eq 1 ] ; then
+	echo "GOOD: dmaster ${dmaster} has read-only delegations"
+    else
+	echo "BAD: dmaster ${dmaster} has no read-only delegations"
+	echo "$out"
+	exit 1


-- 
CTDB repository

