[SCM] CTDB repository - branch 1.2 updated - ctdb-1.0.114-322-g510a545

Ronnie Sahlberg sahlberg at samba.org
Tue Sep 7 17:20:47 MDT 2010


The branch, 1.2 has been updated
       via  510a545ce0d1b10b83bb2ffa265ff573d36839f2 (commit)
      from  c46b9a800af58a604d79d2295540b99335a9f5d9 (commit)

http://gitweb.samba.org/?p=sahlberg/ctdb.git;a=shortlog;h=1.2


- Log -----------------------------------------------------------------
commit 510a545ce0d1b10b83bb2ffa265ff573d36839f2
Author: Ronnie Sahlberg <ronniesahlberg at gmail.com>
Date:   Wed Sep 8 09:16:42 2010 +1000

    Change how NATGW is configured to allow special nodes that do not have
    network connectivity outside of the cluster to still participate in a
    natgw group.
    These nodes cannot become natgw master since they lack external network
    connectivity.
    
    These nodes are configured just the same way as any other node with
    NATGW, with the following two exceptions:
    * we do NOT set CTDB_NATGW_PUBLIC_IFACE at all on these nodes.
      Since these nodes lack an external network, we should not check the
      interface for link.
    * we must set CTDB_NATGW_SLAVE_ONLY=yes to flag that this is a node that
      cannot become natgw master.
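    
    For illustration, a minimal sysconfig sketch for such a slave-only
    node (the addresses here are hypothetical; the variables are the
    ones introduced and documented by this change):
    
        # /etc/sysconfig/ctdb on a node without external connectivity
        CTDB_NATGW_PUBLIC_IP=10.0.0.227/24
        # CTDB_NATGW_PUBLIC_IFACE is deliberately left unset on this node
        CTDB_NATGW_DEFAULT_GATEWAY=10.0.0.1
        CTDB_NATGW_PRIVATE_NETWORK=10.1.1.0/24
        CTDB_NATGW_NODES=/etc/ctdb/natgw_nodes
        CTDB_NATGW_SLAVE_ONLY=yes    # this node never becomes natgw master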

-----------------------------------------------------------------------

Summary of changes:
 config/ctdb.sysconfig    |    9 +++++
 config/events.d/11.natgw |   17 ++++++---
 doc/ctdbd.1              |   15 ++++++--
 doc/ctdbd.1.html         |   89 +++++++++++++++++++++++++--------------------
 doc/ctdbd.1.xml          |    9 +++++
 tools/ctdb.c             |   25 +++++++++++++
 6 files changed, 116 insertions(+), 48 deletions(-)


Changeset truncated at 500 lines:

diff --git a/config/ctdb.sysconfig b/config/ctdb.sysconfig
index c94ce68..011593d 100644
--- a/config/ctdb.sysconfig
+++ b/config/ctdb.sysconfig
@@ -183,6 +183,15 @@ CTDB_RECOVERY_LOCK="/some/place/on/shared/storage"
 # CTDB_NATGW_DEFAULT_GATEWAY=10.0.0.1
 # CTDB_NATGW_PRIVATE_NETWORK=10.1.1.0/24
 # CTDB_NATGW_NODES=/etc/ctdb/natgw_nodes
+#
+# Normally any node in the natgw group can act as the natgw master.
+# In some configurations you may have special nodes that are part of the
+# cluster/natgw group, but lack connectivity to the
+# public network.
+# For these cases, set this variable to prevent these nodes from
+# becoming natgw master.
+#
+# CTDB_NATGW_SLAVE_ONLY=yes
 
 
 # PER_IP_ROUTING configuration
diff --git a/config/events.d/11.natgw b/config/events.d/11.natgw
index 61c09e9..f8892a9 100755
--- a/config/events.d/11.natgw
+++ b/config/events.d/11.natgw
@@ -8,13 +8,22 @@
 . $CTDB_BASE/functions
 loadconfig
 
-[ -z "$CTDB_NATGW_PUBLIC_IFACE" ] && exit 0
+[ -z "$CTDB_NATGW_NODES" ] && exit 0
+
+# Update capabilities to show whether we support the NATGW capability or not
+if [ "$CTDB_NATGW_SLAVE_ONLY" = "yes" ] ; then
+	ctdb setnatgwstate off
+else
+	ctdb setnatgwstate on
+fi
 
 delete_all() {
 	local _ip=`echo $CTDB_NATGW_PUBLIC_IP | cut -d '/' -f1`
 	local _maskbits=`echo $CTDB_NATGW_PUBLIC_IP | cut -d '/' -f2`
 
-	delete_ip_from_iface $CTDB_NATGW_PUBLIC_IFACE $_ip $_maskbits
+	[ -z "$CTDB_NATGW_PUBLIC_IFACE" ] || {
+	    delete_ip_from_iface $CTDB_NATGW_PUBLIC_IFACE $_ip $_maskbits
+	}
 	delete_ip_from_iface lo $_ip 32
 
 	ip route del 0.0.0.0/0 metric 10 >/dev/null 2>/dev/null
@@ -42,8 +51,6 @@ case "$1" in
 	echo 3 > /proc/sys/net/ipv4/conf/all/arp_ignore
 	# do not send out arp requests from loopback addresses
 	echo 2 > /proc/sys/net/ipv4/conf/all/arp_announce
-	# update capabilities to show we are using natgw
-	ctdb setnatgwstate on
 	;;
 
     recovered|updatenatgw|ipreallocated)
@@ -59,7 +66,7 @@ case "$1" in
 
 
 	if [ "$NATGWMASTER" = "-1" ]; then
-		echo "There is not NATGW master node"
+		echo "There is no NATGW master node"
 		exit 1
 	fi
 
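Since 11.natgw is now gated on CTDB_NATGW_NODES rather than on
CTDB_NATGW_PUBLIC_IFACE, a slave-only node can also be reconfigured at
runtime, following the procedure already documented in ctdbd(1); a
hedged sketch:

    # 1, change the configuration in /etc/sysconfig/ctdb,
    #    e.g. add CTDB_NATGW_SLAVE_ONLY=yes
    # 2, re-run the eventscript so the node's NATGW capability is updated
    CTDB_BASE=/etc/ctdb /etc/ctdb/events.d/11.natgw updatenatgw
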
diff --git a/doc/ctdbd.1 b/doc/ctdbd.1
index e3f2482..5800520 100644
--- a/doc/ctdbd.1
+++ b/doc/ctdbd.1
@@ -1,13 +1,13 @@
 '\" t
 .\"     Title: ctdbd
 .\"    Author: [FIXME: author] [see http://docbook.sf.net/el/author]
-.\" Generator: DocBook XSL Stylesheets v1.75.1 <http://docbook.sf.net/>
-.\"      Date: 02/24/2010
+.\" Generator: DocBook XSL Stylesheets v1.75.2 <http://docbook.sf.net/>
+.\"      Date: 09/08/2010
 .\"    Manual: CTDB - clustered TDB database
 .\"    Source: ctdb
 .\"  Language: English
 .\"
-.TH "CTDBD" "1" "02/24/2010" "ctdb" "CTDB \- clustered TDB database"
+.TH "CTDBD" "1" "09/08/2010" "ctdb" "CTDB \- clustered TDB database"
 .\" -----------------------------------------------------------------
 .\" * set default formatting
 .\" -----------------------------------------------------------------
@@ -527,6 +527,15 @@ NAT\-GW is configured in /etc/sysconfigctdb by setting the following variables:
 # CTDB_NATGW_DEFAULT_GATEWAY=10\&.0\&.0\&.1
 # CTDB_NATGW_PRIVATE_NETWORK=10\&.1\&.1\&.0/24
 # CTDB_NATGW_NODES=/etc/ctdb/natgw_nodes
+#
+# Normally any node in the natgw group can act as the natgw master\&.
+# In some configurations you may have special nodes that are part of the
+# cluster/natgw group, but lack connectivity to the
+# public network\&.
+# For these cases, set this variable to prevent these nodes from
+# becoming natgw master\&.
+#
+# CTDB_NATGW_SLAVE_ONLY=yes
     
 .fi
 .if n \{\
diff --git a/doc/ctdbd.1.html b/doc/ctdbd.1.html
index d6d5ce2..063fbce 100644
--- a/doc/ctdbd.1.html
+++ b/doc/ctdbd.1.html
@@ -1,4 +1,4 @@
-<html><head><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1"><title>ctdbd</title><meta name="generator" content="DocBook XSL Stylesheets V1.75.1"></head><body bgcolor="white" text="black" link="#0000FF" vlink="#840084" alink="#0000FF"><div class="refentry" title="ctdbd"><a name="ctdbd.1"></a><div class="titlepage"></div><div class="refnamediv"><h2>Name</h2><p>ctdbd &#8212; The CTDB cluster daemon</p></div><div class="refsynopsisdiv" title="Synopsis"><h2>Synopsis</h2><div class="cmdsynopsis"><p><code class="command">ctdbd</code> </p></div><div class="cmdsynopsis"><p><code class="command">ctdbd</code>  [-? --help] [-d --debug=&lt;INTEGER&gt;] {--dbdir=&lt;directory&gt;} {--dbdir-persistent=&lt;directory&gt;} [--event-script-dir=&lt;directory&gt;] [-i --interactive] [--listen=&lt;address&gt;] [--logfile=&lt;filename&gt;] [--lvs] {--nlist=&lt;filename&gt;} [--no-lmaster] [--no-recmaster] [--nosetsched] {--notification-script=&lt;filename&gt;} [--public-add
 resses=&lt;filename&gt;] [--public-interface=&lt;interface&gt;] {--reclock=&lt;filename&gt;} [--single-public-ip=&lt;address&gt;] [--socket=&lt;filename&gt;] [--start-as-disabled] [--start-as-stopped] [--syslog] [--log-ringbuf-size=&lt;num-entries&gt;] [--torture] [--transport=&lt;STRING&gt;] [--usage]</p></div></div><div class="refsect1" title="DESCRIPTION"><a name="id550120"></a><h2>DESCRIPTION</h2><p>
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1"><title>ctdbd</title><meta name="generator" content="DocBook XSL Stylesheets V1.75.2"></head><body bgcolor="white" text="black" link="#0000FF" vlink="#840084" alink="#0000FF"><div class="refentry" title="ctdbd"><a name="ctdbd.1"></a><div class="titlepage"></div><div class="refnamediv"><h2>Name</h2><p>ctdbd &#8212; The CTDB cluster daemon</p></div><div class="refsynopsisdiv" title="Synopsis"><h2>Synopsis</h2><div class="cmdsynopsis"><p><code class="command">ctdbd</code> </p></div><div class="cmdsynopsis"><p><code class="command">ctdbd</code>  [-? --help] [-d --debug=&lt;INTEGER&gt;] {--dbdir=&lt;directory&gt;} {--dbdir-persistent=&lt;directory&gt;} [--event-script-dir=&lt;directory&gt;] [-i --interactive] [--listen=&lt;address&gt;] [--logfile=&lt;filename&gt;] [--lvs] {--nlist=&lt;filename&gt;} [--no-lmaster] [--no-recmaster] [--nosetsched] {--notification-script=&lt;filename&gt;} [--public-add
 resses=&lt;filename&gt;] [--public-interface=&lt;interface&gt;] {--reclock=&lt;filename&gt;} [--single-public-ip=&lt;address&gt;] [--socket=&lt;filename&gt;] [--start-as-disabled] [--start-as-stopped] [--syslog] [--log-ringbuf-size=&lt;num-entries&gt;] [--torture] [--transport=&lt;STRING&gt;] [--usage]</p></div></div><div class="refsect1" title="DESCRIPTION"><a name="id424915"></a><h2>DESCRIPTION</h2><p>
       ctdbd is the main ctdb daemon.
     </p><p>
      ctdbd provides a clustered version of the TDB database with automatic rebuild/recovery of the databases upon node failures.
@@ -8,7 +8,7 @@
       ctdbd provides monitoring of all nodes in the cluster and automatically reconfigures the cluster and recovers upon node failures.
     </p><p>
       ctdbd is the main component in clustered Samba that provides a high-availability load-sharing CIFS server cluster.
-    </p></div><div class="refsect1" title="OPTIONS"><a name="id550147"></a><h2>OPTIONS</h2><div class="variablelist"><dl><dt><span class="term">-? --help</span></dt><dd><p>
+    </p></div><div class="refsect1" title="OPTIONS"><a name="id424942"></a><h2>OPTIONS</h2><div class="variablelist"><dl><dt><span class="term">-? --help</span></dt><dd><p>
             Print some help text to the screen.
           </p></dd><dt><span class="term">-d --debug=&lt;DEBUGLEVEL&gt;</span></dt><dd><p>
             This option sets the debuglevel on the ctdbd daemon which controls what will be written to the logfile. The default is 0 which will only log important events and errors. A larger number will provide additional logging.
@@ -154,10 +154,10 @@
 	    implemented in the future.
           </p></dd><dt><span class="term">--usage</span></dt><dd><p>
            Print usage information to the screen.
-          </p></dd></dl></div></div><div class="refsect1" title="Private vs Public addresses"><a name="id507260"></a><h2>Private vs Public addresses</h2><p>
+          </p></dd></dl></div></div><div class="refsect1" title="Private vs Public addresses"><a name="id382958"></a><h2>Private vs Public addresses</h2><p>
       When used for ip takeover in a HA environment, each node in a ctdb 
       cluster has multiple ip addresses assigned to it. One private and one or more public.
-    </p><div class="refsect2" title="Private address"><a name="id507269"></a><h3>Private address</h3><p>
+    </p><div class="refsect2" title="Private address"><a name="id382967"></a><h3>Private address</h3><p>
         This is the physical ip address of the node which is configured in 
         linux and attached to a physical interface. This address uniquely
         identifies a physical node in the cluster and is the ip addresses
@@ -187,7 +187,7 @@
         10.1.1.2
         10.1.1.3
         10.1.1.4
-      </pre></div><div class="refsect2" title="Public address"><a name="id507304"></a><h3>Public address</h3><p>
+      </pre></div><div class="refsect2" title="Public address"><a name="id383002"></a><h3>Public address</h3><p>
         A public address on the other hand is not attached to an interface.
         This address is managed by ctdbd itself and is attached/detached to
         a physical node at runtime.
@@ -248,7 +248,7 @@
 	unavailable. 10.1.1.1 can not be failed over to node 2 or node 3 since
 	these nodes do not have this ip address listed in their public
 	addresses file.
-	</p></div></div><div class="refsect1" title="Node status"><a name="id507365"></a><h2>Node status</h2><p>
+	</p></div></div><div class="refsect1" title="Node status"><a name="id383063"></a><h2>Node status</h2><p>
       The current status of each node in the cluster can be viewed by the 
       'ctdb status' command.
     </p><p>
@@ -285,50 +285,50 @@
       RECMASTER or NATGW.
      This node does not participate in the CTDB cluster but can still be
      communicated with. I.e. ctdb commands can be sent to it.
-    </p></div><div class="refsect1" title="PUBLIC TUNABLES"><a name="id507414"></a><h2>PUBLIC TUNABLES</h2><p>
+    </p></div><div class="refsect1" title="PUBLIC TUNABLES"><a name="id383111"></a><h2>PUBLIC TUNABLES</h2><p>
    These are the public tunables that can be used to control how ctdb behaves.
-    </p><div class="refsect2" title="KeepaliveInterval"><a name="id507423"></a><h3>KeepaliveInterval</h3><p>Default: 1</p><p>
+    </p><div class="refsect2" title="KeepaliveInterval"><a name="id383121"></a><h3>KeepaliveInterval</h3><p>Default: 1</p><p>
    How often should the nodes send keepalives to each other.
-    </p></div><div class="refsect2" title="KeepaliveLimit"><a name="id507436"></a><h3>KeepaliveLimit</h3><p>Default: 5</p><p>
+    </p></div><div class="refsect2" title="KeepaliveLimit"><a name="id383133"></a><h3>KeepaliveLimit</h3><p>Default: 5</p><p>
    After how many keepalive intervals without any traffic should a node
    wait before marking the peer as DISCONNECTED.
-    </p></div><div class="refsect2" title="MonitorInterval"><a name="id507449"></a><h3>MonitorInterval</h3><p>Default: 15</p><p>
+    </p></div><div class="refsect2" title="MonitorInterval"><a name="id383147"></a><h3>MonitorInterval</h3><p>Default: 15</p><p>
    How often should ctdb run the event scripts to check a node's health.
-    </p></div><div class="refsect2" title="TickleUpdateInterval"><a name="id507461"></a><h3>TickleUpdateInterval</h3><p>Default: 20</p><p>
+    </p></div><div class="refsect2" title="TickleUpdateInterval"><a name="id383160"></a><h3>TickleUpdateInterval</h3><p>Default: 20</p><p>
     How often will ctdb record and store the "tickle" information used to
     kickstart stalled tcp connections after a recovery.
-    </p></div><div class="refsect2" title="EventScriptTimeout"><a name="id507475"></a><h3>EventScriptTimeout</h3><p>Default: 20</p><p>
+    </p></div><div class="refsect2" title="EventScriptTimeout"><a name="id383173"></a><h3>EventScriptTimeout</h3><p>Default: 20</p><p>
     How long should ctdb let an event script run before aborting it and
     marking the node unhealthy.
-    </p></div><div class="refsect2" title="RecoveryBanPeriod"><a name="id507488"></a><h3>RecoveryBanPeriod</h3><p>Default: 300</p><p>
+    </p></div><div class="refsect2" title="RecoveryBanPeriod"><a name="id383186"></a><h3>RecoveryBanPeriod</h3><p>Default: 300</p><p>
    If a node keeps causing repetitive recovery failures, it will
    eventually become banned from the cluster.
    This controls how long the culprit node will be banned from the cluster
    before it is allowed to try to join the cluster again.
    Don't set this too small. A node gets banned for a reason and it is
    usually due to real problems with the node.
-    </p></div><div class="refsect2" title="DatabaseHashSize"><a name="id507504"></a><h3>DatabaseHashSize</h3><p>Default: 100000</p><p>
+    </p></div><div class="refsect2" title="DatabaseHashSize"><a name="id383202"></a><h3>DatabaseHashSize</h3><p>Default: 100000</p><p>
     Size of the hash chains for the local store of the tdbs that ctdb manages.
-    </p></div><div class="refsect2" title="RerecoveryTimeout"><a name="id507516"></a><h3>RerecoveryTimeout</h3><p>Default: 10</p><p>
+    </p></div><div class="refsect2" title="RerecoveryTimeout"><a name="id383215"></a><h3>RerecoveryTimeout</h3><p>Default: 10</p><p>
     Once a recovery has completed, no additional recoveries are permitted until this timeout has expired.
-    </p></div><div class="refsect2" title="EnableBans"><a name="id507529"></a><h3>EnableBans</h3><p>Default: 1</p><p>
+    </p></div><div class="refsect2" title="EnableBans"><a name="id383228"></a><h3>EnableBans</h3><p>Default: 1</p><p>
    When set to 0, this disables BANNING completely in the cluster and thus nodes can not get banned, even if they break. Don't set this to 0.
-    </p></div><div class="refsect2" title="DeterministicIPs"><a name="id507543"></a><h3>DeterministicIPs</h3><p>Default: 1</p><p>
+    </p></div><div class="refsect2" title="DeterministicIPs"><a name="id383242"></a><h3>DeterministicIPs</h3><p>Default: 1</p><p>
     When enabled, this tunable makes ctdb try to keep public IP addresses locked to specific nodes as far as possible. This makes it easier for debugging since you can know that as long as all nodes are healthy public IP X will always be hosted by node Y. 
     </p><p>
     The cost of using deterministic IP address assignment is that it disables part of the logic where ctdb tries to reduce the number of public IP assignment changes in the cluster. This tunable may increase the number of IP failover/failbacks that are performed on the cluster by a small margin.
-    </p></div><div class="refsect2" title="DisableWhenUnhealthy"><a name="id507563"></a><h3>DisableWhenUnhealthy</h3><p>Default: 0</p><p>
+    </p></div><div class="refsect2" title="DisableWhenUnhealthy"><a name="id383262"></a><h3>DisableWhenUnhealthy</h3><p>Default: 0</p><p>
    When set, as soon as a node becomes unhealthy, that node will also automatically become permanently DISABLED. Once a node is DISABLED, the only way to make it participate in the cluster again and host services is by manually enabling the node again using 'ctdb enable'. 
     </p><p>
     This disables parts of the resilience and robustness of the cluster and should ONLY be used when the system administrator is actively monitoring the cluster, so that nodes can be enabled again.
-    </p></div><div class="refsect2" title="NoIPFailback"><a name="id507583"></a><h3>NoIPFailback</h3><p>Default: 0</p><p>
+    </p></div><div class="refsect2" title="NoIPFailback"><a name="id383282"></a><h3>NoIPFailback</h3><p>Default: 0</p><p>
     When set to 1, ctdb will not perform failback of IP addresses when a node becomes healthy. Ctdb WILL perform failover of public IP addresses when a node becomes UNHEALTHY, but when the node becomes HEALTHY again, ctdb will not fail the addresses back.
     </p><p>
     Use with caution! Normally when a node becomes available to the cluster
 ctdb will try to reassign public IP addresses onto the new node as a way to distribute the workload evenly across the cluster nodes. Ctdb tries to make sure that all running nodes host approximately the same number of public addresses.
     </p><p>
     When you enable this tunable, CTDB will no longer attempt to rebalance the cluster by failing IP addresses back to the new nodes. An unbalanced cluster will therefore remain unbalanced until there is manual intervention from the administrator. When this parameter is set, you can manually fail public IP addresses over to the new node(s) using the 'ctdb moveip' command.
-    </p></div></div><div class="refsect1" title="LVS"><a name="id507611"></a><h2>LVS</h2><p>
+    </p></div></div><div class="refsect1" title="LVS"><a name="id383310"></a><h2>LVS</h2><p>
     LVS is a mode where CTDB presents one single IP address for the entire
     cluster. This is an alternative to using public IP addresses and round-robin
     DNS to loadbalance clients across the cluster.
@@ -369,7 +369,7 @@ ctdb will try to reassign public IP addresses onto the new node as a way to dist
    the processing node back to the clients. For read-intensive i/o patterns you can achieve very high throughput rates in this mode.
     </p><p>
     Note: you can use LVS and public addresses at the same time.
-    </p><div class="refsect2" title="Configuration"><a name="id507667"></a><h3>Configuration</h3><p>
+    </p><div class="refsect2" title="Configuration"><a name="id383366"></a><h3>Configuration</h3><p>
     To activate LVS on a CTDB node you must specify CTDB_PUBLIC_INTERFACE and 
     CTDB_LVS_PUBLIC_ADDRESS in /etc/sysconfig/ctdb.
 	</p><p>
@@ -392,7 +392,7 @@ You must also specify the "--lvs" command line argument to ctdbd to activete LVS
     all of the clients from the node BEFORE you enable LVS. Also make sure
     that when you ping these hosts that the traffic is routed out through the
     eth0 interface.
-    </p></div><div class="refsect1" title="REMOTE CLUSTER NODES"><a name="id507704"></a><h2>REMOTE CLUSTER NODES</h2><p>
+    </p></div><div class="refsect1" title="REMOTE CLUSTER NODES"><a name="id383403"></a><h2>REMOTE CLUSTER NODES</h2><p>
 It is possible to have a CTDB cluster that spans across a WAN link. 
 For example where you have a CTDB cluster in your datacentre but you also
 want to have one additional CTDB node located at a remote branch site.
@@ -421,7 +421,7 @@ CTDB_CAPABILITY_RECMASTER=no
     </p><p>
	Verify with the command "ctdb getcapabilities" that the node no longer
 	has the recmaster or the lmaster capabilities.
-    </p></div><div class="refsect1" title="NAT-GW"><a name="id551307"></a><h2>NAT-GW</h2><p>
+    </p></div><div class="refsect1" title="NAT-GW"><a name="id383442"></a><h2>NAT-GW</h2><p>
      Sometimes it is desirable to run services on the CTDB node which will
       need to originate outgoing traffic to external servers. This might
       be contacting NIS servers, LDAP servers etc. etc.
@@ -444,7 +444,7 @@ CTDB_CAPABILITY_RECMASTER=no
       if there are no public addresses assigned to the node.
       This is the simplest way but it uses up a lot of ip addresses since you
       have to assign both static and also public addresses to each node.
-    </p><div class="refsect2" title="NAT-GW"><a name="id551336"></a><h3>NAT-GW</h3><p>
+    </p><div class="refsect2" title="NAT-GW"><a name="id426195"></a><h3>NAT-GW</h3><p>
       A second way is to use the built in NAT-GW feature in CTDB.
       With NAT-GW you assign one public NATGW address for each natgw group.
       Each NATGW group is a set of nodes in the cluster that shares the same
@@ -459,7 +459,7 @@ CTDB_CAPABILITY_RECMASTER=no
       In each NATGW group, one of the nodes is designated the NAT Gateway
      through which all traffic that is originated by nodes in this group
      will be routed if public addresses are not available. 
-    </p></div><div class="refsect2" title="Configuration"><a name="id551358"></a><h3>Configuration</h3><p>
+    </p></div><div class="refsect2" title="Configuration"><a name="id426217"></a><h3>Configuration</h3><p>
      NAT-GW is configured in /etc/sysconfig/ctdb by setting the following
       variables:
     </p><pre class="screen">
@@ -498,31 +498,40 @@ CTDB_CAPABILITY_RECMASTER=no
 # CTDB_NATGW_DEFAULT_GATEWAY=10.0.0.1
 # CTDB_NATGW_PRIVATE_NETWORK=10.1.1.0/24
 # CTDB_NATGW_NODES=/etc/ctdb/natgw_nodes
-    </pre></div><div class="refsect2" title="CTDB_NATGW_PUBLIC_IP"><a name="id551401"></a><h3>CTDB_NATGW_PUBLIC_IP</h3><p>
+#
+# Normally any node in the natgw group can act as the natgw master.
+# In some configurations you may have special nodes that are part of the
+# cluster/natgw group, but lack connectivity to the
+# public network.
+# For these cases, set this variable to prevent these nodes from
+# becoming natgw master.
+#
+# CTDB_NATGW_SLAVE_ONLY=yes
+    </pre></div><div class="refsect2" title="CTDB_NATGW_PUBLIC_IP"><a name="id426266"></a><h3>CTDB_NATGW_PUBLIC_IP</h3><p>
       This is an ip address in the public network that is used for all outgoing
       traffic when the public addresses are not assigned.
       This address will be assigned to one of the nodes in the cluster which
       will masquerade all traffic for the other nodes.
     </p><p>
       Format of this parameter is IPADDRESS/NETMASK
-    </p></div><div class="refsect2" title="CTDB_NATGW_PUBLIC_IFACE"><a name="id551415"></a><h3>CTDB_NATGW_PUBLIC_IFACE</h3><p>
+    </p></div><div class="refsect2" title="CTDB_NATGW_PUBLIC_IFACE"><a name="id426280"></a><h3>CTDB_NATGW_PUBLIC_IFACE</h3><p>
      This is the physical interface that the CTDB_NATGW_PUBLIC_IP will be
      assigned to. This should be an interface connected to the public network.
     </p><p>
       Format of this parameter is INTERFACE
-    </p></div><div class="refsect2" title="CTDB_NATGW_DEFAULT_GATEWAY"><a name="id551428"></a><h3>CTDB_NATGW_DEFAULT_GATEWAY</h3><p>
+    </p></div><div class="refsect2" title="CTDB_NATGW_DEFAULT_GATEWAY"><a name="id426293"></a><h3>CTDB_NATGW_DEFAULT_GATEWAY</h3><p>
       This is the default gateway to use on the node that is elected to host
       the CTDB_NATGW_PUBLIC_IP. This is the default gateway on the public network.
     </p><p>
       Format of this parameter is IPADDRESS
-    </p></div><div class="refsect2" title="CTDB_NATGW_PRIVATE_NETWORK"><a name="id551442"></a><h3>CTDB_NATGW_PRIVATE_NETWORK</h3><p>
+    </p></div><div class="refsect2" title="CTDB_NATGW_PRIVATE_NETWORK"><a name="id426306"></a><h3>CTDB_NATGW_PRIVATE_NETWORK</h3><p>
      This is the network/netmask used for the internal private network.
     </p><p>
       Format of this parameter is IPADDRESS/NETMASK
-    </p></div><div class="refsect2" title="CTDB_NATGW_NODES"><a name="id551453"></a><h3>CTDB_NATGW_NODES</h3><p>
+    </p></div><div class="refsect2" title="CTDB_NATGW_NODES"><a name="id426319"></a><h3>CTDB_NATGW_NODES</h3><p>
       This is the list of all nodes that belong to the same NATGW group
       as this node. The default is /etc/ctdb/natgw_nodes.
-    </p></div><div class="refsect2" title="Operation"><a name="id551463"></a><h3>Operation</h3><p>
+    </p></div><div class="refsect2" title="Operation"><a name="id426329"></a><h3>Operation</h3><p>
       When the NAT-GW functionality is used, one of the nodes is elected
       to act as a NAT router for all the other nodes in the group when
       they need to originate traffic to the external public network.
@@ -537,7 +546,7 @@ CTDB_CAPABILITY_RECMASTER=no
     </p><p>
       This is implemented in the 11.natgw eventscript. Please see the
       eventscript for further information.
-    </p></div><div class="refsect2" title="Removing/Changing NATGW at runtime"><a name="id551488"></a><h3>Removing/Changing NATGW at runtime</h3><p>
+    </p></div><div class="refsect2" title="Removing/Changing NATGW at runtime"><a name="id426354"></a><h3>Removing/Changing NATGW at runtime</h3><p>
       The following are the procedures to change/remove a NATGW configuration 
       at runtime, without having to restart ctdbd.
     </p><p>
@@ -551,7 +560,7 @@ CTDB_CAPABILITY_RECMASTER=no
 1, Run 'CTDB_BASE=/etc/ctdb /etc/ctdb/events.d/11.natgw removenatgw'
 2, Then change the configuration in /etc/sysconfig/ctdb
 3, Run 'CTDB_BASE=/etc/ctdb /etc/ctdb/events.d/11.natgw updatenatgw'
-    </pre></div></div><div class="refsect1" title="NOTIFICATION SCRIPT"><a name="id551519"></a><h2>NOTIFICATION SCRIPT</h2><p>
+    </pre></div></div><div class="refsect1" title="NOTIFICATION SCRIPT"><a name="id426385"></a><h2>NOTIFICATION SCRIPT</h2><p>
       Notification scripts are used with ctdb to have a call-out from ctdb
       to a user-specified script when certain state changes occur in ctdb.
       This is commonly to set up either sending SNMP traps or emails
@@ -563,17 +572,17 @@ CTDB_CAPABILITY_RECMASTER=no
       See /etc/ctdb/notify.sh for an example script.
     </p><p>
       CTDB currently generates notifications on these state changes:
-    </p><div class="refsect2" title="unhealthy"><a name="id551542"></a><h3>unhealthy</h3><p>
+    </p><div class="refsect2" title="unhealthy"><a name="id426409"></a><h3>unhealthy</h3><p>
       This call-out is triggered when the node changes to UNHEALTHY state.
-    </p></div><div class="refsect2" title="healthy"><a name="id551552"></a><h3>healthy</h3><p>
+    </p></div><div class="refsect2" title="healthy"><a name="id426418"></a><h3>healthy</h3><p>
       This call-out is triggered when the node changes to HEALTHY state.
-    </p></div><div class="refsect2" title="startup"><a name="id551561"></a><h3>startup</h3><p>
+    </p></div><div class="refsect2" title="startup"><a name="id426427"></a><h3>startup</h3><p>
       This call-out is triggered when ctdb has started up and all managed services are up and running.
-    </p></div></div><div class="refsect1" title="ClamAV Daemon"><a name="id551572"></a><h2>ClamAV Daemon</h2><p>
+    </p></div></div><div class="refsect1" title="ClamAV Daemon"><a name="id426438"></a><h2>ClamAV Daemon</h2><p>
 CTDB has support to manage the popular anti-virus daemon ClamAV.
 This support is implemented through the
 eventscript : /etc/ctdb/events.d/31.clamd.
-</p><div class="refsect2" title="Configuration"><a name="id551580"></a><h3>Configuration</h3><p>
+</p><div class="refsect2" title="Configuration"><a name="id426447"></a><h3>Configuration</h3><p>
 Start by configuring CLAMAV normally and test that it works. Once this is
 done, copy the configuration files over to all the nodes so that all nodes
 share identical CLAMAV configurations.
@@ -602,10 +611,10 @@ Once you have restarted CTDBD, use
 ctdb scriptstatus
 </pre><p>
 and verify that the 31.clamd eventscript is listed and that it was executed successfully.
-</p></div></div><div class="refsect1" title="SEE ALSO"><a name="id551630"></a><h2>SEE ALSO</h2><p>
+</p></div></div><div class="refsect1" title="SEE ALSO"><a name="id426497"></a><h2>SEE ALSO</h2><p>
       ctdb(1), onnode(1)
       <a class="ulink" href="http://ctdb.samba.org/" target="_top">http://ctdb.samba.org/</a>
-    </p></div><div class="refsect1" title="COPYRIGHT/LICENSE"><a name="id551643"></a><h2>COPYRIGHT/LICENSE</h2><div class="literallayout"><p><br>
+    </p></div><div class="refsect1" title="COPYRIGHT/LICENSE"><a name="id426509"></a><h2>COPYRIGHT/LICENSE</h2><div class="literallayout"><p><br>
 Copyright (C) Andrew Tridgell 2007<br>
 Copyright (C) Ronnie sahlberg 2007<br>
 <br>
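
The CTDB_NATGW_NODES file referenced above is a plain list of the
private addresses of the natgw group members, one address per line. A
hypothetical example, reusing the private network addresses shown in
the documentation:

    # /etc/ctdb/natgw_nodes (hypothetical contents)
    10.1.1.1
    10.1.1.2
    10.1.1.3
    10.1.1.4
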
diff --git a/doc/ctdbd.1.xml b/doc/ctdbd.1.xml
index aa51f43..91e2f9f 100644
--- a/doc/ctdbd.1.xml
+++ b/doc/ctdbd.1.xml
@@ -900,6 +900,15 @@ CTDB_CAPABILITY_RECMASTER=no
 # CTDB_NATGW_DEFAULT_GATEWAY=10.0.0.1
 # CTDB_NATGW_PRIVATE_NETWORK=10.1.1.0/24
 # CTDB_NATGW_NODES=/etc/ctdb/natgw_nodes
+#
+# Normally any node in the natgw group can act as the natgw master.
+# In some configurations you may have special nodes that are part of the
+# cluster/natgw group, but lack connectivity to the
+# public network.
+# For these cases, set this variable to prevent these nodes from
+# becoming natgw master.
+#
+# CTDB_NATGW_SLAVE_ONLY=yes
     </screen>
     </refsect2>
 
diff --git a/tools/ctdb.c b/tools/ctdb.c
index 2a9eb49..389dfdb 100644
--- a/tools/ctdb.c
+++ b/tools/ctdb.c
@@ -627,6 +627,7 @@ struct natgw_node {
 static int control_natgwlist(struct ctdb_context *ctdb, int argc, const char **argv)
 {
 	int i, ret;
+	uint32_t capabilities;
 	const char *natgw_list;
 	int nlines;
 	char **lines;
@@ -705,6 +706,14 @@ static int control_natgwlist(struct ctdb_context *ctdb, int argc, const char **a
 	 */
 	for(i=0;i<nodemap->num;i++){
 		if (!(nodemap->nodes[i].flags & (NODE_FLAGS_DISCONNECTED|NODE_FLAGS_STOPPED|NODE_FLAGS_DELETED|NODE_FLAGS_BANNED|NODE_FLAGS_UNHEALTHY))) {
+			ret = ctdb_ctrl_getcapabilities(ctdb, TIMELIMIT(), nodemap->nodes[i].pnn, &capabilities);
+			if (ret != 0) {
+				DEBUG(DEBUG_ERR, ("Unable to get capabilities from node %u\n", nodemap->nodes[i].pnn));
+				return ret;
+			}
+			if (!(capabilities&CTDB_CAP_NATGW)) {
+				continue;
+			}
 			printf("%d %s\n", nodemap->nodes[i].pnn,ctdb_addr_to_str(&nodemap->nodes[i].addr));
 			break;
 		}
@@ -713,6 +722,14 @@ static int control_natgwlist(struct ctdb_context *ctdb, int argc, const char **a
 	if (i == nodemap->num) {
 		for(i=0;i<nodemap->num;i++){
 			if (!(nodemap->nodes[i].flags & (NODE_FLAGS_DISCONNECTED|NODE_FLAGS_STOPPED|NODE_FLAGS_DELETED))) {
+				ret = ctdb_ctrl_getcapabilities(ctdb, TIMELIMIT(), nodemap->nodes[i].pnn, &capabilities);
+				if (ret != 0) {
+					DEBUG(DEBUG_ERR, ("Unable to get capabilities from node %u\n", nodemap->nodes[i].pnn));
+					return ret;
+				}
+				if (!(capabilities&CTDB_CAP_NATGW)) {
+					continue;
+				}
 				printf("%d %s\n", nodemap->nodes[i].pnn,ctdb_addr_to_str(&nodemap->nodes[i].addr));
 				break;
 			}
@@ -722,6 +739,14 @@ static int control_natgwlist(struct ctdb_context *ctdb, int argc, const char **a
 	if (i == nodemap->num) {
 		for(i=0;i<nodemap->num;i++){
 			if (!(nodemap->nodes[i].flags & (NODE_FLAGS_DISCONNECTED|NODE_FLAGS_DELETED))) {
+				ret = ctdb_ctrl_getcapabilities(ctdb, TIMELIMIT(), nodemap->nodes[i].pnn, &capabilities);
+				if (ret != 0) {
+					DEBUG(DEBUG_ERR, ("Unable to get capabilities from node %u\n", nodemap->nodes[i].pnn));
+					return ret;
+				}
+				if (!(capabilities&CTDB_CAP_NATGW)) {
+					continue;
+				}
 				printf("%d %s\n", nodemap->nodes[i].pnn, ctdb_addr_to_str(&nodemap->nodes[i].addr));
 				break;
 			}
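
After this change is deployed, the effect can be checked from any node;
a sketch of the verification steps (output omitted):

    # show the capabilities this node advertises; a node configured with
    # CTDB_NATGW_SLAVE_ONLY=yes should no longer report NATGW
    ctdb getcapabilities
    # list the natgw master and the group members; nodes without the
    # NATGW capability are now skipped when electing a master
    ctdb natgwlist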


-- 
CTDB repository

