[SCM] CTDB repository - branch 1.2.39-28 updated - ctdb-1.9.1-505-g6b2c55d

Ronnie Sahlberg sahlberg at samba.org
Wed Mar 14 17:57:42 MDT 2012


The branch, 1.2.39-28 has been updated
       via  6b2c55d6ac396835597613b8f7fa271ac2bd6f4b (commit)
       via  cd8f268da8841d3803e64379ae8fb5384a7e7946 (commit)
       via  ecb58d0f17680e79f434a9d8f3048c71d967241b (commit)
       via  800dd35b7378af76fefb227b10de2f5ede82542c (commit)
       via  d156dec63857b19d1c55fbe089ef9f02e2b8bb50 (commit)
       via  7d9c1087ac7989bc705a373c4b4414f4f669f7c4 (commit)
       via  81d680873cb087e7baeb375eda1ebfc339ed10f4 (commit)
       via  0a460fc762ffab9b77431eeb48302bdac4682a10 (commit)
      from  635ff897405735aaf856f5dc900c2ae6aacf9243 (commit)

http://gitweb.samba.org/?p=ctdb.git;a=shortlog;h=1.2.39-28


- Log -----------------------------------------------------------------
commit 6b2c55d6ac396835597613b8f7fa271ac2bd6f4b
Author: Ronnie Sahlberg <ronniesahlberg at gmail.com>
Date:   Thu Mar 15 10:22:16 2012 +1100

    new version 1.2.39-30

commit cd8f268da8841d3803e64379ae8fb5384a7e7946
Author: Ronnie Sahlberg <ronniesahlberg at gmail.com>
Date:   Thu Mar 15 10:16:31 2012 +1100

    Revert "Add CTDB_CONTROL_CHECK_SRVID"
    
    This reverts commit 39d9e8b813e5957bd8319bd5ad6c4c9978c27dad.

commit ecb58d0f17680e79f434a9d8f3048c71d967241b
Author: Ronnie Sahlberg <ronniesahlberg at gmail.com>
Date:   Thu Mar 15 10:16:16 2012 +1100

    Revert "Record Fetch Collapse: Collapse multiple fetch request into one single request"
    
    This reverts commit 8bc2cca22faf33dad8908567fa30b55173cb3426.

commit 800dd35b7378af76fefb227b10de2f5ede82542c
Author: Ronnie Sahlberg <ronniesahlberg at gmail.com>
Date:   Thu Mar 15 10:15:56 2012 +1100

    Revert "Add back la-count based migration"
    
    This reverts commit 0c4ae347a20a780fe0326036de699e9f6cbb6d26.

commit d156dec63857b19d1c55fbe089ef9f02e2b8bb50
Author: Ronnie Sahlberg <ronniesahlberg at gmail.com>
Date:   Thu Mar 15 10:15:33 2012 +1100

    Revert "set max lacount to default to 20"
    
    This reverts commit 9e702ae900809bde8c3ba7e7a29a1171d0b7ed86.

commit 7d9c1087ac7989bc705a373c4b4414f4f669f7c4
Author: Ronnie Sahlberg <ronniesahlberg at gmail.com>
Date:   Thu Mar 15 10:14:44 2012 +1100

    Revert "Disable FetchLockCollapse by default and disable LACount too"
    
    This reverts commit 0a460fc762ffab9b77431eeb48302bdac4682a10.

commit 81d680873cb087e7baeb375eda1ebfc339ed10f4
Author: Ronnie Sahlberg <ronniesahlberg at gmail.com>
Date:   Tue Mar 13 17:33:20 2012 +1100

    version 1.2.39-29   disable fetchlockcollapse and lacount by default

commit 0a460fc762ffab9b77431eeb48302bdac4682a10
Author: Ronnie Sahlberg <ronniesahlberg at gmail.com>
Date:   Tue Mar 13 17:32:41 2012 +1100

    Disable FetchLockCollapse by default and disable LACount too

-----------------------------------------------------------------------
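
Taken together, these reverts back out two experimental optimizations from the 1.2.39 branch: the fetch-lock collapse (deduplicating concurrent fetch-lock requests for the same key inside the local daemon) and the la-count based migration heuristic (only handing a record over to a remote node once that node has made enough consecutive accesses). The sketch below condenses the migration condition being removed; should_migrate_old() and should_migrate_new() are hypothetical helpers written only for this summary, with the logic taken from the server/ctdb_call.c hunk in the changeset further down. In both the old and new code, migration is still refused while a transaction is active on the database.

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical condensation of the check removed from ctdb_request_call()
 * (server/ctdb_call.c). Before the revert a record only migrated to the
 * calling node once that node had made max_lacount consecutive accesses,
 * or when it explicitly requested an immediate migration. */
static bool should_migrate_old(uint32_t srcnode, uint32_t pnn,
			       uint16_t laccessor, uint16_t lacount,
			       uint32_t max_lacount, bool immediate_migration)
{
	if (srcnode == pnn) {
		return false;	/* local call, nothing to migrate */
	}
	if (immediate_migration) {
		return true;	/* CTDB_IMMEDIATE_MIGRATION was requested */
	}
	return max_lacount != 0 &&
	       laccessor == srcnode &&
	       lacount >= max_lacount;
}

/* After the revert the decision collapses to "migrate whenever the caller
 * is a remote node", on the reasoning that, from the client's perspective,
 * a fetch of the data is as expensive as a migration (see the comment
 * added in the same hunk). */
static bool should_migrate_new(uint32_t srcnode, uint32_t pnn)
{
	return srcnode != pnn;
}
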

Summary of changes:
 client/ctdb_client.c       |   14 +--
 common/ctdb_ltdb.c         |    1 -
 include/ctdb_private.h     |   10 +--
 include/ctdb_protocol.h    |    4 +-
 packaging/RPM/ctdb.spec.in |    4 +-
 server/ctdb_call.c         |   20 ++--
 server/ctdb_control.c      |    3 -
 server/ctdb_daemon.c       |  239 --------------------------------------------
 server/ctdb_ltdb_server.c  |   11 --
 server/ctdb_tunables.c     |    4 +-
 10 files changed, 17 insertions(+), 293 deletions(-)
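
The record-header change in these reverts is in struct ctdb_ltdb_header (include/ctdb_protocol.h, hunk below): the two la-count tracking fields are replaced by a reserved word of the same total size, so the header size and the offset of the flags field are unchanged. A minimal sketch of the before/after layout follows; the _old/_new suffixes exist only for this illustration (the real struct is named ctdb_ltdb_header in both versions), and the CTDB_REC_FLAG_* bit definitions that sit inside the real struct are omitted.

#include <stdint.h>

/* Layout up to 1.2.39-28 (la-count tracking present). */
struct ctdb_ltdb_header_old {
	uint64_t rsn;        /* record sequence number */
	uint32_t dmaster;    /* node currently holding the data master role */
	uint16_t laccessor;  /* last node to access the record */
	uint16_t lacount;    /* consecutive accesses by that node */
	uint32_t flags;      /* CTDB_REC_FLAG_* bits */
};

/* Layout after the reverts (1.2.39-30): the two 16-bit fields become one
 * reserved 32-bit word, keeping the size and the offset of flags intact. */
struct ctdb_ltdb_header_new {
	uint64_t rsn;
	uint32_t dmaster;
	uint32_t reserved1;
	uint32_t flags;
};
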


Changeset truncated at 500 lines:

diff --git a/client/ctdb_client.c b/client/ctdb_client.c
index 7103ef6..5e06604 100644
--- a/client/ctdb_client.c
+++ b/client/ctdb_client.c
@@ -72,7 +72,7 @@ struct ctdb_req_header *_ctdbd_allocate_pkt(struct ctdb_context *ctdb,
 */
 int ctdb_call_local(struct ctdb_db_context *ctdb_db, struct ctdb_call *call,
 		    struct ctdb_ltdb_header *header, TALLOC_CTX *mem_ctx,
-		    TDB_DATA *data, uint32_t caller)
+		    TDB_DATA *data)
 {
 	struct ctdb_call_info *c;
 	struct ctdb_registered_call *fn;
@@ -106,15 +106,7 @@ int ctdb_call_local(struct ctdb_db_context *ctdb_db, struct ctdb_call *call,
 	}
 
 	/* we need to force the record to be written out if this was a remote access */
-	if (header->laccessor != caller) {
-		header->lacount = 0;
-	}
-	header->laccessor = caller;
-	header->lacount++;
-
-	/* we need to force the record to be written out if this was a remote access,
-	   so that the lacount is updated */
-	if (c->new_data == NULL && header->laccessor != ctdb->pnn) {
+	if (c->new_data == NULL) {
 		c->new_data = &c->record_data;
 	}
 
@@ -369,7 +361,7 @@ static struct ctdb_client_call_state *ctdb_client_call_local_send(struct ctdb_db
 	*(state->call) = *call;
 	state->ctdb_db = ctdb_db;
 
-	ret = ctdb_call_local(ctdb_db, state->call, header, state, data, ctdb->pnn);
+	ret = ctdb_call_local(ctdb_db, state->call, header, state, data);
 
 	return state;
 }
diff --git a/common/ctdb_ltdb.c b/common/ctdb_ltdb.c
index b0ab3e6..3ee7fe8 100644
--- a/common/ctdb_ltdb.c
+++ b/common/ctdb_ltdb.c
@@ -66,7 +66,6 @@ static void ltdb_initial_header(struct ctdb_db_context *ctdb_db,
 	/* initial dmaster is the lmaster */
 	header->dmaster = ctdb_lmaster(ctdb_db->ctdb, &key);
 	header->flags = CTDB_REC_FLAG_AUTOMATIC;
-	header->laccessor = header->dmaster;
 }
 
 
diff --git a/include/ctdb_private.h b/include/ctdb_private.h
index 0eb3b6e..675ea49 100644
--- a/include/ctdb_private.h
+++ b/include/ctdb_private.h
@@ -122,8 +122,6 @@ struct ctdb_tunable {
 	uint32_t vacuum_fast_path_count;
 	uint32_t lcp2_public_ip_assignment;
 	uint32_t allow_client_db_attach;
-	uint32_t fetch_lock_collapse;
-	uint32_t max_lacount;
 };
 
 /*
@@ -525,10 +523,6 @@ struct ctdb_db_context {
 				  struct ctdb_ltdb_header *header,
 				  TDB_DATA data);
 
-	/* used to track which records we are currently fetching
-	   so we can avoid sending duplicate fetch requests
-	*/
-	struct trbt_tree *deferred_fetch;
 };
 
 
@@ -796,7 +790,7 @@ struct ctdb_call_state *ctdb_daemon_call_send_remote(struct ctdb_db_context *ctd
 
 int ctdb_call_local(struct ctdb_db_context *ctdb_db, struct ctdb_call *call,
 		    struct ctdb_ltdb_header *header, TALLOC_CTX *mem_ctx,
-		    TDB_DATA *data, uint32_t caller);
+		    TDB_DATA *data);
 
 #define ctdb_reqid_find(ctdb, reqid, type)	(type *)_ctdb_reqid_find(ctdb, reqid, #type, __location__)
 
@@ -960,8 +954,6 @@ int ctdb_dispatch_message(struct ctdb_context *ctdb, uint64_t srvid, TDB_DATA da
 int daemon_register_message_handler(struct ctdb_context *ctdb, uint32_t client_id, uint64_t srvid);
 int ctdb_deregister_message_handler(struct ctdb_context *ctdb, uint64_t srvid, void *private_data);
 int daemon_deregister_message_handler(struct ctdb_context *ctdb, uint32_t client_id, uint64_t srvid);
-int daemon_check_srvids(struct ctdb_context *ctdb, TDB_DATA indata,
-			TDB_DATA *outdata);
 
 int32_t ctdb_ltdb_enable_seqnum(struct ctdb_context *ctdb, uint32_t db_id);
 int32_t ctdb_ltdb_update_seqnum(struct ctdb_context *ctdb, uint32_t db_id, uint32_t srcnode);
diff --git a/include/ctdb_protocol.h b/include/ctdb_protocol.h
index 90af21f..a8282e3 100644
--- a/include/ctdb_protocol.h
+++ b/include/ctdb_protocol.h
@@ -363,7 +363,6 @@ enum ctdb_controls {CTDB_CONTROL_PROCESS_EXISTS          = 0,
 		    CTDB_CONTROL_TCP_ADD_DELAYED_UPDATE  = 126,
 		    CTDB_CONTROL_GET_STAT_HISTORY	 = 127,
 		    CTDB_CONTROL_SCHEDULE_FOR_DELETION   = 128,
-		    CTDB_CONTROL_CHECK_SRVIDS		 = 130,
 };
 
 /*
@@ -482,8 +481,7 @@ enum ctdb_trans2_commit_error {
 struct ctdb_ltdb_header {
 	uint64_t rsn;
 	uint32_t dmaster;
-	uint16_t laccessor;
-	uint16_t lacount;
+	uint32_t reserved1;
 #define CTDB_REC_FLAG_DEFAULT			0x00000000
 #define CTDB_REC_FLAG_MIGRATED_WITH_DATA	0x00010000
 #define CTDB_REC_FLAG_VACUUM_MIGRATED		0x00020000
diff --git a/packaging/RPM/ctdb.spec.in b/packaging/RPM/ctdb.spec.in
index 45e0a31..a4bab8a 100644
--- a/packaging/RPM/ctdb.spec.in
+++ b/packaging/RPM/ctdb.spec.in
@@ -4,7 +4,7 @@ Summary: Clustered TDB
 Vendor: Samba Team
 Packager: Samba Team <samba at samba.org>
 Version: 1.2.39
-Release: 28GITHASH
+Release: 30GITHASH
 Epoch: 0
 License: GNU GPL version 3
 Group: System Environment/Daemons
@@ -144,6 +144,8 @@ development libraries for ctdb
 %{_libdir}/libctdb.a
 
 %changelog
+* Thu Mar 15 2012 : Version 1.2.39-30
+ - revert fetchlock collapse, CHECK_SRVIDS, lacount migration
 * Thu Nov 17 2011 : Version 1.2.39
  - Handle canceled monitor events better, dont assume they are always status==OK
 * Mon Oct 17 2011 : Version 1.2.38
diff --git a/server/ctdb_call.c b/server/ctdb_call.c
index 08710eb..622ef74 100644
--- a/server/ctdb_call.c
+++ b/server/ctdb_call.c
@@ -339,7 +339,7 @@ static void ctdb_become_dmaster(struct ctdb_db_context *ctdb_db,
 		return;
 	}
 
-	ctdb_call_local(ctdb_db, state->call, &header, state, &data, ctdb->pnn);
+	ctdb_call_local(ctdb_db, state->call, &header, state, &data);
 
 	ret = ctdb_ltdb_unlock(ctdb_db, state->call->key);
 	if (ret != 0) {
@@ -532,15 +532,11 @@ void ctdb_request_call(struct ctdb_context *ctdb, struct ctdb_req_header *hdr)
 	CTDB_INCREMENT_STAT(ctdb, hop_count_bucket[bucket]);
 
 
-	/* if this nodes has done enough consecutive calls on the same record
-	   then give them the record
-	   or if the node requested an immediate migration
-	*/
-	if ( c->hdr.srcnode != ctdb->pnn &&
-	     ((header.laccessor == c->hdr.srcnode
-	       && header.lacount >= ctdb->tunable.max_lacount
-	       && ctdb->tunable.max_lacount != 0)
-	      || (c->flags & CTDB_IMMEDIATE_MIGRATION)) ) {
+	/* Try if possible to migrate the record off to the caller node.
+	 * From the clients perspective a fetch of the data is just as 
+	 * expensive as a migration.
+	 */
+	if (c->hdr.srcnode != ctdb->pnn) {
 		if (ctdb_db->transaction_active) {
 			DEBUG(DEBUG_INFO, (__location__ " refusing migration"
 			      " of key %s while transaction is active\n",
@@ -559,7 +555,7 @@ void ctdb_request_call(struct ctdb_context *ctdb, struct ctdb_req_header *hdr)
 		}
 	}
 
-	ctdb_call_local(ctdb_db, call, &header, hdr, &data, c->hdr.srcnode);
+	ctdb_call_local(ctdb_db, call, &header, hdr, &data);
 
 	ret = ctdb_ltdb_unlock(ctdb_db, call->key);
 	if (ret != 0) {
@@ -782,7 +778,7 @@ struct ctdb_call_state *ctdb_call_local_send(struct ctdb_db_context *ctdb_db,
 	*(state->call) = *call;
 	state->ctdb_db = ctdb_db;
 
-	ret = ctdb_call_local(ctdb_db, state->call, header, state, data, ctdb->pnn);
+	ret = ctdb_call_local(ctdb_db, state->call, header, state, data);
 
 	event_add_timed(ctdb->ev, state, timeval_zero(), call_local_trigger, state);
 
diff --git a/server/ctdb_control.c b/server/ctdb_control.c
index 83e1bd1..748907f 100644
--- a/server/ctdb_control.c
+++ b/server/ctdb_control.c
@@ -253,9 +253,6 @@ static int32_t ctdb_control_dispatch(struct ctdb_context *ctdb,
 	case CTDB_CONTROL_DEREGISTER_SRVID:
 		return daemon_deregister_message_handler(ctdb, client_id, srvid);
 
-	case CTDB_CONTROL_CHECK_SRVIDS:
-		return daemon_check_srvids(ctdb, indata, outdata);
-
 	case CTDB_CONTROL_ENABLE_SEQNUM:
 		CHECK_CONTROL_DATA_SIZE(sizeof(uint32_t));
 		return ctdb_ltdb_enable_seqnum(ctdb, *(uint32_t *)indata.dptr);
diff --git a/server/ctdb_daemon.c b/server/ctdb_daemon.c
index 3d5a71e..f0c7ec9 100644
--- a/server/ctdb_daemon.c
+++ b/server/ctdb_daemon.c
@@ -27,7 +27,6 @@
 #include "system/wait.h"
 #include "../include/ctdb_client.h"
 #include "../include/ctdb_private.h"
-#include "../common/rb_tree.h"
 #include <sys/socket.h>
 
 struct ctdb_client_pid_list {
@@ -205,42 +204,6 @@ int daemon_deregister_message_handler(struct ctdb_context *ctdb, uint32_t client
 	return ctdb_deregister_message_handler(ctdb, srvid, client);
 }
 
-int daemon_check_srvids(struct ctdb_context *ctdb, TDB_DATA indata,
-			TDB_DATA *outdata)
-{
-	uint64_t *ids;
-	int i, num_ids;
-	uint8_t *results;
-
-	if ((indata.dsize % sizeof(uint64_t)) != 0) {
-		DEBUG(DEBUG_ERR, ("Bad indata in daemon_check_srvids, "
-				  "size=%d\n", (int)indata.dsize));
-		return -1;
-	}
-
-	ids = (uint64_t *)indata.dptr;
-	num_ids = indata.dsize / 8;
-
-	results = talloc_zero_array(outdata, uint8_t, (num_ids+7)/8);
-	if (results == NULL) {
-		DEBUG(DEBUG_ERR, ("talloc failed in daemon_check_srvids\n"));
-		return -1;
-	}
-	for (i=0; i<num_ids; i++) {
-		struct ctdb_message_list *ml;
-		for (ml=ctdb->message_list; ml; ml=ml->next) {
-			if (ml->srvid == ids[i]) {
-				break;
-			}
-		}
-		if (ml != NULL) {
-			results[i/8] |= (1 << (i%8));
-		}
-	}
-	outdata->dptr = (uint8_t *)results;
-	outdata->dsize = talloc_get_size(results);
-	return 0;
-}
 
 /*
   destroy a ctdb_client
@@ -395,190 +358,6 @@ static void daemon_incoming_packet_wrap(void *p, struct ctdb_req_header *hdr)
 	daemon_incoming_packet(client, hdr);	
 }
 
-struct ctdb_deferred_fetch_call {
-	struct ctdb_deferred_fetch_call *next, *prev;
-	struct ctdb_req_call *c;
-	struct ctdb_daemon_packet_wrap *w;
-};
-
-struct ctdb_deferred_fetch_queue {
-	struct ctdb_deferred_fetch_call *deferred_calls;
-};
-
-struct ctdb_deferred_requeue {
-	struct ctdb_deferred_fetch_call *dfc;
-	struct ctdb_client *client;
-};
-
-/* called from a timer event and starts reprocessing the deferred call.*/
-static void reprocess_deferred_call(struct event_context *ev, struct timed_event *te, 
-				       struct timeval t, void *private_data)
-{
-	struct ctdb_deferred_requeue *dfr = (struct ctdb_deferred_requeue *)private_data;
-	struct ctdb_client *client = dfr->client;
-
-	talloc_steal(client, dfr->dfc->c);
-	daemon_incoming_packet(client, (struct ctdb_req_header *)dfr->dfc->c);
-	talloc_free(dfr);
-}
-
-/* the referral context is destroyed either after a timeout or when the initial
-   fetch-lock has finished.
-   at this stage, immediately start reprocessing the queued up deferred
-   calls so they get reprocessed immediately (and since we are dmaster at
-   this stage, trigger the waiting smbd processes to pick up and aquire the
-   record right away.
-*/
-static int deferred_fetch_queue_destructor(struct ctdb_deferred_fetch_queue *dfq)
-{
-
-	/* need to reprocess the packets from the queue explicitely instead of
-	   just using a normal destructor since we want, need, to
-	   call the clients in the same oder as the requests queued up
-	*/
-	while (dfq->deferred_calls != NULL) {
-		struct ctdb_client *client;
-		struct ctdb_deferred_fetch_call *dfc = dfq->deferred_calls;
-		struct ctdb_deferred_requeue *dfr;
-
-		DLIST_REMOVE(dfq->deferred_calls, dfc);
-
-		client = ctdb_reqid_find(dfc->w->ctdb, dfc->w->client_id, struct ctdb_client);
-		if (client == NULL) {
-			DEBUG(DEBUG_ERR,(__location__ " Packet for disconnected client %u\n",
-				 dfc->w->client_id));
-			continue;
-		}
-
-		/* process it by pushing it back onto the eventloop */
-		dfr = talloc(client, struct ctdb_deferred_requeue);
-		if (dfr == NULL) {
-			DEBUG(DEBUG_ERR,("Failed to allocate deferred fetch requeue structure\n"));
-			continue;
-		}
-
-		dfr->dfc    = talloc_steal(dfr, dfc);
-		dfr->client = client;
-
-		event_add_timed(dfc->w->ctdb->ev, client, timeval_zero(), reprocess_deferred_call, dfr);
-	}
-
-	return 0;
-}
-
-/* insert the new deferral context into the rb tree.
-   there should never be a pre-existing context here, but check for it
-   warn and destroy the previous context if there is already a deferral context
-   for this key.
-*/
-static void *insert_dfq_callback(void *parm, void *data)
-{
-        if (data) {
-		DEBUG(DEBUG_ERR,("Already have DFQ registered. Free old %p and create new %p\n", data, parm));
-                talloc_free(data);
-        }
-        return parm;
-}
-
-/* if the original fetch-lock did not complete within a reasonable time,
-   free the context and context for all deferred requests to cause them to be
-   re-inserted into the event system.
-*/
-static void dfq_timeout(struct event_context *ev, struct timed_event *te, 
-				  struct timeval t, void *private_data)
-{
-	talloc_free(private_data);
-}
-
-/* This function is used in the local daemon to register a KEY in a database
-   for being "fetched"
-   While the remote fetch is in-flight, any futher attempts to re-fetch the
-   same record will be deferred until the fetch completes.
-*/
-static int setup_deferred_fetch_locks(struct ctdb_db_context *ctdb_db, struct ctdb_call *call)
-{
-	uint32_t *k;
-	struct ctdb_deferred_fetch_queue *dfq;
-
-	k = talloc_zero_size(call, ((call->key.dsize + 3) & 0xfffffffc) + 4);
-	if (k == NULL) {
-		DEBUG(DEBUG_ERR,("Failed to allocate key for deferred fetch\n"));
-		return -1;
-	}
-
-	k[0] = (call->key.dsize + 3) / 4 + 1;
-	memcpy(&k[1], call->key.dptr, call->key.dsize);
-
-	dfq  = talloc(call, struct ctdb_deferred_fetch_queue);
-	if (dfq == NULL) {
-		DEBUG(DEBUG_ERR,("Failed to allocate key for deferred fetch queue structure\n"));
-		talloc_free(k);
-		return -1;
-	}
-	dfq->deferred_calls = NULL;
-
-	trbt_insertarray32_callback(ctdb_db->deferred_fetch, k[0], &k[0], insert_dfq_callback, dfq);
-
-	talloc_set_destructor(dfq, deferred_fetch_queue_destructor);
-
-	/* if the fetch havent completed in 30 seconds, just tear it all down
-	   and let it try again as the events are reissued */
-	event_add_timed(ctdb_db->ctdb->ev, dfq, timeval_current_ofs(30, 0), dfq_timeout, dfq);
-
-	talloc_free(k);
-	return 0;
-}
-
-/* check if this is a duplicate request to a fetch already in-flight
-   if it is, make this call deferred to be reprocessed later when
-   the in-flight fetch completes.
-*/
-static int requeue_duplicate_fetch(struct ctdb_db_context *ctdb_db, struct ctdb_client *client, TDB_DATA key, struct ctdb_req_call *c)
-{
-	uint32_t *k;
-	struct ctdb_deferred_fetch_queue *dfq;
-	struct ctdb_deferred_fetch_call *dfc;
-
-	k = talloc_zero_size(c, ((key.dsize + 3) & 0xfffffffc) + 4);
-	if (k == NULL) {
-		DEBUG(DEBUG_ERR,("Failed to allocate key for deferred fetch\n"));
-		return -1;
-	}
-
-	k[0] = (key.dsize + 3) / 4 + 1;
-	memcpy(&k[1], key.dptr, key.dsize);
-
-	dfq = trbt_lookuparray32(ctdb_db->deferred_fetch, k[0], &k[0]);
-	if (dfq == NULL) {
-		talloc_free(k);
-		return -1;
-	}
-
-
-	talloc_free(k);
-
-	dfc = talloc(dfq, struct ctdb_deferred_fetch_call);
-	if (dfc == NULL) {
-		DEBUG(DEBUG_ERR, ("Failed to allocate deferred fetch call structure\n"));
-		return -1;
-	}
-
-	dfc->w = talloc(dfc, struct ctdb_daemon_packet_wrap);
-	if (dfc->w == NULL) {
-		DEBUG(DEBUG_ERR,("Failed to allocate deferred fetch daemon packet wrap structure\n"));
-		talloc_free(dfc);
-		return -1;
-	}
-
-	dfc->c = talloc_steal(dfc, c);
-	dfc->w->ctdb = ctdb_db->ctdb;
-	dfc->w->client_id = client->client_id;
-
-	DLIST_ADD_END(dfq->deferred_calls, dfc, NULL);
-
-	return 0;
-}
-
 
 /*
   this is called when the ctdb daemon received a ctdb request call
@@ -644,16 +423,6 @@ static void daemon_request_call_from_client(struct ctdb_client *client,
 		return;
 	}
 
-	if (ctdb->tunable.fetch_lock_collapse == 1) {
-		if (requeue_duplicate_fetch(ctdb_db, client, key, c) == 0) {
-			ret = ctdb_ltdb_unlock(ctdb_db, key);
-			if (ret != 0) {
-				DEBUG(DEBUG_ERR,(__location__ " ctdb_ltdb_unlock() failed with error %d\n", ret));
-			}
-			return;
-		}
-	}
-
 	dstate = talloc(client, struct daemon_call_state);
 	if (dstate == NULL) {
 		ret = ctdb_ltdb_unlock(ctdb_db, key);
@@ -693,14 +462,6 @@ static void daemon_request_call_from_client(struct ctdb_client *client,
 		state = ctdb_call_local_send(ctdb_db, call, &header, &data);
 	} else {
 		state = ctdb_daemon_call_send_remote(ctdb_db, call, &header);
-		if (ctdb->tunable.fetch_lock_collapse == 1) {
-			/* This request triggered a remote fetch-lock.
-			   set up a deferral for this key so any additional
-			   fetch-locks are deferred until the current one
-			   finishes.
-			 */
-			setup_deferred_fetch_locks(ctdb_db, call);
-		}
 	}
 
 	ret = ctdb_ltdb_unlock(ctdb_db, key);
diff --git a/server/ctdb_ltdb_server.c b/server/ctdb_ltdb_server.c
index 39dfdf3..3d18e06 100644
--- a/server/ctdb_ltdb_server.c
+++ b/server/ctdb_ltdb_server.c
@@ -905,17 +905,6 @@ again:
 		}
 	}
 
-	/* set up a rb tree we can use to track which records we have a 
-	   fetch-lock in-flight for so we can defer any additional calls
-	   for the same record.
-	 */
-	ctdb_db->deferred_fetch = trbt_create(ctdb_db, 0);
-	if (ctdb_db->deferred_fetch == NULL) {
-		DEBUG(DEBUG_ERR,("Failed to create deferred fetch rb tree for ctdb database\n"));
-		talloc_free(ctdb_db);
-		return -1;
-	}
-


-- 
CTDB repository

