[SCM] Samba Shared Repository - branch master updated

Ira Cooper ira at samba.org
Wed Jan 21 12:41:03 MST 2015


The branch, master has been updated
       via  4c3a3d9 vfs_glusterfs: Replace eventfd with pipes, for AIO use
      from  87c5795 libcli/auth: add netlogon_creds_cli_GetForestTrustInformation*()

https://git.samba.org/?p=samba.git;a=shortlog;h=master


- Log -----------------------------------------------------------------
commit 4c3a3d9e6adc95d0f0e1f6030b2406613d9f9f53
Author: Ira Cooper <ira at samba.org>
Date:   Mon Jan 19 23:08:17 2015 -0500

    vfs_glusterfs: Replace eventfd with pipes, for AIO use
    
    Pipes clean up the AIO implementation substantially, because they
    give us a natural thread-safe queue instead of making us maintain
    our own (a sketch of the pattern follows the log below).
    
    Signed-off-by: Ira Cooper <ira at samba.org>
    Signed-off-by: Poornima G <pgurusid at redhat.com>
    Reviewed-by: Günther Deschner <gd at samba.org>
    Reviewed-by: Michael Adam <obnox at samba.org>
    
    Autobuild-User(master): Ira Cooper <ira at samba.org>
    Autobuild-Date(master): Wed Jan 21 20:40:11 CET 2015 on sn-devel-104

-----------------------------------------------------------------------
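
The pipe-as-queue pattern the commit message describes can be
demonstrated outside of Samba in a few lines. The sketch below is an
illustration only, not code from this patch; the names queue_fds,
work_item and worker are made up for the example. A producer thread
enqueues by writing a pointer into the pipe, and the consumer thread
dequeues by reading a pointer-sized chunk back out.

/* Illustrative sketch, not from the patch: a pipe as a thread-safe
 * queue carrying pointers between threads. Build with: cc -pthread */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static int queue_fds[2]; /* [0] = read end, [1] = write end */

struct work_item {
	int id;
};

static void *worker(void *arg)
{
	struct work_item *item = malloc(sizeof(*item));

	if (item == NULL) {
		return NULL;
	}
	item->id = 42;
	/* Enqueue: the pipe carries the pointer value itself. */
	if (write(queue_fds[1], &item, sizeof(item)) != sizeof(item)) {
		perror("write");
	}
	return NULL;
}

int main(void)
{
	pthread_t t;
	struct work_item *item = NULL;

	if (pipe(queue_fds) == -1) {
		perror("pipe");
		return 1;
	}
	if (pthread_create(&t, NULL, worker, NULL) != 0) {
		return 1;
	}

	/* Dequeue: one pointer-sized read hands the item over. */
	if (read(queue_fds[0], &item, sizeof(item)) != sizeof(item)) {
		perror("read");
		return 1;
	}
	printf("completed work item %d\n", item->id);
	free(item);

	pthread_join(t, NULL);
	close(queue_fds[0]);
	close(queue_fds[1]);
	return 0;
}

Because POSIX guarantees that writes of at most PIPE_BUF bytes (at
least 512) are atomic, pointer-sized writes from many glusterfs
worker threads cannot interleave, which is what makes the pipe a
thread-safe queue without an explicit mutex.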

Summary of changes:
 source3/modules/vfs_glusterfs.c | 137 ++++++++++++----------------------------
 source3/wscript                 |   1 -
 2 files changed, 41 insertions(+), 97 deletions(-)


Changeset truncated at 500 lines:

diff --git a/source3/modules/vfs_glusterfs.c b/source3/modules/vfs_glusterfs.c
index 10c3a22..12ec0b7 100644
--- a/source3/modules/vfs_glusterfs.c
+++ b/source3/modules/vfs_glusterfs.c
@@ -41,22 +41,15 @@
 #include "api/glfs.h"
 #include "lib/util/dlinklist.h"
 #include "lib/util/tevent_unix.h"
-#ifdef HAVE_SYS_EVENTFD_H
-#include <sys/eventfd.h>
-#endif
-#include <pthread.h>
+#include "lib/tevent/tevent_internal.h"
 #include "smbd/globals.h"
+#include "lib/sys_rw.h"
 
 #define DEFAULT_VOLFILE_SERVER "localhost"
 
-#ifdef HAVE_EVENTFD
-static pthread_mutex_t lock_req_list = PTHREAD_MUTEX_INITIALIZER;
-static int event_fd = -1;
+static int read_fd = -1;
+static int write_fd = -1;
 static struct tevent_fd *aio_read_event = NULL;
-static struct tevent_req **req_producer_list = NULL;
-static struct tevent_req **req_consumer_list = NULL;
-static uint64_t req_counter = 0;
-#endif
 
 /**
  * Helper to convert struct stat to struct stat_ex.
@@ -503,15 +496,13 @@ struct glusterfs_aio_state {
 /*
  * This function is the callback that will be called on glusterfs
  * threads once the async IO submitted is complete. To notify
- * Samba of the completion we use eventfd mechanism.
+ * Samba of the completion we use a pipe-based queue.
  */
 static void aio_glusterfs_done(glfs_fd_t *fd, ssize_t ret, void *data)
 {
-#if HAVE_EVENTFD
 	struct tevent_req *req = NULL;
 	struct glusterfs_aio_state *state = NULL;
-	int i, sts = 0;
-	uint64_t u = 1;
+	int sts = 0;
 
 	req = talloc_get_type_abort(data, struct tevent_req);
 	state = tevent_req_data(req, struct glusterfs_aio_state);
@@ -525,88 +516,65 @@ static void aio_glusterfs_done(glfs_fd_t *fd, ssize_t ret, void *data)
 	}
 
 	/*
-	 * Store the reqs that needs to be completed by calling
-	 * tevent_req_done(). tevent_req_done() cannot be called
-	 * here, as it is not designed to be executed in the
-	 * multithread environment, tevent_req_done() should be
+	 * Write the pointer to each req that needs to be completed
+	 * by calling tevent_req_done(). tevent_req_done() cannot
+	 * be called here, as it is not designed to be executed
+	 * in a multithreaded environment; tevent_req_done() must be
 	 * executed from the smbd main thread.
 	 */
-	pthread_mutex_lock (&lock_req_list);
-	{
-		for (i = 0 ; i < aio_pending_size ; i++) {
-			if(!req_producer_list[i]) {
-				req_producer_list[i] = req;
-				req_counter = req_counter + 1;
-				break;
-			}
-		}
-	}
-	pthread_mutex_unlock (&lock_req_list);
 
-	/*
-	 * For a bunch of fops notify only once
-	 */
-	if (req_counter == 1) {
-		sts = write (event_fd, &u, sizeof(uint64_t));
-		if (sts < 0 && errno == EAGAIN)
-			DEBUG(0,("\nWRITE: reached max value"));
+	sts = sys_write(write_fd, &req, sizeof(struct tevent_req *));
+	if (sts < 0) {
+		DEBUG(0,("\nWrite to pipe failed (%s)", strerror(errno)));
 	}
+
 	return;
-#endif
 }
 
-#ifdef HAVE_EVENTFD
+/*
+ * Read each req off the pipe and process it.
+ */
 static void aio_tevent_fd_done(struct tevent_context *event_ctx,
 				struct tevent_fd *fde,
 				uint16 flags, void *data)
 {
 	struct tevent_req *req = NULL;
-	struct tevent_req **temp = NULL;
-	int i = 0, sts = 0;
-	uint64_t u = 0;
-
-	sts = read (event_fd, &u, sizeof(uint64_t));
-	if (sts < 0 && errno == EAGAIN)
-		DEBUG(0,("\nREAD: eventfd read failed (%s)",strerror(errno)));
-
-	pthread_mutex_lock (&lock_req_list);
-	{
-		temp = req_producer_list;
-		req_producer_list = req_consumer_list;
-		req_consumer_list = temp;
-		req_counter = 0;
-	}
-	pthread_mutex_unlock (&lock_req_list);
-
-	for (i = 0 ; i < aio_pending_size ; i++) {
-		req = req_consumer_list[i];
-		if (req) {
-			tevent_req_done(req);
-			req_consumer_list[i] = 0;
-		}
+	int sts = 0;
+
+	sts = sys_read(read_fd, &req, sizeof(struct tevent_req *));
+	if (sts < 0) {
+		DEBUG(0,("\nRead from pipe failed (%s)", strerror(errno)));
+	}
+
+	if (req) {
+		tevent_req_done(req);
 	}
 	return;
 }
-#endif
 
 static bool init_gluster_aio(struct vfs_handle_struct *handle)
 {
-#ifdef HAVE_EVENTFD
-	if (event_fd != -1) {
+	int fds[2];
+	int ret = -1;
+
+	if (read_fd != -1) {
 		/*
 		 * Already initialized.
 		 */
 		return true;
 	}
 
-	event_fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
-	if (event_fd == -1) {
+	ret = pipe(fds);
+	if (ret == -1) {
 		goto fail;
 	}
 
+	read_fd = fds[0];
+	write_fd = fds[1];
+
 	aio_read_event = tevent_add_fd(handle->conn->sconn->ev_ctx,
 					NULL,
-					event_fd,
+					read_fd,
 					TEVENT_FD_READ,
 					aio_tevent_fd_done,
 					NULL);
@@ -614,19 +582,15 @@ static bool init_gluster_aio(struct vfs_handle_struct *handle)
 		goto fail;
 	}
 
-	req_producer_list = talloc_zero_array(NULL, struct tevent_req *,
-						aio_pending_size);
-	req_consumer_list = talloc_zero_array(NULL, struct tevent_req *,
-						aio_pending_size);
-
 	return true;
 fail:
 	TALLOC_FREE(aio_read_event);
-	if (event_fd != -1) {
-		close(event_fd);
-		event_fd = -1;
+	if (read_fd != -1) {
+		close(read_fd);
+		close(write_fd);
+		read_fd = -1;
+		write_fd = -1;
 	}
-#endif
 	return false;
 }
 
@@ -640,11 +604,6 @@ static struct tevent_req *vfs_gluster_pread_send(struct vfs_handle_struct
 	struct glusterfs_aio_state *state = NULL;
 	int ret = 0;
 
-#ifndef HAVE_EVENTFD
-	errno = ENOTSUP;
-	return NULL;
-#endif
-
 	req = tevent_req_create(mem_ctx, &state, struct glusterfs_aio_state);
 	if (req == NULL) {
 		return NULL;
@@ -689,11 +648,6 @@ static struct tevent_req *vfs_gluster_pwrite_send(struct vfs_handle_struct
 	struct glusterfs_aio_state *state = NULL;
 	int ret = 0;
 
-#ifndef HAVE_EVENTFD
-	errno = ENOTSUP;
-	return NULL;
-#endif
-
 	req = tevent_req_create(mem_ctx, &state, struct glusterfs_aio_state);
 	if (req == NULL) {
 		return NULL;
@@ -716,10 +670,6 @@ static ssize_t vfs_gluster_recv(struct tevent_req *req, int *err)
 {
 	struct glusterfs_aio_state *state = NULL;
 
-#ifndef HAVE_EVENTFD
-	errno = ENOTSUP;
-	return -1;
-#endif
 	state = tevent_req_data(req, struct glusterfs_aio_state);
 	if (state == NULL) {
 		return -1;
@@ -780,11 +730,6 @@ static struct tevent_req *vfs_gluster_fsync_send(struct vfs_handle_struct
 	struct glusterfs_aio_state *state = NULL;
 	int ret = 0;
 
-#ifndef HAVE_EVENTFD
-	errno = ENOTSUP;
-	return NULL;
-#endif
-
 	req = tevent_req_create(mem_ctx, &state, struct glusterfs_aio_state);
 	if (req == NULL) {
 		return NULL;
diff --git a/source3/wscript b/source3/wscript
index b7e7dcc..bb0f19d 100644
--- a/source3/wscript
+++ b/source3/wscript
@@ -551,7 +551,6 @@ return acl_get_perm_np(permset_d, perm);
         conf.DEFINE('HAVE_NO_AIO', '1')
 
     if host_os.rfind('linux') > -1:
-	conf.CHECK_FUNCS('eventfd')
 	conf.CHECK_FUNCS_IN('io_submit', 'aio')
 	conf.CHECK_CODE('''
 struct io_event ioev;
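
The new code moves the request pointer through the pipe with
sys_write() and sys_read() from the newly included lib/sys_rw.h. The
sketch below is a reconstruction for illustration, not the actual
Samba helper: wrappers of this kind typically retry the raw syscall
while it fails with EINTR, so a signal delivered to smbd does not
surface as a spurious error.

#include <errno.h>
#include <sys/types.h>
#include <unistd.h>

/*
 * Illustrative stand-in for a sys_read()-style helper; the real one
 * lives in Samba's lib/sys_rw.c and may differ in detail.
 */
static ssize_t retrying_read(int fd, void *buf, size_t count)
{
	ssize_t ret;

	do {
		ret = read(fd, buf, count);
	} while (ret == -1 && errno == EINTR);

	return ret;
}

Since the writer puts each pointer into the pipe with a single atomic
write, a read of sizeof(struct tevent_req *) bytes sees either the
whole pointer or nothing; checking for a failed result anyway, as the
patch does, is the conservative choice.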


-- 
Samba Shared Repository

