[SCM] Samba Shared Repository - branch v4-0-test updated

Karolin Seeger kseeger at samba.org
Wed Nov 7 02:36:03 MST 2012


The branch, v4-0-test has been updated
       via  e75c9fa s3:vfs_default: optimize vfswrap_asys_finished() and read as much as we can
       via  0bc75df lib/tsocket: optimize syscalls in tstream_readv_pdu_send()
       via  6b051af lib/tsocket: disable the syscall optimization for recvfrom/readv by default
       via  4641555 s3:smbd: pass the current time to make_connection[_smb1]()
      from  6195cb6 docs-xml: fix use of <smbconfoption> tag (fix bug #9345)

http://gitweb.samba.org/?p=samba.git;a=shortlog;h=v4-0-test


- Log -----------------------------------------------------------------
commit e75c9fa085bcccf86ca8b36661622ebee45f1613
Author: Stefan Metzmacher <metze at samba.org>
Date:   Fri Nov 2 12:52:51 2012 +0100

    s3:vfs_default: optimize vfswrap_asys_finished() and read as much as we can
    
    Signed-off-by: Stefan Metzmacher <metze at samba.org>
    
    Autobuild-User(master): Volker Lendecke <vl at samba.org>
    Autobuild-Date(master): Mon Nov  5 19:01:13 CET 2012 on sn-devel-104
    (cherry picked from commit c2ca9e02106108c024b0daf27325e8eba35437f2)
    
    Fix bug #9359 - Optimization needed for SMB2 performance sensitive workloads.
    
    Autobuild-User(v4-0-test): Karolin Seeger <kseeger at samba.org>
    Autobuild-Date(v4-0-test): Wed Nov  7 10:35:40 CET 2012 on sn-devel-104
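
    The core of the change (see the vfs_default.c hunk below) is a drain
    loop: the asys signalfd is switched to non-blocking mode and completed
    results are consumed until the fd would block, instead of handling one
    result per tevent wakeup.  A stand-alone sketch of the same pattern in
    plain POSIX C follows; it does not use Samba's asys API and all names
    in it are illustrative.

    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    static int set_nonblocking(int fd)
    {
            int flags = fcntl(fd, F_GETFL, 0);

            if (flags == -1) {
                    return -1;
            }
            return fcntl(fd, F_SETFL, flags | O_NONBLOCK);
    }

    /* Called whenever the event loop reports the fd readable. */
    static void drain_results(int fd)
    {
            for (;;) {
                    char result[64];
                    ssize_t n = read(fd, result, sizeof(result));

                    if (n == -1 && (errno == EAGAIN || errno == EWOULDBLOCK)) {
                            return;  /* queue drained, wait for the next event */
                    }
                    if (n == -1 && errno == EINTR) {
                            continue;
                    }
                    if (n <= 0) {
                            return;  /* error or end of file */
                    }
                    /* ... hand the completed result to its pending request ... */
                    printf("got %zd bytes\n", n);
            }
    }

    int main(void)
    {
            int fds[2];

            if (pipe(fds) == -1) {
                    return 1;
            }
            set_nonblocking(fds[0]);
            (void)write(fds[1], "one", 3);
            (void)write(fds[1], "two", 3);
            drain_results(fds[0]);  /* drains everything queued in one pass */
            return 0;
    }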

commit 0bc75dfc759d6866688a15b7f76a835d656103a3
Author: Stefan Metzmacher <metze at samba.org>
Date:   Fri Nov 2 13:56:53 2012 +0100

    lib/tsocket: optimize syscalls in tstream_readv_pdu_send()
    
    Once we've got the first part of a pdu we try to optimize
    readv calls for the rest of the pdu.
    
    Signed-off-by: Stefan Metzmacher <metze at samba.org>
    (cherry picked from commit 719595b6f7f8745f2608dddb2b86476b9cc2f598)
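
    The optimization is applied by toggling the new bsd-socket flag around
    the readv call once part of the PDU has arrived (see the
    tsocket_helpers.c hunk below).  A minimal sketch of that
    save/enable/restore pattern; tstream_bsd_optimize_readv() is added by
    this patch set and tstream_readv_send() is the existing tsocket call
    it wraps, while the wrapper function, its arguments and the
    pdu_started flag are illustrative.

    #include <stdbool.h>
    #include <sys/uio.h>    /* struct iovec */
    #include <talloc.h>
    #include <tevent.h>
    #include "tsocket.h"    /* tstream_readv_send(), tstream_bsd_optimize_readv() */

    static struct tevent_req *read_pdu_remainder(TALLOC_CTX *mem_ctx,
                                                 struct tevent_context *ev,
                                                 struct tstream_context *stream,
                                                 struct iovec *vector,
                                                 size_t count,
                                                 bool pdu_started)
    {
            struct tevent_req *subreq;
            bool save_optimize = false;

            if (pdu_started) {
                    /*
                     * Part of the PDU is already here, so the remainder
                     * is very likely readable right now: let the bsd
                     * backend try the readv immediately instead of
                     * waiting for the fd to become readable.  For
                     * non-bsd streams this is a no-op returning false.
                     */
                    save_optimize = tstream_bsd_optimize_readv(stream, true);
            }

            subreq = tstream_readv_send(mem_ctx, ev, stream, vector, count);

            if (pdu_started) {
                    /* Put back whatever the caller had configured. */
                    tstream_bsd_optimize_readv(stream, save_optimize);
            }

            return subreq;
    }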

commit 6b051af94159aa2bb95e3147d316514e9d1864d5
Author: Stefan Metzmacher <metze at samba.org>
Date:   Fri Nov 2 13:45:49 2012 +0100

    lib/tsocket: disable the syscall optimization for recvfrom/readv by default
    
    We only do the optimization on recvfrom/readv if the caller asked for it.
    
    This is needed because in most cases we prefer to flush send
    buffers before receiving incoming requests.
    
    Signed-off-by: Stefan Metzmacher <metze at samba.org>
    (cherry picked from commit e42889f83f261e2ac34014649476fae638a6e1f2)
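
    The flag is therefore opt-in.  A minimal sketch of a caller that wants
    the recvfrom fast path for a datagram socket, using
    tdgram_bsd_optimize_recvfrom() from this patch together with the
    existing tdgram_recvfrom_send(); the wrapper function itself is
    illustrative.

    #include <stdbool.h>
    #include <talloc.h>
    #include <tevent.h>
    #include "tsocket.h"    /* tdgram_recvfrom_send(), tdgram_bsd_optimize_recvfrom() */

    static struct tevent_req *recvfrom_fast_path(TALLOC_CTX *mem_ctx,
                                                 struct tevent_context *ev,
                                                 struct tdgram_context *dgram)
    {
            struct tevent_req *req;
            bool save;

            /*
             * Off by default since this change; only enable it when
             * there is nothing queued that should be flushed first.
             * Returns the previous setting and is a no-op on non-bsd
             * sockets.
             */
            save = tdgram_bsd_optimize_recvfrom(dgram, true);

            req = tdgram_recvfrom_send(mem_ctx, ev, dgram);

            /* Restore the previous setting for later receives. */
            tdgram_bsd_optimize_recvfrom(dgram, save);

            return req;
    }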

commit 4641555fffbb0ba6527dbfa465833ee83cb915d6
Author: Stefan Metzmacher <metze at samba.org>
Date:   Thu Nov 1 13:53:27 2012 +0100

    s3:smbd: pass the current time to make_connection[_smb1]()
    
    Otherwise smbstatus reports the wrong time for tree connects.
    
    Signed-off-by: Stefan Metzmacher <metze at samba.org>
    Reviewed-by: Christian Ambach <ambi at samba.org>
    
    Autobuild-User(master): Christian Ambach <ambi at samba.org>
    Autobuild-Date(master): Mon Nov  5 20:43:23 CET 2012 on sn-devel-104
    (cherry picked from commit 3d9361612d30725a3d14fa4d3a085256a91800db)
    
    Fix bug #9360 - SMB1 tree connect time is wrong.
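
    The value passed down is an NTTIME derived from the request's arrival
    time via timeval_to_nttime(), as the reply.c hunk below shows.  A
    stand-alone illustration of what that value holds (100-nanosecond
    units since 1601-01-01); the conversion helper and the epoch constant
    here are local to the example, not Samba's own.

    #include <stdint.h>
    #include <stdio.h>
    #include <sys/time.h>

    /* Seconds between 1601-01-01 and the Unix epoch (1970-01-01). */
    #define EPOCH_DELTA_1601_1970 11644473600ULL

    static uint64_t timeval_to_nt_units(const struct timeval *tv)
    {
            uint64_t secs = (uint64_t)tv->tv_sec + EPOCH_DELTA_1601_1970;

            /* NTTIME counts 100ns intervals. */
            return secs * 10000000ULL + (uint64_t)tv->tv_usec * 10ULL;
    }

    int main(void)
    {
            struct timeval request_time;

            gettimeofday(&request_time, NULL);
            printf("tree connect time as NTTIME: %llu\n",
                   (unsigned long long)timeval_to_nt_units(&request_time));
            return 0;
    }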

-----------------------------------------------------------------------

Summary of changes:
 lib/tsocket/tsocket.h         |   42 +++++++++++++++++++++++
 lib/tsocket/tsocket_bsd.c     |   73 +++++++++++++++++++++++++++++++++++++---
 lib/tsocket/tsocket_helpers.c |   29 ++++++++++++++++
 source3/modules/vfs_default.c |   49 ++++++++++++++++++---------
 source3/smbd/proto.h          |    1 +
 source3/smbd/reply.c          |    5 ++-
 source3/smbd/service.c        |    9 +++--
 7 files changed, 180 insertions(+), 28 deletions(-)


Changeset truncated at 500 lines:

diff --git a/lib/tsocket/tsocket.h b/lib/tsocket/tsocket.h
index 3aca536..98f864e 100644
--- a/lib/tsocket/tsocket.h
+++ b/lib/tsocket/tsocket.h
@@ -627,6 +627,27 @@ int _tsocket_address_unix_from_path(TALLOC_CTX *mem_ctx,
 char *tsocket_address_unix_path(const struct tsocket_address *addr,
 				TALLOC_CTX *mem_ctx);
 
+/**
+ * @brief Request a syscall optimization for tdgram_recvfrom_send()
+ *
+ * This function is only used to reduce the amount of syscalls and
+ * optimize performance. You should only use this if you know
+ * what you're doing.
+ *
+ * The optimization is off by default.
+ *
+ * @param[in]  dgram    The tdgram_context of a bsd socket, if this
+ *                      not a bsd socket the function does nothing.
+ *
+ * @param[in]  on       The boolean value to turn the optimization on and off.
+ *
+ * @return              The old boolean value.
+ *
+ * @see tdgram_recvfrom_send()
+ */
+bool tdgram_bsd_optimize_recvfrom(struct tdgram_context *dgram,
+				  bool on);
+
 #ifdef DOXYGEN
 /**
  * @brief Create a tdgram_context for a ipv4 or ipv6 UDP communication.
@@ -689,6 +710,27 @@ int _tdgram_unix_socket(const struct tsocket_address *local,
 #endif
 
 /**
+ * @brief Request a syscall optimization for tstream_readv_send()
+ *
+ * This function is only used to reduce the amount of syscalls and
+ * optimize performance. You should only use this if you know
+ * what you're doing.
+ *
+ * The optimization is off by default.
+ *
+ * @param[in]  stream   The tstream_context of a bsd socket, if this
+ *                      not a bsd socket the function does nothing.
+ *
+ * @param[in]  on       The boolean value to turn the optimization on and off.
+ *
+ * @return              The old boolean value.
+ *
+ * @see tstream_readv_send()
+ */
+bool tstream_bsd_optimize_readv(struct tstream_context *stream,
+				bool on);
+
+/**
  * @brief Connect async to a TCP endpoint and create a tstream_context for the
  * stream based communication.
  *
diff --git a/lib/tsocket/tsocket_bsd.c b/lib/tsocket/tsocket_bsd.c
index 135fd02..56dff68 100644
--- a/lib/tsocket/tsocket_bsd.c
+++ b/lib/tsocket/tsocket_bsd.c
@@ -654,6 +654,7 @@ struct tdgram_bsd {
 
 	void *event_ptr;
 	struct tevent_fd *fde;
+	bool optimize_recvfrom;
 
 	void *readable_private;
 	void (*readable_handler)(void *private_data);
@@ -661,6 +662,25 @@ struct tdgram_bsd {
 	void (*writeable_handler)(void *private_data);
 };
 
+bool tdgram_bsd_optimize_recvfrom(struct tdgram_context *dgram,
+				  bool on)
+{
+	struct tdgram_bsd *bsds =
+		talloc_get_type(_tdgram_context_data(dgram),
+		struct tdgram_bsd);
+	bool old;
+
+	if (bsds == NULL) {
+		/* not a bsd socket */
+		return false;
+	}
+
+	old = bsds->optimize_recvfrom;
+	bsds->optimize_recvfrom = on;
+
+	return old;
+}
+
 static void tdgram_bsd_fde_handler(struct tevent_context *ev,
 				   struct tevent_fd *fde,
 				   uint16_t flags,
@@ -838,14 +858,25 @@ static struct tevent_req *tdgram_bsd_recvfrom_send(TALLOC_CTX *mem_ctx,
 		goto post;
 	}
 
+
 	/*
 	 * this is a fast path, not waiting for the
 	 * socket to become explicit readable gains
 	 * about 10%-20% performance in benchmark tests.
 	 */
-	tdgram_bsd_recvfrom_handler(req);
-	if (!tevent_req_is_in_progress(req)) {
-		goto post;
+	if (bsds->optimize_recvfrom) {
+		/*
+		 * We only do the optimization on
+		 * recvfrom if the caller asked for it.
+		 *
+		 * This is needed because in most cases
+		 * we preferr to flush send buffers before
+		 * receiving incoming requests.
+		 */
+		tdgram_bsd_recvfrom_handler(req);
+		if (!tevent_req_is_in_progress(req)) {
+			goto post;
+		}
 	}
 
 	ret = tdgram_bsd_set_readable_handler(bsds, ev,
@@ -1405,6 +1436,7 @@ struct tstream_bsd {
 
 	void *event_ptr;
 	struct tevent_fd *fde;
+	bool optimize_readv;
 
 	void *readable_private;
 	void (*readable_handler)(void *private_data);
@@ -1412,6 +1444,25 @@ struct tstream_bsd {
 	void (*writeable_handler)(void *private_data);
 };
 
+bool tstream_bsd_optimize_readv(struct tstream_context *stream,
+				bool on)
+{
+	struct tstream_bsd *bsds =
+		talloc_get_type(_tstream_context_data(stream),
+		struct tstream_bsd);
+	bool old;
+
+	if (bsds == NULL) {
+		/* not a bsd socket */
+		return false;
+	}
+
+	old = bsds->optimize_readv;
+	bsds->optimize_readv = on;
+
+	return old;
+}
+
 static void tstream_bsd_fde_handler(struct tevent_context *ev,
 				    struct tevent_fd *fde,
 				    uint16_t flags,
@@ -1624,9 +1675,19 @@ static struct tevent_req *tstream_bsd_readv_send(TALLOC_CTX *mem_ctx,
 	 * socket to become explicit readable gains
 	 * about 10%-20% performance in benchmark tests.
 	 */
-	tstream_bsd_readv_handler(req);
-	if (!tevent_req_is_in_progress(req)) {
-		goto post;
+	if (bsds->optimize_readv) {
+		/*
+		 * We only do the optimization on
+		 * readv if the caller asked for it.
+		 *
+		 * This is needed because in most cases
+		 * we preferr to flush send buffers before
+		 * receiving incoming requests.
+		 */
+		tstream_bsd_readv_handler(req);
+		if (!tevent_req_is_in_progress(req)) {
+			goto post;
+		}
 	}
 
 	ret = tstream_bsd_set_readable_handler(bsds, ev,
diff --git a/lib/tsocket/tsocket_helpers.c b/lib/tsocket/tsocket_helpers.c
index 1b92b9f..49c6840 100644
--- a/lib/tsocket/tsocket_helpers.c
+++ b/lib/tsocket/tsocket_helpers.c
@@ -215,6 +215,20 @@ static void tstream_readv_pdu_ask_for_next_vector(struct tevent_req *req)
 	size_t to_read = 0;
 	size_t i;
 	struct tevent_req *subreq;
+	bool optimize = false;
+	bool save_optimize = false;
+
+	if (state->count > 0) {
+		/*
+		 * This is not the first time we asked for a vector,
+		 * which means parts of the pdu already arrived.
+		 *
+		 * In this case it make sense to enable
+		 * a syscall/performance optimization if the
+		 * low level tstream implementation supports it.
+		 */
+		optimize = true;
+	}
 
 	TALLOC_FREE(state->vector);
 	state->count = 0;
@@ -258,11 +272,26 @@ static void tstream_readv_pdu_ask_for_next_vector(struct tevent_req *req)
 		return;
 	}
 
+	if (optimize) {
+		/*
+		 * If the low level stream is a bsd socket
+		 * we will get syscall optimization.
+		 *
+		 * If it is not a bsd socket
+		 * tstream_bsd_optimize_readv() just returns.
+		 */
+		save_optimize = tstream_bsd_optimize_readv(state->caller.stream,
+							   true);
+	}
 	subreq = tstream_readv_send(state,
 				    state->caller.ev,
 				    state->caller.stream,
 				    state->vector,
 				    state->count);
+	if (optimize) {
+		tstream_bsd_optimize_readv(state->caller.stream,
+					   save_optimize);
+	}
 	if (tevent_req_nomem(subreq, req)) {
 		return;
 	}
diff --git a/source3/modules/vfs_default.c b/source3/modules/vfs_default.c
index 8392feb..ebfa607 100644
--- a/source3/modules/vfs_default.c
+++ b/source3/modules/vfs_default.c
@@ -637,6 +637,7 @@ static void vfswrap_asys_finished(struct tevent_context *ev,
 static bool vfswrap_init_asys_ctx(struct smbXsrv_connection *conn)
 {
 	int ret;
+	int fd;
 
 	if (conn->asys_ctx != NULL) {
 		return true;
@@ -646,8 +647,12 @@ static bool vfswrap_init_asys_ctx(struct smbXsrv_connection *conn)
 		DEBUG(1, ("asys_context_init failed: %s\n", strerror(ret)));
 		return false;
 	}
-	conn->asys_fde = tevent_add_fd(conn->ev_ctx, conn,
-				       asys_signalfd(conn->asys_ctx),
+
+	fd = asys_signalfd(conn->asys_ctx);
+
+	set_blocking(fd, false);
+
+	conn->asys_fde = tevent_add_fd(conn->ev_ctx, conn, fd,
 				       TEVENT_FD_READ,
 				       vfswrap_asys_finished,
 				       conn->asys_ctx);
@@ -783,24 +788,36 @@ static void vfswrap_asys_finished(struct tevent_context *ev,
 		return;
 	}
 
-	res = asys_result(asys_ctx, &ret, &err, &private_data);
-	if (res == ECANCELED) {
-		return;
-	}
+	while (true) {
+		res = asys_result(asys_ctx, &ret, &err, &private_data);
+		if (res == EINTR || res == EAGAIN) {
+			return;
+		}
+#ifdef EWOULDBLOCK
+		if (res == EWOULDBLOCK) {
+			return;
+		}
+#endif
 
-	if (res != 0) {
-		DEBUG(1, ("asys_result returned %s\n", strerror(res)));
-		return;
-	}
+		if (res == ECANCELED) {
+			return;
+		}
 
-	req = talloc_get_type_abort(private_data, struct tevent_req);
-	state = tevent_req_data(req, struct vfswrap_asys_state);
+		if (res != 0) {
+			DEBUG(1, ("asys_result returned %s\n", strerror(res)));
+			return;
+		}
+
+		req = talloc_get_type_abort(private_data, struct tevent_req);
+		state = tevent_req_data(req, struct vfswrap_asys_state);
 
-	talloc_set_destructor(state, NULL);
+		talloc_set_destructor(state, NULL);
 
-	state->ret = ret;
-	state->err = err;
-	tevent_req_done(req);
+		state->ret = ret;
+		state->err = err;
+		tevent_req_defer_callback(req, ev);
+		tevent_req_done(req);
+	}
 }
 
 static ssize_t vfswrap_asys_ssize_t_recv(struct tevent_req *req, int *err)
diff --git a/source3/smbd/proto.h b/source3/smbd/proto.h
index 4b0bdf1..a4b2fab 100644
--- a/source3/smbd/proto.h
+++ b/source3/smbd/proto.h
@@ -971,6 +971,7 @@ connection_struct *make_connection_smb2(struct smbd_server_connection *sconn,
 					const char *pdev,
 					NTSTATUS *pstatus);
 connection_struct *make_connection(struct smbd_server_connection *sconn,
+				   NTTIME now,
 				   const char *service_in,
 				   const char *pdev, uint64_t vuid,
 				   NTSTATUS *status);
diff --git a/source3/smbd/reply.c b/source3/smbd/reply.c
index 4423e8e..8db9c62 100644
--- a/source3/smbd/reply.c
+++ b/source3/smbd/reply.c
@@ -666,6 +666,7 @@ void reply_tcon(struct smb_request *req)
 	const char *p;
 	TALLOC_CTX *ctx = talloc_tos();
 	struct smbd_server_connection *sconn = req->sconn;
+	NTTIME now = timeval_to_nttime(&req->request_time);
 
 	START_PROFILE(SMBtcon);
 
@@ -695,7 +696,7 @@ void reply_tcon(struct smb_request *req)
 		service = service_buf;
 	}
 
-	conn = make_connection(sconn,service,dev,
+	conn = make_connection(sconn, now, service, dev,
 			       req->vuid,&nt_status);
 	req->conn = conn;
 
@@ -911,7 +912,7 @@ void reply_tcon_and_X(struct smb_request *req)
 		session_key_updated = true;
 	}
 
-	conn = make_connection(sconn, service, client_devicetype,
+	conn = make_connection(sconn, now, service, client_devicetype,
 			       req->vuid, &nt_status);
 	req->conn =conn;
 
diff --git a/source3/smbd/service.c b/source3/smbd/service.c
index bb28fbf..2214ac0 100644
--- a/source3/smbd/service.c
+++ b/source3/smbd/service.c
@@ -922,13 +922,13 @@ static NTSTATUS make_connection_snum(struct smbd_server_connection *sconn,
 ****************************************************************************/
 
 static connection_struct *make_connection_smb1(struct smbd_server_connection *sconn,
+					NTTIME now,
 					int snum, struct user_struct *vuser,
 					const char *pdev,
 					NTSTATUS *pstatus)
 {
 	struct smbXsrv_tcon *tcon;
 	NTSTATUS status;
-	NTTIME now = 0;
 	struct connection_struct *conn;
 
 	status = smb1srv_tcon_create(sconn->conn, now, &tcon);
@@ -1025,6 +1025,7 @@ connection_struct *make_connection_smb2(struct smbd_server_connection *sconn,
 ****************************************************************************/
 
 connection_struct *make_connection(struct smbd_server_connection *sconn,
+				   NTTIME now,
 				   const char *service_in,
 				   const char *pdev, uint64_t vuid,
 				   NTSTATUS *status)
@@ -1078,7 +1079,7 @@ connection_struct *make_connection(struct smbd_server_connection *sconn,
 		}
 		DEBUG(5, ("making a connection to [homes] service "
 			  "created at session setup time\n"));
-		return make_connection_smb1(sconn,
+		return make_connection_smb1(sconn, now,
 					    vuser->homes_snum,
 					    vuser,
 					    dev, status);
@@ -1087,7 +1088,7 @@ connection_struct *make_connection(struct smbd_server_connection *sconn,
 			       lp_servicename(talloc_tos(), vuser->homes_snum))) {
 		DEBUG(5, ("making a connection to 'homes' service [%s] "
 			  "created at session setup time\n", service_in));
-		return make_connection_smb1(sconn,
+		return make_connection_smb1(sconn, now,
 					    vuser->homes_snum,
 					    vuser,
 					    dev, status);
@@ -1139,7 +1140,7 @@ connection_struct *make_connection(struct smbd_server_connection *sconn,
 
 	DEBUG(5, ("making a connection to 'normal' service %s\n", service));
 
-	return make_connection_smb1(sconn, snum, vuser,
+	return make_connection_smb1(sconn, now, snum, vuser,
 				    dev, status);
 }
 


-- 
Samba Shared Repository

