[SCM] Samba Shared Repository - branch v3-6-test updated

Karolin Seeger kseeger at samba.org
Tue Jun 14 12:22:28 MDT 2011


The branch, v3-6-test has been updated
       via  2dec07d s3:libsmb/cli_np_tstream: s/TSTREAM_CLI_NP_BUF_SIZE/TSTREAM_CLI_NP_MAX_BUF_SIZE
       via  b13df1f s3:libsmb:cli_np_tstream: use dynamic talloc buffers
       via  a64603f s3:libsmb/cli_np_tstream: use larger buffers to avoid a bug NT4 servers (bug #8195)
      from  ea2b9ca s3:librpc/ndr/util.h: add license/copyright header and inclusion guard

http://gitweb.samba.org/?p=samba.git;a=shortlog;h=v3-6-test


- Log -----------------------------------------------------------------
commit 2dec07d94d9616501ce84c53896179c764949f93
Author: Stefan Metzmacher <metze at samba.org>
Date:   Tue Jun 7 18:49:55 2011 +0200

    s3:libsmb/cli_np_tstream: s/TSTREAM_CLI_NP_BUF_SIZE/TSTREAM_CLI_NP_MAX_BUF_SIZE
    
    This isn't the fixed buffer size anymore; as we use dynamic buffers,
    it's just the maximum size.
    
    metze
    (cherry picked from commit 957cfd23e1781e342edde07fc01dbec279088afa)
    
    The last 3 patches address bug #8195 (rpc client code doesn't work against NT4,
    when we need to fragment requests).

commit b13df1f6b987148d6743703929b9112cb25e2338
Author: Stefan Metzmacher <metze at samba.org>
Date:   Tue Jun 7 18:45:54 2011 +0200

    s3:libsmb:cli_np_tstream: use dynamic talloc buffers
    
    Having 8192 bytes on an idle connection is a bit too much,
    so we better use dynamic buffers using talloc, which also
    avoids a memcpy in the common SMBtrans readv codepath.
    
    metze
    (cherry picked from commit ad1cf187fdbcd726c6c74085308784fe4ecca883)

commit a64603f6633d61036fc6a7e626a4738cfe413b52
Author: Stefan Metzmacher <metze at samba.org>
Date:   Tue Jun 7 18:27:41 2011 +0200

    s3:libsmb/cli_np_tstream: use larger buffers to avoid a bug NT4 servers (bug #8195)
    
    NT4 servers return NT_STATUS_PIPE_BUSY if we try a SMBtrans
    and the SMBwriteX before hasn't transmitted the whole DCERPC fragment.
    
    W2K and above is happy with that.
    
    As a result we try to match the behavior of Windows and older Samba clients,
    they use write and read buffers of 4280 bytes instead of 1024 bytes.
    On Windows only the SMBtrans based read uses 1024 (while we also use 4280
    there).
    
    metze
    
    Autobuild-User: Stefan Metzmacher <metze at samba.org>
    Autobuild-Date: Tue Jun  7 20:25:32 CEST 2011 on sn-devel-104
    (cherry picked from commit c3ac298a1fe4f5cada6d09376e2d4a3df271a093)

-----------------------------------------------------------------------

Summary of changes:
 source3/libsmb/cli_np_tstream.c |   66 ++++++++++++++++++++++++++++++---------
 1 files changed, 51 insertions(+), 15 deletions(-)


Changeset truncated at 500 lines:

diff --git a/source3/libsmb/cli_np_tstream.c b/source3/libsmb/cli_np_tstream.c
index 99a7e4f..208f663 100644
--- a/source3/libsmb/cli_np_tstream.c
+++ b/source3/libsmb/cli_np_tstream.c
@@ -28,9 +28,24 @@
 static const struct tstream_context_ops tstream_cli_np_ops;
 
 /*
- * Window uses 1024 hardcoded for read size and trans max data
+ * Windows uses 4280 (the max xmit/recv size negotiated on DCERPC).
+ * This is fits into the max_xmit negotiated at the SMB layer.
+ *
+ * On the sending side they may use SMBtranss if the request does not
+ * fit into a single SMBtrans call.
+ *
+ * Windows uses 1024 as max data size of a SMBtrans request and then
+ * possibly reads the rest of the DCERPC fragment (up to 3256 bytes)
+ * via a SMBreadX.
+ *
+ * For now we just ask for the full 4280 bytes (max data size) in the SMBtrans
+ * request to get the whole fragment at once (like samba 3.5.x and below did.
+ *
+ * It is important that we use do SMBwriteX with the size of a full fragment,
+ * otherwise we may get NT_STATUS_PIPE_BUSY on the SMBtrans request
+ * from NT4 servers. (See bug #8195)
  */
-#define TSTREAM_CLI_NP_BUF_SIZE 1024
+#define TSTREAM_CLI_NP_MAX_BUF_SIZE 4280
 
 struct tstream_cli_np {
 	struct cli_state *cli;
@@ -48,7 +63,7 @@ struct tstream_cli_np {
 	struct {
 		off_t ofs;
 		size_t left;
-		uint8_t buf[TSTREAM_CLI_NP_BUF_SIZE];
+		uint8_t *buf;
 	} read, write;
 };
 
@@ -348,9 +363,26 @@ static void tstream_cli_np_writev_write_next(struct tevent_req *req)
 		tstream_context_data(state->stream,
 		struct tstream_cli_np);
 	struct tevent_req *subreq;
+	size_t i;
+	size_t left = 0;
+
+	for (i=0; i < state->count; i++) {
+		left += state->vector[i].iov_len;
+	}
+
+	if (left == 0) {
+		TALLOC_FREE(cli_nps->write.buf);
+		tevent_req_done(req);
+		return;
+	}
 
 	cli_nps->write.ofs = 0;
-	cli_nps->write.left = TSTREAM_CLI_NP_BUF_SIZE;
+	cli_nps->write.left = MIN(left, TSTREAM_CLI_NP_MAX_BUF_SIZE);
+	cli_nps->write.buf = talloc_realloc(cli_nps, cli_nps->write.buf,
+					    uint8_t, cli_nps->write.left);
+	if (tevent_req_nomem(cli_nps->write.buf, req)) {
+		return;
+	}
 
 	/*
 	 * copy the pending buffer first
@@ -376,11 +408,6 @@ static void tstream_cli_np_writev_write_next(struct tevent_req *req)
 		state->ret += len;
 	}
 
-	if (cli_nps->write.ofs == 0) {
-		tevent_req_done(req);
-		return;
-	}
-
 	if (cli_nps->trans.active && state->count == 0) {
 		cli_nps->trans.active = false;
 		cli_nps->trans.write_req = req;
@@ -620,6 +647,10 @@ static void tstream_cli_np_readv_read_next(struct tevent_req *req)
 		state->ret += len;
 	}
 
+	if (cli_nps->read.left == 0) {
+		TALLOC_FREE(cli_nps->read.buf);
+	}
+
 	if (state->count == 0) {
 		tevent_req_done(req);
 		return;
@@ -638,7 +669,7 @@ static void tstream_cli_np_readv_read_next(struct tevent_req *req)
 	}
 
 	subreq = cli_read_andx_send(state, state->ev, cli_nps->cli,
-				    cli_nps->fnum, 0, TSTREAM_CLI_NP_BUF_SIZE);
+				    cli_nps->fnum, 0, TSTREAM_CLI_NP_MAX_BUF_SIZE);
 	if (tevent_req_nomem(subreq, req)) {
 		return;
 	}
@@ -674,7 +705,7 @@ static void tstream_cli_np_readv_trans_start(struct tevent_req *req)
 				NULL, 0, 0,
 				cli_nps->write.buf,
 				cli_nps->write.ofs,
-				TSTREAM_CLI_NP_BUF_SIZE);
+				TSTREAM_CLI_NP_MAX_BUF_SIZE);
 	if (tevent_req_nomem(subreq, req)) {
 		return;
 	}
@@ -714,7 +745,7 @@ static void tstream_cli_np_readv_trans_done(struct tevent_req *subreq)
 		return;
 	}
 
-	if (received > TSTREAM_CLI_NP_BUF_SIZE) {
+	if (received > TSTREAM_CLI_NP_MAX_BUF_SIZE) {
 		tstream_cli_np_readv_disconnect_now(req, EIO, __location__);
 		return;
 	}
@@ -726,8 +757,7 @@ static void tstream_cli_np_readv_trans_done(struct tevent_req *subreq)
 
 	cli_nps->read.ofs = 0;
 	cli_nps->read.left = received;
-	memcpy(cli_nps->read.buf, rcvbuf, received);
-	TALLOC_FREE(rcvbuf);
+	cli_nps->read.buf = talloc_move(cli_nps, &rcvbuf);
 
 	if (cli_nps->trans.write_req == NULL) {
 		tstream_cli_np_readv_read_next(req);
@@ -789,7 +819,7 @@ static void tstream_cli_np_readv_read_done(struct tevent_req *subreq)
 		return;
 	}
 
-	if (received > TSTREAM_CLI_NP_BUF_SIZE) {
+	if (received > TSTREAM_CLI_NP_MAX_BUF_SIZE) {
 		TALLOC_FREE(subreq);
 		tstream_cli_np_readv_disconnect_now(req, EIO, __location__);
 		return;
@@ -803,6 +833,12 @@ static void tstream_cli_np_readv_read_done(struct tevent_req *subreq)
 
 	cli_nps->read.ofs = 0;
 	cli_nps->read.left = received;
+	cli_nps->read.buf = talloc_array(cli_nps, uint8_t, received);
+	if (cli_nps->read.buf == NULL) {
+		TALLOC_FREE(subreq);
+		tevent_req_nomem(cli_nps->read.buf, req);
+		return;
+	}
 	memcpy(cli_nps->read.buf, rcvbuf, received);
 	TALLOC_FREE(subreq);
 


-- 
Samba Shared Repository


More information about the samba-cvs mailing list