[SCM] Samba Shared Repository - branch master updated

Stefan Metzmacher metze at samba.org
Wed Dec 15 08:13:02 MST 2010


The branch, master has been updated
       via  8b47fce s3:rpc_client: implement rpc_transport_np_init() on top of rpc_transport_tstream_init()
       via  0342ed3 s3:rpc_client: implement rpc_transport_sock_init() on top of rpc_transport_tstream_init()
       via  122dc1e s3:rpc_client/rpc_transport_tstream.c: add some logic to handle cli_np tstreams
       via  9a14dac s3:rpc_client/rpc_transport_tstream: timeout should be unsigned int
       via  e55426f s3:rpc_client: let rpc_transport_tstream_init() create read and write queue
       via  07ba35a s3:libsmb: add tstream_cli_np_get_cli_state()
       via  c0f9e96 s3:libsmb: add tstream_cli_np_set_timeout()
       via  0df669e s3:libsmb: add tstream_cli_np_use_trans() and the needed logic
       via  e08c324 s3:libsmb: add tstream_is_cli_np()
       via  c0ac1ce s3:libsmb: add cli_np_tstream.c
      from  2e259de s4-dsdb/tests/python: Explicitly pass command line LoadParm() instance to system_session()

http://gitweb.samba.org/?p=samba.git;a=shortlog;h=master


- Log -----------------------------------------------------------------
commit 8b47fcea043086e4ec7abbc4d4394f69caac08d0
Author: Stefan Metzmacher <metze at samba.org>
Date:   Mon Sep 6 17:31:15 2010 +0200

    s3:rpc_client: implement rpc_transport_np_init() on top of rpc_transport_tstream_init()
    
    This also makes use of tstream_cli_np_open() to get the tstream.
    
    metze
    
    Autobuild-User: Stefan Metzmacher <metze at samba.org>
    Autobuild-Date: Wed Dec 15 16:12:30 CET 2010 on sn-devel-104

commit 0342ed3ee0f8d9f29c6fcabbca1fab4f6514b78b
Author: Stefan Metzmacher <metze at samba.org>
Date:   Mon Sep 6 17:31:27 2010 +0200

    s3:rpc_client: implement rpc_transport_sock_init() on top of rpc_transport_tstream_init()
    
    metze

commit 122dc1e213d1049568cc49f305f82063b3fa20ba
Author: Stefan Metzmacher <metze at samba.org>
Date:   Tue Dec 14 18:20:25 2010 +0100

    s3:rpc_client/rpc_transport_tstream.c: add some logic to handle cli_np tstreams
    
    metze

commit 9a14dace227245c7b5bb751f7c534e338337bea9
Author: Stefan Metzmacher <metze at samba.org>
Date:   Tue Dec 14 18:19:14 2010 +0100

    s3:rpc_client/rpc_transport_tstream: timeout should be unsigned int
    
    metze

commit e55426fe7926ae6f8afe5fa6cfc009e0c3b54e38
Author: Stefan Metzmacher <metze at samba.org>
Date:   Tue Jun 22 18:01:45 2010 -0400

    s3:rpc_client: let rpc_transport_tstream_init() create read and write queue
    
    metze

commit 07ba35adcbb9feb551125034f091e8d4cb4aa0e8
Author: Stefan Metzmacher <metze at samba.org>
Date:   Tue Dec 14 18:18:13 2010 +0100

    s3:libsmb: add tstream_cli_np_get_cli_state()
    
    metze

commit c0f9e963b66fa8c97a4c0ea38367443dd6127509
Author: Stefan Metzmacher <metze at samba.org>
Date:   Tue Dec 14 18:17:45 2010 +0100

    s3:libsmb: add tstream_cli_np_set_timeout()
    
    metze

commit 0df669e14111de741ded8445a2acc00a51a50413
Author: Stefan Metzmacher <metze at samba.org>
Date:   Sat Sep 4 11:01:55 2010 +0200

    s3:libsmb: add tstream_cli_np_use_trans() and the needed logic
    
    tstream_cli_np_use_trans() defers the next tstream_writev
    to the next tstream_readv and send both as an SMBtrans request.
    
    metze

commit e08c324fc5ed9d7d4970fc7a7b6b13bff38ace59
Author: Stefan Metzmacher <metze at samba.org>
Date:   Sat Sep 4 11:00:31 2010 +0200

    s3:libsmb: add tstream_is_cli_np()
    
    metze

commit c0ac1cebfbf7b562bad368b14d8004495a59e574
Author: Stefan Metzmacher <metze at samba.org>
Date:   Thu Aug 12 12:00:15 2010 +0200

    s3:libsmb: add cli_np_tstream.c
    
    This abstracts a named pipe over smb as a tstream,
    which will make it easier to implement the dcerpc
    layer in a more generic way.
    
    metze

-----------------------------------------------------------------------

Summary of changes:
 source3/Makefile.in                        |    1 +
 source3/include/proto.h                    |    6 +-
 source3/libsmb/cli_np_tstream.c            |  992 ++++++++++++++++++++++++++++
 source3/libsmb/cli_np_tstream.h            |   56 ++
 source3/rpc_client/rpc_transport_np.c      |  419 +-----------
 source3/rpc_client/rpc_transport_sock.c    |  244 +-------
 source3/rpc_client/rpc_transport_tstream.c |  243 +++++++-
 source3/rpc_server/rpc_ncacn_np.c          |   16 +-
 8 files changed, 1315 insertions(+), 662 deletions(-)
 create mode 100644 source3/libsmb/cli_np_tstream.c
 create mode 100644 source3/libsmb/cli_np_tstream.h


Changeset truncated at 500 lines:

diff --git a/source3/Makefile.in b/source3/Makefile.in
index 1525888..c1b6c8b 100644
--- a/source3/Makefile.in
+++ b/source3/Makefile.in
@@ -591,6 +591,7 @@ LIBSMB_OBJ = libsmb/clientgen.o libsmb/cliconnect.o libsmb/clifile.o \
 	     libsmb/clistr.o libsmb/cliquota.o libsmb/clifsinfo.o libsmb/clidfs.o \
 	     libsmb/clioplock.o libsmb/clirap2.o \
 	     libsmb/smb_seal.o libsmb/async_smb.o \
+	     libsmb/cli_np_tstream.o \
 	     $(LIBSAMBA_OBJ) \
 	     $(LIBNMB_OBJ) \
 	     $(LIBNBT_OBJ) \
diff --git a/source3/include/proto.h b/source3/include/proto.h
index 0f02dfb..9b41321 100644
--- a/source3/include/proto.h
+++ b/source3/include/proto.h
@@ -4244,10 +4244,8 @@ NTSTATUS rpc_transport_sock_init(TALLOC_CTX *mem_ctx, int fd,
 
 /* The following definitions come from rpc_client/rpc_transport_tstream.c  */
 NTSTATUS rpc_transport_tstream_init(TALLOC_CTX *mem_ctx,
-				struct tstream_context *npipe,
-				struct tevent_queue *read_queue,
-				struct tevent_queue *write_queue,
-				 struct rpc_cli_transport **presult);
+				struct tstream_context **stream,
+				struct rpc_cli_transport **presult);
 
 /* The following definitions come from rpc_server/srv_eventlog_nt.c  */
 
diff --git a/source3/libsmb/cli_np_tstream.c b/source3/libsmb/cli_np_tstream.c
new file mode 100644
index 0000000..409b40d
--- /dev/null
+++ b/source3/libsmb/cli_np_tstream.c
@@ -0,0 +1,992 @@
+/*
+   Unix SMB/CIFS implementation.
+
+   Copyright (C) Stefan Metzmacher 2010
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 3 of the License, or
+   (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program.  If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "includes.h"
+#include "system/network.h"
+#include "../util/tevent_unix.h"
+#include "../lib/tsocket/tsocket.h"
+#include "../lib/tsocket/tsocket_internal.h"
+#include "cli_np_tstream.h"
+
+static const struct tstream_context_ops tstream_cli_np_ops;
+
+/*
+ * Windows uses 1024 hardcoded for read size and trans max data
+ */
+#define TSTREAM_CLI_NP_BUF_SIZE 1024
+
+struct tstream_cli_np {
+	struct cli_state *cli;
+	const char *npipe;
+	uint16_t fnum;
+	unsigned int default_timeout;
+
+	struct {
+		bool active;
+		struct tevent_req *write_req;
+		uint16_t setup[2];
+	} trans;
+
+	struct {
+		off_t ofs;
+		size_t left;
+		uint8_t buf[TSTREAM_CLI_NP_BUF_SIZE];
+	} read, write;
+};
+
+static int tstream_cli_np_destructor(struct tstream_cli_np *cli_nps)
+{
+	NTSTATUS status;
+
+	if (!cli_state_is_connected(cli_nps->cli)) {
+		return 0;
+	}
+
+	/*
+	 * TODO: do not use a sync call with a destructor!!!
+	 *
+	 * This only happens if a caller does talloc_free()
+	 * while everything was still ok.
+	 *
+	 * If we get an unexpected failure within a normal
+	 * operation, we already do an async cli_close_send()/_recv().
+	 *
+	 * Once we've fixed all callers to call
+	 * tstream_disconnect_send()/_recv(), this will
+	 * never be called.
+	 */
+	status = cli_close(cli_nps->cli, cli_nps->fnum);
+	if (!NT_STATUS_IS_OK(status)) {
+		DEBUG(1, ("tstream_cli_np_destructor: cli_close "
+			  "failed on pipe %s. Error was %s\n",
+			  cli_nps->npipe, nt_errstr(status)));
+	}
+	/*
+	 * We can't do much on failure
+	 */
+	return 0;
+};
+
+struct tstream_cli_np_open_state {
+	struct cli_state *cli;
+	uint16_t fnum;
+	const char *npipe;
+};
+
+static void tstream_cli_np_open_done(struct tevent_req *subreq);
+
+struct tevent_req *tstream_cli_np_open_send(TALLOC_CTX *mem_ctx,
+					    struct tevent_context *ev,
+					    struct cli_state *cli,
+					    const char *npipe)
+{
+	struct tevent_req *req;
+	struct tstream_cli_np_open_state *state;
+	struct tevent_req *subreq;
+
+	req = tevent_req_create(mem_ctx, &state,
+				struct tstream_cli_np_open_state);
+	if (!req) {
+		return NULL;
+	}
+	state->cli = cli;
+
+	state->npipe = talloc_strdup(state, npipe);
+	if (tevent_req_nomem(state->npipe, req)) {
+		return tevent_req_post(req, ev);
+	}
+
+	subreq = cli_ntcreate_send(state, ev, cli,
+				   npipe,
+				   0,
+				   DESIRED_ACCESS_PIPE,
+				   0,
+				   FILE_SHARE_READ|FILE_SHARE_WRITE,
+				   FILE_OPEN,
+				   0,
+				   0);
+	if (tevent_req_nomem(subreq, req)) {
+		return tevent_req_post(req, ev);
+	}
+	tevent_req_set_callback(subreq, tstream_cli_np_open_done, req);
+
+	return req;
+}
+
+static void tstream_cli_np_open_done(struct tevent_req *subreq)
+{
+	struct tevent_req *req =
+		tevent_req_callback_data(subreq, struct tevent_req);
+	struct tstream_cli_np_open_state *state =
+		tevent_req_data(req, struct tstream_cli_np_open_state);
+	NTSTATUS status;
+
+	status = cli_ntcreate_recv(subreq, &state->fnum);
+	TALLOC_FREE(subreq);
+	if (!NT_STATUS_IS_OK(status)) {
+		tevent_req_nterror(req, status);
+		return;
+	}
+
+	tevent_req_done(req);
+}
+
+NTSTATUS _tstream_cli_np_open_recv(struct tevent_req *req,
+				   TALLOC_CTX *mem_ctx,
+				   struct tstream_context **_stream,
+				   const char *location)
+{
+	struct tstream_cli_np_open_state *state =
+		tevent_req_data(req, struct tstream_cli_np_open_state);
+	struct tstream_context *stream;
+	struct tstream_cli_np *cli_nps;
+	NTSTATUS status;
+
+	if (tevent_req_is_nterror(req, &status)) {
+		tevent_req_received(req);
+		return status;
+	}
+
+	stream = tstream_context_create(mem_ctx,
+					&tstream_cli_np_ops,
+					&cli_nps,
+					struct tstream_cli_np,
+					location);
+	if (!stream) {
+		tevent_req_received(req);
+		return NT_STATUS_NO_MEMORY;
+	}
+	ZERO_STRUCTP(cli_nps);
+
+	cli_nps->cli = state->cli;
+	cli_nps->npipe = talloc_move(cli_nps, &state->npipe);
+	cli_nps->fnum = state->fnum;
+	cli_nps->default_timeout = state->cli->timeout;
+
+	talloc_set_destructor(cli_nps, tstream_cli_np_destructor);
+
+	cli_nps->trans.active = false;
+	cli_nps->trans.write_req = NULL;
+	SSVAL(cli_nps->trans.setup+0, 0, TRANSACT_DCERPCCMD);
+	SSVAL(cli_nps->trans.setup+1, 0, cli_nps->fnum);
+
+	*_stream = stream;
+	tevent_req_received(req);
+	return NT_STATUS_OK;
+}
+
+static ssize_t tstream_cli_np_pending_bytes(struct tstream_context *stream)
+{
+	struct tstream_cli_np *cli_nps = tstream_context_data(stream,
+					 struct tstream_cli_np);
+
+	if (!cli_state_is_connected(cli_nps->cli)) {
+		errno = ENOTCONN;
+		return -1;
+	}
+
+	return cli_nps->read.left;
+}
+
+bool tstream_is_cli_np(struct tstream_context *stream)
+{
+	struct tstream_cli_np *cli_nps =
+		talloc_get_type(_tstream_context_data(stream),
+		struct tstream_cli_np);
+
+	if (!cli_nps) {
+		return false;
+	}
+
+	return true;
+}
+
+NTSTATUS tstream_cli_np_use_trans(struct tstream_context *stream)
+{
+	struct tstream_cli_np *cli_nps = tstream_context_data(stream,
+					 struct tstream_cli_np);
+
+	if (cli_nps->trans.write_req) {
+		return NT_STATUS_PIPE_BUSY;
+	}
+
+	if (cli_nps->trans.active) {
+		return NT_STATUS_PIPE_BUSY;
+	}
+
+	cli_nps->trans.active = true;
+
+	return NT_STATUS_OK;
+}
+
+unsigned int tstream_cli_np_set_timeout(struct tstream_context *stream,
+					unsigned int timeout)
+{
+	struct tstream_cli_np *cli_nps = tstream_context_data(stream,
+					 struct tstream_cli_np);
+
+	if (!cli_state_is_connected(cli_nps->cli)) {
+		return cli_nps->default_timeout;
+	}
+
+	return cli_set_timeout(cli_nps->cli, timeout);
+}
+
+struct cli_state *tstream_cli_np_get_cli_state(struct tstream_context *stream)
+{
+	struct tstream_cli_np *cli_nps = tstream_context_data(stream,
+					 struct tstream_cli_np);
+
+	return cli_nps->cli;
+}
+
+struct tstream_cli_np_writev_state {
+	struct tstream_context *stream;
+	struct tevent_context *ev;
+
+	struct iovec *vector;
+	size_t count;
+
+	int ret;
+
+	struct {
+		int val;
+		const char *location;
+	} error;
+};
+
+static int tstream_cli_np_writev_state_destructor(struct tstream_cli_np_writev_state *state)
+{
+	struct tstream_cli_np *cli_nps =
+		tstream_context_data(state->stream,
+		struct tstream_cli_np);
+
+	cli_nps->trans.write_req = NULL;
+
+	return 0;
+}
+
+static void tstream_cli_np_writev_write_next(struct tevent_req *req);
+
+static struct tevent_req *tstream_cli_np_writev_send(TALLOC_CTX *mem_ctx,
+					struct tevent_context *ev,
+					struct tstream_context *stream,
+					const struct iovec *vector,
+					size_t count)
+{
+	struct tevent_req *req;
+	struct tstream_cli_np_writev_state *state;
+	struct tstream_cli_np *cli_nps = tstream_context_data(stream,
+					 struct tstream_cli_np);
+
+	req = tevent_req_create(mem_ctx, &state,
+				struct tstream_cli_np_writev_state);
+	if (!req) {
+		return NULL;
+	}
+	state->stream = stream;
+	state->ev = ev;
+	state->ret = 0;
+
+	talloc_set_destructor(state, tstream_cli_np_writev_state_destructor);
+
+	if (!cli_state_is_connected(cli_nps->cli)) {
+		tevent_req_error(req, ENOTCONN);
+		return tevent_req_post(req, ev);
+	}
+
+	/*
+	 * we make a copy of the vector so we can change the structure
+	 */
+	state->vector = talloc_array(state, struct iovec, count);
+	if (tevent_req_nomem(state->vector, req)) {
+		return tevent_req_post(req, ev);
+	}
+	memcpy(state->vector, vector, sizeof(struct iovec) * count);
+	state->count = count;
+
+	tstream_cli_np_writev_write_next(req);
+	if (!tevent_req_is_in_progress(req)) {
+		return tevent_req_post(req, ev);
+	}
+
+	return req;
+}
+
+static void tstream_cli_np_writev_write_done(struct tevent_req *subreq);
+
+static void tstream_cli_np_writev_write_next(struct tevent_req *req)
+{
+	struct tstream_cli_np_writev_state *state =
+		tevent_req_data(req,
+		struct tstream_cli_np_writev_state);
+	struct tstream_cli_np *cli_nps =
+		tstream_context_data(state->stream,
+		struct tstream_cli_np);
+	struct tevent_req *subreq;
+
+	cli_nps->write.ofs = 0;
+	cli_nps->write.left = TSTREAM_CLI_NP_BUF_SIZE;
+
+	/*
+	 * copy the pending buffer first
+	 */
+	while (cli_nps->write.left > 0 && state->count > 0) {
+		uint8_t *base = (uint8_t *)state->vector[0].iov_base;
+		size_t len = MIN(cli_nps->write.left, state->vector[0].iov_len);
+
+		memcpy(cli_nps->write.buf + cli_nps->write.ofs, base, len);
+
+		base += len;
+		state->vector[0].iov_base = base;
+		state->vector[0].iov_len -= len;
+
+		cli_nps->write.ofs += len;
+		cli_nps->write.left -= len;
+
+		if (state->vector[0].iov_len == 0) {
+			state->vector += 1;
+			state->count -= 1;
+		}
+
+		state->ret += len;
+	}
+
+	if (cli_nps->write.ofs == 0) {
+		tevent_req_done(req);
+		return;
+	}
+
+	if (cli_nps->trans.active && state->count == 0) {
+		cli_nps->trans.active = false;
+		cli_nps->trans.write_req = req;
+		return;
+	}
+
+	subreq = cli_write_andx_send(state, state->ev, cli_nps->cli,
+				     cli_nps->fnum,
+				     8, /* 8 means message mode. */
+				     cli_nps->write.buf, 0,
+				     cli_nps->write.ofs);
+	if (tevent_req_nomem(subreq, req)) {
+		return;
+	}
+	tevent_req_set_callback(subreq,
+				tstream_cli_np_writev_write_done,
+				req);
+}
+
+static void tstream_cli_np_writev_disconnect_now(struct tevent_req *req,
+						 int error,
+						 const char *location);
+
+static void tstream_cli_np_writev_write_done(struct tevent_req *subreq)
+{
+	struct tevent_req *req =
+		tevent_req_callback_data(subreq, struct tevent_req);
+	struct tstream_cli_np_writev_state *state =
+		tevent_req_data(req, struct tstream_cli_np_writev_state);
+	struct tstream_cli_np *cli_nps =
+		tstream_context_data(state->stream,
+		struct tstream_cli_np);
+	size_t written;
+	NTSTATUS status;
+
+	status = cli_write_andx_recv(subreq, &written);
+	TALLOC_FREE(subreq);
+	if (!NT_STATUS_IS_OK(status)) {
+		tstream_cli_np_writev_disconnect_now(req, EIO, __location__);
+		return;
+	}
+
+	if (written != cli_nps->write.ofs) {
+		tstream_cli_np_writev_disconnect_now(req, EIO, __location__);
+		return;
+	}
+
+	tstream_cli_np_writev_write_next(req);
+}
+
+static void tstream_cli_np_writev_disconnect_done(struct tevent_req *subreq);
+
+static void tstream_cli_np_writev_disconnect_now(struct tevent_req *req,
+						 int error,
+						 const char *location)
+{
+	struct tstream_cli_np_writev_state *state =
+		tevent_req_data(req,
+		struct tstream_cli_np_writev_state);
+	struct tstream_cli_np *cli_nps =
+		tstream_context_data(state->stream,
+		struct tstream_cli_np);
+	struct tevent_req *subreq;
+
+	state->error.val = error;
+	state->error.location = location;
+
+	if (!cli_state_is_connected(cli_nps->cli)) {
+		/* return the original error */
+		_tevent_req_error(req, state->error.val, state->error.location);
+		return;
+	}
+
+	subreq = cli_close_send(state, state->ev, cli_nps->cli, cli_nps->fnum);
+	if (subreq == NULL) {
+		/* return the original error */
+		_tevent_req_error(req, state->error.val, state->error.location);
+		return;
+	}
+	tevent_req_set_callback(subreq,
+				tstream_cli_np_writev_disconnect_done,
+				req);
+}
+
+static void tstream_cli_np_writev_disconnect_done(struct tevent_req *subreq)
+{
+	struct tevent_req *req =
+		tevent_req_callback_data(subreq, struct tevent_req);
+	struct tstream_cli_np_writev_state *state =
+		tevent_req_data(req, struct tstream_cli_np_writev_state);


-- 
Samba Shared Repository


More information about the samba-cvs mailing list