RFC rawpipe
Noel Power
nopower at suse.com
Thu May 26 10:45:55 UTC 2016
Hi,
I'm trying once again to get my head around the WSP stuff. One of the
prerequisites for that is working with raw pipes; some time ago I posted
some patches dealing with such raw pipes, but they were too heavyweight
(see
https://lists.samba.org/archive/samba-technical/2014-July/100884.html)
See attached patches that attempt to rework this,
The patches provide a simple framework to support an internal/external
service that wants to use such rawpipes. The patches also provide a very
simple reference implementation (for testing). Note: this test
implementation is only built with developer builds. There is also a
simple torture test suite added (though it was not so simple to figure
out how to write ;-))
I confess that I didn't understand all of what was said in the thread
above, but I think I have captured the spirit of it (and am happy to work
to do whatever is needed to clean it up further).
Note: Also included in there is a patch for adding the ability to change
the default 'Max Ioctl' for pipe trans; that patch is not strictly a
prerequisite for the WSP server but is necessary for any test clients that
will use the WSP service.
thanks
Noel
-------------- next part --------------
From 0b677b91b1ce196b21788093e384e1a4e2b7f20b Mon Sep 17 00:00:00 2001
From: Noel Power <noel.power at suse.com>
Date: Thu, 4 Dec 2014 19:25:02 +0000
Subject: [PATCH 1/6] Add very basic server loop integration infrastructure for
non-rpc (raw) pipes.
Add a simple infrastructure that gives the ability to add a
well-known pipe name and an associated callback for the server loop.
Support for both embedded and external service is provided.
Signed-off-by: Noel Power <noel.power at suse.com>
---
source3/rpc_server/rpc_ncacn_np.c | 23 ++++++++++---
source3/rpc_server/rpc_server.c | 71 ++++++++++++++++++++++++++++++++++++---
source3/rpc_server/rpc_server.h | 14 ++++++++
source3/rpc_server/srv_pipe.c | 7 ++--
4 files changed, 102 insertions(+), 13 deletions(-)
diff --git a/source3/rpc_server/rpc_ncacn_np.c b/source3/rpc_server/rpc_ncacn_np.c
index 5647596..d0e8fd4 100644
--- a/source3/rpc_server/rpc_ncacn_np.c
+++ b/source3/rpc_server/rpc_ncacn_np.c
@@ -85,6 +85,7 @@ NTSTATUS make_internal_rpc_pipe_socketpair(TALLOC_CTX *mem_ctx,
NTSTATUS status;
int error;
int rc;
+ struct name_pipe_server_details *pipe_details = NULL;
DEBUG(4, ("Create of internal pipe %s requested\n", pipe_name));
@@ -94,7 +95,13 @@ NTSTATUS make_internal_rpc_pipe_socketpair(TALLOC_CTX *mem_ctx,
goto out;
}
- npa->file_type = FILE_TYPE_MESSAGE_MODE_PIPE;
+ pipe_details = get_pipe_server_details(pipe_name);
+
+ if (pipe_details) {
+ npa->file_type = pipe_details->msg_mode;
+ } else {
+ npa->file_type = FILE_TYPE_MESSAGE_MODE_PIPE;
+ }
npa->device_state = 0xff | 0x0400 | 0x0100;
npa->allocation_size = 4096;
@@ -161,14 +168,22 @@ NTSTATUS make_internal_rpc_pipe_socketpair(TALLOC_CTX *mem_ctx,
goto out;
}
- subreq = dcerpc_read_ncacn_packet_send(npc, npc->ev, npc->tstream);
+ if (pipe_details) {
+ subreq = pipe_details->start_server_loop(npc,
+ pipe_details->private_data);
+ } else {
+ subreq = dcerpc_read_ncacn_packet_send(npc,
+ npc->ev,
+ npc->tstream);
+ if (subreq) {
+ tevent_req_set_callback(subreq, named_pipe_packet_process, npc);
+ }
+ }
if (subreq == NULL) {
DEBUG(2, ("Failed to start receving packets\n"));
status = NT_STATUS_PIPE_BROKEN;
goto out;
}
- tevent_req_set_callback(subreq, named_pipe_packet_process, npc);
-
*pnpa = talloc_steal(mem_ctx, npa);
status = NT_STATUS_OK;
out:
diff --git a/source3/rpc_server/rpc_server.c b/source3/rpc_server/rpc_server.c
index 5effe66..a9487f5 100644
--- a/source3/rpc_server/rpc_server.c
+++ b/source3/rpc_server/rpc_server.c
@@ -37,6 +37,47 @@
#define SERVER_TCP_LOW_PORT 1024
#define SERVER_TCP_HIGH_PORT 1300
+static struct name_pipe_server_details *pipe_details_map = NULL;
+
+static void init_pipe_details_map(void)
+{
+ if (!pipe_details_map) {
+ pipe_details_map = talloc_zero(NULL, struct name_pipe_server_details);
+ }
+}
+
+struct name_pipe_server_details *get_pipe_server_details(const char* name) {
+ struct name_pipe_server_details *item;
+ init_pipe_details_map();
+ for(item = pipe_details_map; item; item = item->next) {
+ if (strequal(name, item->name)) {
+ return item;
+ }
+ }
+ return NULL;
+}
+
+void add_pipe_server_details(const char* name, uint16_t msg_mode,
+ server_loop_fn loop_fn, void *private_data)
+{
+ struct name_pipe_server_details * item = get_pipe_server_details(name);
+ if (item) {
+ /*update*/
+ item->start_server_loop = loop_fn;
+ item->msg_mode = msg_mode;
+ item->private_data = private_data;
+ } else {
+ struct name_pipe_server_details *new_item =
+ talloc_zero(pipe_details_map,
+ struct name_pipe_server_details);
+ new_item->name = name;
+ new_item->start_server_loop = loop_fn;
+ new_item->msg_mode = msg_mode;
+ new_item->private_data = private_data;
+ DLIST_ADD_END(pipe_details_map, new_item);
+ }
+}
+
/* Creates a pipes_struct and initializes it with the information
* sent from the client */
int make_server_pipes_struct(TALLOC_CTX *mem_ctx,
@@ -291,6 +332,8 @@ void named_pipe_accept_function(struct tevent_context *ev_ctx,
struct tstream_context *plain;
struct tevent_req *subreq;
int ret;
+ struct name_pipe_server_details *pipe_details =
+ get_pipe_server_details(pipe_name);
npc = talloc_zero(ev_ctx, struct named_pipe_client);
if (!npc) {
@@ -330,7 +373,11 @@ void named_pipe_accept_function(struct tevent_context *ev_ctx,
return;
}
- npc->file_type = FILE_TYPE_MESSAGE_MODE_PIPE;
+ if (pipe_details) {
+ npc->file_type = pipe_details->msg_mode;
+ } else {
+ npc->file_type = FILE_TYPE_MESSAGE_MODE_PIPE;
+ }
npc->device_state = 0xff | 0x0400 | 0x0100;
npc->allocation_size = 4096;
@@ -347,13 +394,12 @@ void named_pipe_accept_function(struct tevent_context *ev_ctx,
tevent_req_set_callback(subreq, named_pipe_accept_done, npc);
}
-static void named_pipe_packet_done(struct tevent_req *subreq);
-
static void named_pipe_accept_done(struct tevent_req *subreq)
{
struct auth_session_info_transport *session_info_transport;
struct named_pipe_client *npc =
tevent_req_callback_data(subreq, struct named_pipe_client);
+ struct name_pipe_server_details *pipe_details = NULL;
int error;
int ret;
@@ -395,12 +441,26 @@ static void named_pipe_accept_done(struct tevent_req *subreq)
}
/* And now start receiving and processing packets */
- subreq = dcerpc_read_ncacn_packet_send(npc, npc->ev, npc->tstream);
+
+ pipe_details = get_pipe_server_details(npc->pipe_name);
+ /* has the named pipe special non-rpc treatment */
+ if (pipe_details) {
+ subreq =
+ pipe_details->start_server_loop(npc,
+ pipe_details->private_data);
+ } else {
+ subreq = dcerpc_read_ncacn_packet_send(npc,
+ npc->ev,
+ npc->tstream);
+ if (subreq) {
+ tevent_req_set_callback(subreq,
+ named_pipe_packet_process, npc);
+ }
+ }
if (!subreq) {
DEBUG(2, ("Failed to start receving packets\n"));
goto fail;
}
- tevent_req_set_callback(subreq, named_pipe_packet_process, npc);
return;
fail:
@@ -411,6 +471,7 @@ fail:
return;
}
+static void named_pipe_packet_done(struct tevent_req *subreq);
void named_pipe_packet_process(struct tevent_req *subreq)
{
struct named_pipe_client *npc =
diff --git a/source3/rpc_server/rpc_server.h b/source3/rpc_server/rpc_server.h
index 2291350..33f0bd8 100644
--- a/source3/rpc_server/rpc_server.h
+++ b/source3/rpc_server/rpc_server.h
@@ -55,6 +55,8 @@ struct named_pipe_client {
void *private_data;
};
+bool pipe_init_outgoing_data(struct pipes_struct *p);
+
struct named_pipe_client *named_pipe_client_init(TALLOC_CTX *mem_ctx,
struct tevent_context *ev_ctx,
struct messaging_context *msg_ctx,
@@ -108,4 +110,16 @@ void dcerpc_ncacn_accept(struct tevent_context *ev_ctx,
int s,
dcerpc_ncacn_disconnect_fn fn);
+typedef struct tevent_req *(*server_loop_fn)(struct named_pipe_client *npc,
+ void *private_data);
+struct name_pipe_server_details {
+ struct name_pipe_server_details *prev, *next;
+ const char* name;
+ uint16_t msg_mode;
+ server_loop_fn start_server_loop;
+ void *private_data;
+};
+
+void add_pipe_server_details(const char *name, uint16_t msg_mode, server_loop_fn loop, void *private_data);
+struct name_pipe_server_details *get_pipe_server_details(const char* name);
#endif /* _PRC_SERVER_H_ */
diff --git a/source3/rpc_server/srv_pipe.c b/source3/rpc_server/srv_pipe.c
index bcd7e5d..7972db1 100644
--- a/source3/rpc_server/srv_pipe.c
+++ b/source3/rpc_server/srv_pipe.c
@@ -235,8 +235,6 @@ bool create_next_pdu(struct pipes_struct *p)
}
-static bool pipe_init_outgoing_data(struct pipes_struct *p);
-
/*******************************************************************
Marshall a bind_nak pdu.
*******************************************************************/
@@ -481,7 +479,8 @@ bool is_known_pipename(const char *pipename, struct ndr_syntax_id *syntax)
return false;
}
- if (rpc_srv_get_pipe_interface_by_cli_name(pipename, syntax)) {
+ if (rpc_srv_get_pipe_interface_by_cli_name(pipename, syntax)
+ || get_pipe_server_details(pipename)) {
return true;
}
@@ -1510,7 +1509,7 @@ static bool api_rpcTNP(struct pipes_struct *p, struct ncacn_packet *pkt,
Initialise an outgoing packet.
****************************************************************************/
-static bool pipe_init_outgoing_data(struct pipes_struct *p)
+bool pipe_init_outgoing_data(struct pipes_struct *p)
{
output_data *o_data = &p->out_data;
--
2.1.4
From 2ed1c5b00969b3f1839fdd5619bbf2aa8cbebe7b Mon Sep 17 00:00:00 2001
From: Noel Power <noel.power at suse.com>
Date: Tue, 10 May 2016 17:14:39 +0100
Subject: [PATCH 2/6] add 'generic' rawpipe server loop and additionally a
reference impl for testing.
Provide a helper function 'common_rawpipe_register' that provides a server
loop (you can still provide your own loop if you want) to simplify
processing of 'rawpipe' messages. The reference implementation is built only
with '-DDEVELOPER' and will be used by smbtorture for testing.
Signed-off-by: Noel Power <noel.power at suse.com>
---
source3/rpc_server/rawpipe.c | 457 +++++++++++++++++++++++++++++++++++++++
source3/rpc_server/rawpipe.h | 50 +++++
source3/rpc_server/rpc_config.h | 3 +
source3/rpc_server/wscript_build | 5 +
4 files changed, 515 insertions(+)
create mode 100644 source3/rpc_server/rawpipe.c
create mode 100644 source3/rpc_server/rawpipe.h
diff --git a/source3/rpc_server/rawpipe.c b/source3/rpc_server/rawpipe.c
new file mode 100644
index 0000000..e6ba332
--- /dev/null
+++ b/source3/rpc_server/rawpipe.c
@@ -0,0 +1,457 @@
+/*
+ * Unix SMB/CIFS implementation.
+ *
+ * RawPipe server loop
+ *
+ * Copyright (c) 2016 Noel Power
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "includes.h"
+#include "rawpipe.h"
+#include <tevent.h>
+#include "rpc_common.h"
+#include "rpc_server/srv_pipe.h"
+#include "rpc_server/rpc_server.h"
+#include "rpc_server/rpc_pipes.h"
+#include "rpc_server/rpc_config.h"
+#include "lib/tsocket/tsocket.h"
+#include "lib/util/tevent_ntstatus.h"
+
+struct common_rawpipe_ctx
+{
+ rawpipe_init init;
+ rawpipe_close dtor;
+ rawpipe_fn handler;
+ void *private_data;
+};
+
+struct common_rawpipe_loop_ctx
+{
+ void *init_ctx;
+ struct common_rawpipe_ctx *reg_ctx;
+};
+
+static struct tevent_req *process_rawpipe_request(TALLOC_CTX *mem_ctx,
+ struct named_pipe_client *npc)
+{
+ struct tevent_req *subreq;
+ struct pipes_struct *p = npc->p;
+ struct common_rawpipe_loop_ctx *ctx =
+ talloc_get_type_abort(npc->private_data,
+ struct common_rawpipe_loop_ctx);
+ TALLOC_CTX *frame = talloc_stackframe();
+
+ if (!pipe_init_outgoing_data(p)) {
+ goto done;
+ }
+
+ subreq = ctx->reg_ctx->handler(mem_ctx, npc, ctx->init_ctx);
+
+ if (!subreq) {
+ goto done;
+ }
+done:
+ TALLOC_FREE(frame);
+ return subreq;
+}
+
+struct rawpipe_send_state {
+ DATA_BLOB buffer;
+ struct iovec in;
+ struct tstream_context *stream;
+ struct tevent_context *ev;
+};
+
+static void read_rawpipe_done(struct tevent_req *subreq)
+{
+ struct tevent_req *req = tevent_req_callback_data(subreq,
+ struct tevent_req);
+ struct rawpipe_send_state *state =
+ tevent_req_data(req, struct rawpipe_send_state);
+ int ret;
+ int sys_errno;
+ int to_read;
+ ssize_t ofs = 0;
+ NTSTATUS status;
+
+ DEBUG(10,("read_rawpipe_done\n"));
+ ret = tstream_readv_recv(subreq, &sys_errno);
+ TALLOC_FREE(subreq);
+ if (ret == -1) {
+ status = map_nt_error_from_unix_common(sys_errno);
+ tevent_req_nterror(req, status);
+ return;
+ }
+ /*
+ * initial read is just for waiting for at lest 1 byte, see if
+ * there is any additional bytes to read.
+ */
+ to_read = tstream_pending_bytes(state->stream);
+ if (!to_read) {
+ /* we're done */
+ tevent_req_done(req);
+ return;
+ }
+
+ ofs = state->buffer.length;
+ state->buffer.data = talloc_realloc(state,
+ state->buffer.data,
+ uint8_t,
+ to_read + ofs);
+ state->buffer.length = to_read + state->buffer.length;
+ state->in.iov_base = (void *) (state->buffer.data + ofs);
+ state->in.iov_len = state->buffer.length - ofs;
+ subreq = tstream_readv_send(state,
+ state->ev,
+ state->stream,
+ &state->in,
+ 1);
+ if (tevent_req_nomem(subreq, req)) {
+ tevent_req_post(req, state->ev);
+ return;
+ }
+
+ tevent_req_set_callback(subreq, read_rawpipe_done, req);
+}
+
+static struct tevent_req *read_rawpipe_send(TALLOC_CTX *mem_ctx,
+ struct tevent_context *ev,
+ struct tstream_context *stream)
+{
+ struct tevent_req *req;
+ struct rawpipe_send_state *state;
+ struct tevent_req *subreq;
+ req = tevent_req_create(mem_ctx, &state,
+ struct rawpipe_send_state);
+ if (req == NULL) {
+ return NULL;
+ }
+
+ state->buffer.length = 1;
+ state->buffer.data = talloc_zero_array(mem_ctx,
+ uint8_t,
+ state->buffer.length);
+ state->stream = stream;
+ state->in.iov_base = state->buffer.data;
+ state->in.iov_len = state->buffer.length;
+ state->ev = ev;
+ DEBUG(10,("read_rawpipe_send stream %p\n", stream));
+ subreq = tstream_readv_send(state, ev,
+ stream,
+ &state->in,
+ 1);
+ tevent_req_set_callback(subreq, read_rawpipe_done, req);
+ return req;
+}
+
+static NTSTATUS rawpipe_read_recv(struct tevent_req *req,
+ TALLOC_CTX *mem_ctx,
+ DATA_BLOB *buffer)
+{
+ struct rawpipe_send_state *state = tevent_req_data(req,
+ struct rawpipe_send_state);
+ NTSTATUS status;
+
+ DEBUG(10,("rawpipe_read_recv\n"));
+ if (tevent_req_is_nterror(req, &status)) {
+ DEBUG(0,("rawpipe_read_recv nterror %s\n", nt_errstr(status)));
+ tevent_req_received(req);
+ return status;
+ }
+
+ if (buffer) {
+ buffer->data = talloc_move(mem_ctx, &state->buffer.data);
+ buffer->length = state->buffer.length;
+ }
+
+ tevent_req_received(req);
+ return NT_STATUS_OK;
+}
+
+void rawpipe_process(struct tevent_req *subreq);
+
+static void common_destroy_pipe(void *private_data)
+{
+ struct common_rawpipe_loop_ctx *loop_ctx =
+ talloc_get_type_abort(private_data,
+ struct common_rawpipe_loop_ctx);
+ loop_ctx->reg_ctx->dtor(loop_ctx->init_ctx);
+}
+
+static struct tevent_req *start_common_rawpipe_loop(struct named_pipe_client *npc,
+ void *private_data);
+static struct tevent_req *common_rawpipe_loop(struct named_pipe_client *npc);
+
+static void rawpipe_process_done(struct tevent_req *subreq)
+{
+ struct named_pipe_client *npc =
+ tevent_req_callback_data(subreq, struct named_pipe_client);
+ int sys_errno;
+ int ret;
+
+ DEBUG(10,("rawpipe_process_done \n"));
+ ret = tstream_writev_queue_recv(subreq, &sys_errno);
+ TALLOC_FREE(subreq);
+ if (ret == -1) {
+ DEBUG(2, ("Writev failed!\n"));
+ goto fail;
+ }
+ if (tevent_queue_length(npc->write_queue) > 0) {
+ return;
+ }
+
+ npc->count = 0;
+ TALLOC_FREE(npc->iov);
+ data_blob_free(&npc->p->in_data.data);
+ data_blob_free(&npc->p->out_data.frag);
+ data_blob_free(&npc->p->out_data.rdata);
+
+ talloc_free_children(npc->p->mem_ctx);
+ subreq = common_rawpipe_loop(npc);
+ if (!subreq) {
+ goto fail;
+ }
+ return;
+fail:
+ DEBUG(0, ("Fatal error(%s). "
+ "Terminating client(%s) connection!\n",
+ strerror(sys_errno), npc->client_name));
+ /* terminate client connection */
+ talloc_free(npc);
+ return;
+}
+
+static void process_rawpipe_request_done(struct tevent_req *subreq);
+void rawpipe_process(struct tevent_req *subreq)
+{
+ struct named_pipe_client *npc =
+ tevent_req_callback_data(subreq, struct named_pipe_client);
+ DATA_BLOB recv_buffer = data_blob_null;
+ NTSTATUS status;
+
+ DEBUG(10,("rawpipe_process \n"));
+ status = rawpipe_read_recv(subreq, npc, &recv_buffer);
+ TALLOC_FREE(subreq);
+ if (!NT_STATUS_IS_OK(status)) {
+ goto fail;
+ }
+
+ npc->p->in_data.pdu_needed_len = 0;
+ npc->p->in_data.pdu = recv_buffer;
+
+ subreq = process_rawpipe_request(npc->p->mem_ctx, npc);
+ tevent_req_set_callback(subreq, process_rawpipe_request_done, npc);
+ talloc_free(recv_buffer.data);
+ return;
+fail:
+ DEBUG(0, ("Fatal error(%s). "
+ "Terminating client(%s) connection!\n",
+ nt_errstr(status), npc->client_name));
+ /* terminate client connection */
+ talloc_free(npc);
+ return;
+}
+
+static void process_rawpipe_request_done(struct tevent_req *subreq)
+{
+ uint32_t to_send;
+ struct named_pipe_client *npc;
+ struct _output_data *out;
+ NTSTATUS status;
+ DEBUG(10,("process_rawpipe_done\n"));
+ npc = tevent_req_callback_data(subreq, struct named_pipe_client);
+ out = &npc->p->out_data;
+ to_send = out->rdata.length;
+ TALLOC_FREE(subreq);
+ if (to_send) {
+ npc->iov = talloc_zero(npc, struct iovec);
+ if (!npc->iov) {
+ status = NT_STATUS_NO_MEMORY;
+ goto fail;
+ }
+ npc->count = 1;
+
+ npc->iov[0].iov_len = to_send;
+ npc->iov[0].iov_base = out->rdata.data;
+ DEBUG(10,("sending %lu bytes to tstream !!\n",
+ npc->iov[0].iov_len ));
+ subreq = tstream_writev_queue_send(npc, npc->ev, npc->tstream,
+ npc->write_queue,
+ npc->iov,
+ 1);
+ if (!subreq) {
+ DEBUG(2, ("Failed to send response for raw pipe\n"));
+ status = NT_STATUS_NO_MEMORY;
+ goto fail;
+ }
+ tevent_req_set_callback(subreq, rawpipe_process_done, npc);
+ } else {
+ /*
+ * we don't respond to some messages (e.g. CPMDisconnect from
+ * MS-WSP), make sure we restart the server loop in anycase.
+ */
+ subreq = common_rawpipe_loop(npc);
+ if (!subreq) {
+ status = NT_STATUS_NO_MEMORY;
+ goto fail;
+ }
+ }
+ return;
+fail:
+ DEBUG(0, ("Fatal error(%s). "
+ "Terminating client(%s) connection!\n",
+ nt_errstr(status), npc->client_name));
+ /* terminate client connection */
+ talloc_free(npc);
+ return;
+}
+
+static struct tevent_req *common_rawpipe_loop(struct named_pipe_client *npc)
+{
+ struct tevent_req *subreq;
+
+ subreq = read_rawpipe_send(npc, npc->ev, npc->tstream);
+ if (!subreq) {
+ DEBUG(0, ("Failed to start receving packets\n"));
+ goto fail;
+ }
+ tevent_req_set_callback(subreq, rawpipe_process, npc);
+fail:
+ return subreq;
+}
+
+static struct tevent_req *start_common_rawpipe_loop(struct named_pipe_client *npc,
+ void *private_data)
+{
+ struct common_rawpipe_ctx *ctx =
+ talloc_get_type_abort(private_data,
+ struct common_rawpipe_ctx);
+ struct common_rawpipe_loop_ctx *loop_ctx =
+ talloc_zero(npc, struct common_rawpipe_loop_ctx);
+ loop_ctx->reg_ctx = ctx;
+ if (ctx->init) {
+ loop_ctx->init_ctx = ctx->init(npc, ctx->private_data);
+ }
+ if (ctx->dtor) {
+ npc->term_fn = common_destroy_pipe;
+ }
+ npc->private_data = loop_ctx;
+ return common_rawpipe_loop(npc);
+}
+
+void common_rawpipe_register(const char* pipename,
+ uint16_t msg_mode,
+ rawpipe_init init,
+ rawpipe_close dtor,
+ rawpipe_fn handler,
+ void *private_data)
+{
+ struct common_rawpipe_ctx *ctx =
+ talloc(NULL, struct common_rawpipe_ctx);
+ ctx->init = init;
+ ctx->dtor = dtor;
+ ctx->handler = handler;
+ ctx->private_data = private_data;
+ add_pipe_server_details(pipename,
+ msg_mode,
+ start_common_rawpipe_loop,
+ ctx);
+}
+
+#ifdef DEVELOPER
+
+struct dummy_rawpipe_state
+{
+};
+
+/*
+ * Very simple test harness for raw pipes
+ * + If we recieve message 'large' we return a large message
+ * that we know breaks the max response with a named pipe trans msg
+ * (Max Ioctl is hard coded to 4280) (this should generate a BUFFER_OVERFLOW)
+ * + Any other message we recieve we just echo back
+ *
+ * Note: we can easily extend the test harness by defining some proper
+ * message structures to exchange more complex instruction + payload
+ * combinations.
+ */
+static struct tevent_req *do_echo_send(TALLOC_CTX *ctx,
+ struct named_pipe_client *npc,
+ void *init_ctx)
+{
+ struct tevent_req *req;
+ struct pipes_struct *p = npc->p;
+ struct dummy_rawpipe_state *state;
+ const char* large = "large";
+ req = tevent_req_create(ctx, &state,
+ struct dummy_rawpipe_state);
+ DEBUG(10,("received %lu bytes mem_ctx = %p\n",
+ p->in_data.pdu.length,
+ npc->p->mem_ctx));
+
+ if ((p->in_data.pdu.length == strlen(large) + 1)
+ && (memcmp(p->in_data.pdu.data, large, strlen(large) + 1) == 0)) {
+ uint32_t large_size = 6500;
+ p->out_data.rdata.data = talloc_zero_array(npc->p->mem_ctx,
+ uint8_t,
+ large_size);
+ p->out_data.rdata.length = large_size;
+ DEBUG(10,("LARGE message test sending back %d bytes\n",
+ large_size));
+ } else {
+ /* simple echo */
+ p->out_data.rdata.data = talloc_array(npc->p->mem_ctx,
+ uint8_t,
+ p->in_data.pdu.length);
+ p->out_data.rdata.length = p->in_data.pdu.length;
+ }
+
+ memcpy(p->out_data.rdata.data,
+ p->in_data.pdu.data,
+ p->in_data.pdu.length);
+
+ tevent_req_done(req);
+ return tevent_req_post(req, npc->ev);
+}
+
+void init_rawipe_echo(struct tevent_context *ev_ctx,
+ struct messaging_context *msg_ctx)
+{
+ common_rawpipe_register("rawpipe", FILE_TYPE_MESSAGE_MODE_PIPE,
+ NULL, NULL, do_echo_send, NULL);
+ if (rpc_rawd_daemon()) {
+ pid_t pid = fork();
+ bool ok;
+ if (pid == -1) {
+ DEBUG(0, ("failed to fork rawd daemon [%s], "
+ "aborting ...\n", strerror(errno)));
+ exit(1);
+ }
+
+ if (pid) {
+ /* parent */
+ return;
+ }
+ ok = setup_named_pipe_socket("rawpipe", ev_ctx, msg_ctx);
+ if (!ok) {
+ DEBUG(0, ("Failed to open rawpipe named pipe!\n"));
+ exit(1);
+ }
+ DEBUG(10,("rawd daemon started\n"));
+ }
+ DEBUG(10,("raw pipe echo loop started\n"));
+}
+#endif
diff --git a/source3/rpc_server/rawpipe.h b/source3/rpc_server/rawpipe.h
new file mode 100644
index 0000000..33f797f
--- /dev/null
+++ b/source3/rpc_server/rawpipe.h
@@ -0,0 +1,50 @@
+/*
+ * Unix SMB/CIFS implementation.
+ *
+ * RawPipe server loop
+ *
+ * Copyright (c) 2016 Noel Power
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __RAWPIPE__
+#define __RAWPIPE__
+
+
+struct named_pipe_client;
+struct messaging_context;
+
+/* returns a context that is passed to the handler and close functions */
+typedef void* (*rawpipe_init)(struct named_pipe_client *npc,
+ void *private_data);
+/* called when pipe is destoyed */
+typedef void (*rawpipe_close)(void *init_ctx);
+/* called when a message to respond to on the pipe is available */
+typedef struct tevent_req *(*rawpipe_fn)(TALLOC_CTX *ctx,
+ struct named_pipe_client *npc,
+ void *init_ctx);
+
+void common_rawpipe_register(const char* pipename,
+ uint16_t msg_mode,
+ rawpipe_init init,
+ rawpipe_close dtor,
+ rawpipe_fn handler,
+ void *private_data);
+#ifdef DEVELOPER
+void init_rawipe_echo(struct tevent_context *ev_ctx,
+ struct messaging_context *msg_ctx);
+#endif
+
+#endif /* __RAWPIPE__ */
diff --git a/source3/rpc_server/rpc_config.h b/source3/rpc_server/rpc_config.h
index 5091704..dd39f14 100644
--- a/source3/rpc_server/rpc_config.h
+++ b/source3/rpc_server/rpc_config.h
@@ -68,5 +68,8 @@ enum rpc_daemon_type_e rpc_daemon_type(const char *name);
#define rpc_lsasd_daemon() rpc_daemon_type("lsasd")
#define rpc_fss_daemon() rpc_daemon_type("fssd")
#define rpc_mdssd_daemon() rpc_daemon_type("mdssd")
+#ifdef DEVELOPER
+#define rpc_rawd_daemon() rpc_daemon_type("rawd")
+#endif
#endif /* _RPC_CONFIG_H */
diff --git a/source3/rpc_server/wscript_build b/source3/rpc_server/wscript_build
index 1d0facb..f9aaf71 100755
--- a/source3/rpc_server/wscript_build
+++ b/source3/rpc_server/wscript_build
@@ -1,6 +1,11 @@
#!/usr/bin/env python
### RPC_SERVER
+bld.SAMBA3_SUBSYSTEM('RAWPIPE',
+ source='rawpipe.c',
+ deps='samba-util',
+ enabled=bld.env.DEVELOPER)
+
bld.SAMBA3_SUBSYSTEM('rpc',
source='',
deps='RPC_PIPE_REGISTER')
--
2.1.4
From 6c1112472a1654c8dc668c77343d08c2031545eb Mon Sep 17 00:00:00 2001
From: Noel Power <noel.power at suse.com>
Date: Tue, 10 May 2016 17:15:06 +0100
Subject: [PATCH 3/6] Add rawd (rawpipe test service) to smbd (conditionalised
on --enable-developer)
Signed-off-by: Noel Power <noel.power at suse.com>
---
source3/smbd/server.c | 7 ++++++-
source3/wscript_build | 2 +-
2 files changed, 7 insertions(+), 2 deletions(-)
diff --git a/source3/smbd/server.c b/source3/smbd/server.c
index 82e686e..eea2343 100644
--- a/source3/smbd/server.c
+++ b/source3/smbd/server.c
@@ -51,7 +51,9 @@
#include "smbd/notifyd/notifyd.h"
#include "smbd/smbd_cleanupd.h"
#include "lib/util/sys_rw.h"
-
+#ifdef DEVELOPER
+#include "rpc_server/rawpipe.h"
+#endif
#ifdef CLUSTER_SUPPORT
#include "ctdb_protocol.h"
#endif
@@ -1683,6 +1685,9 @@ extern void build_options(bool screen);
daemon_ready("smbd");
}
+#ifdef DEVELOPER
+ init_rawipe_echo(ev_ctx, msg_ctx);
+#endif
/* only start other daemons if we are running as a daemon
* -- bad things will happen if smbd is launched via inetd
* and we fork a copy of ourselves here */
diff --git a/source3/wscript_build b/source3/wscript_build
index ed2424d..77508ea 100755
--- a/source3/wscript_build
+++ b/source3/wscript_build
@@ -859,7 +859,7 @@ bld.SAMBA3_SUBSYSTEM('LIBLSA',
bld.SAMBA3_BINARY('smbd/smbd',
source='smbd/server.c smbd/smbd_cleanupd.c',
- deps='smbd_base EPMD LSASD FSSD MDSSD',
+ deps='smbd_base EPMD LSASD FSSD MDSSD RAWPIPE',
install_path='${SBINDIR}')
bld.SAMBA3_BINARY('nmbd/nmbd',
--
2.1.4
From 143613a393d074d1fbe206c3c2386b7f986dee3e Mon Sep 17 00:00:00 2001
From: Noel Power <noel.power at suse.com>
Date: Wed, 18 May 2016 15:06:49 +0100
Subject: [PATCH 4/6] Add smbtorture tests for rawpipe infrastructure
Some simple tests that interact with the reference (simple echo service)
implementation that uses the new rawpipe infrastructure.
Signed-off-by: Noel Power <noel.power at suse.com>
---
source3/selftest/tests.py | 2 +-
source4/torture/raw/raw.c | 1 +
source4/torture/raw/rawpipe.c | 674 ++++++++++++++++++++++++++++++++++++++++++
source4/torture/wscript_build | 2 +-
4 files changed, 677 insertions(+), 2 deletions(-)
create mode 100644 source4/torture/raw/rawpipe.c
diff --git a/source3/selftest/tests.py b/source3/selftest/tests.py
index b96df8a..fa7f7a9 100755
--- a/source3/selftest/tests.py
+++ b/source3/selftest/tests.py
@@ -503,5 +503,5 @@ for e in endianness_options:
options = binding_string + " -U$USERNAME%$PASSWORD"
plansmbtorture4testsuite(test, "nt4_dc", options, 'over ncacn_ip_tcp with [%s%s%s] ' % (a, s, e))
-plansmbtorture4testsuite('rpc.epmapper', 'nt4_dc:local', 'ncalrpc: -U$USERNAME%$PASSWORD', 'over ncalrpc')
plansmbtorture4testsuite('rpc.fsrvp', 'nt4_dc:local', 'ncacn_np:$SERVER_IP[/pipe/FssagentRpc] -U$USERNAME%$PASSWORD', 'over ncacn_np')
+plansmbtorture4testsuite('raw.rawpipe', 'nt4_dc:local', '//$SERVER_IP/tmp -U$USERNAME%$PASSWORD')
diff --git a/source4/torture/raw/raw.c b/source4/torture/raw/raw.c
index bda463b..21d2f08 100644
--- a/source4/torture/raw/raw.c
+++ b/source4/torture/raw/raw.c
@@ -58,6 +58,7 @@ NTSTATUS torture_raw_init(void)
torture_suite_add_suite(suite, torture_raw_context(suite));
torture_suite_add_suite(suite, torture_raw_session(suite));
torture_suite_add_suite(suite, torture_raw_rename(suite));
+ torture_suite_add_suite(suite, torture_raw_rawpipe(suite));
torture_suite_add_1smb_test(suite, "seek", torture_raw_seek);
torture_suite_add_1smb_test(suite, "eas", torture_raw_eas);
torture_suite_add_suite(suite, torture_raw_streams(suite));
diff --git a/source4/torture/raw/rawpipe.c b/source4/torture/raw/rawpipe.c
new file mode 100644
index 0000000..9e98461
--- /dev/null
+++ b/source4/torture/raw/rawpipe.c
@@ -0,0 +1,674 @@
+/*
+ Unix SMB/CIFS implementation.
+
+ test suite for Raw pipe implementation
+
+ Copyright (C) Noel Power 2016
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "includes.h"
+#include "libcli/smb2/smb2.h"
+#include "libcli/raw/interfaces.h"
+#include "torture/torture.h"
+#include "torture/smb2/proto.h"
+#include "torture/rpc/torture_rpc.h"
+#include "rpc_server/rawpipe.h"
+#include "rpc_common.h"
+#include "libcli/libcli.h"
+#include "torture/raw/proto.h"
+#include "smb_composite/smb_composite.h"
+#include "lib/cmdline/popt_common.h"
+#include "param/param.h"
+#include "torture/util.h"
+#include "auth/credentials/credentials.h"
+#include "libcli/resolve/resolve.h"
+#include "util/tevent_ntstatus.h"
+#include "system/filesys.h"
+#include "lib/tsocket/tsocket.h"
+#include "libcli/smb/tstream_smbXcli_np.h"
+#include "libcli/smb2/smb2.h"
+#include "libcli/smb2/smb2_calls.h"
+
+struct rawpipe_bh_state {
+ struct dcerpc_pipe *p;
+};
+
+static bool rawpipe_bh_ref_alloc(struct dcerpc_binding_handle *h)
+{
+ return true;
+}
+
+static bool rawpipe_bh_is_connected(struct dcerpc_binding_handle *h)
+{
+ struct rawpipe_bh_state *hs = dcerpc_binding_handle_data(h,
+ struct rawpipe_bh_state);
+
+ if (!hs->p) {
+ return false;
+ }
+
+ if (!hs->p->conn) {
+ return false;
+ }
+
+ if (hs->p->conn->dead) {
+ return false;
+ }
+
+ return true;
+}
+
+static uint32_t rawpipe_bh_set_timeout(struct dcerpc_binding_handle *h,
+ uint32_t timeout)
+{
+ struct rawpipe_bh_state *hs = dcerpc_binding_handle_data(h,
+ struct rawpipe_bh_state);
+ uint32_t old;
+
+ if (!hs->p) {
+ return DCERPC_REQUEST_TIMEOUT;
+ }
+
+ old = hs->p->request_timeout;
+ hs->p->request_timeout = timeout;
+
+ return old;
+}
+
+static void rawpipe_bh_auth_info(struct dcerpc_binding_handle *h,
+ enum dcerpc_AuthType *auth_type,
+ enum dcerpc_AuthLevel *auth_level)
+{
+ struct rawpipe_bh_state *hs = dcerpc_binding_handle_data(h,
+ struct rawpipe_bh_state);
+ if (hs->p == NULL) {
+ return;
+ }
+
+ if (hs->p->conn == NULL) {
+ return;
+ }
+
+ *auth_type = hs->p->conn->security_state.auth_type;
+ *auth_level = hs->p->conn->security_state.auth_level;
+}
+
+struct rawpipe_bh_disconnect_state {
+ uint8_t _dummy;
+};
+
+static struct tevent_req *rawpipe_bh_disconnect_send(TALLOC_CTX *mem_ctx,
+ struct tevent_context *ev,
+ struct dcerpc_binding_handle *h)
+{
+ struct rawpipe_bh_state *hs = dcerpc_binding_handle_data(h,
+ struct rawpipe_bh_state);
+ struct tevent_req *req;
+ struct dcerpc_bh_disconnect_state *state;
+ bool ok;
+
+ req = tevent_req_create(mem_ctx, &state,
+ struct rawpipe_bh_disconnect_state);
+ if (req == NULL) {
+ return NULL;
+ }
+
+ ok = rawpipe_bh_is_connected(h);
+ if (!ok) {
+ tevent_req_nterror(req, NT_STATUS_CONNECTION_DISCONNECTED);
+ return tevent_req_post(req, ev);
+ }
+
+ /* TODO: do a real disconnect ... */
+ hs->p = NULL;
+
+ tevent_req_done(req);
+ return tevent_req_post(req, ev);
+}
+
+static NTSTATUS rawpipe_bh_disconnect_recv(struct tevent_req *req)
+{
+ NTSTATUS status;
+
+ if (tevent_req_is_nterror(req, &status)) {
+ tevent_req_received(req);
+ return status;
+ }
+
+ tevent_req_received(req);
+ return NT_STATUS_OK;
+}
+
+struct rawpipe_bh_raw_call_state {
+ DATA_BLOB out_data;
+ struct iovec req;
+ struct iovec resp;
+ struct tevent_context *ev;
+ struct dcecli_connection *conn;
+};
+
+struct rpc_write_state {
+ struct tevent_context *ev;
+ DATA_BLOB buffer;
+ struct dcecli_connection *conn;
+ struct iovec in;
+ struct iovec out;
+};
+
+static void raw_tstream_trans_writev(struct tevent_req *subreq);
+static void raw_tstream_trans_readv_done(struct tevent_req *subreq);
+
+static struct tevent_req *raw_pipe_req_send(TALLOC_CTX *mem_ctx,
+ struct tevent_context *ev,
+ struct dcerpc_pipe *p,
+ struct iovec *req_data)
+{
+ struct tevent_req *req, *subreq;
+ struct rpc_write_state *state;
+ struct timeval endtime;
+
+ req = tevent_req_create(mem_ctx, &state, struct rpc_write_state);
+ if (req == NULL) {
+ return NULL;
+ }
+
+ /*
+ * #TODO check if stream is connected
+ */
+
+ state->ev = ev;
+ state->conn = p->conn;
+ state->in = *req_data;
+
+ endtime = timeval_current_ofs_msec(p->request_timeout);
+
+ subreq = tstream_writev_queue_send(state,
+ ev,
+ state->conn->transport.stream,
+ state->conn->transport.write_queue,
+ &state->in,
+ 1);
+ if (tevent_req_nomem(subreq, req)) {
+ return tevent_req_post(req, ev);
+ }
+
+ if (!tevent_req_set_endtime(subreq, ev, endtime)) {
+ return tevent_req_post(req, ev);
+ }
+
+ tevent_req_set_callback(subreq, raw_tstream_trans_writev, req);
+ state->buffer.data = talloc_array(state, uint8_t, 1);
+ state->buffer.length = talloc_array_length(state->buffer.data);
+ state->out.iov_base = state->buffer.data;
+ state->out.iov_len = state->buffer.length;
+ subreq = tstream_readv_send(state, ev,
+ p->conn->transport.stream,
+ &state->out,
+ 1);
+
+ if (tevent_req_nomem(subreq, req)) {
+ return tevent_req_post(req, ev);
+ }
+ if (!tevent_req_set_endtime(subreq, ev, endtime)) {
+ return tevent_req_post(req, ev);
+ }
+ tevent_req_set_callback(subreq, raw_tstream_trans_readv_done, req);
+
+ return req;
+}
+
+static void raw_tstream_trans_readv_done(struct tevent_req *subreq)
+{
+ struct tevent_req *req =
+ tevent_req_callback_data(subreq,
+ struct tevent_req);
+
+ struct rpc_write_state *state =
+ tevent_req_data(req,
+ struct rpc_write_state);
+
+ int ret;
+ int err = 0;
+ int to_read;
+ ssize_t ofs = 0;
+
+ ret = tstream_readv_recv(subreq, &err);
+ TALLOC_FREE(subreq);
+ if (ret == -1) {
+ tevent_req_nterror(req, map_nt_error_from_unix_common(err));
+ return;
+ }
+
+ to_read = tstream_pending_bytes(state->conn->transport.stream);
+ if (!to_read) {
+ /* we're done */
+ tevent_req_done(req);
+ return;
+ }
+
+ ofs = state->buffer.length;
+ state->buffer.data = talloc_realloc(state,
+ state->buffer.data,
+ uint8_t,
+ to_read + ofs);
+ state->buffer.length = to_read + state->buffer.length;
+ state->out.iov_base = (void *) (state->buffer.data + ofs);
+ state->out.iov_len = state->buffer.length - ofs;
+ subreq = tstream_readv_send(state,
+ state->ev,
+ state->conn->transport.stream,
+ &state->out,
+ 1);
+ if (tevent_req_nomem(subreq, req)) {
+ tevent_req_post(req, state->ev);
+ return;
+ }
+
+ tevent_req_set_callback(subreq, raw_tstream_trans_readv_done, req);
+}
+
+static void raw_tstream_trans_writev(struct tevent_req *subreq)
+{
+ struct tevent_req *req =
+ tevent_req_callback_data(subreq,
+ struct tevent_req);
+ int ret;
+ int err;
+ ret = tstream_writev_queue_recv(subreq, &err);
+ TALLOC_FREE(subreq);
+ if (ret == -1) {
+ tevent_req_nterror(req, map_nt_error_from_unix_common(err));
+ return;
+ }
+}
+
+static void rawpipe_bh_call_send_done(struct tevent_req *subreq);
+static struct tevent_req *rawpipe_bh_call_send(TALLOC_CTX *mem_ctx,
+ struct tevent_context *ev,
+ struct dcerpc_binding_handle *h,
+ const struct GUID *object,
+ uint32_t opnum,
+ uint32_t in_flags,
+ const uint8_t *in_data,
+ size_t in_length)
+{
+ struct rawpipe_bh_state *hs = dcerpc_binding_handle_data(h,
+ struct rawpipe_bh_state);
+ struct tevent_req *req;
+ bool ok;
+ struct tevent_req *subreq;
+ struct rawpipe_bh_raw_call_state* state;
+
+ req = tevent_req_create(mem_ctx, &state,
+ struct rawpipe_bh_raw_call_state);
+ if (req == NULL) {
+ return NULL;
+ }
+ state->req.iov_len = in_length;
+ state->req.iov_base = discard_const_p(uint8_t, in_data);
+
+ state->out_data = data_blob_null;
+ state->conn = hs->p->conn;
+ state->ev = ev;
+
+ ok = rawpipe_bh_is_connected(h);
+ if (!ok) {
+ tevent_req_nterror(req, NT_STATUS_CONNECTION_DISCONNECTED);
+ return tevent_req_post(req, ev);
+ }
+ subreq = raw_pipe_req_send(state, ev, hs->p,
+ &state->req);
+
+ if (tevent_req_nomem(subreq, req)) {
+ return tevent_req_post(req, ev);
+ }
+ tevent_req_set_callback(subreq, rawpipe_bh_call_send_done, req);
+ return req;
+}
+
+static void rawpipe_bh_call_send_done(struct tevent_req *subreq)
+{
+ struct tevent_req *req =
+ tevent_req_callback_data(subreq,
+ struct tevent_req);
+ struct rawpipe_bh_raw_call_state *state =
+ tevent_req_data(req,
+ struct rawpipe_bh_raw_call_state);
+ struct rpc_write_state *write_state =
+ tevent_req_data(subreq,
+ struct rpc_write_state);
+ NTSTATUS status;
+ if (tevent_req_is_nterror(subreq, &status)) {
+ tevent_req_nterror(req, status);
+ return;
+ }
+ state->out_data.data = talloc_move(state, &write_state->buffer.data);
+ state->out_data.length = write_state->buffer.length;
+ TALLOC_FREE(subreq);
+ tevent_req_done(req);
+}
+
+static NTSTATUS rawpipe_bh_call_recv(struct tevent_req *req,
+ TALLOC_CTX *mem_ctx,
+ uint8_t **out_data,
+ size_t *out_length,
+ uint32_t *out_flags)
+{
+ NTSTATUS status;
+ struct rawpipe_bh_raw_call_state *state =
+ tevent_req_data(req,
+ struct rawpipe_bh_raw_call_state);
+
+ *out_data = talloc_move(mem_ctx, &state->out_data.data);
+ *out_length = state->out_data.length;
+ status = NT_STATUS_OK;
+ if (tevent_req_is_nterror(req, &status)) {
+ }
+ tevent_req_received(req);
+
+ return status;
+}
+
+static const struct dcerpc_binding_handle_ops raw_pipe_ccli_bh_ops = {
+ .name = "raw_pipe_ccli",
+ .is_connected = rawpipe_bh_is_connected,
+ .set_timeout = rawpipe_bh_set_timeout,
+ .auth_info = rawpipe_bh_auth_info,
+ .raw_call_send = rawpipe_bh_call_send,
+ .raw_call_recv = rawpipe_bh_call_recv,
+ .disconnect_send = rawpipe_bh_disconnect_send,
+ .disconnect_recv = rawpipe_bh_disconnect_recv,
+
+ .ref_alloc = rawpipe_bh_ref_alloc,
+};
+
+static struct dcerpc_binding_handle *create_rawpipe_handle(struct dcerpc_pipe *p)
+{
+ struct dcerpc_binding_handle *h = NULL;
+
+ struct rawpipe_bh_state *hs;
+ h = dcerpc_binding_handle_create(p,
+ &raw_pipe_ccli_bh_ops,
+ NULL,
+ NULL,
+ &hs,
+ struct rawpipe_bh_state,
+ __location__);
+ if (h == NULL) {
+ return NULL;
+ }
+ hs->p = p;
+ return h;
+}
+
+
+struct rawpipe_test_data
+{
+ struct dcerpc_pipe *p;
+ struct dcerpc_binding_handle *h;
+};
+
+static NTSTATUS write_something(TALLOC_CTX* ctx,
+ struct dcerpc_binding_handle *handle,
+ DATA_BLOB *blob_in,
+ DATA_BLOB *blob_out)
+{
+ uint32_t outflags;
+ NTSTATUS status = dcerpc_binding_handle_raw_call(handle,
+ NULL,
+ 0,
+ 0,
+ blob_in->data,
+ blob_in->length,
+ ctx,
+ &blob_out->data,
+ &blob_out->length,
+ &outflags);
+ return status;
+}
+
+static NTSTATUS connect_server_smb2(TALLOC_CTX *mem_ctx,
+ struct torture_context *tctx,
+ struct smb2_tree **tree)
+{
+ NTSTATUS status;
+ struct cli_credentials *credentials = cmdline_credentials;
+ struct smbcli_options options;
+ struct smbcli_session_options session_options;
+ const char *host = torture_setting_string(tctx, "host", NULL);
+
+ lpcfg_smbcli_options(tctx->lp_ctx, &options);
+
+ lpcfg_smbcli_session_options(tctx->lp_ctx, &session_options);
+
+ status = smb2_connect(mem_ctx,
+ host,
+ lpcfg_smb_ports(tctx->lp_ctx),
+ "IPC$",
+ lpcfg_resolve_context(tctx->lp_ctx),
+ credentials,
+ tree,
+ tctx->ev,
+ &options,
+ lpcfg_socket_options(tctx->lp_ctx),
+ lpcfg_gensec_settings(tctx, tctx->lp_ctx)
+ );
+ return status;
+}
+
+static NTSTATUS connect_server_smb(TALLOC_CTX *mem_ctx,
+ struct torture_context *tctx,
+ struct smbcli_state **cli)
+{
+ NTSTATUS status;
+ struct cli_credentials *credentials = cmdline_credentials;
+ struct smbcli_options options;
+ struct smbcli_session_options session_options;
+ const char *host = torture_setting_string(tctx, "host", NULL);
+
+ lpcfg_smbcli_options(tctx->lp_ctx, &options);
+
+ lpcfg_smbcli_session_options(tctx->lp_ctx, &session_options);
+
+ status = smbcli_full_connection(mem_ctx,
+ cli,
+ host,
+ lpcfg_smb_ports(tctx->lp_ctx),
+ "IPC$", NULL,
+ lpcfg_socket_options(tctx->lp_ctx),
+ credentials,
+ lpcfg_resolve_context(tctx->lp_ctx),
+ tctx->ev, &options, &session_options,
+ lpcfg_gensec_settings(tctx,
+ tctx->lp_ctx));
+ return status;
+}
+
+static bool test_rawpipe_simple_echo(struct torture_context *tctx,
+ const void *data)
+{
+ NTSTATUS status;
+ bool ret = true;
+ DATA_BLOB out;
+ DATA_BLOB in;
+ const char *test_message = "hello";
+ struct rawpipe_test_data *test_data =
+ talloc_get_type(data,
+ struct rawpipe_test_data);
+ TALLOC_CTX *mem_ctx = talloc_init("test_rawpipe_simple_echo");
+
+ in.data = talloc_array(mem_ctx, uint8_t, strlen(test_message) + 1);
+ in.length = talloc_array_length(in.data);
+ memcpy(in.data, test_message, in.length);
+ status = write_something(mem_ctx, test_data->h, &in, &out);
+ torture_assert_ntstatus_ok(tctx, status, "failed to write to pipe\n");
+ torture_assert(tctx,
+ in.length == out.length,
+ "message sizes are different");
+ ret = memcmp(in.data, out.data, in.length) == 0;
+ torture_assert(tctx, ret, "messages differ");
+ TALLOC_FREE(mem_ctx);
+ return ret;
+}
+
+/*
+ * The idea here is to send a large message size that exceeds the normal
+ * hardcoded Max Ioctl hard coded limit of 4280 bytes, this will generate
+ * will result in a BUFFER_OVERFLOW error at the SMB layer in the response
+ * from the server [1].
+ *
+ * [1] It seems we don't see the SMB BUFFER_OVERFLOW status at this layer
+ * however we can detect that the message is clipped to the current
+ * limit of 4280 bytes.
+ */
+static bool test_rawpipe_large_message(struct torture_context *tctx,
+ const void *data)
+{
+ NTSTATUS status;
+ bool ret = true;
+ const char * test_message = "large";
+ uint32_t limit = 4280;
+ uint32_t expected = 6500;
+ DATA_BLOB out;
+ DATA_BLOB in;
+ struct rawpipe_test_data *test_data =
+ talloc_get_type(data,
+ struct rawpipe_test_data);
+
+ TALLOC_CTX *mem_ctx = talloc_init("test_rawpipe_large_message");
+ in.data = talloc_array(mem_ctx, uint8_t, strlen(test_message) + 1);
+ in.length = talloc_array_length(in.data);
+ memcpy(in.data, test_message, in.length);
+
+ status = write_something(tctx, test_data->h, &in, &out);
+ torture_assert_ntstatus_ok_goto(tctx, status, ret, done, "failed to write to pipe\n");
+ ret = (out.length != expected) && (out.length == limit);
+ torture_comment(tctx, "test_rawpipe_large_message test was %s (received %d bytes expected %d)\n", ret ? "successful" : "unsuccsessful", (uint32_t)out.length, limit);
+done:
+ return ret;
+}
+
+static bool raw_smb1_setup(struct torture_context *tctx,
+ void **ppdata)
+{
+ struct rawpipe_test_data *data;
+ NTSTATUS status;
+ struct dcerpc_pipe *p;
+ struct smbcli_state *cli = NULL;
+ struct dcerpc_binding_handle *h;
+
+ data = talloc(NULL, struct rawpipe_test_data);
+ status = connect_server_smb(data, tctx, &cli);
+ torture_assert_ntstatus_ok(tctx, status, "failed to connect to server");
+
+ p = dcerpc_pipe_init(tctx, tctx->ev);
+
+ status = dcerpc_pipe_open_smb(p, cli->tree, "rawpipe");
+ torture_assert_ntstatus_ok(tctx, status, "could not open pipe\n");
+
+ h = create_rawpipe_handle(p);
+ torture_assert(tctx, h != NULL, "failed to create handle\n");
+
+ status = tstream_smbXcli_np_use_trans(p->conn->transport.stream);
+ torture_assert_ntstatus_ok(tctx,
+ status,
+ "failed to set trans mode on pipe\n");
+
+ data->p = p;
+ data->h = h;
+
+ *ppdata = data;
+ return true;
+}
+
+static bool raw_smb2_setup(struct torture_context *tctx,
+ void **ppdata)
+{
+ struct rawpipe_test_data *data;
+ NTSTATUS status;
+ struct dcerpc_pipe *p;
+ struct smb2_tree *tree = NULL;
+ struct dcerpc_binding_handle *h;
+
+ data = talloc(NULL, struct rawpipe_test_data);
+
+ status = connect_server_smb2(data, tctx, &tree);
+ torture_assert_ntstatus_ok(tctx, status, "failed to connect to server");
+
+ p = dcerpc_pipe_init(tctx, tctx->ev);
+
+ status = dcerpc_pipe_open_smb2(p, tree, "rawpipe");
+ torture_assert_ntstatus_ok(tctx, status, "could not open pipe\n");
+
+ h = create_rawpipe_handle(p);
+ torture_assert(tctx, h != NULL, "failed to create handle\n");
+
+ status = tstream_smbXcli_np_use_trans(p->conn->transport.stream);
+ torture_assert_ntstatus_ok(tctx,
+ status,
+ "failed to set trans mode on pipe\n");
+
+ data->p = p;
+ data->h = h;
+
+ *ppdata = data;
+ return true;
+}
+
+
+static bool raw_smb1_teardown(struct torture_context *tctx,
+ void *data)
+{
+ return true;
+}
+
+static bool raw_smb2_teardown(struct torture_context *tctx,
+ void *data)
+{
+ return true;
+}
+
+static void add_raw_test(struct torture_suite *suite,
+ const char *name,
+ bool (*run) (struct torture_context *test,
+ const void *tcase_data),
+ bool use_smb2)
+{
+ struct torture_tcase *tcase = torture_suite_add_tcase(suite,
+ name);
+ if (use_smb2) {
+ torture_tcase_set_fixture(tcase,
+ raw_smb2_setup,
+ raw_smb1_teardown);
+ } else {
+ torture_tcase_set_fixture(tcase,
+ raw_smb1_setup,
+ raw_smb2_teardown);
+ }
+ torture_tcase_add_simple_test_const(tcase, name, run);
+}
+
+struct torture_suite *torture_raw_rawpipe(TALLOC_CTX *mem_ctx)
+{
+ struct torture_suite *suite = torture_suite_create(mem_ctx, "rawpipe");
+ /* SMB1 tests */
+ add_raw_test(suite, "smb1_simple_echo", test_rawpipe_simple_echo, false);
+ add_raw_test(suite, "smb1_echo_large_message", test_rawpipe_large_message, false);
+ /* SMB2 tests */
+ add_raw_test(suite, "smb2_simple_echo", test_rawpipe_simple_echo, true);
+ add_raw_test(suite, "smb2_echo_large_message", test_rawpipe_large_message, true);
+ return suite;
+}
diff --git a/source4/torture/wscript_build b/source4/torture/wscript_build
index 8966026..5e3d5d9 100755
--- a/source4/torture/wscript_build
+++ b/source4/torture/wscript_build
@@ -19,7 +19,7 @@ bld.SAMBA_MODULE('TORTURE_BASIC',
bld.SAMBA_MODULE('TORTURE_RAW',
- source='raw/qfsinfo.c raw/qfileinfo.c raw/setfileinfo.c raw/search.c raw/close.c raw/open.c raw/mkdir.c raw/oplock.c raw/notify.c raw/mux.c raw/ioctl.c raw/chkpath.c raw/unlink.c raw/read.c raw/context.c raw/session.c raw/write.c raw/lock.c raw/pingpong.c raw/lockbench.c raw/lookuprate.c raw/tconrate.c raw/openbench.c raw/rename.c raw/eas.c raw/streams.c raw/acls.c raw/seek.c raw/samba3hide.c raw/samba3misc.c raw/composite.c raw/raw.c raw/offline.c',
+ source='raw/qfsinfo.c raw/qfileinfo.c raw/setfileinfo.c raw/search.c raw/close.c raw/open.c raw/mkdir.c raw/oplock.c raw/notify.c raw/mux.c raw/ioctl.c raw/chkpath.c raw/unlink.c raw/read.c raw/context.c raw/session.c raw/write.c raw/lock.c raw/pingpong.c raw/lockbench.c raw/lookuprate.c raw/tconrate.c raw/openbench.c raw/rename.c raw/eas.c raw/streams.c raw/acls.c raw/seek.c raw/samba3hide.c raw/samba3misc.c raw/composite.c raw/raw.c raw/offline.c raw/rawpipe.c',
autoproto='raw/proto.h',
subsystem='smbtorture',
init_function='torture_raw_init',
--
2.1.4
From 60c2ea504e66ac785f9b8ee0c1680e22338520ca Mon Sep 17 00:00:00 2001
From: Noel Power <noel.power at suse.com>
Date: Thu, 5 Jun 2014 10:52:54 +0100
Subject: [PATCH 5/6] Make it possible to dynamically set the max_data in SMB
Pipe transaction
Some services like WSP can send larger messages than the current 'Max Ioctl'
limit; this results in the server producing a BUFFER_OVERFLOW status (and
additionally clipping the message sent). Add support to allow a client to
modify the hardcoded 'Max Ioctl' limit so that the server can successfully
send larger responses.
Signed-off-by: Noel Power <noel.power at suse.com>
---
libcli/smb/tstream_smbXcli_np.c | 33 +++++++++++++++++++++++++--------
libcli/smb/tstream_smbXcli_np.h | 3 +++
2 files changed, 28 insertions(+), 8 deletions(-)
diff --git a/libcli/smb/tstream_smbXcli_np.c b/libcli/smb/tstream_smbXcli_np.c
index a59db13..1ffde42 100644
--- a/libcli/smb/tstream_smbXcli_np.c
+++ b/libcli/smb/tstream_smbXcli_np.c
@@ -57,6 +57,7 @@ struct tstream_smbXcli_np {
uint16_t fnum;
uint64_t fid_persistent;
uint64_t fid_volatile;
+ uint32_t max_data;
struct {
bool active;
@@ -358,7 +359,7 @@ NTSTATUS _tstream_smbXcli_np_open_recv(struct tevent_req *req,
cli_nps->fnum = state->fnum;
cli_nps->fid_persistent = state->fid_persistent;
cli_nps->fid_volatile = state->fid_volatile;
-
+ cli_nps->max_data = TSTREAM_SMBXCLI_NP_MAX_BUF_SIZE;
talloc_set_destructor(cli_nps, tstream_smbXcli_np_destructor);
talloc_set_destructor(cli_nps->conn_ref,
tstream_smbXcli_np_ref_destructor);
@@ -426,6 +427,14 @@ NTSTATUS tstream_smbXcli_np_use_trans(struct tstream_context *stream)
return NT_STATUS_OK;
}
+void tstream_smbXcli_np_set_max_data(struct tstream_context *stream,
+ uint32_t max_data)
+{
+ struct tstream_smbXcli_np *cli_nps =
+ tstream_context_data(stream, struct tstream_smbXcli_np);
+ cli_nps->max_data = max_data;
+}
+
unsigned int tstream_smbXcli_np_set_timeout(struct tstream_context *stream,
unsigned int timeout)
{
@@ -524,6 +533,7 @@ static void tstream_smbXcli_np_writev_write_next(struct tevent_req *req)
struct tevent_req *subreq;
size_t i;
size_t left = 0;
+ uint32_t max_data = cli_nps->max_data;
for (i=0; i < state->count; i++) {
left += state->vector[i].iov_len;
@@ -536,7 +546,7 @@ static void tstream_smbXcli_np_writev_write_next(struct tevent_req *req)
}
cli_nps->write.ofs = 0;
- cli_nps->write.left = MIN(left, TSTREAM_SMBXCLI_NP_MAX_BUF_SIZE);
+ cli_nps->write.left = MIN(left, max_data);
cli_nps->write.buf = talloc_realloc(cli_nps, cli_nps->write.buf,
uint8_t, cli_nps->write.left);
if (tevent_req_nomem(cli_nps->write.buf, req)) {
@@ -803,6 +813,7 @@ static void tstream_smbXcli_np_readv_read_next(struct tevent_req *req)
tstream_context_data(state->stream,
struct tstream_smbXcli_np);
struct tevent_req *subreq;
+ uint32_t max_data = cli_nps->max_data;
/*
* copy the pending buffer first
@@ -858,14 +869,14 @@ static void tstream_smbXcli_np_readv_read_next(struct tevent_req *req)
cli_nps->session,
cli_nps->fnum,
0, /* offset */
- TSTREAM_SMBXCLI_NP_MAX_BUF_SIZE);
+ max_data);
} else {
subreq = smb2cli_read_send(state, state->ev,
cli_nps->conn,
cli_nps->timeout,
cli_nps->session,
cli_nps->tcon,
- TSTREAM_SMBXCLI_NP_MAX_BUF_SIZE, /* length */
+ max_data, /* length */
0, /* offset */
cli_nps->fid_persistent,
cli_nps->fid_volatile,
@@ -891,6 +902,7 @@ static void tstream_smbXcli_np_readv_trans_start(struct tevent_req *req)
tstream_context_data(state->stream,
struct tstream_smbXcli_np);
struct tevent_req *subreq;
+ uint32_t max_data = cli_nps->max_data;
state->trans.im = tevent_create_immediate(state);
if (tevent_req_nomem(state->trans.im, req)) {
@@ -913,7 +925,7 @@ static void tstream_smbXcli_np_readv_trans_start(struct tevent_req *req)
NULL, 0, 0,
cli_nps->write.buf,
cli_nps->write.ofs,
- TSTREAM_SMBXCLI_NP_MAX_BUF_SIZE);
+ max_data);
} else {
DATA_BLOB in_input_buffer = data_blob_null;
DATA_BLOB in_output_buffer = data_blob_null;
@@ -932,7 +944,7 @@ static void tstream_smbXcli_np_readv_trans_start(struct tevent_req *req)
0, /* in_max_input_length */
&in_input_buffer,
/* in_max_output_length */
- TSTREAM_SMBXCLI_NP_MAX_BUF_SIZE,
+ max_data,
&in_output_buffer,
SMB2_IOCTL_FLAG_IS_FSCTL);
}
@@ -959,10 +971,14 @@ static void tstream_smbXcli_np_readv_trans_done(struct tevent_req *subreq)
tevent_req_data(req, struct tstream_smbXcli_np_readv_state);
struct tstream_smbXcli_np *cli_nps =
tstream_context_data(state->stream, struct tstream_smbXcli_np);
+ uint32_t max_data = cli_nps->max_data;
uint8_t *rcvbuf;
uint32_t received;
NTSTATUS status;
+ /* reset max_data; it must be set again each time a larger limit is required */
+ cli_nps->max_data = TSTREAM_SMBXCLI_NP_MAX_BUF_SIZE;
+
if (cli_nps->is_smb1) {
status = smb1cli_trans_recv(subreq, state, NULL, NULL, 0, NULL,
NULL, 0, NULL,
@@ -995,7 +1011,7 @@ static void tstream_smbXcli_np_readv_trans_done(struct tevent_req *subreq)
return;
}
- if (received > TSTREAM_SMBXCLI_NP_MAX_BUF_SIZE) {
+ if (received > max_data) {
tstream_smbXcli_np_readv_disconnect_now(req, EIO, __location__);
return;
}
@@ -1045,6 +1061,7 @@ static void tstream_smbXcli_np_readv_read_done(struct tevent_req *subreq)
tevent_req_data(req, struct tstream_smbXcli_np_readv_state);
struct tstream_smbXcli_np *cli_nps =
tstream_context_data(state->stream, struct tstream_smbXcli_np);
+ uint32_t max_data = cli_nps->max_data;
uint8_t *rcvbuf;
uint32_t received;
NTSTATUS status;
@@ -1079,7 +1096,7 @@ static void tstream_smbXcli_np_readv_read_done(struct tevent_req *subreq)
return;
}
- if (received > TSTREAM_SMBXCLI_NP_MAX_BUF_SIZE) {
+ if (received > max_data) {
TALLOC_FREE(subreq);
tstream_smbXcli_np_readv_disconnect_now(req, EIO, __location__);
return;
diff --git a/libcli/smb/tstream_smbXcli_np.h b/libcli/smb/tstream_smbXcli_np.h
index e8c5c39..d7a4e3c 100644
--- a/libcli/smb/tstream_smbXcli_np.h
+++ b/libcli/smb/tstream_smbXcli_np.h
@@ -69,4 +69,7 @@ unsigned int tstream_smbXcli_np_set_timeout(struct tstream_context *stream,
*/
#define TSTREAM_SMBXCLI_NP_MAX_BUF_SIZE 4280
+void tstream_smbXcli_np_set_max_data(struct tstream_context *stream,
+ uint32_t max_data);
+
#endif /* _CLI_NP_TSTREAM_H_ */
--
2.1.4
From c177bdca9e93eac8b2ae58a0e605ebdd94d6e966 Mon Sep 17 00:00:00 2001
From: Noel Power <noel.power at suse.com>
Date: Tue, 24 May 2016 14:41:03 +0100
Subject: [PATCH 6/6] adjust existing and add new torture test(s) for
tstream_smbXcli_np_set_max_data
Adjust tests to ensure tstream_smbXcli_np_set_max_data works as expected
Signed-off-by: Noel Power <noel.power at suse.com>
---
source4/torture/raw/rawpipe.c | 69 +++++++++++++++++++++++++++++++++----------
1 file changed, 53 insertions(+), 16 deletions(-)
diff --git a/source4/torture/raw/rawpipe.c b/source4/torture/raw/rawpipe.c
index 9e98461..c782ef1 100644
--- a/source4/torture/raw/rawpipe.c
+++ b/source4/torture/raw/rawpipe.c
@@ -524,18 +524,9 @@ static bool test_rawpipe_simple_echo(struct torture_context *tctx,
return ret;
}
-/*
- * The idea here is to send a large message size that exceeds the normal
- * hardcoded Max Ioctl hard coded limit of 4280 bytes, this will generate
- * will result in a BUFFER_OVERFLOW error at the SMB layer in the response
- * from the server [1].
- *
- * [1] It seems we don't see the SMB BUFFER_OVERFLOW status at this layer
- * however we can detect that the message is clipped to the current
- * limit of 4280 bytes.
- */
-static bool test_rawpipe_large_message(struct torture_context *tctx,
- const void *data)
+static bool test_rawpipe_large_message_impl(struct torture_context *tctx,
+ const void *data,
+ bool increase_max_ioctl)
{
NTSTATUS status;
bool ret = true;
@@ -547,20 +538,64 @@ static bool test_rawpipe_large_message(struct torture_context *tctx,
struct rawpipe_test_data *test_data =
talloc_get_type(data,
struct rawpipe_test_data);
+ struct tstream_context *stream = NULL;
TALLOC_CTX *mem_ctx = talloc_init("test_rawpipe_large_message");
in.data = talloc_array(mem_ctx, uint8_t, strlen(test_message) + 1);
in.length = talloc_array_length(in.data);
memcpy(in.data, test_message, in.length);
+ if (increase_max_ioctl) {
+ stream = test_data->p->conn->transport.stream;
+ tstream_smbXcli_np_set_max_data(stream, expected);
+ }
status = write_something(tctx, test_data->h, &in, &out);
torture_assert_ntstatus_ok_goto(tctx, status, ret, done, "failed to write to pipe\n");
- ret = (out.length != expected) && (out.length == limit);
- torture_comment(tctx, "test_rawpipe_large_message test was %s (received %d bytes expected %d)\n", ret ? "successful" : "unsuccsessful", (uint32_t)out.length, limit);
+ if (increase_max_ioctl) {
+ ret = out.length == expected;
+ } else {
+ ret = (out.length != expected) && (out.length == limit);
+ }
+ torture_comment(tctx, "test_rawpipe_large_message test was %s (received %d bytes expected %d)\n", ret ? "successful" : "unsuccsessful", (uint32_t)out.length, increase_max_ioctl ? expected : limit);
done:
return ret;
}
+/*
+ * The idea here is to send a message whose size exceeds the normal
+ * hardcoded Max Ioctl limit of 4280 bytes; this will result in a
+ * BUFFER_OVERFLOW error at the SMB layer in the response from the
+ * server [1].
+ *
+ * [1] It seems we don't see the SMB BUFFER_OVERFLOW status at this layer,
+ * however we can detect that the message is clipped to the current
+ * limit of 4280 bytes.
+ */
+static bool test_rawpipe_large_message_clipped(struct torture_context *tctx,
+ const void *data)
+{
+ return test_rawpipe_large_message_impl(tctx, data, false);
+}
+
+/*
+ * The idea here is to send a message whose size exceeds the normal
+ * hardcoded Max Ioctl limit of 4280 bytes; this would normally
+ * result in a BUFFER_OVERFLOW error at the SMB layer in the response
+ * from the server [1]. However, here we test 'tstream_smbXcli_np_set_max_data',
+ * a new function which allows us to adjust the size limit so the message
+ * should no longer be clipped.
+ *
+ * [1] It seems we don't see the SMB BUFFER_OVERFLOW status at this layer,
+ * however we can detect that the message is clipped to the current
+ * limit of 4280 bytes.
+ */
+
+static bool test_rawpipe_large_message_newmax(struct torture_context *tctx,
+ const void *data)
+{
+ return test_rawpipe_large_message_impl(tctx, data, true);
+}
+
static bool raw_smb1_setup(struct torture_context *tctx,
void **ppdata)
{
@@ -666,9 +701,11 @@ struct torture_suite *torture_raw_rawpipe(TALLOC_CTX *mem_ctx)
struct torture_suite *suite = torture_suite_create(mem_ctx, "rawpipe");
/* SMB1 tests */
add_raw_test(suite, "smb1_simple_echo", test_rawpipe_simple_echo, false);
- add_raw_test(suite, "smb1_echo_large_message", test_rawpipe_large_message, false);
+ add_raw_test(suite, "smb1_echo_large_message_clipped", test_rawpipe_large_message_clipped, false);
+ add_raw_test(suite, "smb1_echo_large_message_newmax", test_rawpipe_large_message_newmax, false);
/* SMB2 tests */
add_raw_test(suite, "smb2_simple_echo", test_rawpipe_simple_echo, true);
- add_raw_test(suite, "smb2_echo_large_message", test_rawpipe_large_message, true);
+ add_raw_test(suite, "smb2_echo_large_message_clipped", test_rawpipe_large_message_clipped, true);
+ add_raw_test(suite, "smb2_echo_large_message_newmax", test_rawpipe_large_message_newmax, true);
return suite;
}
--
2.1.4
More information about the samba-technical
mailing list