[PATCH] Make server-side copy (copy-chunk) async

Jeremy Allison jra at samba.org
Wed Mar 22 19:11:03 UTC 2017


On Wed, Mar 22, 2017 at 07:14:05PM +0100, Ralph Böhme via samba-technical wrote:
> Hi!
> 
> Attached is a patchset I've been working on that makes our server-side copy
> asynchronous.
> 
> At the SMB layer the chunks are processed sequentially in order, but merged
> left to right if copy ranges are adjacent. In the backend (vfs_default) I'm
> essentially just using pread|pwrite_send|recv instead of the sync versions.
> 
> I'm also adding a small new feature to tevent: cancel forwarding. It allows
> the implementation of a tevent request that uses subrequests to have cancels
> forwarded automagically by the tevent library. This is useful for forwarding
> cancels through a chain of subrequests.
> 
> Please review & comment but don't push yet.

Wow - very exciting Ralph! I'll look it over as I get
time at Vault.


> From 1674efe506c59c8fbff1b43e8b882c55f79d399c Mon Sep 17 00:00:00 2001
> From: Ralph Boehme <slow at samba.org>
> Date: Wed, 15 Mar 2017 16:16:49 +0100
> Subject: [PATCH 01/16] lib/tevent: add struct private_cancel and move the
>  cancel function into it
> 
> The next commit will add more members to the new struct.
> 
> Signed-off-by: Ralph Boehme <slow at samba.org>
> ---
>  lib/tevent/tevent_internal.h | 20 +++++++++++---------
>  lib/tevent/tevent_req.c      |  8 ++++----
>  2 files changed, 15 insertions(+), 13 deletions(-)
> 
> diff --git a/lib/tevent/tevent_internal.h b/lib/tevent/tevent_internal.h
> index a5f1ebde..d60d354 100644
> --- a/lib/tevent/tevent_internal.h
> +++ b/lib/tevent/tevent_internal.h
> @@ -65,15 +65,6 @@ struct tevent_req {
>  	tevent_req_print_fn private_print;
>  
>  	/**
> -	 * @brief A function to cancel the request
> -	 *
> -	 * The implementation might want to set a function
> -	 * that is called when the tevent_req_cancel() function
> -	 * was called.
> -	 */
> -	tevent_req_cancel_fn private_cancel;
> -
> -	/**
>  	 * @brief A function to cleanup the request
>  	 *
>  	 * The implementation might want to set a function
> @@ -85,6 +76,17 @@ struct tevent_req {
>  		enum tevent_req_state state;
>  	} private_cleanup;
>  
> +	struct {
> +		/**
> +		 * @brief A function to cancel the request
> +		 *
> +		 * The implementation might want to set a function
> +		 * that is called when the tevent_req_cancel() function
> +		 * was called.
> +		 */
> +		tevent_req_cancel_fn fn;
> +	} private_cancel;
> +
>  	/**
>  	 * @brief Internal state of the request
>  	 *
> diff --git a/lib/tevent/tevent_req.c b/lib/tevent/tevent_req.c
> index e309c3d..c066b7c 100644
> --- a/lib/tevent/tevent_req.c
> +++ b/lib/tevent/tevent_req.c
> @@ -243,7 +243,7 @@ void tevent_req_received(struct tevent_req *req)
>  	talloc_set_destructor(req, NULL);
>  
>  	req->private_print = NULL;
> -	req->private_cancel = NULL;
> +	req->private_cancel.fn = NULL;
>  
>  	TALLOC_FREE(req->internal.trigger);
>  	TALLOC_FREE(req->internal.timer);
> @@ -341,16 +341,16 @@ void tevent_req_set_print_fn(struct tevent_req *req, tevent_req_print_fn fn)
>  
>  void tevent_req_set_cancel_fn(struct tevent_req *req, tevent_req_cancel_fn fn)
>  {
> -	req->private_cancel = fn;
> +	req->private_cancel.fn = fn;
>  }
>  
>  bool _tevent_req_cancel(struct tevent_req *req, const char *location)
>  {
> -	if (req->private_cancel == NULL) {
> +	if (req->private_cancel.fn == NULL) {
>  		return false;
>  	}
>  
> -	return req->private_cancel(req);
> +	return req->private_cancel.fn(req);
>  }
>  
>  void tevent_req_set_cleanup_fn(struct tevent_req *req, tevent_req_cleanup_fn fn)
> -- 
> 2.9.3
> 
> 
> From 8e8cf4b77215ed05cf67861efae5bd50553ccdb8 Mon Sep 17 00:00:00 2001
> From: Ralph Boehme <slow at samba.org>
> Date: Mon, 13 Mar 2017 07:40:24 +0100
> Subject: [PATCH 02/16] tevent: version 0.9.32: add
>  tevent_req_set_cancel_forwarding()
> 
> This function requests automatic forwarding of a cancel to a tevent
> subrequest when tevent_req_cancel() is called on a request.
> 
> In an async computation with nested subrequests it is possible that a
> subrequest is cancellable (as it called tevent_req_set_cancel_fn()), but
> intermediate subrequests are not.
> 
> To avoid the necessity for all intermediate requests to install cancel
> handlers with tevent_req_set_cancel_fn() and manually forward the cancel
> from the cancel handler, a request that creates a subrequest can call
> tevent_req_set_cancel_forwarding(req, subreq) and then tevent takes care of
> cancelling the subrequest when tevent_req_cancel() is called on the request.
> 
> Note that for a single request you can only either set a cancel function
> with tevent_req_set_cancel_fn() or request cancel forwarding with
> tevent_req_set_cancel_forwarding(), not both.
> 
> Signed-off-by: Ralph Boehme <slow at samba.org>
> ---
>  lib/tevent/ABI/tevent-0.9.32.sigs | 100 ++++++++++++++++++++++++++++++++++++++
>  lib/tevent/tevent.h               |  27 ++++++++++
>  lib/tevent/tevent_internal.h      |  14 ++++++
>  lib/tevent/tevent_req.c           |  44 +++++++++++++++--
>  lib/tevent/wscript                |   2 +-
>  5 files changed, 183 insertions(+), 4 deletions(-)
>  create mode 100644 lib/tevent/ABI/tevent-0.9.32.sigs
> 
> diff --git a/lib/tevent/ABI/tevent-0.9.32.sigs b/lib/tevent/ABI/tevent-0.9.32.sigs
> new file mode 100644
> index 0000000..5ed37a8
> --- /dev/null
> +++ b/lib/tevent/ABI/tevent-0.9.32.sigs
> @@ -0,0 +1,100 @@
> +_tevent_add_fd: struct tevent_fd *(struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *)
> +_tevent_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *)
> +_tevent_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *)
> +_tevent_create_immediate: struct tevent_immediate *(TALLOC_CTX *, const char *)
> +_tevent_loop_once: int (struct tevent_context *, const char *)
> +_tevent_loop_until: int (struct tevent_context *, bool (*)(void *), void *, const char *)
> +_tevent_loop_wait: int (struct tevent_context *, const char *)
> +_tevent_queue_create: struct tevent_queue *(TALLOC_CTX *, const char *, const char *)
> +_tevent_req_callback_data: void *(struct tevent_req *)
> +_tevent_req_cancel: bool (struct tevent_req *, const char *)
> +_tevent_req_create: struct tevent_req *(TALLOC_CTX *, void *, size_t, const char *, const char *)
> +_tevent_req_data: void *(struct tevent_req *)
> +_tevent_req_done: void (struct tevent_req *, const char *)
> +_tevent_req_error: bool (struct tevent_req *, uint64_t, const char *)
> +_tevent_req_nomem: bool (const void *, struct tevent_req *, const char *)
> +_tevent_req_notify_callback: void (struct tevent_req *, const char *)
> +_tevent_req_oom: void (struct tevent_req *, const char *)
> +_tevent_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *)
> +_tevent_threaded_schedule_immediate: void (struct tevent_threaded_context *, struct tevent_immediate *, tevent_immediate_handler_t, void *, const char *, const char *)
> +tevent_backend_list: const char **(TALLOC_CTX *)
> +tevent_cleanup_pending_signal_handlers: void (struct tevent_signal *)
> +tevent_common_add_fd: struct tevent_fd *(struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *)
> +tevent_common_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *)
> +tevent_common_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *)
> +tevent_common_add_timer_v2: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *)
> +tevent_common_check_signal: int (struct tevent_context *)
> +tevent_common_context_destructor: int (struct tevent_context *)
> +tevent_common_fd_destructor: int (struct tevent_fd *)
> +tevent_common_fd_get_flags: uint16_t (struct tevent_fd *)
> +tevent_common_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t)
> +tevent_common_fd_set_flags: void (struct tevent_fd *, uint16_t)
> +tevent_common_have_events: bool (struct tevent_context *)
> +tevent_common_loop_immediate: bool (struct tevent_context *)
> +tevent_common_loop_timer_delay: struct timeval (struct tevent_context *)
> +tevent_common_loop_wait: int (struct tevent_context *, const char *)
> +tevent_common_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *)
> +tevent_common_threaded_activate_immediate: void (struct tevent_context *)
> +tevent_common_wakeup: int (struct tevent_context *)
> +tevent_common_wakeup_fd: int (int)
> +tevent_common_wakeup_init: int (struct tevent_context *)
> +tevent_context_init: struct tevent_context *(TALLOC_CTX *)
> +tevent_context_init_byname: struct tevent_context *(TALLOC_CTX *, const char *)
> +tevent_context_init_ops: struct tevent_context *(TALLOC_CTX *, const struct tevent_ops *, void *)
> +tevent_debug: void (struct tevent_context *, enum tevent_debug_level, const char *, ...)
> +tevent_fd_get_flags: uint16_t (struct tevent_fd *)
> +tevent_fd_set_auto_close: void (struct tevent_fd *)
> +tevent_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t)
> +tevent_fd_set_flags: void (struct tevent_fd *, uint16_t)
> +tevent_get_trace_callback: void (struct tevent_context *, tevent_trace_callback_t *, void *)
> +tevent_loop_allow_nesting: void (struct tevent_context *)
> +tevent_loop_set_nesting_hook: void (struct tevent_context *, tevent_nesting_hook, void *)
> +tevent_num_signals: size_t (void)
> +tevent_queue_add: bool (struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *)
> +tevent_queue_add_entry: struct tevent_queue_entry *(struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *)
> +tevent_queue_add_optimize_empty: struct tevent_queue_entry *(struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *)
> +tevent_queue_length: size_t (struct tevent_queue *)
> +tevent_queue_running: bool (struct tevent_queue *)
> +tevent_queue_start: void (struct tevent_queue *)
> +tevent_queue_stop: void (struct tevent_queue *)
> +tevent_queue_wait_recv: bool (struct tevent_req *)
> +tevent_queue_wait_send: struct tevent_req *(TALLOC_CTX *, struct tevent_context *, struct tevent_queue *)
> +tevent_re_initialise: int (struct tevent_context *)
> +tevent_register_backend: bool (const char *, const struct tevent_ops *)
> +tevent_req_default_print: char *(struct tevent_req *, TALLOC_CTX *)
> +tevent_req_defer_callback: void (struct tevent_req *, struct tevent_context *)
> +tevent_req_is_error: bool (struct tevent_req *, enum tevent_req_state *, uint64_t *)
> +tevent_req_is_in_progress: bool (struct tevent_req *)
> +tevent_req_poll: bool (struct tevent_req *, struct tevent_context *)
> +tevent_req_post: struct tevent_req *(struct tevent_req *, struct tevent_context *)
> +tevent_req_print: char *(TALLOC_CTX *, struct tevent_req *)
> +tevent_req_received: void (struct tevent_req *)
> +tevent_req_reset_endtime: void (struct tevent_req *)
> +tevent_req_set_callback: void (struct tevent_req *, tevent_req_fn, void *)
> +tevent_req_set_cancel_fn: void (struct tevent_req *, tevent_req_cancel_fn)
> +tevent_req_set_cancel_forwarding: void (struct tevent_req *, struct tevent_req *)
> +tevent_req_set_cleanup_fn: void (struct tevent_req *, tevent_req_cleanup_fn)
> +tevent_req_set_endtime: bool (struct tevent_req *, struct tevent_context *, struct timeval)
> +tevent_req_set_print_fn: void (struct tevent_req *, tevent_req_print_fn)
> +tevent_sa_info_queue_count: size_t (void)
> +tevent_set_abort_fn: void (void (*)(const char *))
> +tevent_set_debug: int (struct tevent_context *, void (*)(void *, enum tevent_debug_level, const char *, va_list), void *)
> +tevent_set_debug_stderr: int (struct tevent_context *)
> +tevent_set_default_backend: void (const char *)
> +tevent_set_trace_callback: void (struct tevent_context *, tevent_trace_callback_t, void *)
> +tevent_signal_support: bool (struct tevent_context *)
> +tevent_thread_proxy_create: struct tevent_thread_proxy *(struct tevent_context *)
> +tevent_thread_proxy_schedule: void (struct tevent_thread_proxy *, struct tevent_immediate **, tevent_immediate_handler_t, void *)
> +tevent_threaded_context_create: struct tevent_threaded_context *(TALLOC_CTX *, struct tevent_context *)
> +tevent_timeval_add: struct timeval (const struct timeval *, uint32_t, uint32_t)
> +tevent_timeval_compare: int (const struct timeval *, const struct timeval *)
> +tevent_timeval_current: struct timeval (void)
> +tevent_timeval_current_ofs: struct timeval (uint32_t, uint32_t)
> +tevent_timeval_is_zero: bool (const struct timeval *)
> +tevent_timeval_set: struct timeval (uint32_t, uint32_t)
> +tevent_timeval_until: struct timeval (const struct timeval *, const struct timeval *)
> +tevent_timeval_zero: struct timeval (void)
> +tevent_trace_point_callback: void (struct tevent_context *, enum tevent_trace_point)
> +tevent_update_timer: void (struct tevent_timer *, struct timeval)
> +tevent_wakeup_recv: bool (struct tevent_req *)
> +tevent_wakeup_send: struct tevent_req *(TALLOC_CTX *, struct tevent_context *, struct timeval)
> diff --git a/lib/tevent/tevent.h b/lib/tevent/tevent.h
> index ba4bb4d..57ac80c 100644
> --- a/lib/tevent/tevent.h
> +++ b/lib/tevent/tevent.h
> @@ -986,6 +986,33 @@ typedef void (*tevent_req_cleanup_fn)(struct tevent_req *req,
>   */
>  void tevent_req_set_cleanup_fn(struct tevent_req *req, tevent_req_cleanup_fn fn);
>  
> +/**
> + * @brief This function requests automatic forwarding of a cancel to a tevent
> + * subrequest when tevent_req_cancel() is called on a request.
> + *
> + * In an async computation with nested subrequests it is possible that a
> + * subrequest is cancellable (as it called tevent_req_set_cancel_fn()), but
> + * intermediate subrequests are not.
> + *
> + * To avoid the necessity for all intermediate requests to install cancel
> + * handlers with tevent_req_set_cancel_fn() and manually forward the cancel
> + * from the cancel handler, a request that creates a subrequest can call
> + * tevent_req_set_cancel_forwarding(req, subreq) and then tevent takes care of
> + * cancelling the subrequest when tevent_req_cancel() is called on the request.
> + *
> + * Note that for a single request you can only either set a cancel function
> + * with tevent_req_set_cancel_fn() or request cancel forwarding with
> + * tevent_req_set_cancel_forwarding(), not both.
> + *
> + * @param[in]  req      The request that should forward the cancel request.
> + *
> + * @param[in]  subreq   The sub-request to which the cancel shall be forwarded. When
> + *                      passing NULL, the forwarding is removed
> + *
> + */
> +void tevent_req_set_cancel_forwarding(struct tevent_req *req,
> +				      struct tevent_req *subreq);
> +
>  #ifdef DOXYGEN
>  /**
>   * @brief Create an async tevent request.
> diff --git a/lib/tevent/tevent_internal.h b/lib/tevent/tevent_internal.h
> index d60d354..ec857d1 100644
> --- a/lib/tevent/tevent_internal.h
> +++ b/lib/tevent/tevent_internal.h
> @@ -85,6 +85,20 @@ struct tevent_req {
>  		 * was called.
>  		 */
>  		tevent_req_cancel_fn fn;
> +
> +		struct {
> +			/**
> +			 * The subreq to forward a cancel to. There can be only
> +			 * one subreq.
> +			 */
> +			struct tevent_req *subreq;
> +
> +			/**
> +			 * A backpointer to req->subreq. Used by the subreq destructor
> +			 * to NULL out the forward pointer in the parent req.
> +			 */
> +			struct tevent_req **_subreq;
> +		} forwarding;
>  	} private_cancel;
>  
>  	/**
> diff --git a/lib/tevent/tevent_req.c b/lib/tevent/tevent_req.c
> index c066b7c..2b543cc 100644
> --- a/lib/tevent/tevent_req.c
> +++ b/lib/tevent/tevent_req.c
> @@ -244,6 +244,14 @@ void tevent_req_received(struct tevent_req *req)
>  
>  	req->private_print = NULL;
>  	req->private_cancel.fn = NULL;
> +	req->private_cancel.forwarding.subreq = NULL;
> +
> +	if (req->private_cancel.forwarding._subreq != NULL) {
> +		if (*req->private_cancel.forwarding._subreq == req) {
> +			*req->private_cancel.forwarding._subreq = NULL;
> +		}
> +		req->private_cancel.forwarding._subreq = NULL;
> +	}
>  
>  	TALLOC_FREE(req->internal.trigger);
>  	TALLOC_FREE(req->internal.timer);
> @@ -341,16 +349,46 @@ void tevent_req_set_print_fn(struct tevent_req *req, tevent_req_print_fn fn)
>  
>  void tevent_req_set_cancel_fn(struct tevent_req *req, tevent_req_cancel_fn fn)
>  {
> +	if (req->private_cancel.forwarding.subreq != NULL) {
> +		abort();
> +	}
> +
>  	req->private_cancel.fn = fn;
>  }
>  
> +void tevent_req_set_cancel_forwarding(struct tevent_req *req,
> +				      struct tevent_req *subreq)
> +{
> +	if (req->private_cancel.fn) {
> +		abort();
> +	}
> +
> +	if (req->private_cancel.forwarding.subreq != NULL) {
> +		/* Allow resetting the forwarding by passing subreq as NULL */
> +		if (subreq == NULL) {
> +			req->private_cancel.forwarding.subreq = NULL;
> +			return;
> +		}
> +		abort();
> +	}
> +
> +	req->private_cancel.forwarding.subreq = subreq;
> +
> +	subreq->private_cancel.forwarding._subreq =
> +		&req->private_cancel.forwarding.subreq;
> +}
> +
>  bool _tevent_req_cancel(struct tevent_req *req, const char *location)
>  {
> -	if (req->private_cancel.fn == NULL) {
> -		return false;
> +	if (req->private_cancel.fn != NULL) {
> +		return req->private_cancel.fn(req);
> +	}
> +
> +	if (req->private_cancel.forwarding.subreq != NULL) {
> +		return tevent_req_cancel(req->private_cancel.forwarding.subreq);
>  	}
>  
> -	return req->private_cancel.fn(req);
> +	return false;
>  }
>  
>  void tevent_req_set_cleanup_fn(struct tevent_req *req, tevent_req_cleanup_fn fn)
> diff --git a/lib/tevent/wscript b/lib/tevent/wscript
> index 0c02f70..54f216d 100644
> --- a/lib/tevent/wscript
> +++ b/lib/tevent/wscript
> @@ -1,7 +1,7 @@
>  #!/usr/bin/env python
>  
>  APPNAME = 'tevent'
> -VERSION = '0.9.31'
> +VERSION = '0.9.32'
>  
>  blddir = 'bin'
>  
> -- 
> 2.9.3
> 
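
Just to make the new tevent API concrete, here's a minimal sketch of an
intermediate request that forwards cancels to a cancellable leaf request.
This is my illustration, not part of the patchset: leaf_send()/mid_send()
are made-up names, everything else is plain talloc/tevent API plus the new
tevent_req_set_cancel_forwarding() added above.

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <talloc.h>
#include <tevent.h>

/* Leaf request: cancellable because it installs a cancel function. */

struct leaf_state { uint8_t dummy; };

static bool leaf_cancel(struct tevent_req *req)
{
	/* Report the cancellation as an error on the leaf request. */
	tevent_req_error(req, ECANCELED);
	return true;
}

static struct tevent_req *leaf_send(TALLOC_CTX *mem_ctx,
				    struct tevent_context *ev)
{
	struct tevent_req *req;
	struct leaf_state *state;

	req = tevent_req_create(mem_ctx, &state, struct leaf_state);
	if (req == NULL) {
		return NULL;
	}
	tevent_req_set_cancel_fn(req, leaf_cancel);
	/* The real async work would be kicked off on "ev" here. */
	return req;
}

/* Intermediate request: installs no cancel handler of its own. */

struct mid_state { uint8_t dummy; };

static void mid_leaf_done(struct tevent_req *subreq);

static struct tevent_req *mid_send(TALLOC_CTX *mem_ctx,
				   struct tevent_context *ev)
{
	struct tevent_req *req, *subreq;
	struct mid_state *state;

	req = tevent_req_create(mem_ctx, &state, struct mid_state);
	if (req == NULL) {
		return NULL;
	}

	subreq = leaf_send(state, ev);
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(subreq, mid_leaf_done, req);

	/*
	 * New in 0.9.32: tevent_req_cancel() on "req" is forwarded to
	 * "subreq" without mid_send() installing a cancel handler. Per
	 * the patch, a request must not use both this and
	 * tevent_req_set_cancel_fn().
	 */
	tevent_req_set_cancel_forwarding(req, subreq);
	return req;
}

static void mid_leaf_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	enum tevent_req_state state;
	uint64_t err = 0;
	bool failed;

	failed = tevent_req_is_error(subreq, &state, &err);
	TALLOC_FREE(subreq);
	if (failed) {
		/* err is ECANCELED if the leaf was cancelled. */
		tevent_req_error(req, err != 0 ? err : EIO);
		return;
	}
	tevent_req_done(req);
}

With this, tevent_req_cancel() on the request returned by mid_send() ends
up calling leaf_cancel() on the leaf, without mid_send() needing a cancel
handler of its own.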
> 
> From 1dc0adedfc18fa4b73786637a3a3a16499b7adc0 Mon Sep 17 00:00:00 2001
> From: Ralph Boehme <slow at samba.org>
> Date: Wed, 8 Mar 2017 15:07:06 +0100
> Subject: [PATCH 03/16] s3/smbd: move copychunk ioctl limits to IDL
> 
> This will be needed in the next commit in vfs_default.
> 
> Signed-off-by: Ralph Boehme <slow at samba.org>
> ---
>  librpc/idl/ioctl.idl                 | 4 ++++
>  source3/smbd/smb2_ioctl_network_fs.c | 3 ---
>  2 files changed, 4 insertions(+), 3 deletions(-)
> 
> diff --git a/librpc/idl/ioctl.idl b/librpc/idl/ioctl.idl
> index eb2dc8a..72f6656 100644
> --- a/librpc/idl/ioctl.idl
> +++ b/librpc/idl/ioctl.idl
> @@ -11,6 +11,10 @@ interface copychunk
>  		uint8 context[4];
>  	} req_resume_key_rsp;
>  
> +	const uint32 COPYCHUNK_MAX_CHUNKS = 256;
> +	const uint32 COPYCHUNK_MAX_CHUNK_LEN = 1048576;
> +	const uint32 COPYCHUNK_MAX_TOTAL_LEN = 16777216;
> +
>  	typedef struct {
>  		hyper source_off;
>  		hyper target_off;
> diff --git a/source3/smbd/smb2_ioctl_network_fs.c b/source3/smbd/smb2_ioctl_network_fs.c
> index c2b889b..c279182 100644
> --- a/source3/smbd/smb2_ioctl_network_fs.c
> +++ b/source3/smbd/smb2_ioctl_network_fs.c
> @@ -31,9 +31,6 @@
>  #include "smb2_ioctl_private.h"
>  #include "../lib/tsocket/tsocket.h"
>  
> -#define COPYCHUNK_MAX_CHUNKS	256		/* 2k8r2 & win8 = 256 */
> -#define COPYCHUNK_MAX_CHUNK_LEN	1048576		/* 2k8r2 & win8 = 1048576 */
> -#define COPYCHUNK_MAX_TOTAL_LEN	16777216	/* 2k8r2 & win8 = 16777216 */
>  static void copychunk_pack_limits(struct srv_copychunk_rsp *cc_rsp)
>  {
>  	cc_rsp->chunks_written = COPYCHUNK_MAX_CHUNKS;
> -- 
> 2.9.3
> 
> 
> From b45dbc02b3811a85c0f2ccc9a5cced168c016107 Mon Sep 17 00:00:00 2001
> From: Ralph Boehme <slow at samba.org>
> Date: Sun, 12 Mar 2017 17:18:39 +0100
> Subject: [PATCH 04/16] s3/vfs_default: use const from IDL
> 
> This also increases the buffer size from 8 MB to the current value of
> COPYCHUNK_MAX_TOTAL_LEN which is 16 MB.
> 
> For the typical case when vfswrap_copy_chunk_send is called from the SMB
> layer for a copy_chunk ioctl(), the parameter "num" is guaranteed to be
> at most 1 MB though.
> 
> It will only be larger for special callers like vfs_fruit for their
> special implementation of copyfile where num will be the size of a file
> to copy.
> 
> Signed-off-by: Ralph Boehme <slow at samba.org>
> ---
>  source3/modules/vfs_default.c | 3 ++-
>  1 file changed, 2 insertions(+), 1 deletion(-)
> 
> diff --git a/source3/modules/vfs_default.c b/source3/modules/vfs_default.c
> index d4610f7..054ff64 100644
> --- a/source3/modules/vfs_default.c
> +++ b/source3/modules/vfs_default.c
> @@ -33,6 +33,7 @@
>  #include "lib/util/tevent_ntstatus.h"
>  #include "lib/util/sys_rw.h"
>  #include "lib/pthreadpool/pthreadpool_tevent.h"
> +#include "librpc/gen_ndr/ndr_ioctl.h"
>  
>  #undef DBGC_CLASS
>  #define DBGC_CLASS DBGC_VFS
> @@ -1617,7 +1618,7 @@ static struct tevent_req *vfswrap_copy_chunk_send(struct vfs_handle_struct *hand
>  	}
>  
>  	vfs_cc_state->buf = talloc_array(vfs_cc_state, uint8_t,
> -					 MIN(num, 8*1024*1024));
> +					 MIN(num, COPYCHUNK_MAX_TOTAL_LEN));
>  	if (tevent_req_nomem(vfs_cc_state->buf, req)) {
>  		return tevent_req_post(req, ev);
>  	}
> -- 
> 2.9.3
> 
> 
> From 43d0ede7ee2cb55144874a8f9caf667841cec54d Mon Sep 17 00:00:00 2001
> From: Ralph Boehme <slow at samba.org>
> Date: Tue, 21 Mar 2017 08:26:37 +0100
> Subject: [PATCH 05/16] s3/smbd: move cc_copy into fsctl_srv_copychunk_state
> 
> No change in behaviour, just preparatory work to unroll the core
> copy loop.
> 
> Signed-off-by: Ralph Boehme <slow at samba.org>
> ---
>  source3/smbd/smb2_ioctl_network_fs.c | 16 ++++++++--------
>  1 file changed, 8 insertions(+), 8 deletions(-)
> 
> diff --git a/source3/smbd/smb2_ioctl_network_fs.c b/source3/smbd/smb2_ioctl_network_fs.c
> index c279182..efb6fe7 100644
> --- a/source3/smbd/smb2_ioctl_network_fs.c
> +++ b/source3/smbd/smb2_ioctl_network_fs.c
> @@ -77,6 +77,7 @@ static NTSTATUS copychunk_check_limits(struct srv_copychunk_copy *cc_copy)
>  
>  struct fsctl_srv_copychunk_state {
>  	struct connection_struct *conn;
> +	struct srv_copychunk_copy cc_copy;
>  	uint32_t dispatch_count;
>  	uint32_t recv_count;
>  	uint32_t bad_recv_count;
> @@ -164,7 +165,6 @@ static struct tevent_req *fsctl_srv_copychunk_send(TALLOC_CTX *mem_ctx,
>  						   struct smbd_smb2_request *smb2req)
>  {
>  	struct tevent_req *req;
> -	struct srv_copychunk_copy cc_copy;
>  	enum ndr_err_code ndr_ret;
>  	uint64_t src_persistent_h;
>  	uint64_t src_volatile_h;
> @@ -192,7 +192,7 @@ static struct tevent_req *fsctl_srv_copychunk_send(TALLOC_CTX *mem_ctx,
>  		return tevent_req_post(req, ev);
>  	}
>  
> -	ndr_ret = ndr_pull_struct_blob(in_input, mem_ctx, &cc_copy,
> +	ndr_ret = ndr_pull_struct_blob(in_input, mem_ctx, &state->cc_copy,
>  			(ndr_pull_flags_fn_t)ndr_pull_srv_copychunk_copy);
>  	if (ndr_ret != NDR_ERR_SUCCESS) {
>  		DEBUG(0, ("failed to unmarshall copy chunk req\n"));
> @@ -202,8 +202,8 @@ static struct tevent_req *fsctl_srv_copychunk_send(TALLOC_CTX *mem_ctx,
>  	}
>  
>  	/* persistent/volatile keys sent as the resume key */
> -	src_persistent_h = BVAL(cc_copy.source_key, 0);
> -	src_volatile_h = BVAL(cc_copy.source_key, 8);
> +	src_persistent_h = BVAL(state->cc_copy.source_key, 0);
> +	src_volatile_h = BVAL(state->cc_copy.source_key, 8);
>  	state->src_fsp = file_fsp_get(smb2req, src_persistent_h, src_volatile_h);
>  	if (state->src_fsp == NULL) {
>  		DEBUG(3, ("invalid resume key in copy chunk req\n"));
> @@ -223,7 +223,7 @@ static struct tevent_req *fsctl_srv_copychunk_send(TALLOC_CTX *mem_ctx,
>  		return tevent_req_post(req, ev);
>  	}
>  
> -	state->status = copychunk_check_limits(&cc_copy);
> +	state->status = copychunk_check_limits(&state->cc_copy);
>  	if (!NT_STATUS_IS_OK(state->status)) {
>  		DEBUG(3, ("copy chunk req exceeds limits\n"));
>  		state->out_data = COPYCHUNK_OUT_LIMITS;
> @@ -234,7 +234,7 @@ static struct tevent_req *fsctl_srv_copychunk_send(TALLOC_CTX *mem_ctx,
>  	/* any errors from here onwards should carry copychunk response data */
>  	state->out_data = COPYCHUNK_OUT_RSP;
>  
> -	if (cc_copy.chunk_count == 0) {
> +	if (state->cc_copy.chunk_count == 0) {
>  		struct tevent_req *vfs_subreq;
>  		/*
>  		 * Process as OS X copyfile request. This is currently
> @@ -266,9 +266,9 @@ static struct tevent_req *fsctl_srv_copychunk_send(TALLOC_CTX *mem_ctx,
>  		return req;
>  	}
>  
> -	for (i = 0; i < cc_copy.chunk_count; i++) {
> +	for (i = 0; i < state->cc_copy.chunk_count; i++) {
>  		struct tevent_req *vfs_subreq;
> -		chunk = &cc_copy.chunks[i];
> +		chunk = &state->cc_copy.chunks[i];
>  		vfs_subreq = SMB_VFS_COPY_CHUNK_SEND(dst_fsp->conn,
>  						     state, ev,
>  						     state->src_fsp,
> -- 
> 2.9.3
> 
> 
> From 677b46e7894145778e88e2d291da43260de30c54 Mon Sep 17 00:00:00 2001
> From: Ralph Boehme <slow at samba.org>
> Date: Tue, 21 Mar 2017 09:17:03 +0100
> Subject: [PATCH 06/16] s3/smbd: implement a serializing async copy-chunk loop
> 
> Later commits will make the low level copy-chunk implementation async
> using a thread pool. That means the individual chunks may be scheduled
> and copied out-of-order at the low level.
> 
> According to a conversation with MS Dochelp, a server implementation
> must process individual chunks in order.
> 
> Signed-off-by: Ralph Boehme <slow at samba.org>
> ---
>  source3/smbd/smb2_ioctl_network_fs.c | 188 +++++++++++++++++++----------------
>  1 file changed, 102 insertions(+), 86 deletions(-)
> 
> diff --git a/source3/smbd/smb2_ioctl_network_fs.c b/source3/smbd/smb2_ioctl_network_fs.c
> index efb6fe7..42d9857 100644
> --- a/source3/smbd/smb2_ioctl_network_fs.c
> +++ b/source3/smbd/smb2_ioctl_network_fs.c
> @@ -76,11 +76,11 @@ static NTSTATUS copychunk_check_limits(struct srv_copychunk_copy *cc_copy)
>  }
>  
>  struct fsctl_srv_copychunk_state {
> +	struct tevent_context *ev;
>  	struct connection_struct *conn;
>  	struct srv_copychunk_copy cc_copy;
> -	uint32_t dispatch_count;
> -	uint32_t recv_count;
> -	uint32_t bad_recv_count;
> +	uint32_t current_chunk;
> +	uint32_t next_chunk;
>  	NTSTATUS status;
>  	off_t total_written;
>  	struct files_struct *src_fsp;
> @@ -156,6 +156,8 @@ static NTSTATUS copychunk_check_handles(uint32_t ctl_code,
>  	return NT_STATUS_OK;
>  }
>  
> +static NTSTATUS fsctl_srv_copychunk_loop(struct tevent_req *req);
> +
>  static struct tevent_req *fsctl_srv_copychunk_send(TALLOC_CTX *mem_ctx,
>  						   struct tevent_context *ev,
>  						   uint32_t ctl_code,
> @@ -164,13 +166,12 @@ static struct tevent_req *fsctl_srv_copychunk_send(TALLOC_CTX *mem_ctx,
>  						   size_t in_max_output,
>  						   struct smbd_smb2_request *smb2req)
>  {
> -	struct tevent_req *req;
> -	enum ndr_err_code ndr_ret;
> +	struct tevent_req *req = NULL;
> +	struct fsctl_srv_copychunk_state *state = NULL;
>  	uint64_t src_persistent_h;
>  	uint64_t src_volatile_h;
> -	int i;
> -	struct srv_copychunk *chunk;
> -	struct fsctl_srv_copychunk_state *state;
> +	enum ndr_err_code ndr_ret;
> +	NTSTATUS status;
>  
>  	/* handler for both copy-chunk variants */
>  	SMB_ASSERT((ctl_code == FSCTL_SRV_COPYCHUNK)
> @@ -181,7 +182,10 @@ static struct tevent_req *fsctl_srv_copychunk_send(TALLOC_CTX *mem_ctx,
>  	if (req == NULL) {
>  		return NULL;
>  	}
> -	state->conn = dst_fsp->conn;
> +	*state = (struct fsctl_srv_copychunk_state) {
> +		.conn = dst_fsp->conn,
> +		.ev = ev,
> +	};
>  
>  	if (in_max_output < sizeof(struct srv_copychunk_rsp)) {
>  		DEBUG(3, ("max output %d not large enough to hold copy chunk "
> @@ -234,109 +238,123 @@ static struct tevent_req *fsctl_srv_copychunk_send(TALLOC_CTX *mem_ctx,
>  	/* any errors from here onwards should carry copychunk response data */
>  	state->out_data = COPYCHUNK_OUT_RSP;
>  
> +	status = fsctl_srv_copychunk_loop(req);
> +	if (tevent_req_nterror(req, status)) {
> +		return tevent_req_post(req, ev);
> +	}
> +
> +	return req;
> +}
> +
> +static NTSTATUS fsctl_srv_copychunk_loop(struct tevent_req *req)
> +{
> +	struct fsctl_srv_copychunk_state *state = tevent_req_data(
> +		req, struct fsctl_srv_copychunk_state);
> +	struct tevent_req *subreq = NULL;
> +	struct srv_copychunk *chunk = NULL;
> +
> +	if (state->next_chunk > state->cc_copy.chunk_count) {
> +		DBG_ERR("Copy-chunk loop next_chunk [%d] chunk_count [%d]\n",
> +			state->next_chunk, state->cc_copy.chunk_count);
> +		return NT_STATUS_INTERNAL_ERROR;
> +	}
> +
>  	if (state->cc_copy.chunk_count == 0) {
> -		struct tevent_req *vfs_subreq;
>  		/*
>  		 * Process as OS X copyfile request. This is currently
>  		 * the only copychunk request with a chunk count of 0
>  		 * we will process.
>  		 */
> -		if (!state->src_fsp->aapl_copyfile_supported) {
> -			tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
> -			return tevent_req_post(req, ev);
> -		}
> -		if (!state->dst_fsp->aapl_copyfile_supported) {
> -			tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
> -			return tevent_req_post(req, ev);
> +		if (!state->src_fsp->aapl_copyfile_supported ||
> +		    !state->dst_fsp->aapl_copyfile_supported)
> +		{
> +			/*
> +			 * This must not return an error but just a chunk count
> +			 * of 0 in the response.
> +			 */
> +			tevent_req_done(req);
> +			tevent_req_post(req, state->ev);
> +			return NT_STATUS_OK;
>  		}
>  		state->aapl_copyfile = true;
> -		vfs_subreq = SMB_VFS_COPY_CHUNK_SEND(dst_fsp->conn,
> -						     state, ev,
> -						     state->src_fsp,
> -						     0,
> -						     state->dst_fsp,
> -						     0,
> -						     0);
> -		if (tevent_req_nomem(vfs_subreq, req)) {
> -			return tevent_req_post(req, ev);
> +
> +		subreq = SMB_VFS_COPY_CHUNK_SEND(state->dst_fsp->conn,
> +						 state,
> +						 state->ev,
> +						 state->src_fsp,
> +						 0,
> +						 state->dst_fsp,
> +						 0,
> +						 0);
> +		if (subreq == NULL) {
> +			return NT_STATUS_NO_MEMORY;
>  		}
> -		tevent_req_set_callback(vfs_subreq,
> +		tevent_req_set_callback(subreq,
>  					fsctl_srv_copychunk_vfs_done, req);
> -		state->dispatch_count++;
> -		return req;
> +		return NT_STATUS_OK;
>  	}
>  
> -	for (i = 0; i < state->cc_copy.chunk_count; i++) {
> -		struct tevent_req *vfs_subreq;
> -		chunk = &state->cc_copy.chunks[i];
> -		vfs_subreq = SMB_VFS_COPY_CHUNK_SEND(dst_fsp->conn,
> -						     state, ev,
> -						     state->src_fsp,
> -						     chunk->source_off,
> -						     state->dst_fsp,
> -						     chunk->target_off,
> -						     chunk->length);
> -		if (vfs_subreq == NULL) {
> -			DEBUG(0, ("VFS copy chunk send failed\n"));
> -			state->status = NT_STATUS_NO_MEMORY;
> -			if (state->dispatch_count == 0) {
> -				/* nothing dispatched, return immediately */
> -				tevent_req_nterror(req, state->status);
> -				return tevent_req_post(req, ev);
> -			} else {
> -				/*
> -				 * wait for dispatched to complete before
> -				 * returning error.
> -				 */
> -				break;
> -			}
> -		}
> -		tevent_req_set_callback(vfs_subreq,
> -					fsctl_srv_copychunk_vfs_done, req);
> -		state->dispatch_count++;
> +	chunk = &state->cc_copy.chunks[state->current_chunk];
> +
> +	/*
> +	 * Doing the increment and calculation for the next chunk here and not
> +	 * in the done function, as a later commit will make this a more
> +	 * sophisticated logic.
> +	 */
> +	state->next_chunk++;
> +
> +	subreq = SMB_VFS_COPY_CHUNK_SEND(state->dst_fsp->conn,
> +					 state,
> +					 state->ev,
> +					 state->src_fsp,
> +					 chunk->source_off,
> +					 state->dst_fsp,
> +					 chunk->target_off,
> +					 chunk->length);
> +	if (tevent_req_nomem(subreq, req)) {
> +		return NT_STATUS_NO_MEMORY;
>  	}
> +	tevent_req_set_callback(subreq,	fsctl_srv_copychunk_vfs_done, req);
>  
> -	return req;
> +	return NT_STATUS_OK;
>  }
>  
>  static void fsctl_srv_copychunk_vfs_done(struct tevent_req *subreq)
>  {
>  	struct tevent_req *req = tevent_req_callback_data(
>  		subreq, struct tevent_req);
> -	struct fsctl_srv_copychunk_state *state = tevent_req_data(req,
> -					struct fsctl_srv_copychunk_state);
> +	struct fsctl_srv_copychunk_state *state = tevent_req_data(
> +		req, struct fsctl_srv_copychunk_state);
>  	off_t chunk_nwritten;
>  	NTSTATUS status;
>  
> -	state->recv_count++;
>  	status = SMB_VFS_COPY_CHUNK_RECV(state->conn, subreq,
>  					 &chunk_nwritten);
>  	TALLOC_FREE(subreq);
> -	if (NT_STATUS_IS_OK(status)) {
> -		DEBUG(10, ("good copy chunk recv %u of %u\n",
> -			   (unsigned int)state->recv_count,
> -			   (unsigned int)state->dispatch_count));
> -		state->total_written += chunk_nwritten;
> -	} else {
> -		DEBUG(0, ("bad status in copy chunk recv %u of %u: %s\n",
> -			  (unsigned int)state->recv_count,
> -			  (unsigned int)state->dispatch_count,
> -			  nt_errstr(status)));
> -		state->bad_recv_count++;
> -		/* may overwrite previous failed status */
> -		state->status = status;
> -	}
> -
> -	if (state->recv_count != state->dispatch_count) {
> -		/*
> -		 * Wait for all VFS copy_chunk requests to complete, even
> -		 * if an error is received for a specific chunk.
> -		 */
> +	if (!NT_STATUS_IS_OK(status)) {
> +		DBG_ERR("copy chunk failed [%s] chunk [%u] of [%u]\n",
> +			nt_errstr(status),
> +			(unsigned int)state->next_chunk,
> +			(unsigned int)state->cc_copy.chunk_count);
> +		tevent_req_nterror(req, status);
>  		return;
>  	}
>  
> -	if (!tevent_req_nterror(req, state->status)) {
> +	DBG_DEBUG("good copy chunk [%u] of [%u]\n",
> +		  (unsigned int)state->next_chunk,
> +		  (unsigned int)state->cc_copy.chunk_count);
> +	state->total_written += chunk_nwritten;
> +
> +	state->current_chunk = state->next_chunk;
> +
> +	if (state->next_chunk == state->cc_copy.chunk_count) {
>  		tevent_req_done(req);
> +		return;
> +	}
> +
> +	status = fsctl_srv_copychunk_loop(req);
> +	if (tevent_req_nterror(req, status)) {
> +		return;
>  	}
>  }
>  
> @@ -361,7 +379,7 @@ static NTSTATUS fsctl_srv_copychunk_recv(struct tevent_req *req,
>  		if (state->aapl_copyfile == true) {
>  			cc_rsp->chunks_written = 0;
>  		} else {
> -			cc_rsp->chunks_written = state->recv_count - state->bad_recv_count;
> +			cc_rsp->chunks_written = state->current_chunk;
>  		}
>  		cc_rsp->chunk_bytes_written = 0;
>  		cc_rsp->total_bytes_written = state->total_written;
> @@ -371,9 +389,7 @@ static NTSTATUS fsctl_srv_copychunk_recv(struct tevent_req *req,
>  		assert(1);
>  		break;
>  	}
> -	status = state->status;
> -	tevent_req_received(req);
> -
> +	status = tevent_req_simple_recv_ntstatus(req);
>  	return status;
>  }
>  
> -- 
> 2.9.3
> 
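
The loop above is the classic tevent pattern of keeping exactly one
subrequest in flight and only dispatching the next chunk from the previous
chunk's completion callback, which is what keeps the chunks strictly
ordered even once the backend goes async. Here's a stripped-down sketch of
that pattern (my illustration, not the patch code: tevent_wakeup_send()
stands in for SMB_VFS_COPY_CHUNK_SEND so it only depends on tevent, errno
values stand in for NTSTATUS, and chunk_count is assumed to be >= 1):

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <talloc.h>
#include <tevent.h>

struct chunk_copy_state {
	struct tevent_context *ev;
	uint32_t current_chunk;
	uint32_t chunk_count;
};

static int chunk_copy_loop(struct tevent_req *req);
static void chunk_copy_done(struct tevent_req *subreq);

static struct tevent_req *chunk_copy_send(TALLOC_CTX *mem_ctx,
					  struct tevent_context *ev,
					  uint32_t chunk_count)
{
	struct tevent_req *req;
	struct chunk_copy_state *state;

	req = tevent_req_create(mem_ctx, &state, struct chunk_copy_state);
	if (req == NULL) {
		return NULL;
	}
	state->ev = ev;
	state->chunk_count = chunk_count;

	if (chunk_copy_loop(req) != 0) {
		tevent_req_error(req, ENOMEM);
		return tevent_req_post(req, ev);
	}
	return req;
}

static int chunk_copy_loop(struct tevent_req *req)
{
	struct chunk_copy_state *state = tevent_req_data(
		req, struct chunk_copy_state);
	struct tevent_req *subreq;

	/* Stand-in for SMB_VFS_COPY_CHUNK_SEND() of chunks[current_chunk]. */
	subreq = tevent_wakeup_send(state, state->ev,
				    tevent_timeval_current_ofs(0, 1000));
	if (subreq == NULL) {
		return ENOMEM;
	}
	tevent_req_set_callback(subreq, chunk_copy_done, req);
	return 0;
}

static void chunk_copy_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct chunk_copy_state *state = tevent_req_data(
		req, struct chunk_copy_state);
	bool ok;

	/* Stand-in for SMB_VFS_COPY_CHUNK_RECV(). */
	ok = tevent_wakeup_recv(subreq);
	TALLOC_FREE(subreq);
	if (!ok) {
		tevent_req_error(req, EIO);
		return;
	}

	state->current_chunk++;
	if (state->current_chunk == state->chunk_count) {
		/* All chunks copied, strictly in order. */
		tevent_req_done(req);
		return;
	}

	/* Only now is the copy of the next chunk dispatched. */
	if (chunk_copy_loop(req) != 0) {
		tevent_req_error(req, ENOMEM);
	}
}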
> 
> From c8c4c1906ed03f31fe9cdd1c3a19703d9468bf8c Mon Sep 17 00:00:00 2001
> From: Ralph Boehme <slow at samba.org>
> Date: Tue, 21 Mar 2017 18:34:22 +0100
> Subject: [PATCH 07/16] s3/smbd: optimize copy-chunk by merging chunks if
>  possible
> 
> Merge chunks with adjacent ranges. This results in fewer IO requests for
> the typical server-side file copy use case: just one 16 MB copy instead
> of sixteen 1 MB copies.
> 
> Signed-off-by: Ralph Boehme <slow at samba.org>
> ---
>  source3/smbd/smb2_ioctl_network_fs.c | 79 ++++++++++++++++++++++++++++++------
>  1 file changed, 67 insertions(+), 12 deletions(-)
> 
> diff --git a/source3/smbd/smb2_ioctl_network_fs.c b/source3/smbd/smb2_ioctl_network_fs.c
> index 42d9857..5b869ec 100644
> --- a/source3/smbd/smb2_ioctl_network_fs.c
> +++ b/source3/smbd/smb2_ioctl_network_fs.c
> @@ -246,12 +246,74 @@ static struct tevent_req *fsctl_srv_copychunk_send(TALLOC_CTX *mem_ctx,
>  	return req;
>  }
>  
> +/*
> + * Work out the next IO request from the remaining chunks starting with
> + * state->current_chunk. Chunks with left to right adjacent ranges can be merged
> + * into a single IO request.
> + */
> +static uint32_t next_merged_io(struct fsctl_srv_copychunk_state *state)
> +{
> +	struct srv_copychunk *chunk = NULL;
> +	uint32_t length;
> +	off_t next_src_offset;
> +	off_t next_dst_offset;
> +
> +	/*
> +	 * We're expected to process at least one chunk and return its length,
> +	 * so let's do this first.
> +	 */
> +
> +	chunk = &state->cc_copy.chunks[state->current_chunk];
> +	length = chunk->length;
> +	state->next_chunk++;
> +
> +	/*
> +	 * Now process subsequent chunks merging chunks as long as ranges are
> +	 * adjacent and the source file size is not exceeded (this is needed
> +	 * for error reporting).
> +	 */
> +
> +	next_src_offset = chunk->source_off + chunk->length;
> +	next_dst_offset = chunk->target_off + chunk->length;
> +
> +	while (state->next_chunk < state->cc_copy.chunk_count) {
> +		chunk = &state->cc_copy.chunks[state->next_chunk];
> +
> +		if ((chunk->source_off != next_src_offset) ||
> +		    (chunk->target_off != next_dst_offset))
> +		{
> +			/* Not adjacent, stop merging */
> +			break;
> +		}
> +
> +		next_src_offset += chunk->length;
> +		next_dst_offset += chunk->length;
> +
> +		if (next_src_offset > state->src_fsp->fsp_name->st.st_ex_size) {
> +			/* Source filesize exceeded, stop merging */
> +			break;
> +		}
> +
> +		/*
> +		 * Found a mergable chunk, merge it and continue searching.
> +		 * Note: this can't wrap, limits were already checked in
> +		 * copychunk_check_limits().
> +		 */
> +		length += chunk->length;
> +
> +		state->next_chunk++;
> +	}
> +
> +	return length;
> +}
> +
>  static NTSTATUS fsctl_srv_copychunk_loop(struct tevent_req *req)
>  {
>  	struct fsctl_srv_copychunk_state *state = tevent_req_data(
>  		req, struct fsctl_srv_copychunk_state);
>  	struct tevent_req *subreq = NULL;
>  	struct srv_copychunk *chunk = NULL;
> +	uint32_t length;
>  
>  	if (state->next_chunk > state->cc_copy.chunk_count) {
>  		DBG_ERR("Copy-chunk loop next_chunk [%d] chunk_count [%d]\n",
> @@ -269,8 +331,8 @@ static NTSTATUS fsctl_srv_copychunk_loop(struct tevent_req *req)
>  		    !state->dst_fsp->aapl_copyfile_supported)
>  		{
>  			/*
> -			 * This must not return an error but just a chunk count
> -			 * of 0 in the response.
> +			 * This must not produce an error but just return a
> +			 * chunk count of 0 in the response.
>  			 */
>  			tevent_req_done(req);
>  			tevent_req_post(req, state->ev);
> @@ -295,13 +357,7 @@ static NTSTATUS fsctl_srv_copychunk_loop(struct tevent_req *req)
>  	}
>  
>  	chunk = &state->cc_copy.chunks[state->current_chunk];
> -
> -	/*
> -	 * Doing the increment and calculation for the next chunk here and not
> -	 * in the done function, as a later commit will make this a more
> -	 * sophisticated logic.
> -	 */
> -	state->next_chunk++;
> +	length = next_merged_io(state);
>  
>  	subreq = SMB_VFS_COPY_CHUNK_SEND(state->dst_fsp->conn,
>  					 state,
> @@ -310,7 +366,7 @@ static NTSTATUS fsctl_srv_copychunk_loop(struct tevent_req *req)
>  					 chunk->source_off,
>  					 state->dst_fsp,
>  					 chunk->target_off,
> -					 chunk->length);
> +					 length);
>  	if (tevent_req_nomem(subreq, req)) {
>  		return NT_STATUS_NO_MEMORY;
>  	}
> @@ -346,8 +402,7 @@ static void fsctl_srv_copychunk_vfs_done(struct tevent_req *subreq)
>  	state->total_written += chunk_nwritten;
>  
>  	state->current_chunk = state->next_chunk;
> -
> -	if (state->next_chunk == state->cc_copy.chunk_count) {
> +	if (state->current_chunk == state->cc_copy.chunk_count) {
>  		tevent_req_done(req);
>  		return;
>  	}
> -- 
> 2.9.3
> 
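
As a standalone illustration of the merge rule (again my own example, not
the patch code): chunks are only coalesced while both the source and the
target offsets continue exactly where the previous chunk ended, so four
adjacent 1 MB chunks collapse into one 4 MB IO while a gap or reordering
starts a new IO. The real next_merged_io() additionally stops merging once
the source file size would be exceeded, which is needed for correct error
reporting.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct chunk {
	uint64_t source_off;
	uint64_t target_off;
	uint32_t length;
};

/*
 * Starting at *next, count how many bytes can be copied in one IO by
 * merging chunks whose source and target ranges are exactly adjacent.
 * Advances *next past the merged chunks.
 */
static uint32_t next_merged_len(const struct chunk *chunks, uint32_t count,
				uint32_t *next)
{
	uint32_t length = chunks[*next].length;
	uint64_t src = chunks[*next].source_off + chunks[*next].length;
	uint64_t dst = chunks[*next].target_off + chunks[*next].length;

	(*next)++;

	while (*next < count &&
	       chunks[*next].source_off == src &&
	       chunks[*next].target_off == dst) {
		length += chunks[*next].length;
		src += chunks[*next].length;
		dst += chunks[*next].length;
		(*next)++;
	}
	return length;
}

int main(void)
{
	/* Four adjacent 1 MB chunks followed by one with a gap. */
	const uint32_t MB = 1024 * 1024;
	struct chunk chunks[] = {
		{ 0 * MB, 0 * MB, MB },
		{ 1 * MB, 1 * MB, MB },
		{ 2 * MB, 2 * MB, MB },
		{ 3 * MB, 3 * MB, MB },
		{ 5 * MB, 5 * MB, MB },	/* not adjacent: new IO */
	};
	uint32_t count = sizeof(chunks) / sizeof(chunks[0]);
	uint32_t next = 0;

	while (next < count) {
		printf("one IO of %" PRIu32 " bytes\n",
		       next_merged_len(chunks, count, &next));
	}
	/* prints: one IO of 4194304 bytes, then one IO of 1048576 bytes */
	return 0;
}

For the typical client that slices a server-side copy into adjacent 1 MB
chunks this is what turns sixteen 1 MB IOs into a single 16 MB one.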
> 
> From 123a1862552f162759abc73f9a22c03b720eff60 Mon Sep 17 00:00:00 2001
> From: Ralph Boehme <slow at samba.org>
> Date: Sun, 12 Mar 2017 17:23:09 +0100
> Subject: [PATCH 08/16] s3/vfs_default: move check for fsp->op validity
> 
> Move the check whether fsp->op is valid out of the copy loop in
> vfswrap_copy_chunk_send().
> 
> It's sufficient to check src_fsp->op and dest_fsp->op once before the
> copy loop. fsp->op can only be NULL for internal opens (cf file_new()),
> it's not expected to become NULL behind our backs.
> 
> Signed-off-by: Ralph Boehme <slow at samba.org>
> ---
>  source3/modules/vfs_default.c | 19 ++++++++++---------
>  1 file changed, 10 insertions(+), 9 deletions(-)
> 
> diff --git a/source3/modules/vfs_default.c b/source3/modules/vfs_default.c
> index 054ff64..545e21a 100644
> --- a/source3/modules/vfs_default.c
> +++ b/source3/modules/vfs_default.c
> @@ -1642,6 +1642,16 @@ static struct tevent_req *vfswrap_copy_chunk_send(struct vfs_handle_struct *hand
>  		return tevent_req_post(req, ev);
>  	}
>  
> +	if (src_fsp->op == NULL) {
> +		tevent_req_nterror(req, NT_STATUS_INTERNAL_ERROR);
> +		return tevent_req_post(req, ev);
> +	}
> +
> +	if (dest_fsp->op == NULL) {
> +		tevent_req_nterror(req, NT_STATUS_INTERNAL_ERROR);
> +		return tevent_req_post(req, ev);
> +	}
> +
>  	/* could use 2.6.33+ sendfile here to do this in kernel */
>  	while (vfs_cc_state->copied < num) {
>  		ssize_t ret;
> @@ -1651,10 +1661,6 @@ static struct tevent_req *vfswrap_copy_chunk_send(struct vfs_handle_struct *hand
>  		off_t this_num = MIN(talloc_array_length(vfs_cc_state->buf),
>  				     num - vfs_cc_state->copied);
>  
> -		if (src_fsp->op == NULL) {
> -			tevent_req_nterror(req, NT_STATUS_INTERNAL_ERROR);
> -			return tevent_req_post(req, ev);
> -		}
>  		init_strict_lock_struct(src_fsp,
>  					src_fsp->op->global->open_persistent_id,
>  					src_off,
> @@ -1688,11 +1694,6 @@ static struct tevent_req *vfswrap_copy_chunk_send(struct vfs_handle_struct *hand
>  
>  		src_off += ret;
>  
> -		if (dest_fsp->op == NULL) {
> -			tevent_req_nterror(req, NT_STATUS_INTERNAL_ERROR);
> -			return tevent_req_post(req, ev);
> -		}
> -
>  		init_strict_lock_struct(dest_fsp,
>  					dest_fsp->op->global->open_persistent_id,
>  					dest_off,
> -- 
> 2.9.3
> 
> 
> From cb894075fdae1af8537a183220cbf3792a5b2966 Mon Sep 17 00:00:00 2001
> From: Ralph Boehme <slow at samba.org>
> Date: Sun, 12 Mar 2017 18:13:48 +0100
> Subject: [PATCH 09/16] s3/vfs_default: lock the whole byte-range before the
>  copy loop
> 
> For callers that pass a num argument with a value higher than the max
> copy buffer we should lock the byte ranges before starting the copy
> loop, so the resulting locking semantics are coupled to the calling
> layer's sizes and not to our buffer size of the day.
> 
> Signed-off-by: Ralph Boehme <slow at samba.org>
> ---
>  source3/modules/vfs_default.c | 69 ++++++++++++++++++++++---------------------
>  1 file changed, 35 insertions(+), 34 deletions(-)
> 
> diff --git a/source3/modules/vfs_default.c b/source3/modules/vfs_default.c
> index 545e21a..e0945ff 100644
> --- a/source3/modules/vfs_default.c
> +++ b/source3/modules/vfs_default.c
> @@ -1607,6 +1607,8 @@ static struct tevent_req *vfswrap_copy_chunk_send(struct vfs_handle_struct *hand
>  {
>  	struct tevent_req *req;
>  	struct vfs_cc_state *vfs_cc_state;
> +	struct lock_struct write_lck;
> +	struct lock_struct read_lck;
>  	NTSTATUS status;
>  
>  	DEBUG(10, ("performing server side copy chunk of length %lu\n",
> @@ -1652,77 +1654,72 @@ static struct tevent_req *vfswrap_copy_chunk_send(struct vfs_handle_struct *hand
>  		return tevent_req_post(req, ev);
>  	}
>  
> +	init_strict_lock_struct(src_fsp,
> +				src_fsp->op->global->open_persistent_id,
> +				src_off,
> +				num,
> +				READ_LOCK,
> +				&read_lck);
> +
> +	if (!SMB_VFS_STRICT_LOCK(src_fsp->conn, src_fsp, &read_lck)) {
> +		tevent_req_nterror(req, NT_STATUS_FILE_LOCK_CONFLICT);
> +		return tevent_req_post(req, ev);
> +	}
> +
> +	init_strict_lock_struct(dest_fsp,
> +				dest_fsp->op->global->open_persistent_id,
> +				dest_off,
> +				num,
> +				WRITE_LOCK,
> +				&write_lck);
> +
> +	if (!SMB_VFS_STRICT_LOCK(dest_fsp->conn, dest_fsp, &write_lck)) {
> +		SMB_VFS_STRICT_UNLOCK(dest_fsp->conn, dest_fsp, &read_lck);
> +		tevent_req_nterror(req, NT_STATUS_FILE_LOCK_CONFLICT);
> +		return tevent_req_post(req, ev);
> +	}
> +
>  	/* could use 2.6.33+ sendfile here to do this in kernel */
>  	while (vfs_cc_state->copied < num) {
>  		ssize_t ret;
> -		struct lock_struct lck;
>  		int saved_errno;
> -
>  		off_t this_num = MIN(talloc_array_length(vfs_cc_state->buf),
>  				     num - vfs_cc_state->copied);
>  
> -		init_strict_lock_struct(src_fsp,
> -					src_fsp->op->global->open_persistent_id,
> -					src_off,
> -					this_num,
> -					READ_LOCK,
> -					&lck);
> -
> -		if (!SMB_VFS_STRICT_LOCK(src_fsp->conn, src_fsp, &lck)) {
> -			tevent_req_nterror(req, NT_STATUS_FILE_LOCK_CONFLICT);
> -			return tevent_req_post(req, ev);
> -		}
> -
>  		ret = SMB_VFS_PREAD(src_fsp, vfs_cc_state->buf,
>  				    this_num, src_off);
>  		if (ret == -1) {
>  			saved_errno = errno;
>  		}
>  
> -		SMB_VFS_STRICT_UNLOCK(src_fsp->conn, src_fsp, &lck);
> -
>  		if (ret == -1) {
>  			errno = saved_errno;
>  			tevent_req_nterror(req, map_nt_error_from_unix(errno));
> -			return tevent_req_post(req, ev);
> +			goto fail;
>  		}
>  		if (ret != this_num) {
>  			/* zero tolerance for short reads */
>  			tevent_req_nterror(req, NT_STATUS_IO_DEVICE_ERROR);
> -			return tevent_req_post(req, ev);
> +			goto fail;
>  		}
>  
>  		src_off += ret;
>  
> -		init_strict_lock_struct(dest_fsp,
> -					dest_fsp->op->global->open_persistent_id,
> -					dest_off,
> -					this_num,
> -					WRITE_LOCK,
> -					&lck);
> -
> -		if (!SMB_VFS_STRICT_LOCK(dest_fsp->conn, dest_fsp, &lck)) {
> -			tevent_req_nterror(req, NT_STATUS_FILE_LOCK_CONFLICT);
> -			return tevent_req_post(req, ev);
> -		}
> -
>  		ret = SMB_VFS_PWRITE(dest_fsp, vfs_cc_state->buf,
>  				     this_num, dest_off);
>  		if (ret == -1) {
>  			saved_errno = errno;
>  		}
>  
> -		SMB_VFS_STRICT_UNLOCK(dest_fsp->conn, dest_fsp, &lck);
> -
>  		if (ret == -1) {
>  			errno = saved_errno;
>  			tevent_req_nterror(req, map_nt_error_from_unix(errno));
> -			return tevent_req_post(req, ev);
> +			goto fail;
>  		}
>  		if (ret != this_num) {
>  			/* zero tolerance for short writes */
>  			tevent_req_nterror(req, NT_STATUS_IO_DEVICE_ERROR);
> -			return tevent_req_post(req, ev);
> +			goto fail;
>  		}
>  		dest_off += ret;
>  
> @@ -1730,6 +1727,10 @@ static struct tevent_req *vfswrap_copy_chunk_send(struct vfs_handle_struct *hand
>  	}
>  
>  	tevent_req_done(req);
> +
> +fail:
> +	SMB_VFS_STRICT_UNLOCK(src_fsp->conn, src_fsp, &read_lck);
> +	SMB_VFS_STRICT_UNLOCK(dest_fsp->conn, dest_fsp, &write_lck);
>  	return tevent_req_post(req, ev);
>  }
>  
> -- 
> 2.9.3
> 
> 
> From 67cfeca7dbb2071ffe260c907244a67f1b39645f Mon Sep 17 00:00:00 2001
> From: Ralph Boehme <slow at samba.org>
> Date: Mon, 13 Mar 2017 07:51:39 +0100
> Subject: [PATCH 10/16] s3/vfs: forward tevent cancel request in copy_chunk
> 
> Use tevent_req_set_cancel_forwarding() to forward cancel requests. No
> change in behaviour here, but a subsequent commit will make copy_chunk
> async in vfs_default and allow it to be cancelled.
> 
> Signed-off-by: Ralph Boehme <slow at samba.org>
> ---
>  source3/modules/vfs_btrfs.c          | 2 ++
>  source3/modules/vfs_fruit.c          | 1 +
>  source3/modules/vfs_time_audit.c     | 1 +
>  source3/smbd/smb2_ioctl_network_fs.c | 9 +++++++++
>  4 files changed, 13 insertions(+)
> 
> diff --git a/source3/modules/vfs_btrfs.c b/source3/modules/vfs_btrfs.c
> index 154fcc3..154ed79 100644
> --- a/source3/modules/vfs_btrfs.c
> +++ b/source3/modules/vfs_btrfs.c
> @@ -126,6 +126,7 @@ static struct tevent_req *btrfs_copy_chunk_send(struct vfs_handle_struct *handle
>  		tevent_req_set_callback(cc_state->subreq,
>  					btrfs_copy_chunk_done,
>  					req);
> +		tevent_req_set_cancel_forwarding(req, cc_state->subreq);
>  		return req;
>  	}
>  
> @@ -204,6 +205,7 @@ static struct tevent_req *btrfs_copy_chunk_send(struct vfs_handle_struct *handle
>  		tevent_req_set_callback(cc_state->subreq,
>  					btrfs_copy_chunk_done,
>  					req);
> +		tevent_req_set_cancel_forwarding(req, cc_state->subreq);
>  		return req;
>  	}
>  
> diff --git a/source3/modules/vfs_fruit.c b/source3/modules/vfs_fruit.c
> index fc80629..38ce697 100644
> --- a/source3/modules/vfs_fruit.c
> +++ b/source3/modules/vfs_fruit.c
> @@ -5252,6 +5252,7 @@ static struct tevent_req *fruit_copy_chunk_send(struct vfs_handle_struct *handle
>  	}
>  
>  	tevent_req_set_callback(subreq, fruit_copy_chunk_done, req);
> +	tevent_req_set_cancel_forwarding(req, subreq);
>  	return req;
>  }
>  
> diff --git a/source3/modules/vfs_time_audit.c b/source3/modules/vfs_time_audit.c
> index 986fe79..b358f46 100644
> --- a/source3/modules/vfs_time_audit.c
> +++ b/source3/modules/vfs_time_audit.c
> @@ -1907,6 +1907,7 @@ static struct tevent_req *smb_time_audit_copy_chunk_send(struct vfs_handle_struc
>  	}
>  
>  	tevent_req_set_callback(subreq, smb_time_audit_copy_chunk_done, req);
> +	tevent_req_set_cancel_forwarding(req, subreq);
>  	return req;
>  }
>  
> diff --git a/source3/smbd/smb2_ioctl_network_fs.c b/source3/smbd/smb2_ioctl_network_fs.c
> index 5b869ec..10d53e6 100644
> --- a/source3/smbd/smb2_ioctl_network_fs.c
> +++ b/source3/smbd/smb2_ioctl_network_fs.c
> @@ -353,6 +353,7 @@ static NTSTATUS fsctl_srv_copychunk_loop(struct tevent_req *req)
>  		}
>  		tevent_req_set_callback(subreq,
>  					fsctl_srv_copychunk_vfs_done, req);
> +		tevent_req_set_cancel_forwarding(req, subreq);
>  		return NT_STATUS_OK;
>  	}
>  
> @@ -372,6 +373,13 @@ static NTSTATUS fsctl_srv_copychunk_loop(struct tevent_req *req)
>  	}
>  	tevent_req_set_callback(subreq,	fsctl_srv_copychunk_vfs_done, req);
>  
> +	/*
> +	 * Request cancel forwarding. When the subreq is freed in
> +	 * fsctl_srv_copychunk_vfs_done() the cancel is also removed, so we can
> +	 * point the cancel to the next subreq.
> +	 */
> +	tevent_req_set_cancel_forwarding(req, subreq);
> +
>  	return NT_STATUS_OK;
>  }
>  
> @@ -720,6 +728,7 @@ struct tevent_req *smb2_ioctl_network_fs(uint32_t ctl_code,
>  		tevent_req_set_callback(subreq,
>  					smb2_ioctl_network_fs_copychunk_done,
>  					req);
> +		tevent_req_set_cancel_forwarding(req, subreq);
>  		return req;
>  		break;
>  	case FSCTL_QUERY_NETWORK_INTERFACE_INFO:
> -- 
> 2.9.3
> 
> 
> From 0a31c7041147f47787e9a516c5ce8fdc9ed282ed Mon Sep 17 00:00:00 2001
> From: Ralph Boehme <slow at samba.org>
> Date: Wed, 15 Mar 2017 15:23:46 +0100
> Subject: [PATCH 11/16] s3/smbd: decrease a debug log level in copy_chunk
> 
> The request could have been cancelled by the user, so let's just log
> errors here at level 10. The lower level implementation can better log
> real IO errors; a subsequent commit will do just that.
> 
> Signed-off-by: Ralph Boehme <slow at samba.org>
> ---
>  source3/smbd/smb2_ioctl_network_fs.c | 2 +-
>  1 file changed, 1 insertion(+), 1 deletion(-)
> 
> diff --git a/source3/smbd/smb2_ioctl_network_fs.c b/source3/smbd/smb2_ioctl_network_fs.c
> index 10d53e6..80787e5 100644
> --- a/source3/smbd/smb2_ioctl_network_fs.c
> +++ b/source3/smbd/smb2_ioctl_network_fs.c
> @@ -396,7 +396,7 @@ static void fsctl_srv_copychunk_vfs_done(struct tevent_req *subreq)
>  					 &chunk_nwritten);
>  	TALLOC_FREE(subreq);
>  	if (!NT_STATUS_IS_OK(status)) {
> -		DBG_ERR("copy chunk failed [%s] chunk [%u] of [%u]\n",
> +		DBG_DEBUG("copy chunk failed [%s] chunk [%u] of [%u]\n",
>  			nt_errstr(status),
>  			(unsigned int)state->next_chunk,
>  			(unsigned int)state->cc_copy.chunk_count);
> -- 
> 2.9.3
> 
> 
> From 97e865e4e480de2aa515f4f1b50620b43c589ddd Mon Sep 17 00:00:00 2001
> From: Ralph Boehme <slow at samba.org>
> Date: Wed, 15 Mar 2017 15:25:54 +0100
> Subject: [PATCH 12/16] vfs_fruit: decrease a debug log level in copy_chunk
> 
> The request could have been cancelled by the user, so let's just log
> errors here at level 10. The lower level implementation will error out
> anyway in this failure case.
> 
> Signed-off-by: Ralph Boehme <slow at samba.org>
> ---
>  source3/modules/vfs_fruit.c | 3 +--
>  1 file changed, 1 insertion(+), 2 deletions(-)
> 
> diff --git a/source3/modules/vfs_fruit.c b/source3/modules/vfs_fruit.c
> index 38ce697..23007c1 100644
> --- a/source3/modules/vfs_fruit.c
> +++ b/source3/modules/vfs_fruit.c
> @@ -5366,8 +5366,7 @@ static NTSTATUS fruit_copy_chunk_recv(struct vfs_handle_struct *handle,
>  	NTSTATUS status;
>  
>  	if (tevent_req_is_nterror(req, &status)) {
> -		DEBUG(1, ("server side copy chunk failed: %s\n",
> -			  nt_errstr(status)));
> +		DBG_DEBUG("copy chunk failed: %s\n", nt_errstr(status));
>  		*copied = 0;
>  		tevent_req_received(req);
>  		return status;
> -- 
> 2.9.3
> 
> 
> From a631a3af1d84ccf2517e77e4095c99d02759f1be Mon Sep 17 00:00:00 2001
> From: Ralph Boehme <slow at samba.org>
> Date: Wed, 22 Mar 2017 18:15:03 +0100
> Subject: [PATCH 13/16] s3/smbd: let copy-chunk add an aio object to the fsp
> 
> This is needed for making the copy-chunk implementation async in
> subsequent commits. For the current sync code it's a no-op: the aio
> object is immediately removed again when the req is received.
> 
> Signed-off-by: Ralph Boehme <slow at samba.org>
> ---
>  source3/smbd/smb2_ioctl_network_fs.c | 9 +++++++++
>  1 file changed, 9 insertions(+)
> 
> diff --git a/source3/smbd/smb2_ioctl_network_fs.c b/source3/smbd/smb2_ioctl_network_fs.c
> index 80787e5..254f79e 100644
> --- a/source3/smbd/smb2_ioctl_network_fs.c
> +++ b/source3/smbd/smb2_ioctl_network_fs.c
> @@ -705,6 +705,7 @@ struct tevent_req *smb2_ioctl_network_fs(uint32_t ctl_code,
>  {
>  	struct tevent_req *subreq;
>  	NTSTATUS status;
> +	bool ok;
>  
>  	switch (ctl_code) {
>  	/*
> @@ -729,6 +730,14 @@ struct tevent_req *smb2_ioctl_network_fs(uint32_t ctl_code,
>  					smb2_ioctl_network_fs_copychunk_done,
>  					req);
>  		tevent_req_set_cancel_forwarding(req, subreq);
> +
> +		/* Ensure we can't be closed in flight. */
> +		ok = aio_add_req_to_fsp(state->fsp, req);
> +		if (!ok) {
> +			tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
> +			return tevent_req_post(req, ev);
> +		}
> +
>  		return req;
>  		break;
>  	case FSCTL_QUERY_NETWORK_INTERFACE_INFO:
> -- 
> 2.9.3
> 
> 
> From cad8abbb65dbf0c2dad1e0c3defd9be6c09a9084 Mon Sep 17 00:00:00 2001
> From: Ralph Boehme <slow at samba.org>
> Date: Wed, 8 Mar 2017 15:07:58 +0100
> Subject: [PATCH 14/16] s3/smbd: make copy chunk asynchronous
> 
> Just use SMB_VFS_PREAD_SEND/RECV and SMB_VFS_PWRITE_SEND/RECV in a
> sensible loop.
> 
> Also install a cancel handler, so it's now possible to cancel the copy
> from the SMB layer. This would be especially useful for the fruit:copyfile
> behaviour where a single server-side copy request copies the whole
> file; unfortunately, at present the macOS client doesn't support
> cancelling the copy-chunk ioctl request.
> 
> Signed-off-by: Ralph Boehme <slow at samba.org>
> ---
>  source3/modules/vfs_default.c | 272 ++++++++++++++++++++++++++++++------------
>  1 file changed, 197 insertions(+), 75 deletions(-)
> 
> diff --git a/source3/modules/vfs_default.c b/source3/modules/vfs_default.c
> index e0945ff..9ced46b 100644
> --- a/source3/modules/vfs_default.c
> +++ b/source3/modules/vfs_default.c
> @@ -1592,10 +1592,27 @@ static NTSTATUS vfswrap_fset_dos_attributes(struct vfs_handle_struct *handle,
>  }
>  
>  struct vfs_cc_state {
> -	off_t copied;
> +	struct tevent_context *ev;
> +	bool cancelled;
>  	uint8_t *buf;
> +	bool read_lck_locked;
> +	struct lock_struct read_lck;
> +	bool write_lck_locked;
> +	struct lock_struct write_lck;
> +	struct files_struct *src_fsp;
> +	off_t src_off;
> +	struct files_struct *dst_fsp;
> +	off_t dst_off;
> +	off_t to_copy;
> +	off_t remaining;
> +	size_t next_io_size;
>  };
>  
> +static NTSTATUS copy_chunk_loop(TALLOC_CTX *mem_ctx,
> +				struct tevent_req *req);
> +
> +static bool vfswrap_copy_chunk_cancel(struct tevent_req *req);
> +
>  static struct tevent_req *vfswrap_copy_chunk_send(struct vfs_handle_struct *handle,
>  						  TALLOC_CTX *mem_ctx,
>  						  struct tevent_context *ev,
> @@ -1603,25 +1620,32 @@ static struct tevent_req *vfswrap_copy_chunk_send(struct vfs_handle_struct *hand
>  						  off_t src_off,
>  						  struct files_struct *dest_fsp,
>  						  off_t dest_off,
> -						  off_t num)
> +						  off_t to_copy)
>  {
>  	struct tevent_req *req;
> -	struct vfs_cc_state *vfs_cc_state;
> -	struct lock_struct write_lck;
> -	struct lock_struct read_lck;
> +	struct vfs_cc_state *state = NULL;
> +	size_t num = MIN(to_copy, COPYCHUNK_MAX_TOTAL_LEN);
>  	NTSTATUS status;
> +	bool ok;
>  
> -	DEBUG(10, ("performing server side copy chunk of length %lu\n",
> -		   (unsigned long)num));
> +	DBG_DEBUG("server side copy chunk of length %" PRIu64 "\n", to_copy);
>  
> -	req = tevent_req_create(mem_ctx, &vfs_cc_state, struct vfs_cc_state);
> +	req = tevent_req_create(mem_ctx, &state, struct vfs_cc_state);
>  	if (req == NULL) {
>  		return NULL;
>  	}
>  
> -	vfs_cc_state->buf = talloc_array(vfs_cc_state, uint8_t,
> -					 MIN(num, COPYCHUNK_MAX_TOTAL_LEN));
> -	if (tevent_req_nomem(vfs_cc_state->buf, req)) {
> +	*state = (struct vfs_cc_state) {
> +		.ev = ev,
> +		.src_fsp = src_fsp,
> +		.src_off = src_off,
> +		.dst_fsp = dest_fsp,
> +		.dst_off = dest_off,
> +		.to_copy = to_copy,
> +		.remaining = to_copy,
> +	};
> +	state->buf = talloc_array(state, uint8_t, num);
> +	if (tevent_req_nomem(state->buf, req)) {
>  		return tevent_req_post(req, ev);
>  	}
>  
> @@ -1654,105 +1678,203 @@ static struct tevent_req *vfswrap_copy_chunk_send(struct vfs_handle_struct *hand
>  		return tevent_req_post(req, ev);
>  	}
>  
> -	init_strict_lock_struct(src_fsp,
> -				src_fsp->op->global->open_persistent_id,
> -				src_off,
> -				num,
> +	init_strict_lock_struct(state->src_fsp,
> +				state->src_fsp->op->global->open_persistent_id,
> +				state->src_off,
> +				to_copy,
>  				READ_LOCK,
> -				&read_lck);
> +				&state->read_lck);
>  
> -	if (!SMB_VFS_STRICT_LOCK(src_fsp->conn, src_fsp, &read_lck)) {
> +	ok = SMB_VFS_STRICT_LOCK(state->src_fsp->conn,
> +				 state->src_fsp,
> +				 &state->read_lck);
> +	if (!ok) {
>  		tevent_req_nterror(req, NT_STATUS_FILE_LOCK_CONFLICT);
>  		return tevent_req_post(req, ev);
>  	}
> +	state->read_lck_locked = true;
>  
> -	init_strict_lock_struct(dest_fsp,
> -				dest_fsp->op->global->open_persistent_id,
> -				dest_off,
> -				num,
> +	init_strict_lock_struct(state->dst_fsp,
> +				state->dst_fsp->op->global->open_persistent_id,
> +				state->dst_off,
> +				to_copy,
>  				WRITE_LOCK,
> -				&write_lck);
> +				&state->write_lck);
>  
> -	if (!SMB_VFS_STRICT_LOCK(dest_fsp->conn, dest_fsp, &write_lck)) {
> -		SMB_VFS_STRICT_UNLOCK(dest_fsp->conn, dest_fsp, &read_lck);
> +	ok = SMB_VFS_STRICT_LOCK(state->dst_fsp->conn,
> +				 state->dst_fsp,
> +				 &state->write_lck);
> +	if (!ok) {
>  		tevent_req_nterror(req, NT_STATUS_FILE_LOCK_CONFLICT);
>  		return tevent_req_post(req, ev);
>  	}
> +	state->write_lck_locked = true;
>  
> -	/* could use 2.6.33+ sendfile here to do this in kernel */
> -	while (vfs_cc_state->copied < num) {
> -		ssize_t ret;
> -		int saved_errno;
> -		off_t this_num = MIN(talloc_array_length(vfs_cc_state->buf),
> -				     num - vfs_cc_state->copied);
> +	status = copy_chunk_loop(state, req);
> +	if (!NT_STATUS_IS_OK(status)) {
> +		tevent_req_nterror(req, status);
> +		return tevent_req_post(req, ev);
> +	}
>  
> -		ret = SMB_VFS_PREAD(src_fsp, vfs_cc_state->buf,
> -				    this_num, src_off);
> -		if (ret == -1) {
> -			saved_errno = errno;
> -		}
> +	tevent_req_set_cancel_fn(req, vfswrap_copy_chunk_cancel);
> +	return req;
> +}
>  
> -		if (ret == -1) {
> -			errno = saved_errno;
> -			tevent_req_nterror(req, map_nt_error_from_unix(errno));
> -			goto fail;
> -		}
> -		if (ret != this_num) {
> -			/* zero tolerance for short reads */
> -			tevent_req_nterror(req, NT_STATUS_IO_DEVICE_ERROR);
> -			goto fail;
> -		}
> +static bool vfswrap_copy_chunk_cancel(struct tevent_req *req)
> +{
> +	struct vfs_cc_state *state = tevent_req_data(req, struct vfs_cc_state);
>  
> -		src_off += ret;
> +	state->cancelled = true;
> +	return true;
> +}
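
Cancellation is cooperative here: the cancel fn above only marks
state->cancelled, and the request is actually failed with
NT_STATUS_CANCELLED at the next read/write completion (see the checks
in the done callbacks below). For reference, a minimal illustration of
how a higher layer would trigger it; "vfs_req" is just a placeholder
for the outstanding copy-chunk request:

/*
 * Illustration only: tevent_req_cancel() ends up calling the cancel
 * fn installed by vfswrap_copy_chunk_send(), possibly forwarded
 * through intermediate requests by the new cancel forwarding.
 */
static void example_cancel_copy(struct tevent_req *vfs_req)
{
	if (!tevent_req_cancel(vfs_req)) {
		/* No cancel fn installed, or the implementation refused. */
		DBG_NOTICE("copy-chunk request could not be cancelled\n");
	}
}
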
>  
> -		ret = SMB_VFS_PWRITE(dest_fsp, vfs_cc_state->buf,
> -				     this_num, dest_off);
> -		if (ret == -1) {
> -			saved_errno = errno;
> -		}
> +static void vfswrap_copy_chunk_read_done(struct tevent_req *subreq);
>  
> -		if (ret == -1) {
> -			errno = saved_errno;
> -			tevent_req_nterror(req, map_nt_error_from_unix(errno));
> -			goto fail;
> -		}
> -		if (ret != this_num) {
> -			/* zero tolerance for short writes */
> -			tevent_req_nterror(req, NT_STATUS_IO_DEVICE_ERROR);
> -			goto fail;
> -		}
> -		dest_off += ret;
> +static NTSTATUS copy_chunk_loop(TALLOC_CTX *mem_ctx,
> +				struct tevent_req *req)
> +{
> +	struct vfs_cc_state *state = tevent_req_data(req, struct vfs_cc_state);
> +	struct tevent_req *subreq = NULL;
>  
> -		vfs_cc_state->copied += this_num;
> +	state->next_io_size = MIN(state->remaining, talloc_array_length(state->buf));
> +
> +	subreq = SMB_VFS_PREAD_SEND(state,
> +				    state->src_fsp->conn->sconn->ev_ctx,
> +				    state->src_fsp,
> +				    state->buf,
> +				    state->next_io_size,
> +				    state->src_off);
> +	if (subreq == NULL) {
> +		return NT_STATUS_NO_MEMORY;
>  	}
> +	tevent_req_set_callback(subreq, vfswrap_copy_chunk_read_done, req);
>  
> -	tevent_req_done(req);
> +	return NT_STATUS_OK;
> +}
> +
> +static void vfswrap_copy_chunk_write_done(struct tevent_req *subreq);
> +
> +static void vfswrap_copy_chunk_read_done(struct tevent_req *subreq)
> +{
> +	struct tevent_req *req = tevent_req_callback_data(
> +		subreq, struct tevent_req);
> +	struct vfs_cc_state *state = tevent_req_data(req, struct vfs_cc_state);
> +	struct vfs_aio_state aio_state;
> +	ssize_t nread;
> +
> +	nread = SMB_VFS_PREAD_RECV(subreq, &aio_state);
> +	TALLOC_FREE(subreq);
> +	if (nread == -1) {
> +		DBG_ERR("read failed: %s\n", strerror(aio_state.error));
> +		tevent_req_nterror(req, map_nt_error_from_unix(aio_state.error));
> +		return;
> +	}
> +	if (nread != state->next_io_size) {
> +		DBG_ERR("Short read, only %zd of %zu\n",
> +			nread, state->next_io_size);
> +		tevent_req_nterror(req, NT_STATUS_IO_DEVICE_ERROR);
> +		return;
> +	}
>  
> -fail:
> -	SMB_VFS_STRICT_UNLOCK(src_fsp->conn, src_fsp, &read_lck);
> -	SMB_VFS_STRICT_UNLOCK(dest_fsp->conn, dest_fsp, &write_lck);
> -	return tevent_req_post(req, ev);
> +	state->src_off += nread;
> +
> +	if (state->cancelled) {
> +		DBG_DEBUG("Cancelled at offset %zd\n", state->src_off);
> +		tevent_req_nterror(req, NT_STATUS_CANCELLED);
> +		return;
> +	}
> +
> +	subreq = SMB_VFS_PWRITE_SEND(state,
> +				     state->ev,
> +				     state->dst_fsp,
> +				     state->buf,
> +				     state->next_io_size,
> +				     state->dst_off);
> +	if (subreq == NULL) {
> +		tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
> +		return;
> +	}
> +	tevent_req_set_callback(subreq, vfswrap_copy_chunk_write_done, req);
> +}
> +
> +static void vfswrap_copy_chunk_write_done(struct tevent_req *subreq)
> +{
> +	struct tevent_req *req = tevent_req_callback_data(
> +		subreq, struct tevent_req);
> +	struct vfs_cc_state *state = tevent_req_data(req, struct vfs_cc_state);
> +	struct vfs_aio_state aio_state;
> +	ssize_t nwritten;
> +	NTSTATUS status;
> +
> +	nwritten = SMB_VFS_PWRITE_RECV(subreq, &aio_state);
> +	TALLOC_FREE(subreq);
> +	if (nwritten == -1) {
> +		DBG_ERR("write failed: %s\n", strerror(aio_state.error));
> +		tevent_req_nterror(req, map_nt_error_from_unix(aio_state.error));
> +		return;
> +	}
> +	if (nwritten != state->next_io_size) {
> +		DBG_ERR("Short write, only %zd of %zu\n", nwritten, state->next_io_size);
> +		tevent_req_nterror(req, NT_STATUS_IO_DEVICE_ERROR);
> +		return;
> +	}
> +
> +	state->dst_off += nwritten;
> +
> +	if (state->cancelled) {
> +		DBG_DEBUG("Cancelled at offset %zd\n", state->dst_off);
> +		tevent_req_nterror(req, NT_STATUS_CANCELLED);
> +		return;
> +	}
> +
> +	if ((state->remaining - nwritten) > state->remaining) {
> +		/* Paranoia check */
> +		tevent_req_nterror(req, NT_STATUS_INTERNAL_ERROR);
> +		return;
> +	}
> +	state->remaining -= nwritten;
> +	if (state->remaining == 0) {
> +		tevent_req_done(req);
> +		return;
> +	}
> +
> +	status = copy_chunk_loop(state, req);
> +	if (!NT_STATUS_IS_OK(status)) {
> +		tevent_req_nterror(req, status);
> +		return;
> +	}
> +
> +	return;
>  }
>  
>  static NTSTATUS vfswrap_copy_chunk_recv(struct vfs_handle_struct *handle,
>  					struct tevent_req *req,
>  					off_t *copied)
>  {
> -	struct vfs_cc_state *vfs_cc_state = tevent_req_data(req,
> -							struct vfs_cc_state);
> +	struct vfs_cc_state *state = tevent_req_data(req, struct vfs_cc_state);
>  	NTSTATUS status;
>  
> +	if (state->read_lck_locked) {
> +		SMB_VFS_STRICT_UNLOCK(state->src_fsp->conn,
> +				      state->src_fsp,
> +				      &state->read_lck);
> +	}
> +
> +	if (state->write_lck_locked) {
> +		SMB_VFS_STRICT_UNLOCK(state->dst_fsp->conn,
> +				      state->dst_fsp,
> +				      &state->write_lck);
> +	}
> +
>  	if (tevent_req_is_nterror(req, &status)) {
> -		DEBUG(2, ("server side copy chunk failed: %s\n",
> -			  nt_errstr(status)));
> +		DBG_DEBUG("copy chunk failed: %s\n", nt_errstr(status));
>  		*copied = 0;
>  		tevent_req_received(req);
>  		return status;
>  	}
>  
> -	*copied = vfs_cc_state->copied;
> -	DEBUG(10, ("server side copy chunk copied %lu\n",
> -		   (unsigned long)*copied));
> +	*copied = state->to_copy;
> +	DBG_DEBUG("copy chunk copied %lu\n", (unsigned long)*copied);
>  	tevent_req_received(req);
>  
>  	return NT_STATUS_OK;
> -- 
> 2.9.3
> 
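
For anyone wanting to exercise the new pread/pwrite based copy loop in
isolation: below is a minimal sketch of driving the pair to completion
on a private tevent context. Illustrative only; the functions are
static to vfs_default.c, so real callers go through the VFS copy-chunk
wrappers. The argument order simply mirrors the prototypes in the
patch, and the handles and offsets are assumed to be set up elsewhere.

/*
 * Sketch, not smbd code: run the async server-side copy synchronously
 * on a private event context.
 */
static NTSTATUS copy_chunk_sync_example(struct vfs_handle_struct *handle,
					struct files_struct *src_fsp,
					struct files_struct *dst_fsp,
					off_t num, off_t *copied)
{
	TALLOC_CTX *frame = talloc_stackframe();
	struct tevent_context *ev = samba_tevent_context_init(frame);
	struct tevent_req *req = NULL;
	NTSTATUS status = NT_STATUS_NO_MEMORY;

	if (ev == NULL) {
		goto out;
	}

	req = vfswrap_copy_chunk_send(handle, frame, ev,
				      src_fsp, 0,	/* src_off */
				      dst_fsp, 0,	/* dest_off */
				      num);
	if (req == NULL) {
		goto out;
	}

	/* Spin the event loop until the request completes or fails. */
	if (!tevent_req_poll_ntstatus(req, ev, &status)) {
		goto out;
	}

	status = vfswrap_copy_chunk_recv(handle, req, copied);
out:
	TALLOC_FREE(frame);
	return status;
}
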
> 
> From dc61ed2f1e623b07b7931bc55935d4dc07ad1dd4 Mon Sep 17 00:00:00 2001
> From: Ralph Boehme <slow at samba.org>
> Date: Wed, 15 Mar 2017 13:47:39 +0100
> Subject: [PATCH 15/16] torture: add two new assert macros
> 
> The two new macros are:
> 
> o torture_assert_ndr_err_equal_goto()
> 
> o torture_assert_ndr_success_goto()
> 
> Signed-off-by: Ralph Boehme <slow at samba.org>
> ---
>  lib/torture/torture.h | 12 ++++++++++++
>  1 file changed, 12 insertions(+)
> 
> diff --git a/lib/torture/torture.h b/lib/torture/torture.h
> index b6d1301..3e476c2 100644
> --- a/lib/torture/torture.h
> +++ b/lib/torture/torture.h
> @@ -293,6 +293,15 @@ void torture_result(struct torture_context *test,
>  	}\
>  	} while(0)
>  
> +#define torture_assert_ndr_err_equal_goto(torture_ctx,got,expected,ret,label,cmt) \
> +	do { enum ndr_err_code __got = got, __expected = expected; \
> +	if (__got != __expected) { \
> +		torture_result(torture_ctx, TORTURE_FAIL, __location__": "#got" was %d (%s), expected %d (%s): %s", __got, ndr_errstr(__got), __expected, __STRING(expected), cmt); \
> +		ret = false; \
> +		goto label; \
> +	} \
> +	} while(0)
> +
>  #define torture_assert_hresult_equal(torture_ctx, got, expected, cmt) \
>  	do { HRESULT __got = got, __expected = expected; \
>  	if (!HRES_IS_EQUAL(__got, __expected)) { \
> @@ -637,6 +646,9 @@ static inline void torture_dump_data_str_cb(const char *buf, void *private_data)
>  #define torture_assert_ndr_success(torture_ctx,expr,cmt) \
>  		torture_assert_ndr_err_equal(torture_ctx,expr,NDR_ERR_SUCCESS,cmt)
>  
> +#define torture_assert_ndr_success_goto(torture_ctx,expr,ret,label,cmt) \
> +		torture_assert_ndr_err_equal_goto(torture_ctx,expr,NDR_ERR_SUCCESS,ret,label,cmt)
> +
>  #define torture_assert_hresult_ok(torture_ctx,expr,cmt) \
>  		torture_assert_hresult_equal(torture_ctx,expr,HRES_ERROR(0), cmt)
>  
> -- 
> 2.9.3
> 
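
The new goto-style NDR asserts read like the existing *_goto macros; a
small self-contained illustration follows (the payload and tree setup
are placeholders, not taken from the patch):

/*
 * Illustrative only: a failing assert prints the NDR error, sets
 * ok = false and jumps to the cleanup label.
 */
static bool example_ndr_push_check(struct torture_context *tctx,
				   struct smb2_tree *tree)
{
	TALLOC_CTX *tmp_ctx = talloc_new(tree);
	struct srv_copychunk_copy cc_copy = {0};
	DATA_BLOB blob;
	enum ndr_err_code ndr_ret;
	bool ok = true;

	ndr_ret = ndr_push_struct_blob(&blob, tmp_ctx, &cc_copy,
		(ndr_push_flags_fn_t)ndr_push_srv_copychunk_copy);
	torture_assert_ndr_success_goto(tctx, ndr_ret, ok, done,
					"ndr_push_srv_copychunk_copy failed");

done:
	talloc_free(tmp_ctx);
	return ok;
}
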
> 
> From 102dd4c6375d061cf30ef38c1538b7873d22b7c9 Mon Sep 17 00:00:00 2001
> From: Ralph Boehme <slow at samba.org>
> Date: Wed, 15 Mar 2017 13:49:29 +0100
> Subject: [PATCH 16/16] s4/torture: add a test for async copy_chunk
>  cancellation
> 
> This tests cancellation of an async copy_chunk request.
> 
> It is added to the vfs.fruit testsuite because the fruit:copyfile
> semantics make it possible to trigger a copy loop that runs long
> enough for us to cancel it.
> 
> Signed-off-by: Ralph Boehme <slow at samba.org>
> ---
>  source4/torture/vfs/fruit.c | 64 ++++++++++++++++++++++++++++++++++++++++++++-
>  1 file changed, 63 insertions(+), 1 deletion(-)
> 
> diff --git a/source4/torture/vfs/fruit.c b/source4/torture/vfs/fruit.c
> index d74a153..75fb619 100644
> --- a/source4/torture/vfs/fruit.c
> +++ b/source4/torture/vfs/fruit.c
> @@ -2593,6 +2593,68 @@ done:
>  	return true;
>  }
>  
> +static bool test_copy_chunk_cancel(struct torture_context *torture,
> +				   struct smb2_tree *tree)
> +{
> +	struct smb2_handle src_h = {{0}};
> +	struct smb2_handle dest_h = {{0}};
> +	NTSTATUS status;
> +	union smb_ioctl ioctl;
> +	struct smb2_request *req;
> +	TALLOC_CTX *tmp_ctx = talloc_new(tree);
> +	struct srv_copychunk_copy cc_copy;
> +	enum ndr_err_code ndr_ret;
> +	bool ok = true;
> +
> +	ok = neg_aapl_copyfile(torture, tree,
> +			       SMB2_CRTCTX_AAPL_SUPPORTS_OSX_COPYFILE);
> +	torture_assert_goto(torture, ok == true, ok, done, "missing AAPL copyfile\n");
> +
> +	/*
> +	 * Create a BIG file so we have a good chance the copy is still in progress
> +	 * when the server gets our cancel.
> +	 */
> +	ok = test_setup_copy_chunk(torture, tree, tmp_ctx,
> +				   0, /* 0 chunks -> copyfile */
> +				   &src_h,
> +				   512*1024*1024, /* 512 MB source file */
> +				   SEC_RIGHTS_FILE_ALL,
> +				   &dest_h,
> +				   0,	/* 0 byte dest file */
> +				   SEC_RIGHTS_FILE_ALL,
> +				   &cc_copy,
> +				   &ioctl);
> +	torture_assert_goto(torture, ok == true, ok, done, "setup copy chunk error\n");
> +
> +	ndr_ret = ndr_push_struct_blob(
> +		&ioctl.smb2.in.out, tmp_ctx, &cc_copy,
> +		(ndr_push_flags_fn_t)ndr_push_srv_copychunk_copy);
> +	torture_assert_ndr_success_goto(torture, ndr_ret, ok, done,
> +					"ndr_push_srv_copychunk_copy\n");
> +
> +	req = smb2_ioctl_send(tree, &ioctl.smb2);
> +	torture_assert_goto(torture, req != NULL, ok, done, "smb2_ioctl_send failed\n");
> +
> +	smb2_cancel(req);
> +
> +	status = smb2_ioctl_recv(req, tree, &ioctl.smb2);
> +	torture_assert_ntstatus_equal_goto(torture, status, NT_STATUS_CANCELLED, ok, done,
> +					   "Cancel failed\n");
> +
> +done:
> +	if (!smb2_util_handle_empty(src_h)) {
> +		smb2_util_close(tree, src_h);
> +	}
> +	if (!smb2_util_handle_empty(dest_h)) {
> +		smb2_util_close(tree, dest_h);
> +	}
> +	smb2_util_unlink(tree, FNAME_CC_SRC);
> +	smb2_util_unlink(tree, FNAME_CC_DST);
> +	talloc_free(tmp_ctx);
> +
> +	return ok;
> +}
> +
>  static bool check_stream_list(struct smb2_tree *tree,
>  			      struct torture_context *tctx,
>  			      const char *fname,
> @@ -3950,8 +4012,8 @@ struct torture_suite *torture_vfs_fruit(void)
>  	torture_suite_add_1smb2_test(suite, "delete", test_delete_file_with_rfork);
>  	torture_suite_add_1smb2_test(suite, "read open rsrc after rename", test_rename_and_read_rsrc);
>  	torture_suite_add_1smb2_test(suite, "readdir_attr with names with illegal ntfs characters", test_readdir_attr_illegal_ntfs);
> -
>  	torture_suite_add_2ns_smb2_test(suite, "invalid AFP_AfpInfo", test_invalid_afpinfo);
> +	torture_suite_add_1smb2_test(suite, "copy_chunk_cancel", test_copy_chunk_cancel);
>  
>  	return suite;
>  }
> -- 
> 2.9.3
> 




More information about the samba-technical mailing list