[PATCH] vfs_ceph: add fake async pwrite/pread send/recv hooks
Jeremy Allison
jra at samba.org
Wed May 9 16:03:10 UTC 2018
On Wed, May 09, 2018 at 05:07:11PM +0200, David Disseldorp wrote:
> As found by Jeremy, VFS modules that don't provide pread_send() or
> pwrite_send() hooks result in vfs_default fallback, which is
> catastrophic for VFS modules with non-mounted filesystems such as
> vfs_ceph.
>
> Bug: https://bugzilla.samba.org/show_bug.cgi?id=13425
Thanks a lot, David. You just did for me the work I was
planning to do today :-).
RB+ and pushed!
Jeremy.
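
For anyone new to the tevent pattern used here: the trick is to do the
I/O synchronously inside the _send() function, record the result or the
error on the request, and then tevent_req_post() it, so the caller's
callback still fires from the event loop exactly as it would for a real
async backend. A minimal standalone sketch of the same pattern
(hypothetical names, plain talloc/tevent only, nothing Samba-specific;
builds with -ltalloc -ltevent):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <talloc.h>
#include <tevent.h>

struct fake_state {
	int result;
};

static struct tevent_req *fake_send(TALLOC_CTX *mem_ctx,
				    struct tevent_context *ev,
				    int input)
{
	struct tevent_req *req = NULL;
	struct fake_state *state = NULL;

	req = tevent_req_create(mem_ctx, &state, struct fake_state);
	if (req == NULL) {
		return NULL;
	}

	if (input < 0) {
		/* Error path: park an errno on the request, like the
		 * -errno mapping in the hooks below. */
		tevent_req_error(req, EINVAL);
		return tevent_req_post(req, ev);
	}

	/* The "async" work happens synchronously, right here. */
	state->result = input * 2;

	tevent_req_done(req);
	/* Defer completion so the callback runs from the event loop. */
	return tevent_req_post(req, ev);
}

static int fake_recv(struct tevent_req *req, int *result)
{
	struct fake_state *state = tevent_req_data(req, struct fake_state);
	enum tevent_req_state req_state;
	uint64_t error;

	if (tevent_req_is_error(req, &req_state, &error)) {
		return -1;
	}
	*result = state->result;
	return 0;
}

int main(void)
{
	TALLOC_CTX *mem_ctx = talloc_new(NULL);
	struct tevent_context *ev = tevent_context_init(mem_ctx);
	struct tevent_req *req = fake_send(mem_ctx, ev, 21);
	int result = 0;

	/* Drive the loop until the posted completion fires. */
	if (req == NULL || !tevent_req_poll(req, ev)) {
		talloc_free(mem_ctx);
		return 1;
	}
	if (fake_recv(req, &result) == 0) {
		printf("result=%d\n", result);
	}
	talloc_free(mem_ctx);
	return 0;
}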
> From 55b18b6170652748812a50edfc0d353921daefc3 Mon Sep 17 00:00:00 2001
> From: David Disseldorp <ddiss at samba.org>
> Date: Wed, 9 May 2018 16:51:34 +0200
> Subject: [PATCH] vfs_ceph: add fake async pwrite/pread send/recv hooks
>
> As found by Jeremy, VFS modules that don't provide pread_send() or
> pwrite_send() hooks result in vfs_default fallback, which is
> catastrophic for VFS modules with non-mounted filesystems such as
> vfs_ceph.
>
> Bug: https://bugzilla.samba.org/show_bug.cgi?id=13425
>
> Reported-by: Jeremy Allison <jra at samba.org>
> Signed-off-by: David Disseldorp <ddiss at samba.org>
> ---
> source3/modules/vfs_ceph.c | 109 ++++++++++++++++++++++++++++++++++++++++++++-
> 1 file changed, 108 insertions(+), 1 deletion(-)
>
> diff --git a/source3/modules/vfs_ceph.c b/source3/modules/vfs_ceph.c
> index 87df83e805b..bac88581133 100644
> --- a/source3/modules/vfs_ceph.c
> +++ b/source3/modules/vfs_ceph.c
> @@ -470,6 +470,57 @@ static ssize_t cephwrap_pread(struct vfs_handle_struct *handle, files_struct *fs
>  	WRAP_RETURN(result);
>  }
>  
> +struct cephwrap_pread_state {
> +	ssize_t bytes_read;
> +	struct vfs_aio_state vfs_aio_state;
> +};
> +
> +/*
> + * Fake up an async ceph read by calling the synchronous API.
> + */
> +static struct tevent_req *cephwrap_pread_send(struct vfs_handle_struct *handle,
> +					      TALLOC_CTX *mem_ctx,
> +					      struct tevent_context *ev,
> +					      struct files_struct *fsp,
> +					      void *data,
> +					      size_t n, off_t offset)
> +{
> +	struct tevent_req *req = NULL;
> +	struct cephwrap_pread_state *state = NULL;
> +	int ret = -1;
> +
> +	DBG_DEBUG("[CEPH] %s\n", __func__);
> +	req = tevent_req_create(mem_ctx, &state, struct cephwrap_pread_state);
> +	if (req == NULL) {
> +		return NULL;
> +	}
> +
> +	ret = ceph_read(handle->data, fsp->fh->fd, data, n, offset);
> +	if (ret < 0) {
> +		/* ceph returns -errno on error. */
> +		tevent_req_error(req, -ret);
> +		return tevent_req_post(req, ev);
> +	}
> +
> +	state->bytes_read = ret;
> +	tevent_req_done(req);
> +	/* Return and schedule the completion of the call. */
> +	return tevent_req_post(req, ev);
> +}
> +
> +static ssize_t cephwrap_pread_recv(struct tevent_req *req,
> +				   struct vfs_aio_state *vfs_aio_state)
> +{
> +	struct cephwrap_pread_state *state =
> +		tevent_req_data(req, struct cephwrap_pread_state);
> +
> +	DBG_DEBUG("[CEPH] %s\n", __func__);
> +	if (tevent_req_is_unix_error(req, &vfs_aio_state->error)) {
> +		return -1;
> +	}
> +	*vfs_aio_state = state->vfs_aio_state;
> +	return state->bytes_read;
> +}
>  
>  static ssize_t cephwrap_pwrite(struct vfs_handle_struct *handle, files_struct *fsp, const void *data,
>  			size_t n, off_t offset)
> @@ -482,6 +533,58 @@ static ssize_t cephwrap_pwrite(struct vfs_handle_struct *handle, files_struct *f
>  	WRAP_RETURN(result);
>  }
>  
> +struct cephwrap_pwrite_state {
> +	ssize_t bytes_written;
> +	struct vfs_aio_state vfs_aio_state;
> +};
> +
> +/*
> + * Fake up an async ceph write by calling the synchronous API.
> + */
> +static struct tevent_req *cephwrap_pwrite_send(struct vfs_handle_struct *handle,
> +					       TALLOC_CTX *mem_ctx,
> +					       struct tevent_context *ev,
> +					       struct files_struct *fsp,
> +					       const void *data,
> +					       size_t n, off_t offset)
> +{
> +	struct tevent_req *req = NULL;
> +	struct cephwrap_pwrite_state *state = NULL;
> +	int ret = -1;
> +
> +	DBG_DEBUG("[CEPH] %s\n", __func__);
> +	req = tevent_req_create(mem_ctx, &state, struct cephwrap_pwrite_state);
> +	if (req == NULL) {
> +		return NULL;
> +	}
> +
> +	ret = ceph_write(handle->data, fsp->fh->fd, data, n, offset);
> +	if (ret < 0) {
> +		/* ceph returns -errno on error. */
> +		tevent_req_error(req, -ret);
> +		return tevent_req_post(req, ev);
> +	}
> +
> +	state->bytes_written = ret;
> +	tevent_req_done(req);
> +	/* Return and schedule the completion of the call. */
> +	return tevent_req_post(req, ev);
> +}
> +
> +static ssize_t cephwrap_pwrite_recv(struct tevent_req *req,
> +				    struct vfs_aio_state *vfs_aio_state)
> +{
> +	struct cephwrap_pwrite_state *state =
> +		tevent_req_data(req, struct cephwrap_pwrite_state);
> +
> +	DBG_DEBUG("[CEPH] %s\n", __func__);
> +	if (tevent_req_is_unix_error(req, &vfs_aio_state->error)) {
> +		return -1;
> +	}
> +	*vfs_aio_state = state->vfs_aio_state;
> +	return state->bytes_written;
> +}
> +
>  static off_t cephwrap_lseek(struct vfs_handle_struct *handle, files_struct *fsp, off_t offset, int whence)
>  {
>  	off_t result = 0;
> @@ -535,7 +638,7 @@ static int cephwrap_rename(struct vfs_handle_struct *handle,
>  }
>  
>  /*
> - * Fake up an async ceph fsync by calling the sychronous API.
> + * Fake up an async ceph fsync by calling the synchronous API.
>   */
>  
>  static struct tevent_req *cephwrap_fsync_send(struct vfs_handle_struct *handle,
> @@ -1443,7 +1546,11 @@ static struct vfs_fn_pointers ceph_fns = {
>  	.open_fn = cephwrap_open,
>  	.close_fn = cephwrap_close,
>  	.pread_fn = cephwrap_pread,
> +	.pread_send_fn = cephwrap_pread_send,
> +	.pread_recv_fn = cephwrap_pread_recv,
>  	.pwrite_fn = cephwrap_pwrite,
> +	.pwrite_send_fn = cephwrap_pwrite_send,
> +	.pwrite_recv_fn = cephwrap_pwrite_recv,
>  	.lseek_fn = cephwrap_lseek,
>  	.sendfile_fn = cephwrap_sendfile,
>  	.recvfile_fn = cephwrap_recvfile,
> --
> 2.13.6
>
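
For anyone wondering why the vfs_default fallback is so catastrophic
here: the number stored in fsp->fh->fd comes from ceph_open(), so it
is only meaningful to libcephfs, not to the kernel. The default
pread_send()/pwrite_send() implementations hand exactly that number to
the real pread(2)/pwrite(2) in a worker thread. Illustrative sketch
only (not Samba code, fd value made up):

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[16];
	int cephfs_fd = 12345;	/* hypothetical ceph_open() result */

	/* Effectively what the vfs_default aio path would do: */
	ssize_t n = pread(cephfs_fd, buf, sizeof(buf), 0);
	if (n < 0) {
		perror("pread");	/* best case: EBADF */
	}
	/* Worst case: the number collides with a real descriptor in
	 * this process and we silently read unrelated data. */
	return 0;
}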