vfs_glusterfs: Fix AIO crash on smb.conf reload.

Jeremy Allison jra at samba.org
Fri Dec 11 00:57:46 UTC 2015


On Thu, Dec 10, 2015 at 04:37:36PM -0500, Ira Cooper wrote:
> This is really a bug related to how vfs_glusterfs handles AIO and
> cancellation, so new logic was needed on those paths.
> 
> The patch enclosed passes smoke tests on my laptop at this point, and it
> is passing valgrind.

Just a few comments (inline) below. Sorry if I'm being too picky about
the whitespace changes, but any extra whitespace mods (IMHO)
hide the actual changes. Feel free to tell me to bugger off :-).

Cheers,

	Jeremy.

>  	 */
>  
> -	sts = sys_read(read_fd, &req, sizeof(struct tevent_req *));
> +	sts = sys_read(read_fd, &state, sizeof(struct glusterfs_aio_state *));
> +
> +	if (state->cancelled) {
> +		goto done;
> +	}
> +
> +	req = state->req;
> +
>  	if (sts < 0) {
>  		DEBUG(0,("\nRead from pipe failed (%s)", strerror(errno)));
>  	}

We need to have the if (sts < 0) check before the use of
state->cancelled and state->req above, otherwise we can
dereference 'state' when the read failed and left it uninitialized.
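
i.e. move the error check up, something like this (untested sketch,
also using a plain return - see my next comment below):

        sts = sys_read(read_fd, &state, sizeof(struct glusterfs_aio_state *));
        if (sts < 0) {
                DEBUG(0,("\nRead from pipe failed (%s)", strerror(errno)));
                return;
        }

        /* Only safe to dereference state after a successful read. */
        if (state->cancelled) {
                TALLOC_FREE(state);
                return;
        }

        req = state->req;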

> +	/* if we've cancelled the op, there is no req, so just clean up. */
> +
> +	if (state->cancelled == true) {
> +		TALLOC_FREE(state);
> +		goto done;
> +	}
> +
>  	if (req) {
>  		tevent_req_done(req);
>  	}
> +
> + done:
>  	return;
>  }

Do we really need the goto and the done: label ? I'd prefer
immediate return; statements here (early return is always
nicer to me).
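
i.e. (sketch):

        /* if we've cancelled the op, there is no req, so just clean up. */
        if (state->cancelled) {
                TALLOC_FREE(state);
                return;
        }

        if (req) {
                tevent_req_done(req);
        }
}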

> @@ -581,6 +614,7 @@ static bool init_gluster_aio(struct vfs_handle_struct *handle)
>  	}
>  
>  	ret = pipe(fds);
> +

Unneeded whitespace change :-).

>  	if (ret == -1) {
>  		goto fail;
>  	}
> @@ -610,28 +644,72 @@ fail:
>  	return false;
>  }
>  
> -static struct tevent_req *vfs_gluster_pread_send(struct vfs_handle_struct
> -						 *handle, TALLOC_CTX *mem_ctx,
> -						 struct tevent_context *ev,
> -						 files_struct *fsp, void *data,
> -						 size_t n, off_t offset)
> +typedef int (*gluster_aio_p_fn)(glfs_fd_t *,
> +				void *,
> +				size_t,
> +				off_t,
> +				int,
> +				glfs_io_cbk,
> +				void *);
> +
> +
> +static struct glusterfs_aio_state *aio_state_create(TALLOC_CTX *mem_ctx)
>  {
>  	struct tevent_req *req = NULL;
>  	struct glusterfs_aio_state *state = NULL;
> -	int ret = 0;
> +	struct glusterfs_aio_wrapper *wrapper = NULL;
> +
> +	req = tevent_req_create(mem_ctx, &wrapper, struct glusterfs_aio_wrapper);
>  
> -	req = tevent_req_create(mem_ctx, &state, struct glusterfs_aio_state);
>  	if (req == NULL) {
>  		return NULL;
>  	}
>  
> +	state = talloc(NULL, struct glusterfs_aio_state);
> +
> +	if (state == NULL) {
> +		TALLOC_FREE(req);
> +		return NULL;
> +	}
> +
> +	state->cancelled = false;
> +	state->ret = false;
> +	state->err = false;
> +	state->req = req;
> +
> +	wrapper->state = state;
> +
> +	return state;
> +}
> +
> +
> +static struct tevent_req *vfs_gluster_pwrite_send(struct vfs_handle_struct
> +						  *handle, TALLOC_CTX *mem_ctx,
> +						  struct tevent_context *ev,
> +						  files_struct *fsp,
> +						  const void *data, size_t n,
> +						  off_t offset)
> +{
> +	struct glusterfs_aio_state *state = NULL;
> +	struct tevent_req *req = NULL;

Do we need the extra 'req' variable here ? As we only
refer to it a couple of times, can't we just use state->req
in both places instead ?
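
i.e. something like this (untested, just dropping the local):

        if (!init_gluster_aio(handle)) {
                tevent_req_error(state->req, EIO);
                return tevent_req_post(state->req, ev);
        }

        ret = glfs_pwrite_async(*(glfs_fd_t **)VFS_FETCH_FSP_EXTENSION(handle, fsp),
                                data, n, offset, 0, aio_glusterfs_done, state);

        if (ret < 0) {
                tevent_req_error(state->req, -ret);
                return tevent_req_post(state->req, ev);
        }

        return state->req;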

> +	int ret = 0;
> +
> +	state = aio_state_create(mem_ctx);
> +
> +	if (state == NULL) {
> +		return NULL;
> +	}
> +
> +	req = state->req;
> +
>  	if (!init_gluster_aio(handle)) {
>  		tevent_req_error(req, EIO);
>  		return tevent_req_post(req, ev);
>  	}
> -	ret = glfs_pread_async(*(glfs_fd_t **)VFS_FETCH_FSP_EXTENSION(handle,
> -				fsp), data, n, offset, 0, aio_glusterfs_done,
> -				req);
> +
> +	ret = glfs_pwrite_async(*(glfs_fd_t **)VFS_FETCH_FSP_EXTENSION(handle, fsp),
> +			       data, n, offset, 0, aio_glusterfs_done, state);
> +
>  	if (ret < 0) {
>  		tevent_req_error(req, -ret);
>  		return tevent_req_post(req, ev);
> @@ -640,53 +718,68 @@ static struct tevent_req *vfs_gluster_pread_send(struct vfs_handle_struct
>  	return req;
>  }
>  
> -static ssize_t vfs_gluster_write(struct vfs_handle_struct *handle,
> -				 files_struct *fsp, const void *data, size_t n)
> -{
> -	return glfs_write(*(glfs_fd_t **)VFS_FETCH_FSP_EXTENSION(handle, fsp), data, n, 0);
> -}
> -
> -static ssize_t vfs_gluster_pwrite(struct vfs_handle_struct *handle,
> -				  files_struct *fsp, const void *data,
> -				  size_t n, off_t offset)
> -{
> -	return glfs_pwrite(*(glfs_fd_t **)VFS_FETCH_FSP_EXTENSION(handle, fsp), data, n, offset, 0);
> -}
> -
> -static struct tevent_req *vfs_gluster_pwrite_send(struct vfs_handle_struct
> +static struct tevent_req *vfs_gluster_pread_send(struct vfs_handle_struct
>  						  *handle, TALLOC_CTX *mem_ctx,
>  						  struct tevent_context *ev,
>  						  files_struct *fsp,
> -						  const void *data, size_t n,
> +						  void *data, size_t n,
>  						  off_t offset)
>  {
> -	struct tevent_req *req = NULL;
>  	struct glusterfs_aio_state *state = NULL;
> +	struct tevent_req *req = NULL;

Same deal as above with the extra 'req' variable.

>  	int ret = 0;
>  
> -	req = tevent_req_create(mem_ctx, &state, struct glusterfs_aio_state);
> -	if (req == NULL) {
> +	state = aio_state_create(mem_ctx);
> +
> +	if (state == NULL) {
>  		return NULL;
>  	}
> +
> +	req = state->req;
> +
>  	if (!init_gluster_aio(handle)) {
>  		tevent_req_error(req, EIO);
>  		return tevent_req_post(req, ev);
>  	}
> -	ret = glfs_pwrite_async(*(glfs_fd_t **)VFS_FETCH_FSP_EXTENSION(handle,
> -				fsp), data, n, offset, 0, aio_glusterfs_done,
> -				req);
> +
> +	ret = glfs_pread_async(*(glfs_fd_t **)VFS_FETCH_FSP_EXTENSION(handle, fsp),
> +			       data, n, offset, 0, aio_glusterfs_done, state);
> +
>  	if (ret < 0) {
> -		tevent_req_error(req, -ret);
>  		return tevent_req_post(req, ev);
> +		tevent_req_error(req, -ret);

I think the two lines above are transposed.
Should be:

                tevent_req_error(req, -ret);
                return tevent_req_post(req, ev);

as in the vfs_gluster_pread_send() code.

>  	}
> +
>  	return req;
>  }
>  
> +static ssize_t vfs_gluster_write(struct vfs_handle_struct *handle,
> +				 files_struct *fsp, const void *data, size_t n)
> +{
> +	return glfs_write(*(glfs_fd_t **)VFS_FETCH_FSP_EXTENSION(handle, fsp), data, n, 0);
> +}
> +
> +static ssize_t vfs_gluster_pwrite(struct vfs_handle_struct *handle,
> +				  files_struct *fsp, const void *data,
> +				  size_t n, off_t offset)
> +{
> +	return glfs_pwrite(*(glfs_fd_t **)VFS_FETCH_FSP_EXTENSION(handle, fsp), data, n, offset, 0);
> +}
> +
>  static ssize_t vfs_gluster_recv(struct tevent_req *req, int *err)
>  {
>  	struct glusterfs_aio_state *state = NULL;
> +	struct glusterfs_aio_wrapper *wrapper = NULL;
> +	int ret = 0;
> +
> +	wrapper = tevent_req_data(req, struct glusterfs_aio_wrapper);
> +
> +	if (wrapper == NULL) {
> +		return -1;
> +	}
> +
> +	state = wrapper->state;
>  
> -	state = tevent_req_data(req, struct glusterfs_aio_state);
>  	if (state == NULL) {
>  		return -1;
>  	}
> @@ -697,7 +790,14 @@ static ssize_t vfs_gluster_recv(struct tevent_req *req, int *err)
>  	if (state->ret == -1) {
>  		*err = state->err;
>  	}
> -	return state->ret;
> +
> +	ret = state->ret;
> +
> +	/* Clean up the state, it is in a NULL context. */
> +
> +	TALLOC_FREE(state);
> +
> +	return ret;
>  }
>  
>  static off_t vfs_gluster_lseek(struct vfs_handle_struct *handle,
> @@ -746,20 +846,26 @@ static struct tevent_req *vfs_gluster_fsync_send(struct vfs_handle_struct
>  	struct glusterfs_aio_state *state = NULL;
>  	int ret = 0;
>  
> -	req = tevent_req_create(mem_ctx, &state, struct glusterfs_aio_state);
> -	if (req == NULL) {
> +	state = aio_state_create(mem_ctx);
> +
> +	if (state == NULL) {
>  		return NULL;
>  	}
> +
> +	req = state->req;
> +
>  	if (!init_gluster_aio(handle)) {
>  		tevent_req_error(req, EIO);
>  		return tevent_req_post(req, ev);
>  	}
>  	ret = glfs_fsync_async(*(glfs_fd_t **)VFS_FETCH_FSP_EXTENSION(handle,
>  				fsp), aio_glusterfs_done, req);
> +

Do we really need whitespace changes here ?

>  	if (ret < 0) {
>  		tevent_req_error(req, -ret);
>  		return tevent_req_post(req, ev);
>  	}
> +

and here ?

>  	return req;
>  }
>  
> -- 
> 2.5.0
> 
