[linux-cifs-client] question about cifs performance (and asynchronous read)

James Roper u3205097 at alumni.anu.edu.au
Sun Apr 3 00:42:02 GMT 2005


I should have posted this patch to the list back in October. I received 
a stern talking-to from abartlett last night about how code should 
always be posted to mailing lists as soon as you've written it, so that 
even if you're not sure it's ready to be merged, someone else might pick 
it up, fix it up and merge it :)

Anyway, this patch implements asynchronous reads. It was created 
against the linux-2.5cifs bitkeeper source back in October 2004, so I 
don't think it will apply cleanly against the latest bitkeeper source. 
When I find the time, I'll get it working, unless someone updates it first.
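
In case it helps anyone who doesn't want to wade through the BitKeeper 
deltas below: the core of the patch is CIFSSMBReadAsync, which queues a 
batch of READ_ANDX requests, keeps up to CIFS_MAX_REQ of them on the 
wire at once, and reaps responses as they arrive, rather than waiting 
for each read to finish before sending the next. The second changeset 
also adds async/noasync mount options and an AsyncRoutinesEnabled proc 
entry so the async and sync readpages paths can be switched at runtime. 
Below is a rough userspace sketch of just the pipelining idea (this is 
not the kernel code; fake_send() and fake_poll_response() are made-up 
placeholders, not CIFS functions):

/* Minimal userspace sketch of the pipelining idea only - NOT kernel code.
 * fake_send() and fake_poll_response() are placeholders, not CIFS APIs. */
#include <stdio.h>

#define MAX_IN_FLIGHT 50 /* mirrors CIFS_MAX_REQ in the patch */

enum state { ALLOCATED, SENT, HANDLED };

struct req {
	unsigned long long offset; /* like async_rw_q_entry.lseek */
	unsigned int count;        /* bytes requested */
	enum state state;
};

/* Pretend to put a request on the wire. */
static void fake_send(struct req *r) { r->state = SENT; }

/* Pretend to check whether the response for r has arrived. */
static int fake_poll_response(struct req *r) { return r->state == SENT; }

int main(void)
{
	enum { NREQ = 8 };
	struct req q[NREQ];
	unsigned int i, sent = 0, received = 0, in_flight = 0;

	for (i = 0; i < NREQ; i++) {
		q[i].offset = (unsigned long long)i * 4096;
		q[i].count = 4096;
		q[i].state = ALLOCATED;
	}

	/* Same overall shape as the loop in CIFSSMBReadAsync: keep going
	 * until every request has been sent and its response handled. */
	while (received < NREQ) {
		/* Send phase: push requests while under the in-flight limit,
		 * instead of waiting for each response before the next send. */
		while (sent < NREQ && in_flight < MAX_IN_FLIGHT) {
			fake_send(&q[sent]);
			sent++;
			in_flight++;
		}
		/* Receive phase: reap whichever responses have come back. */
		for (i = 0; i < NREQ; i++) {
			if (q[i].state == SENT && fake_poll_response(&q[i])) {
				q[i].state = HANDLED;
				received++;
				in_flight--;
				printf("handled read at offset %llu\n",
				       q[i].offset);
			}
		}
	}
	return 0;
}

The real code of course has to deal with timeouts, -EAGAIN reconnects 
and buffer cleanup on top of this, which is where most of the patch goes.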

James
-------------- next part --------------
# User:	jazzy
# Host:	jazztower.(none)
# Root:	/usr/src/linux-2.5cifs

# Patch vers:	1.3
# Patch type:	REGULAR

== ChangeSet ==
torvalds at athlon.transmeta.com|ChangeSet|20020205173056|16047|c1d11a41ed024864
torvalds at ppc970.osdl.org|ChangeSet|20040915004007|09309
D 1.1902 04/10/11 15:26:10+10:00 jazzy at jazztower.(none) +6 -0
B torvalds at athlon.transmeta.com|ChangeSet|20020205173056|16047|c1d11a41ed024864
C
c Made cifs_readpages send requests asynchronously.
K 8380
P ChangeSet
------------------------------------------------

0a0
> stevef at smfhome1.austin.rr.com|fs/cifs/cifssmb.c|20021010191611|36727|9fad6fb6f0967e9 jazzy at jazztower.(none)|fs/cifs/cifssmb.c|20041011052504|42671
> stevef at smfhome1.austin.rr.com|fs/cifs/file.c|20021010191612|51622|842460dea0f28ec1 jazzy at jazztower.(none)|fs/cifs/file.c|20041011052505|54767
> stevef at smfhome1.austin.rr.com|fs/cifs/transport.c|20021010191615|19666|bf11e1437f427aed jazzy at jazztower.(none)|fs/cifs/transport.c|20041011052508|41379
> stevef at smfhome1.austin.rr.com|fs/cifs/cifsproto.h|20021010191611|08044|ef9e47c552b5f72e jazzy at jazztower.(none)|fs/cifs/cifsproto.h|20041011052503|32195
> stevef at smfhome1.austin.rr.com|fs/cifs/cifsglob.h|20021010191610|14882|d5b42ff3d85dcd24 jazzy at jazztower.(none)|fs/cifs/cifsglob.h|20041011052502|20038
> stevef at smfhome1.austin.rr.com|fs/cifs/cifsfs.c|20021010191610|37452|2c60dc52e1f2fcd8 jazzy at jazztower.(none)|fs/cifs/cifsfs.c|20041011052458|34184

jazzy at jazztower.(none)|ChangeSet|20041011052610|08380
D 1.1903 04/10/12 18:44:28+10:00 jazzy at jazztower.(none) +7 -0
B torvalds at athlon.transmeta.com|ChangeSet|20020205173056|16047|c1d11a41ed024864
C
c Added the ability to select whether async or sync readpages was used, through proc and mount options.
c Added peakMids in use data
c General clean up
K 8504
P ChangeSet
------------------------------------------------

0a0
> stevef at smfhome1.austin.rr.com|fs/cifs/cifssmb.c|20021010191611|36727|9fad6fb6f0967e9 jazzy at jazztower.(none)|fs/cifs/cifssmb.c|20041012084418|39076
> stevef at smfhome1.austin.rr.com|fs/cifs/cifs_debug.c|20021010191609|40602|a3cfed1d970a5a9c jazzy at jazztower.(none)|fs/cifs/cifs_debug.c|20041012084418|36079
> stevef at smfhome1.austin.rr.com|fs/cifs/file.c|20021010191612|51622|842460dea0f28ec1 jazzy at jazztower.(none)|fs/cifs/file.c|20041012084418|36894
> stevef at smfhome1.austin.rr.com|fs/cifs/transport.c|20021010191615|19666|bf11e1437f427aed jazzy at jazztower.(none)|fs/cifs/transport.c|20041012084418|56813
> stevef at smfhome1.austin.rr.com|fs/cifs/connect.c|20021010191612|08981|d046b102ee6decf7 jazzy at jazztower.(none)|fs/cifs/connect.c|20041012084418|19122
> stevef at smfhome1.austin.rr.com|fs/cifs/cifsglob.h|20021010191610|14882|d5b42ff3d85dcd24 jazzy at jazztower.(none)|fs/cifs/cifsglob.h|20041012084418|31577
> stevef at smfhome1.austin.rr.com|fs/cifs/cifsfs.c|20021010191610|37452|2c60dc52e1f2fcd8 jazzy at jazztower.(none)|fs/cifs/cifsfs.c|20041012084418|42688

== fs/cifs/cifs_debug.c ==
stevef at smfhome1.austin.rr.com|fs/cifs/cifs_debug.c|20021010191609|40602|a3cfed1d970a5a9c
stevef at smfhome.smfdom|fs/cifs/cifs_debug.c|20040711233415|07486
D 1.29 04/10/12 18:44:18+10:00 jazzy at jazztower.(none) +52 -0
B torvalds at athlon.transmeta.com|ChangeSet|20020205173056|16047|c1d11a41ed024864
C
c Added proc entry to control async routines
c Added peak mids data
K 36079
O -rw-rw-r--
P fs/cifs/cifs_debug.c
------------------------------------------------

I207 5
	item_length = 
		sprintf(buf,"Peak Simultaneous Operations: %d\n",
			peakMids.counter);
	length += item_length;
	buf += item_length;
I303 2
static read_proc_t async_routines_enabled_read;
static write_proc_t async_routines_enabled_write;
I376 6
\
	pde =
		create_proc_read_entry("AsyncRoutinesEnabled", 0, proc_fs_cifs,
				async_routines_enabled_read, NULL);
	if (pde)
		pde->write_proc = async_routines_enabled_write;
I398 1
	remove_proc_entry("AsyncRoutinesEnabled",proc_fs_cifs);
I795 38
static int
async_routines_enabled_read(char *page, char **start, off_t off,
		       int count, int *eof, void *data)
{
	int len;
\
	len = sprintf(page, "%d\n", async_routines_enabled);
\
	len -= off;
	*start = page + off;
\
	if (len > count)
		len = count;
	else
		*eof = 1;
\
	if (len < 0)
		len = 0;
\
	return len;
}
static int
async_routines_enabled_write(struct file *file, const char __user *buffer,
			unsigned long count, void *data)
{
	char c;
	int rc;
\
	rc = get_user(c, buffer);
	if (rc)
		return rc;
	if (c == '0' || c == 'n' || c == 'N')
		async_routines_enabled = 0;
	else if (c == '1' || c == 'y' || c == 'Y')
		async_routines_enabled = 1;
\
	return count;
}

== fs/cifs/cifsfs.c ==
stevef at smfhome1.austin.rr.com|fs/cifs/cifsfs.c|20021010191610|37452|2c60dc52e1f2fcd8
stevef at smfhome1.smfdom|fs/cifs/cifsfs.c|20040902043503|58872
D 1.70 04/10/11 15:24:58+10:00 jazzy at jazztower.(none) +14 -1
B torvalds at athlon.transmeta.com|ChangeSet|20020205173056|16047|c1d11a41ed024864
C
c Added async cachep
K 34184
O -rw-rw-r--
P fs/cifs/cifsfs.c
------------------------------------------------

I209 1
kmem_cache_t *cifs_async_cachep;
D640 1
I640 9
	cifs_async_cachep = kmem_cache_create("cifs_async_seq_ids",
			    sizeof (struct async_rw_q_entry), 0,
				SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (cifs_async_cachep == NULL) {
		kmem_cache_destroy(cifs_mid_cachep);
		kmem_cache_destroy(cifs_oplock_cachep);
	    mempool_destroy(cifs_mid_poolp);
		return -ENOMEM;
		}
I654 4
	if (kmem_cache_destroy(cifs_async_cachep))
		printk(KERN_WARNING
				"cifs_destroy_mids: error not all asyncs were freed\n");
		

jazzy at jazztower.(none)|fs/cifs/cifsfs.c|20041011052458|34184
D 1.71 04/10/12 18:44:18+10:00 jazzy at jazztower.(none) +4 -0
B torvalds at athlon.transmeta.com|ChangeSet|20020205173056|16047|c1d11a41ed024864
C
c Added initialisation for async proc stuff
K 42688
O -rw-rw-r--
P fs/cifs/cifsfs.c
------------------------------------------------

I59 1
unsigned int async_routines_enabled = 1;
I760 3
#ifdef CONFIG_CIFS_STATS
	atomic_set(&peakMids, 0);
#endif

== fs/cifs/cifsglob.h ==
stevef at smfhome1.austin.rr.com|fs/cifs/cifsglob.h|20021010191610|14882|d5b42ff3d85dcd24
torvalds at ppc970.osdl.org|fs/cifs/cifsglob.h|20040824005702|39697
D 1.36 04/10/11 15:25:02+10:00 jazzy at jazztower.(none) +17 -0
B torvalds at athlon.transmeta.com|ChangeSet|20020205173056|16047|c1d11a41ed024864
C
c Added async_rw_q_entry and states
K 20038
O -rw-rw-r--
P fs/cifs/cifsglob.h
------------------------------------------------

I307 17
/* one of these for every read request sent by a read pages
 * routine */
struct async_rw_q_entry {
    struct list_head qhead; /* other entries from this command */
    struct mid_q_entry * midQ;   /* the mid for this request */
    char *buf; /* response buffer */
    unsigned int count; /* number of bytes to read */
    __u64 lseek;    /* offset */
    unsigned int nbytes;   /* number of bytes read */
    int state;   /* sequence number for multiplexing */
};
\
#define ASYNC_ALLOCATED 1
#define ASYNC_SENT  2
#define ASYNC_HANDLED  4
#define ASYNC_IN_FLIGHT 8
\

jazzy at jazztower.(none)|fs/cifs/cifsglob.h|20041011052502|20038
D 1.37 04/10/12 18:44:18+10:00 jazzy at jazztower.(none) +2 -0
B torvalds at athlon.transmeta.com|ChangeSet|20020205173056|16047|c1d11a41ed024864
C
c Added async proc defs
K 31577
O -rw-rw-r--
P fs/cifs/cifsglob.h
------------------------------------------------

I419 1
GLOBAL_EXTERN atomic_t peakMids;
I433 1
GLOBAL_EXTERN unsigned int async_routines_enabled; /* enable reading/writing asynchronously */

== fs/cifs/cifsproto.h ==
stevef at smfhome1.austin.rr.com|fs/cifs/cifsproto.h|20021010191611|08044|ef9e47c552b5f72e
stevef at smfhome.smfdom|fs/cifs/cifsproto.h|20040819134238|54525
D 1.33 04/10/11 15:25:03+10:00 jazzy at jazztower.(none) +10 -0
B torvalds at athlon.transmeta.com|ChangeSet|20020205173056|16047|c1d11a41ed024864
C
c Prototypes for ReceiveMsg, SendMsg, DeleteMidQEntry, and CIFSSMBReadAsync
K 32195
O -rw-rw-r--
P fs/cifs/cifsproto.h
------------------------------------------------

I47 8
extern int ReceiveMsg(const unsigned int /* xid */, struct cifsSesInfo *,
            struct smb_hdr * /* output */ , int * /*bytes returned */ ,
            struct mid_q_entry *);
extern int SendMsg(const unsigned int /* xid */, struct cifsSesInfo *,
            struct smb_hdr * /* input */ , struct mid_q_entry **,
            const int long_op);
extern void DeleteMidQEntry(struct mid_q_entry *);
\
I186 2
extern int CIFSSMBReadAsync(const int xid, struct cifsTconInfo *tcon,
            const int netfid, struct list_head *async_q);

== fs/cifs/cifssmb.c ==
stevef at smfhome1.austin.rr.com|fs/cifs/cifssmb.c|20021010191611|36727|9fad6fb6f0967e9
torvalds at ppc970.osdl.org|fs/cifs/cifssmb.c|20040912031937|55397
D 1.82 04/10/11 15:25:04+10:00 jazzy at jazztower.(none) +169 -0
B torvalds at athlon.transmeta.com|ChangeSet|20020205173056|16047|c1d11a41ed024864
C
c Added CIFSSMBReadAsync
K 42671
O -rw-rw-r--
P fs/cifs/cifssmb.c
------------------------------------------------

I662 169
int
CIFSSMBReadAsync(const int xid, struct cifsTconInfo *tcon,
	    const int netfid, struct list_head *async_q)
{
	int rc = -EACCES;
	READ_REQ *pSMB = NULL;
	READ_RSP *pSMBr = NULL;
	int sent=0, received=0;
	int response_obtained, request_obtained; 
	/* Whether this iteration of the loop has sent/received a response */
	struct async_rw_q_entry *response;
	struct async_rw_q_entry *request;
	int timeout = 0;
\
	request = list_entry(async_q->next, struct async_rw_q_entry, qhead);
	/* Loop until every request has been sent and received */
	while (&request->qhead != async_q || received != sent) {
		request_obtained = 0;
		/* If we haven't sent everything, send a request.  Sending up to the
		 * maximum number of requests allowed is ok for samba, as requests
		 * just sit in the tcp buffer; in fact it is probably most efficient
		 * (according to tridge), and windows is probably similar.  We do
		 * however want to be friendly to other processes, so we won't have
		 * more than one request waiting in the request_q */
		if (&request->qhead != async_q) {	
		/* Join request_q. We only want to block if this is the only request
		 * from us in flight, otherwise we want to handle responses */
			spin_lock(&GlobalMid_Lock);
			while(1) {
				if(atomic_read(&tcon->ses->server->inFlight) >= CIFS_MAX_REQ) {
					spin_unlock(&GlobalMid_Lock);
					if (sent == received) {
						wait_event(tcon->ses->server->request_q,
							atomic_read(&tcon->ses->server->inFlight) 
							< CIFS_MAX_REQ);
						spin_lock(&GlobalMid_Lock);
					} else {
						break;
					}	
				} else {
					if(tcon->ses->server->tcpStatus == CifsExiting) {
						spin_unlock(&GlobalMid_Lock);
						rc = -ENOENT;
						goto cifs_read_async_cleanup;
					}
\
					/* update # of requests on the wire to server */
					atomic_inc(&tcon->ses->server->inFlight);
					spin_unlock(&GlobalMid_Lock);
					request_obtained = 1;
					break;
				}
			}
		}
		if (request_obtained)
		{
			request->state = ASYNC_IN_FLIGHT;
			request->nbytes = 0;
			rc = smb_init(SMB_COM_READ_ANDX, 12, tcon, (void **) &pSMB,
				(void **) &pSMBr);
			if (rc)
				goto cifs_read_async_cleanup;
			request->buf = (char *) pSMBr;
			/* tcon and ses pointer are checked in smb_init */
			if (tcon->ses->server == NULL) {
				rc = -ECONNABORTED;
				goto cifs_read_async_cleanup;
			}
			pSMB->AndXCommand = 0xFF;	/* none */
			pSMB->Fid = netfid;
			pSMB->OffsetLow = cpu_to_le32(request->lseek & 0xFFFFFFFF);
			pSMB->OffsetHigh = cpu_to_le32(request->lseek >> 32);
			pSMB->Remaining = 0;
			pSMB->MaxCount = cpu_to_le16(request->count);
			pSMB->MaxCountHigh = 0;
			pSMB->ByteCount = 0;  /* no need to do le conversion since it is 0 */
\
			rc = SendMsg(xid, tcon->ses, (struct smb_hdr *) pSMB, 
					&request->midQ, 0);
			if (rc) {
				cERROR(1, ("Send error in read = %d", rc));
				request->state = ASYNC_ALLOCATED;
				goto cifs_read_async_cleanup;
			}
			request->state = ASYNC_SENT;
			sent++;
			/* Iterate to next request in async_q */
			request = list_entry(request->qhead.next, struct async_rw_q_entry, qhead);
				
		} 
		
		response_obtained = 0;
		/* Iterate through async_q, handling any received responses */
		list_for_each_entry(response, async_q, qhead) {
			if (!(response->state & ASYNC_SENT)) continue;
			if (!(response->midQ->midState & MID_RESPONSE_RECEIVED)) continue;
			response_obtained++;
			pSMBr = (READ_RSP *) response->buf;
			rc = ReceiveMsg(xid, tcon->ses, (struct smb_hdr *) pSMBr, 
					&response->nbytes, response->midQ);
			if (rc)
				goto cifs_read_async_cleanup;
			
			/* Delete midQ and decrement number of requests on wire */
			DeleteMidQEntry(response->midQ);
			atomic_dec(&tcon->ses->server->inFlight);
			wake_up(&tcon->ses->server->request_q);
			
			received++;
			response->state = ASYNC_HANDLED;
			
			pSMBr->DataLength = le16_to_cpu(pSMBr->DataLength);
			response->nbytes = pSMBr->DataLength;
			/*check that DataLength would not go beyond end of SMB */
			if ((response->nbytes > CIFS_MAX_MSGSIZE) 
					|| (response->nbytes > response->count)) {
				cFYI(1,("bad length %d for count %d",
						response->nbytes,response->count));
				rc = -EIO;
				response->nbytes = 0;
				goto cifs_read_async_cleanup;
			}
		}
\
		/* Timing out is done like so:
		 * The timeout is 15 seconds.  Every time we either receive a response
		 * or send a request, we reset timeout to 15 seconds.  If we don't
		 * send or receive, then we want to timeout, using schedule_timeout,
		 * which will decrement timeout.  Once timeout reaches 0, we cleanup
		 * and exit with an error */
		if (response_obtained == 0 && 
				request_obtained == 0 && !signal_pending(current)) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			timeout = schedule_timeout(timeout);
			if (!timeout) {
				/* If we reach 0 timeout without receiving a signal, cleanup */
				cFYI(1, ("Time out for async responses"));
				if (tcon->ses->server->tcpStatus == CifsExiting)
					rc = -EHOSTDOWN;
				else if (tcon->ses->server->tcpStatus == CifsNeedReconnect)
					rc = -EAGAIN;
				else rc = -EIO;
				goto cifs_read_async_cleanup;
			} 
		} else {
			/* Reset timeout to 15 seconds */
			timeout = 15 * HZ;
		}
	}
\
	return rc;
	
	
cifs_read_async_cleanup:
	list_for_each_entry(request, async_q, qhead) {
		if (request->state & ASYNC_SENT)
			/* means there's a midQ entry */
			DeleteMidQEntry(request->midQ);
		if (request->state & (ASYNC_SENT | ASYNC_IN_FLIGHT)) {
			/* Get out of request_q */
			atomic_dec(&tcon->ses->server->inFlight);
			wake_up(&tcon->ses->server->request_q);
		}
		/* Reset state */
		request->state = ASYNC_ALLOCATED;
	}
	return rc;
}
\

jazzy at jazztower.(none)|fs/cifs/cifssmb.c|20041011052504|42671
D 1.83 04/10/12 18:44:18+10:00 jazzy at jazztower.(none) +1 -4
B torvalds at athlon.transmeta.com|ChangeSet|20020205173056|16047|c1d11a41ed024864
C
c Cleanup
K 39076
O -rw-rw-r--
P fs/cifs/cifssmb.c
------------------------------------------------

D752 2
I753 1
		}
D786 1
D799 1

== fs/cifs/connect.c ==
stevef at smfhome1.austin.rr.com|fs/cifs/connect.c|20021010191612|08981|d046b102ee6decf7
stevef at smfhome.smfdom|fs/cifs/connect.c|20040827044659|06614
D 1.102 04/10/12 18:44:18+10:00 jazzy at jazztower.(none) +4 -0
B torvalds at athlon.transmeta.com|ChangeSet|20020205173056|16047|c1d11a41ed024864
C
c Added mount options
K 19122
O -rw-rw-r--
P fs/cifs/connect.c
------------------------------------------------

I784 4
		} else if (strnicmp(data, "async", 5) == 0) {
			async_routines_enabled = 1;
		} else if (strnicmp(data, "noasync", 7) == 0) {
			async_routines_enabled = 0;

== fs/cifs/file.c ==
stevef at smfhome1.austin.rr.com|fs/cifs/file.c|20021010191612|51622|842460dea0f28ec1
stevef at smfhome.smfdom|fs/cifs/file.c|20040827044659|09972
D 1.100 04/10/11 15:25:05+10:00 jazzy at jazztower.(none) +171 -137
B torvalds at athlon.transmeta.com|ChangeSet|20020205173056|16047|c1d11a41ed024864
C
c Converted cifs_readpages to asynchronous
K 54767
O -rw-rw-r--
P fs/cifs/file.c
------------------------------------------------

I37 2
extern kmem_cache_t *cifs_async_cachep;
\
D1100 1
I1100 1
        struct list_head *page_list, unsigned num_pages)
D1102 64
I1165 51
    int rc = -EACCES;
    int xid;
    struct page * page;
    struct cifs_sb_info *cifs_sb;
    struct cifsTconInfo *pTcon;
    unsigned int i, max_pages;
    struct smb_com_read_rsp * pSMBr;
    struct pagevec lru_pvec;
    struct cifsFileInfo * open_file;
    struct list_head async_rw_q;
    struct async_rw_q_entry *temp;
    struct list_head *tmp_page_list;
\
    xid = GetXid();
    if (file->private_data == NULL) {
        FreeXid(xid);
        return -EBADF;
    }
    open_file = (struct cifsFileInfo *)file->private_data;
    cifs_sb = CIFS_SB(file->f_dentry->d_sb);
    pTcon = cifs_sb->tcon;
\
    pagevec_init(&lru_pvec, 0);
	/* tmp_page_list is used to iterate through page_list without modifying
	 * page_list.  Could be a better way of doing this... */
    tmp_page_list = page_list;
\
	/* The maximum number of pages that we will request */
	max_pages = (cifs_sb->rsize & PAGE_CACHE_MASK) >> PAGE_CACHE_SHIFT;
\
    INIT_LIST_HEAD(&async_rw_q);
\
    for(i = 0;i<num_pages;) {
        unsigned contig_pages;
        unsigned long expected_index;
\
        if(list_empty(page_list)) {
            break;
        }
		
		/* Allocate memory for async request */
        temp = (struct async_rw_q_entry *) kmem_cache_alloc(cifs_async_cachep,
                SLAB_KERNEL);
\
        if (temp == NULL) {
            /* TODO: Cleanup, I think I need to free the page cache */
			cFYI(1, ("Unable to obtain async_q_entry"));
			FreeXid(xid);
			return rc;
        }
        list_add_tail(&temp->qhead, &async_rw_q);
D1167 26
I1192 68
        temp->buf = NULL;
        temp->midQ = NULL;
        temp->nbytes = 0;
        temp->state = ASYNC_ALLOCATED;
        page = list_entry(tmp_page_list->prev, struct page, lru);
        temp->lseek = (loff_t)page->index << PAGE_CACHE_SHIFT;
\
\
        /* count adjacent pages that we will read into */
        contig_pages = 0;
        expected_index = 
			list_entry(tmp_page_list->prev,struct page,lru)->index;
		while (list_entry(tmp_page_list->prev, struct page, lru)->index 
				== expected_index && contig_pages < max_pages) {
            tmp_page_list = tmp_page_list->prev;
            contig_pages++;
            expected_index++;
        }
        if(contig_pages + i >  num_pages) {
            contig_pages = num_pages - i;
        }
\
        i += contig_pages;
\
        temp->count = contig_pages * PAGE_CACHE_SIZE;
    }
\
    rc = -EAGAIN;
    while(rc == -EAGAIN) {
        if ((open_file->invalidHandle) && (!open_file->closePend)) {
            rc = cifs_reopen_file(file->f_dentry->d_inode,
                file, TRUE);
            if(rc != 0)
                break;
        }
        rc = CIFSSMBReadAsync(xid, pTcon,
                open_file->netfid, &async_rw_q);
        if(rc== -EAGAIN) {
            list_for_each_entry(temp, &async_rw_q, qhead) {
				/* There may be some buffers, release them */
                if (temp->buf != NULL) {
                    cifs_buf_release(temp->buf);
                    temp->buf = NULL;
                }
            }
        }
    }
    
	i = 0;
	/* Handle each response */
    list_for_each_entry(temp, &async_rw_q, qhead) {
        if(list_empty(page_list)) {
            break;
        }
        if ((rc < 0) || (temp->buf == NULL)) {
            cFYI(1,("Read error in readpages: %d",rc));
            /* clean up remaining pages off list */
            while (!list_empty(page_list) && (i < num_pages)) {
                page = list_entry(page_list->prev, struct page, lru);
                list_del(&page->lru);
                page_cache_release(page);
            }
            break;
        } else if (temp->nbytes > 0) {
            pSMBr = (struct smb_com_read_rsp *)temp->buf;
            cifs_copy_cache_pages(mapping, page_list, temp->nbytes,
                temp->buf + 4 /* RFC1001 hdr */ +
                le16_to_cpu(pSMBr->DataOffset), &lru_pvec);
D1194 1
I1194 1
            i += temp->nbytes >> PAGE_CACHE_SHIFT;
D1196 4
I1199 4
            atomic_inc(&pTcon->num_reads);
            spin_lock(&pTcon->stat_lock);
            pTcon->bytes_read += temp->nbytes;
            spin_unlock(&pTcon->stat_lock);
D1201 2
I1202 2
            if((int)(temp->nbytes & PAGE_CACHE_MASK) != temp->nbytes) {
                cFYI(1,("Partial page %d of %d read to cache",++i,num_pages));
D1204 1
I1204 1
                i++; /* account for partial page */
D1206 38
I1243 40
            }
        } else {
            cFYI(1,("No bytes read (%d) at offset %lld . Cleaning remaining pages from readahead list",temp->nbytes,temp->lseek));
            /* BB turn off caching and do new lookup on file size at server? */
            while (!list_empty(page_list) && (i < num_pages)) {
                page = list_entry(page_list->prev, struct page, lru);
                list_del(&page->lru);
                page_cache_release(page); /* BB removeme - replace with zero of page? */
            }
            break;
        }
        if(temp->buf) {
            cifs_buf_release(temp->buf);
			/* Important, if there is no allocated memory, it MUST be NULL,
			 * otherwise we will end up trying to free it twice */
			temp->buf = NULL;
        }
    }
\
    pagevec_lru_add(&lru_pvec);
\
/* need to free smb buffers before exit */
\
    while (!list_empty(&async_rw_q)) {
        temp = list_entry(async_rw_q.prev, struct async_rw_q_entry, qhead);
        if(temp->buf) {
            cifs_buf_release(temp->buf);
            temp->buf = NULL;
        }
        list_del(&temp->qhead);
        kmem_cache_free(cifs_async_cachep, temp);
    }
	/* Delete the rest of the pages */
    while (!list_empty(page_list) && (i < num_pages)) {
        page = list_entry(page_list->prev, struct page, lru);
        list_del(&page->lru);
        page_cache_release(page);
    }
    FreeXid(xid);
    return rc;
I1244 1
\

jazzy at jazztower.(none)|fs/cifs/file.c|20041011052505|54767
D 1.101 04/10/12 18:44:18+10:00 jazzy at jazztower.(none) +168 -3
B torvalds at athlon.transmeta.com|ChangeSet|20020205173056|16047|c1d11a41ed024864
C
c Added synchronous call, cleaned up
K 36894
O -rw-rw-r--
P fs/cifs/file.c
------------------------------------------------

I1098 95
static inline int
cifs_readpages_sync(struct file *file, struct address_space *mapping,
		struct list_head *page_list, unsigned num_pages)
{
	int rc = -EACCES;
	int xid;
	loff_t offset;
	struct page * page;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;
	int bytes_read = 0;
	unsigned int read_size,i;
	char * smb_read_data = NULL;
	struct smb_com_read_rsp * pSMBr;
	struct pagevec lru_pvec;
	struct cifsFileInfo * open_file;
\
	xid = GetXid();
	if (file->private_data == NULL) {
		FreeXid(xid);
		return -EBADF;
	}
	open_file = (struct cifsFileInfo *)file->private_data;
	cifs_sb = CIFS_SB(file->f_dentry->d_sb);
	pTcon = cifs_sb->tcon;
\
	pagevec_init(&lru_pvec, 0);
\
	for(i = 0;i<num_pages;) {
		unsigned contig_pages;
		struct page * tmp_page;
		unsigned long expected_index;
\
		if(list_empty(page_list)) {
			break;
		}
		page = list_entry(page_list->prev, struct page, lru);
		offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
\
		/* count adjacent pages that we will read into */
		contig_pages = 0;
		expected_index = list_entry(page_list->prev,struct page,lru)->index;
		list_for_each_entry_reverse(tmp_page,page_list,lru) {
			if(tmp_page->index == expected_index) {
				contig_pages++;
				expected_index++;
			} else {
				break; 
			}
		}
		if(contig_pages + i >  num_pages) {
			contig_pages = num_pages - i;
		}
\
		/* for reads over a certain size could initiate async read ahead */
\
		read_size = contig_pages * PAGE_CACHE_SIZE;
		/* Read size needs to be in multiples of one page */
		read_size = min_t(const unsigned int,read_size,cifs_sb->rsize & PAGE_CACHE_MASK);
\
		rc = -EAGAIN;
		while(rc == -EAGAIN) {
			if ((open_file->invalidHandle) && (!open_file->closePend)) {
				rc = cifs_reopen_file(file->f_dentry->d_inode,
					file, TRUE);
				if(rc != 0)
					break;
			}
\
			rc = CIFSSMBRead(xid, pTcon,
				open_file->netfid,
				read_size, offset,
				&bytes_read, &smb_read_data);
			/* BB need to check return code here */
			if(rc== -EAGAIN) {
				if(smb_read_data) {
					cifs_buf_release(smb_read_data);
					smb_read_data = NULL;
				}
			}
		}
		if ((rc < 0) || (smb_read_data == NULL)) {
			cFYI(1,("Read error in readpages: %d",rc));
			/* clean up remaining pages off list */
			while (!list_empty(page_list) && (i < num_pages)) {
				page = list_entry(page_list->prev, struct page, lru);
				list_del(&page->lru);
				page_cache_release(page);
			}
			break;
		} else if (bytes_read > 0) {
			pSMBr = (struct smb_com_read_rsp *)smb_read_data;
			cifs_copy_cache_pages(mapping, page_list, bytes_read,
				smb_read_data + 4 /* RFC1001 hdr */ +
				le16_to_cpu(pSMBr->DataOffset), &lru_pvec);
D1100 2
I1101 54
			i +=  bytes_read >> PAGE_CACHE_SHIFT;
#ifdef CONFIG_CIFS_STATS
			atomic_inc(&pTcon->num_reads);
			spin_lock(&pTcon->stat_lock);
			pTcon->bytes_read += bytes_read;
			spin_unlock(&pTcon->stat_lock);
#endif
			if((int)(bytes_read & PAGE_CACHE_MASK) != bytes_read) {
				cFYI(1,("Partial page %d of %d read to cache",i++,num_pages));
\
				i++; /* account for partial page */
\
				/* server copy of file can have smaller size than client */
				/* BB do we need to verify this common case ? this case is ok - 
				if we are at server EOF we will hit it on next read */
\
			/* while(!list_empty(page_list) && (i < num_pages)) {
					page = list_entry(page_list->prev,struct page, list);
					list_del(&page->list);
					page_cache_release(page);
				}
				break; */
			}
		} else {
			cFYI(1,("No bytes read (%d) at offset %lld . Cleaning remaining pages from readahead list",bytes_read,offset)); 
			/* BB turn off caching and do new lookup on file size at server? */
			while (!list_empty(page_list) && (i < num_pages)) {
				page = list_entry(page_list->prev, struct page, lru);
				list_del(&page->lru);
				page_cache_release(page); /* BB removeme - replace with zero of page? */
			}
			break;
		}
		if(smb_read_data) {
			cifs_buf_release(smb_read_data);
			smb_read_data = NULL;
		}
		bytes_read = 0;
	}
\
	pagevec_lru_add(&lru_pvec);
\
/* need to free smb_read_data buf before exit */
	if(smb_read_data) {
		cifs_buf_release(smb_read_data);
		smb_read_data = NULL;
	} 
\
	FreeXid(xid);
	return rc;
}
\
static inline int
cifs_readpages_async(struct file *file, struct address_space *mapping,
D1149 1
I1150 5
			while (!list_empty(page_list) && (i < num_pages)) {
				page = list_entry(page_list->prev, struct page, lru);
				list_del(&page->lru);
				page_cache_release(page);
			}
I1278 14
static int
cifs_readpages(struct file *file, struct address_space *mapping,
        struct list_head *page_list, unsigned num_pages)
{
	int rc;
	/* Call sync or async based on async_routines_enabled */
	if (async_routines_enabled) {
		rc = cifs_readpages_async(file, mapping, page_list, num_pages);
	} else {
		rc = cifs_readpages_sync(file, mapping, page_list, num_pages);
	}
	return rc;
}
		

== fs/cifs/transport.c ==
stevef at smfhome1.austin.rr.com|fs/cifs/transport.c|20021010191615|19666|bf11e1437f427aed
stevef at stevef95.austin.ibm.com|fs/cifs/transport.c|20040824225009|58817
D 1.48 04/10/11 15:25:08+10:00 jazzy at jazztower.(none) +134 -105
B torvalds at athlon.transmeta.com|ChangeSet|20020205173056|16047|c1d11a41ed024864
C
c Split SendReceive into SendMsg and ReceiveMsg
K 41379
O -rw-rw-r--
P fs/cifs/transport.c
------------------------------------------------

D177 4
I180 9
/**
 * Send a message.
 * @param xid ??
 * @param ses Session information
 * @param in_buf Message buffer containing message to be sent
 * */
int SendMsg(const unsigned int xid, struct cifsSesInfo *ses,
                struct smb_hdr *in_buf, struct mid_q_entry **midQout,
				const int long_op)
D182 4
I185 2
    int rc = 0;
    struct mid_q_entry *midQ;
D187 42
I284 2
		goto out_unlock;
	} else {
D286 5
I290 1
		*midQout = midQ;
D292 35
I326 7
	}
out_unlock:
	up(&ses->server->tcpSem);
	/* If not lock req, update # of requests on wire to server */
	if(long_op < 3) {
		atomic_dec(&ses->server->inFlight); 
		wake_up(&ses->server->request_q);
I328 17
	return rc;
}
\
/**
 * Receive a message.
 * @param xid process id
 * @param ses Session information
 * @param out_buf Message buffer to return received message
 * @param pbytes_returned number of bytes returned in the buffer
 */
int ReceiveMsg(const unsigned int xid, struct cifsSesInfo *ses,
        struct smb_hdr *out_buf, int *pbytes_returned,
        struct mid_q_entry *midQ)
{
    int rc = 0;
    unsigned int receive_len;
\
D353 6
D410 2
I411 2
	return rc;
}
D413 3
I415 17
int
SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int long_op)
{
	int rc = 0;
	unsigned int receive_len;
	unsigned long timeout;
	struct mid_q_entry *midQ;
\
	if (ses == NULL) {
		cERROR(1,("Null smb session"));
		return -EIO;
	}
	if(ses->server == NULL) {
		cERROR(1,("Null tcp session"));
		return -EIO;
D418 1
I418 77
	/* Ensure that we do not send more than 50 overlapping requests 
	   to the same server. We may make this configurable later or
	   use ses->maxReq */
	if(long_op == -1) {
		/* oplock breaks must not be held up */
		atomic_inc(&ses->server->inFlight);
	} else {
		spin_lock(&GlobalMid_Lock); 
		while(1) {        
			if(atomic_read(&ses->server->inFlight) >= CIFS_MAX_REQ){
				spin_unlock(&GlobalMid_Lock);
				wait_event(ses->server->request_q,
					atomic_read(&ses->server->inFlight)
					 < CIFS_MAX_REQ);
				spin_lock(&GlobalMid_Lock);
			} else {
				if(ses->server->tcpStatus == CifsExiting) {
					spin_unlock(&GlobalMid_Lock);
					return -ENOENT;
				}
\
			/* can not count locking commands against total since
			   they are allowed to block on server */
					
				if(long_op < 3) {
				/* update # of requests on the wire to server */
					atomic_inc(&ses->server->inFlight);
				}
				spin_unlock(&GlobalMid_Lock);
				break;
			}
		}
	}
\
    rc = SendMsg(xid, ses, in_buf, &midQ, 0);
	if (rc)
		return rc;
\
	if (long_op == -1)
		goto cifs_no_response_exit;
	else if (long_op == 2) /* writes past end of file can take looooong time */
		timeout = 300 * HZ;
	else if (long_op == 1)
		timeout = 45 * HZ; /* should be greater than 
			servers oplock break timeout (about 43 seconds) */
	else if (long_op > 2) {
		timeout = MAX_SCHEDULE_TIMEOUT;
	} else
		timeout = 15 * HZ;
	/* wait for 15 seconds or until woken up due to response arriving or 
	   due to last connection to this server being unmounted */
	if (signal_pending(current)) {
		/* if signal pending do not hold up user for full smb timeout
		but we still give response a chance to complete */
		timeout = 2 * HZ;
		
	}   
\
	/* No user interrupts in wait - wreaks havoc with performance */
	if(timeout != MAX_SCHEDULE_TIMEOUT) {
		timeout += jiffies;
		wait_event(ses->server->response_q,
			(midQ->midState & MID_RESPONSE_RECEIVED) || 
			time_after(jiffies, timeout) || 
			((ses->server->tcpStatus != CifsGood) &&
			 (ses->server->tcpStatus != CifsNew)));
	} else {
		wait_event(ses->server->response_q,
			(midQ->midState & MID_RESPONSE_RECEIVED) || 
			((ses->server->tcpStatus != CifsGood) &&
			 (ses->server->tcpStatus != CifsNew)));
	}
\
    rc = ReceiveMsg(xid, ses, out_buf, pbytes_returned, midQ);
\
cifs_no_response_exit:
	DeleteMidQEntry(midQ);
D420 3

jazzy at jazztower.(none)|fs/cifs/transport.c|20041011052508|41379
D 1.49 04/10/12 18:44:18+10:00 jazzy at jazztower.(none) +6 -1
B torvalds at athlon.transmeta.com|ChangeSet|20020205173056|16047|c1d11a41ed024864
C
c Added peak mid data
c cleaned up
K 56813
O -rw-rw-r--
P fs/cifs/transport.c
------------------------------------------------

I67 6
#ifdef CONFIG_CIFS_STATS
	/* FIXME: Is this thread safe?  Do we really need atomic_set, atomic_read
	 * etc? */
	if (atomic_read(&midCount) > atomic_read(&peakMids))
		atomic_set(&peakMids, midCount.counter);
#endif
D361 1

# Patch checksum=b7bb0f98

