[linux-cifs-client] cifs 1.45 for 2.4.x

Yehuda Sadeh Weinraub Yehuda.Sadeh at expand.com
Sun Jul 30 05:58:06 GMT 2006


And here is the second part

Yehuda

[part 2/2]

diff -urN cifs-2.6-old-kernels/fs/cifs/inode.c cifs-2.4/fs/cifs/inode.c
--- cifs-2.6-old-kernels/fs/cifs/inode.c	Mon Jul 24 15:13:14 2006
+++ cifs-2.4/fs/cifs/inode.c	Thu Jul 27 15:11:25 2006
@@ -19,7 +19,9 @@
  *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
 #include <linux/fs.h>
+#ifdef LNXKERN26
 #include <linux/buffer_head.h>
+#endif
 #include <linux/stat.h>
 #include <linux/pagemap.h>
 #include <asm/div64.h>
@@ -30,6 +32,52 @@
 #include "cifs_debug.h"
 #include "cifs_fs_sb.h"
 
+#ifndef pgoff_t
+#define pgoff_t unsigned long
+#endif
+
+#ifdef LNXKERN26
+#define CIFS_INODE_FLAGS (S_NOATIME | S_NOCMTIME)
+#else
+#define CIFS_INODE_FLAGS (S_NOATIME)
+#endif
+
+void cifs_init_inode(struct inode *inode)
+{
+    struct cifsInodeInfo *cifs_inode;
+
+    cifs_inode = CIFS_I(inode);
+
+	cifs_inode->cifsAttrs = 0x20;	/* default */
+	atomic_set(&cifs_inode->inUse, 0);
+	cifs_inode->time = 0;
+	/* Until the file is open and we have gotten oplock
+	info back from the server, can not assume caching of
+	file data or metadata */
+	cifs_inode->clientCanCacheRead = FALSE;
+	cifs_inode->clientCanCacheAll = FALSE;
+	inode->i_blksize = CIFS_MAX_MSGSIZE;
+	inode->i_blkbits = 14;  /* 2**14 = CIFS_MAX_MSGSIZE */
+	inode->i_flags = CIFS_INODE_FLAGS;
+	INIT_LIST_HEAD(&cifs_inode->openFileList);
+#ifndef CIFS_HAVE_VFS_ALLOC_INODE
+    cifs_inode->vfs_inode = inode;
+#endif
+}
+
+struct inode * get_cifs_inode(struct super_block * sb)
+{
+    struct inode * newinode;
+    newinode = new_inode(sb);
+    cFYI(1,("got new inode %p",newinode));
+#ifndef CIFS_HAVE_VFS_ALLOC_INODE
+    if(newinode) {
+        cifs_init_inode(newinode);
+    }
+#endif
+    return newinode;
+}
+
 int cifs_get_inode_info_unix(struct inode **pinode,
 	const unsigned char *search_path, struct super_block *sb, int xid)
 {
@@ -81,7 +129,7 @@
 
 		/* get new inode */
 		if (*pinode == NULL) {
-			*pinode = new_inode(sb);
+			*pinode = get_cifs_inode(sb);
 			if (*pinode == NULL) 
 				return -ENOMEM;
 			/* Is an i_ino of zero legal? */
@@ -389,7 +437,7 @@
 
 		/* get new inode */
 		if (*pinode == NULL) {
-			*pinode = new_inode(sb);
+			*pinode = get_cifs_inode(sb);
 			if (*pinode == NULL)
 				return -ENOMEM;
 			/* Is an i_ino of zero legal? Can we use that to check
@@ -557,6 +605,10 @@
 	int xid;
 	struct cifs_sb_info *cifs_sb;
 
+#ifndef CIFS_HAVE_VFS_ALLOC_INODE
+    cifs_init_inode(inode);
+#endif
+
 	cifs_sb = CIFS_SB(inode->i_sb);
 	xid = GetXid();
 	if (cifs_sb->tcon->ses->capabilities & CAP_UNIX)
@@ -969,7 +1021,11 @@
 	struct cifs_sb_info *cifs_sb;
 	struct cifsInodeInfo *cifsInode;
 	loff_t local_size;
+#ifdef LNXKERN26
 	struct timespec local_mtime;
+#else
+	time_t local_mtime;
+#endif
 	int invalidate_inode = FALSE;
 
 	if (direntry->d_inode == NULL)
@@ -1042,7 +1098,7 @@
 	/* if not oplocked, we invalidate inode pages if mtime or file size
 	   had changed on server */
 
-	if (timespec_equal(&local_mtime,&direntry->d_inode->i_mtime) && 
+	if (TIME_EQUAL(local_mtime,direntry->d_inode->i_mtime) && 
 	    (local_size == direntry->d_inode->i_size)) {
 		cFYI(1, ("cifs_revalidate - inode unchanged"));
 	} else {
@@ -1064,7 +1120,7 @@
 	if (direntry->d_inode->i_mapping) {
 		/* do we need to lock inode until after invalidate completes
 		   below? */
-		filemap_fdatawrite(direntry->d_inode->i_mapping);
+		FILEMAP_SYNC(direntry->d_inode->i_mapping);
 	}
 	if (invalidate_inode) {
 	/* shrink_dcache not necessary now that cifs dentry ops
@@ -1079,7 +1135,11 @@
 				/* changed on server - flush read ahead pages */
 				cFYI(1, ("Invalidating read ahead data on "
 					 "closed file"));
+#ifdef LNXKERN26
 				invalidate_remote_inode(direntry->d_inode);
+#else
+				invalidate_inode_pages(direntry->d_inode);
+#endif
 			}
 		}
 	}
@@ -1090,6 +1150,7 @@
 	return rc;
 }
 
+#ifdef LNXKERN26
 int cifs_getattr(struct vfsmount *mnt, struct dentry *dentry,
 	struct kstat *stat)
 {
@@ -1098,6 +1159,7 @@
 		generic_fillattr(dentry->d_inode, stat);
 	return err;
 }
+#endif
 
 static int cifs_truncate_page(struct address_space *mapping, loff_t from)
 {
@@ -1166,7 +1228,7 @@
 #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 15)
 	filemap_write_and_wait(direntry->d_inode->i_mapping);
 #else
-	filemap_fdatawrite(direntry->d_inode->i_mapping);
+	FILEMAP_SYNC(direntry->d_inode->i_mapping);
 	filemap_fdatawait(direntry->d_inode->i_mapping);
 #endif
 
diff -urN cifs-2.6-old-kernels/fs/cifs/link.c cifs-2.4/fs/cifs/link.c
--- cifs-2.6-old-kernels/fs/cifs/link.c	Mon Jul 24 15:13:14 2006
+++ cifs-2.4/fs/cifs/link.c	Thu Jul 27 15:11:25 2006
@@ -20,7 +20,9 @@
  */
 #include <linux/fs.h>
 #include <linux/stat.h>
+#ifdef LNXKERN26
 #include <linux/namei.h>
+#endif
 #include "cifsfs.h"
 #include "cifspdu.h"
 #include "cifsglob.h"
diff -urN cifs-2.6-old-kernels/fs/cifs/mempool_compat.c cifs-2.4/fs/cifs/mempool_compat.c
--- cifs-2.6-old-kernels/fs/cifs/mempool_compat.c	Thu Jan  1 02:00:00 1970
+++ cifs-2.4/fs/cifs/mempool_compat.c	Thu Jul 27 15:11:25 2006
@@ -0,0 +1,276 @@
+/*
+ *  linux/mm/mempool.c
+ *
+ *  memory buffer pool support. Such pools are mostly used
+ *  for guaranteed, deadlock-free memory allocations during
+ *  extreme VM load.
+ *
+ *  started by Ingo Molnar, Copyright (C) 2001
+ */
+
+#include "cifs_compat.h"
+
+#ifdef CIFS_USE_MEMPOOL_COMPAT
+
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include "mempool_compat.h"
+
+/**
+ * mempool_create - create a memory pool
+ * @min_nr:    the minimum number of elements guaranteed to be
+ *             allocated for this pool.
+ * @alloc_fn:  user-defined element-allocation function.
+ * @free_fn:   user-defined element-freeing function.
+ * @pool_data: optional private data available to the user-defined functions.
+ *
+ * this function creates and allocates a guaranteed size, preallocated
+ * memory pool. The pool can be used from the mempool_alloc and mempool_free
+ * functions. This function might sleep. Both the alloc_fn() and the free_fn()
+ * functions might sleep - as long as the mempool_alloc function is not called
+ * from IRQ contexts. The element allocated by alloc_fn() must be able to
+ * hold a struct list_head. (8 bytes on x86.)
+ */
+mempool_t * cifs_mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
+				mempool_free_t *free_fn, void *pool_data)
+{
+	mempool_t *pool;
+	int i;
+
+	pool = kmalloc(sizeof(*pool), GFP_KERNEL);
+	if (!pool)
+		return NULL;
+	memset(pool, 0, sizeof(*pool));
+
+	spin_lock_init(&pool->lock);
+	pool->min_nr = min_nr;
+	pool->pool_data = pool_data;
+	INIT_LIST_HEAD(&pool->elements);
+	init_waitqueue_head(&pool->wait);
+	pool->alloc = alloc_fn;
+	pool->free = free_fn;
+
+	/*
+	 * First pre-allocate the guaranteed number of buffers.
+	 */
+	for (i = 0; i < min_nr; i++) {
+		void *element;
+		struct list_head *tmp;
+		element = pool->alloc(GFP_KERNEL, pool->pool_data);
+
+		if (unlikely(!element)) {
+			/*
+			 * Not enough memory - free the allocated ones
+			 * and return:
+			 */
+			list_for_each(tmp, &pool->elements) {
+				element = tmp;
+				pool->free(element, pool->pool_data);
+			}
+			kfree(pool);
+
+			return NULL;
+		}
+		tmp = element;
+		list_add(tmp, &pool->elements);
+		pool->curr_nr++;
+	}
+	return pool;
+}
+
+/**
+ * mempool_resize - resize an existing memory pool
+ * @pool:       pointer to the memory pool which was allocated via
+ *              mempool_create().
+ * @new_min_nr: the new minimum number of elements guaranteed to be
+ *              allocated for this pool.
+ * @gfp_mask:   the usual allocation bitmask.
+ *
+ * This function shrinks/grows the pool. In the case of growing,
+ * it cannot be guaranteed that the pool will be grown to the new
+ * size immediately, but new mempool_free() calls will refill it.
+ *
+ * Note, the caller must guarantee that no mempool_destroy is called
+ * while this function is running. mempool_alloc() & mempool_free()
+ * might be called (eg. from IRQ contexts) while this function executes.
+ */
+void cifs_mempool_resize(mempool_t *pool, int new_min_nr, int gfp_mask)
+{
+	int delta;
+	void *element;
+	unsigned long flags;
+	struct list_head *tmp;
+
+	if (new_min_nr <= 0)
+		BUG();
+
+	spin_lock_irqsave(&pool->lock, flags);
+	if (new_min_nr < pool->min_nr) {
+		pool->min_nr = new_min_nr;
+		/*
+		 * Free possible excess elements.
+		 */
+		while (pool->curr_nr > pool->min_nr) {
+			tmp = pool->elements.next;
+			if (tmp == &pool->elements)
+				BUG();
+			list_del(tmp);
+			element = tmp;
+			pool->curr_nr--;
+			spin_unlock_irqrestore(&pool->lock, flags);
+
+			pool->free(element, pool->pool_data);
+
+			spin_lock_irqsave(&pool->lock, flags);
+		}
+		spin_unlock_irqrestore(&pool->lock, flags);
+		return;
+	}
+	delta = new_min_nr - pool->min_nr;
+	pool->min_nr = new_min_nr;
+	spin_unlock_irqrestore(&pool->lock, flags);
+
+	/*
+	 * We refill the pool up to the new threshold - but we don't
+	 * (cannot) guarantee that the refill succeeds.
+	 */
+	while (delta) {
+		element = pool->alloc(gfp_mask, pool->pool_data);
+		if (!element)
+			break;
+		mempool_free(element, pool);
+		delta--;
+	}
+}
+
+/**
+ * mempool_destroy - deallocate a memory pool
+ * @pool:      pointer to the memory pool which was allocated via
+ *             mempool_create().
+ *
+ * this function only sleeps if the free_fn() function sleeps. The caller
+ * has to guarantee that no mempool_alloc() nor mempool_free() happens in
+ * this pool when calling this function.
+ */
+void cifs_mempool_destroy(mempool_t *pool)
+{
+	void *element;
+	struct list_head *head, *tmp;
+
+	if (!pool)
+		return;
+
+	head = &pool->elements;
+	for (tmp = head->next; tmp != head; ) {
+		element = tmp;
+		tmp = tmp->next;
+		pool->free(element, pool->pool_data);
+		pool->curr_nr--;
+	}
+	if (pool->curr_nr)
+		BUG();
+	kfree(pool);
+}
+
+/**
+ * mempool_alloc - allocate an element from a specific memory pool
+ * @pool:      pointer to the memory pool which was allocated via
+ *             mempool_create().
+ * @gfp_mask:  the usual allocation bitmask.
+ *
+ * this function only sleeps if the alloc_fn function sleeps or
+ * returns NULL. Note that due to preallocation, this function
+ * *never* fails when called from process contexts. (it might
+ * fail if called from an IRQ context.)
+ */
+void * cifs_mempool_alloc(mempool_t *pool, int gfp_mask)
+{
+	void *element;
+	unsigned long flags;
+	struct list_head *tmp;
+	int curr_nr;
+	DECLARE_WAITQUEUE(wait, current);
+	int gfp_nowait = gfp_mask & ~(__GFP_WAIT | __GFP_IO);
+
+repeat_alloc:
+	element = pool->alloc(gfp_nowait, pool->pool_data);
+	if (likely(element != NULL))
+		return element;
+
+	/*
+	 * If the pool is less than 50% full then try harder
+	 * to allocate an element:
+	 */
+	if ((gfp_mask != gfp_nowait) && (pool->curr_nr <= pool->min_nr/2)) {
+		element = pool->alloc(gfp_mask, pool->pool_data);
+		if (likely(element != NULL))
+			return element;
+	}
+
+	/*
+	 * Kick the VM at this point.
+	 */
+	wakeup_bdflush();
+
+	spin_lock_irqsave(&pool->lock, flags);
+	if (likely(pool->curr_nr)) {
+		tmp = pool->elements.next;
+		list_del(tmp);
+		element = tmp;
+		pool->curr_nr--;
+		spin_unlock_irqrestore(&pool->lock, flags);
+		return element;
+	}
+	spin_unlock_irqrestore(&pool->lock, flags);
+
+	/* We must not sleep in the GFP_ATOMIC case */
+	if (gfp_mask == gfp_nowait)
+		return NULL;
+
+	run_task_queue(&tq_disk);
+
+	add_wait_queue_exclusive(&pool->wait, &wait);
+	set_task_state(current, TASK_UNINTERRUPTIBLE);
+
+	spin_lock_irqsave(&pool->lock, flags);
+	curr_nr = pool->curr_nr;
+	spin_unlock_irqrestore(&pool->lock, flags);
+
+	if (!curr_nr)
+		schedule();
+
+	current->state = TASK_RUNNING;
+	remove_wait_queue(&pool->wait, &wait);
+
+	goto repeat_alloc;
+}
+
+/**
+ * mempool_free - return an element to the pool.
+ * @element:   pool element pointer.
+ * @pool:      pointer to the memory pool which was allocated via
+ *             mempool_create().
+ *
+ * this function only sleeps if the free_fn() function sleeps.
+ */
+void cifs_mempool_free(void *element, mempool_t *pool)
+{
+	unsigned long flags;
+
+	if (pool->curr_nr < pool->min_nr) {
+		spin_lock_irqsave(&pool->lock, flags);
+		if (pool->curr_nr < pool->min_nr) {
+			list_add(element, &pool->elements);
+			pool->curr_nr++;
+			spin_unlock_irqrestore(&pool->lock, flags);
+			wake_up(&pool->wait);
+			return;
+		}
+		spin_unlock_irqrestore(&pool->lock, flags);
+	}
+	pool->free(element, pool->pool_data);
+}
+
+#endif
+
diff -urN cifs-2.6-old-kernels/fs/cifs/mempool_compat.h cifs-2.4/fs/cifs/mempool_compat.h
--- cifs-2.6-old-kernels/fs/cifs/mempool_compat.h	Thu Jan  1 02:00:00 1970
+++ cifs-2.4/fs/cifs/mempool_compat.h	Thu Jul 27 15:11:25 2006
@@ -0,0 +1,39 @@
+/*
+ * memory buffer pool support
+ */
+#ifndef _LINUX_MEMPOOL_H
+#define _LINUX_MEMPOOL_H
+
+#include <linux/list.h>
+#include <linux/wait.h>
+
+struct mempool_s;
+typedef struct mempool_s mempool_t;
+
+typedef void * (mempool_alloc_t)(int gfp_mask, void *pool_data);
+typedef void (mempool_free_t)(void *element, void *pool_data);
+
+struct mempool_s {
+	spinlock_t lock;
+	int min_nr, curr_nr;
+	struct list_head elements;
+
+	void *pool_data;
+	mempool_alloc_t *alloc;
+	mempool_free_t *free;
+	wait_queue_head_t wait;
+};
+extern mempool_t * cifs_mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
+				 mempool_free_t *free_fn, void *pool_data);
+extern void cifs_mempool_resize(mempool_t *pool, int new_min_nr, int gfp_mask);
+extern void cifs_mempool_destroy(mempool_t *pool);
+extern void * cifs_mempool_alloc(mempool_t *pool, int gfp_mask);
+extern void cifs_mempool_free(void *element, mempool_t *pool);
+
+#define mempool_create  cifs_mempool_create
+#define mempool_resize  cifs_mempool_resize
+#define mempool_destroy cifs_mempool_destroy
+#define mempool_alloc   cifs_mempool_alloc
+#define mempool_free    cifs_mempool_free
+
+#endif /* _LINUX_MEMPOOL_H */
diff -urN cifs-2.6-old-kernels/fs/cifs/netmisc.c cifs-2.4/fs/cifs/netmisc.c
--- cifs-2.6-old-kernels/fs/cifs/netmisc.c	Mon Jul 24 15:13:14 2006
+++ cifs-2.4/fs/cifs/netmisc.c	Thu Jul 27 15:11:25 2006
@@ -910,4 +910,28 @@
 	/* Convert to 100ns intervals and then add the NTFS time offset. */
 	return (u64) t.tv_sec * 10000000 + t.tv_nsec/100 + NTFS_TIME_OFFSET;
 }
+#else
+time_t
+cifs_NTtimeToUnix(__u64 ntutc)
+{
+	/* BB what about the timezone? BB */
+
+	/* Subtract the NTFS time offset, then convert to 1s intervals. */
+	u64 t;
+
+	t = ntutc - NTFS_TIME_OFFSET;
+	do_div(t, 10000000);
+	return (time_t)t;
+}
+
+/* Convert the Unix UTC into NT UTC. */
+__u64
+cifs_UnixTimeToNT(time_t t)
+{
+	__u64 dce_time;
+   /* Convert to 100ns intervals and then add the NTFS time offset. */
+	dce_time = (__u64) t * 10000000;
+	dce_time += NTFS_TIME_OFFSET;
+	return dce_time;
+}
 #endif
diff -urN cifs-2.6-old-kernels/fs/cifs/readdir.c cifs-2.4/fs/cifs/readdir.c
--- cifs-2.6-old-kernels/fs/cifs/readdir.c	Mon Jul 24 15:13:14 2006
+++ cifs-2.4/fs/cifs/readdir.c	Thu Jul 27 15:11:25 2006
@@ -78,7 +78,7 @@
 		*ptmp_inode = tmp_dentry->d_inode;
 /* BB overwrite old name? i.e. tmp_dentry->d_name and tmp_dentry->d_name.len??*/
 		if(*ptmp_inode == NULL) {
-			*ptmp_inode = new_inode(file->f_dentry->d_sb);
+			*ptmp_inode = get_cifs_inode(file->f_dentry->d_sb);
 			if(*ptmp_inode == NULL)
 				return rc;
 			rc = 1;
@@ -92,7 +92,7 @@
 			return rc;
 		}
 
-		*ptmp_inode = new_inode(file->f_dentry->d_sb);
+		*ptmp_inode = get_cifs_inode(file->f_dentry->d_sb);
 		if (pTcon->nocase)
 			tmp_dentry->d_op = &cifs_ci_dentry_ops;
 		else
@@ -113,7 +113,11 @@
 		char * buf, int *pobject_type, int isNewInode)
 {
 	loff_t local_size;
+#ifdef LNXKERN26
 	struct timespec local_mtime;
+#else
+	time_t local_mtime;
+#endif
 
 	struct cifsInodeInfo *cifsInfo = CIFS_I(tmp_inode);
 	struct cifs_sb_info *cifs_sb = CIFS_SB(tmp_inode->i_sb);
@@ -210,7 +214,11 @@
 	if (is_size_safe_to_change(cifsInfo)) {
 		/* can not safely change the file size here if the 
 		client is writing to it due to potential races */
+#ifdef LNXKERN26
 		i_size_write(tmp_inode, end_of_file);
+#else
+		tmp_inode->i_size = end_of_file;
+#endif
 
 	/* 512 bytes (2**9) is the fake blocksize that must be used */
 	/* for this calculation, even though the reported blocksize is larger */
@@ -249,13 +257,17 @@
 				   since have not started caching
readahead file
 				   data yet */
 
-		if (timespec_equal(&tmp_inode->i_mtime, &local_mtime) &&
+		if (TIME_EQUAL(&tmp_inode->i_mtime, &local_mtime) &&
 			(local_size == tmp_inode->i_size)) {
 			cFYI(1, ("inode exists but unchanged"));
 		} else {
 			/* file may have changed on server */
 			cFYI(1, ("invalidate inode, readdir detected change"));
+#ifdef LNXKERN26
 			invalidate_remote_inode(tmp_inode);
+#else
+			invalidate_inode_pages(tmp_inode);
+#endif
 		}
 	} else if (S_ISDIR(tmp_inode->i_mode)) {
 		cFYI(1, ("Directory inode"));
@@ -275,7 +287,11 @@
 	FILE_UNIX_INFO *pfindData, int *pobject_type, int isNewInode)
 {
 	loff_t local_size;
+#ifdef LNXKERN26
 	struct timespec local_mtime;
+#else
+	time_t local_mtime;
+#endif
 
 	struct cifsInodeInfo *cifsInfo = CIFS_I(tmp_inode);
 	struct cifs_sb_info *cifs_sb = CIFS_SB(tmp_inode->i_sb);
@@ -340,7 +356,11 @@
 	if (is_size_safe_to_change(cifsInfo)) {
 		/* can not safely change the file size here if the 
 		client is writing to it due to potential races */
+#ifdef LNXKERN26
 		i_size_write(tmp_inode,end_of_file);
+#else
+		tmp_inode->i_size = end_of_file;
+#endif
 
 	/* 512 bytes (2**9) is the fake blocksize that must be used */
 	/* for this calculation, not the real blocksize */
@@ -373,13 +393,17 @@
 			return; /* No sense invalidating pages for new inode since we
 					   have not started caching readahead file data yet */
 
-		if (timespec_equal(&tmp_inode->i_mtime, &local_mtime) &&
+		if (TIME_EQUAL(&tmp_inode->i_mtime, &local_mtime) &&
 			(local_size == tmp_inode->i_size)) {
 			cFYI(1, ("inode exists but unchanged"));
 		} else {
 			/* file may have changed on server */
 			cFYI(1, ("invalidate inode, readdir detected change"));
+#ifdef LNXKERN26
 			invalidate_remote_inode(tmp_inode);
+#else
+			invalidate_inode_pages(tmp_inode);
+#endif
 		}
 	} else if (S_ISDIR(tmp_inode->i_mode)) {
 		cFYI(1, ("Directory inode"));
diff -urN cifs-2.6-old-kernels/fs/cifs/sess.c cifs-2.4/fs/cifs/sess.c
--- cifs-2.6-old-kernels/fs/cifs/sess.c	Mon Jul 24 15:13:14 2006
+++ cifs-2.4/fs/cifs/sess.c	Thu Jul 27 15:11:25 2006
@@ -20,7 +20,7 @@
  *   along with this library; if not, write to the Free Software
  *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
-
+#include <linux/fs.h>
 #include "cifspdu.h"
 #include "cifsglob.h"
 #include "cifsproto.h"
diff -urN cifs-2.6-old-kernels/fs/cifs/transport.c cifs-2.4/fs/cifs/transport.c
--- cifs-2.6-old-kernels/fs/cifs/transport.c	Mon Jul 24 15:13:14 2006
+++ cifs-2.4/fs/cifs/transport.c	Thu Jul 27 15:11:25 2006
@@ -20,6 +20,8 @@
  *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 
  */
 
+#include "cifs_compat.h"
+
 #include <linux/fs.h>
 #include <linux/list.h>
 #include <linux/wait.h>
@@ -27,7 +29,11 @@
 #include <linux/delay.h>
 #include <asm/uaccess.h>
 #include <asm/processor.h>
+#ifndef CIFS_USE_MEMPOOL_COMPAT
 #include <linux/mempool.h>
+#else
+#include "mempool_compat.h"
+#endif
 #include "cifspdu.h"
 #include "cifsglob.h"
 #include "cifsproto.h"
diff -urN cifs-2.6-old-kernels/fs/cifs/xattr.c cifs-2.4/fs/cifs/xattr.c
--- cifs-2.6-old-kernels/fs/cifs/xattr.c	Tue May 23 10:25:31 2006
+++ cifs-2.4/fs/cifs/xattr.c	Thu Jul 27 15:11:25 2006
@@ -20,7 +20,9 @@
  */
 
 #include <linux/fs.h>
+#ifdef LNXKERN26
 #include <linux/posix_acl_xattr.h>
+#endif
 #include "cifsfs.h"
 #include "cifspdu.h"
 #include "cifsglob.h"


More information about the linux-cifs-client mailing list