Starting to look through this set.<br><br>ACK on this one. It doesn&#39;t look like this would affect behavior, but it is a slight cosmetic improvement.<br><br><br><div class="gmail_quote">On Tue, Aug 18, 2009 at 1:06 PM, Jeff Layton <span dir="ltr">&lt;<a href="mailto:jlayton@redhat.com">jlayton@redhat.com</a>&gt;</span> wrote:<br>
<blockquote class="gmail_quote" style="border-left: 1px solid rgb(204, 204, 204); margin: 0pt 0pt 0pt 0.8ex; padding-left: 1ex;">Right now, the GlobalOplock_Q is protected by the GlobalMid_Lock. That<br>
lock is also used for completely unrelated purposes (mostly for managing<br>
the global mid queue). Give the list its own dedicated spinlock<br>
(cifs_oplock_lock) and rename the list to cifs_oplock_list to<br>
eliminate the camel-case.<br>
<br>
Signed-off-by: Jeff Layton &lt;<a href="mailto:jlayton@redhat.com">jlayton@redhat.com</a>&gt;<br>
---<br>
 fs/cifs/cifsfs.c    |   13 +++++++------<br>
 fs/cifs/cifsglob.h  |    6 +++++-<br>
 fs/cifs/transport.c |   17 ++++++++---------<br>
 3 files changed, 20 insertions(+), 16 deletions(-)<br>
<br>
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c<br>
index b750aa5..3610e99 100644<br>
--- a/fs/cifs/cifsfs.c<br>
+++ b/fs/cifs/cifsfs.c<br>
@@ -986,19 +986,19 @@ static int cifs_oplock_thread(void *dummyarg)<br>
                if (try_to_freeze())<br>
                        continue;<br>
<br>
-               spin_lock(&amp;GlobalMid_Lock);<br>
-               if (list_empty(&amp;GlobalOplock_Q)) {<br>
-                       spin_unlock(&amp;GlobalMid_Lock);<br>
+               spin_lock(&amp;cifs_oplock_lock);<br>
+               if (list_empty(&amp;cifs_oplock_list)) {<br>
+                       spin_unlock(&amp;cifs_oplock_lock);<br>
                        set_current_state(TASK_INTERRUPTIBLE);<br>
                        schedule_timeout(39*HZ);<br>
                } else {<br>
-                       oplock_item = list_entry(GlobalOplock_Q.next,<br>
+                       oplock_item = list_entry(cifs_oplock_list.next,<br>
                                                struct oplock_q_entry, qhead);<br>
                        cFYI(1, (&quot;found oplock item to write out&quot;));<br>
                        pTcon = oplock_item-&gt;tcon;<br>
                        inode = oplock_item-&gt;pinode;<br>
                        netfid = oplock_item-&gt;netfid;<br>
-                       spin_unlock(&amp;GlobalMid_Lock);<br>
+                       spin_unlock(&amp;cifs_oplock_lock);<br>
                        DeleteOplockQEntry(oplock_item);<br>
                        /* can not grab inode sem here since it would<br>
                                deadlock when oplock received on delete<br>
@@ -1055,7 +1055,7 @@ init_cifs(void)<br>
        int rc = 0;<br>
        cifs_proc_init();<br>
        INIT_LIST_HEAD(&amp;cifs_tcp_ses_list);<br>
-       INIT_LIST_HEAD(&amp;GlobalOplock_Q);<br>
+       INIT_LIST_HEAD(&amp;cifs_oplock_list);<br>
 #ifdef CONFIG_CIFS_EXPERIMENTAL<br>
        INIT_LIST_HEAD(&amp;GlobalDnotifyReqList);<br>
        INIT_LIST_HEAD(&amp;GlobalDnotifyRsp_Q);<br>
@@ -1084,6 +1084,7 @@ init_cifs(void)<br>
        rwlock_init(&amp;GlobalSMBSeslock);<br>
        rwlock_init(&amp;cifs_tcp_ses_lock);<br>
        spin_lock_init(&amp;GlobalMid_Lock);<br>
+       spin_lock_init(&amp;cifs_oplock_lock);<br>
<br>
        if (cifs_max_pending &lt; 2) {<br>
                cifs_max_pending = 2;<br>
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h<br>
index 6084d63..f100399 100644<br>
--- a/fs/cifs/cifsglob.h<br>
+++ b/fs/cifs/cifsglob.h<br>
@@ -656,7 +656,11 @@ GLOBAL_EXTERN rwlock_t             cifs_tcp_ses_lock;<br>
  */<br>
 GLOBAL_EXTERN rwlock_t GlobalSMBSeslock;<br>
<br>
-GLOBAL_EXTERN struct list_head GlobalOplock_Q;<br>
+/* Global list of oplocks */<br>
+GLOBAL_EXTERN struct list_head cifs_oplock_list;<br>
+<br>
+/* Protects the cifs_oplock_list */<br>
+GLOBAL_EXTERN spinlock_t cifs_oplock_lock;<br>
<br>
 /* Outstanding dir notify requests */<br>
 GLOBAL_EXTERN struct list_head GlobalDnotifyReqList;<br>
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c<br>
index 0ad3e2d..1da4ab2 100644<br>
--- a/fs/cifs/transport.c<br>
+++ b/fs/cifs/transport.c<br>
@@ -119,20 +119,19 @@ AllocOplockQEntry(struct inode *pinode, __u16 fid, struct cifsTconInfo *tcon)<br>
                temp-&gt;pinode = pinode;<br>
                temp-&gt;tcon = tcon;<br>
                temp-&gt;netfid = fid;<br>
-               spin_lock(&amp;GlobalMid_Lock);<br>
-               list_add_tail(&amp;temp-&gt;qhead, &amp;GlobalOplock_Q);<br>
-               spin_unlock(&amp;GlobalMid_Lock);<br>
+               spin_lock(&amp;cifs_oplock_lock);<br>
+               list_add_tail(&amp;temp-&gt;qhead, &amp;cifs_oplock_list);<br>
+               spin_unlock(&amp;cifs_oplock_lock);<br>
        }<br>
        return temp;<br>
-<br>
 }<br>
<br>
 void DeleteOplockQEntry(struct oplock_q_entry *oplockEntry)<br>
 {<br>
-       spin_lock(&amp;GlobalMid_Lock);<br>
+       spin_lock(&amp;cifs_oplock_lock);<br>
     /* should we check if list empty first? */<br>
        list_del(&amp;oplockEntry-&gt;qhead);<br>
-       spin_unlock(&amp;GlobalMid_Lock);<br>
+       spin_unlock(&amp;cifs_oplock_lock);<br>
        kmem_cache_free(cifs_oplock_cachep, oplockEntry);<br>
 }<br>
<br>
@@ -144,14 +143,14 @@ void DeleteTconOplockQEntries(struct cifsTconInfo *tcon)<br>
        if (tcon == NULL)<br>
                return;<br>
<br>
-       spin_lock(&amp;GlobalMid_Lock);<br>
-       list_for_each_entry(temp, &amp;GlobalOplock_Q, qhead) {<br>
+       spin_lock(&amp;cifs_oplock_lock);<br>
+       list_for_each_entry(temp, &amp;cifs_oplock_list, qhead) {<br>
                if ((temp-&gt;tcon) &amp;&amp; (temp-&gt;tcon == tcon)) {<br>
                        list_del(&amp;temp-&gt;qhead);<br>
                        kmem_cache_free(cifs_oplock_cachep, temp);<br>
                }<br>
        }<br>
-       spin_unlock(&amp;GlobalMid_Lock);<br>
+       spin_unlock(&amp;cifs_oplock_lock);<br>
 }<br>
<br>
 static int<br>
<font color="#888888">--<br>
1.6.0.6<br>
<br>
</font></blockquote></div><br><br clear="all"><br>-- <br>Thanks,<br><br>Steve<br>