author		Steven Whitehouse <swhiteho@redhat.com>	2008-06-04 15:06:21 +0100
committer	Steven Whitehouse <swhiteho@redhat.com>	2008-06-27 09:39:50 +0100
commit		31fcba00fe7145527b159f8893ec6c9cc61309fd (patch)
tree		6fef1dc1b20166b881356159d9ac8208a9335978 /fs/gfs2/locking/dlm/lock.c
parent		b2cad26cfc2091050574a460b304ed103a35dbda (diff)
download	linux-31fcba00fe7145527b159f8893ec6c9cc61309fd.tar.gz
[GFS2] Remove all_list from lock_dlm
I discovered that we had a list onto which every lock_dlm lock was
being put. Its only function was to discover whether we'd got any locks
left after umount. Since there was already a counter for that purpose
as well, I removed the list. The saving is sizeof(struct list_head) per
glock - well worth having.

Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
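
The trade-off is easy to see outside the kernel. Below is a minimal
userspace sketch (simplified types and hypothetical names, not the gdlm
code itself): a counter bumped on create and dropped on delete is enough
to detect locks leaked past unmount, so a per-object list_head that
exists only for that check buys nothing.

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for struct gdlm_ls / struct gdlm_lock. */
struct ls_state {
	int all_locks_count;		/* ++ on create, -- on delete */
};

struct lock_obj {
	int id;
	/* no list_head needed just to count outstanding locks */
};

static struct lock_obj *create_lock(struct ls_state *ls, int id)
{
	struct lock_obj *lp = malloc(sizeof(*lp));

	if (!lp)
		return NULL;
	lp->id = id;
	ls->all_locks_count++;		/* stands in for list_add() */
	return lp;
}

static void delete_lock(struct ls_state *ls, struct lock_obj *lp)
{
	ls->all_locks_count--;		/* stands in for list_del_init() */
	free(lp);
}

int main(void)
{
	struct ls_state ls = { 0 };
	struct lock_obj *a = create_lock(&ls, 1);
	struct lock_obj *b = create_lock(&ls, 2);

	delete_lock(&ls, a);
	/* b deliberately still allocated: the counter catches the leak */
	if (ls.all_locks_count)
		fprintf(stderr, "umount: %d lock(s) still allocated\n",
			ls.all_locks_count);
	delete_lock(&ls, b);
	return 0;
}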
Diffstat (limited to 'fs/gfs2/locking/dlm/lock.c')
-rw-r--r--	fs/gfs2/locking/dlm/lock.c	23
1 file changed, 0 insertions(+), 23 deletions(-)
diff --git a/fs/gfs2/locking/dlm/lock.c b/fs/gfs2/locking/dlm/lock.c
index 894df4567a030a..2482c9047505fc 100644
--- a/fs/gfs2/locking/dlm/lock.c
+++ b/fs/gfs2/locking/dlm/lock.c
@@ -58,9 +58,6 @@ static void gdlm_delete_lp(struct gdlm_lock *lp)
 	spin_lock(&ls->async_lock);
 	if (!list_empty(&lp->delay_list))
 		list_del_init(&lp->delay_list);
-	gdlm_assert(!list_empty(&lp->all_list), "%x,%llx", lp->lockname.ln_type,
-		    (unsigned long long)lp->lockname.ln_number);
-	list_del_init(&lp->all_list);
 	ls->all_locks_count--;
 	spin_unlock(&ls->async_lock);
 
@@ -397,7 +394,6 @@ static int gdlm_create_lp(struct gdlm_ls *ls, struct lm_lockname *name,
 	INIT_LIST_HEAD(&lp->delay_list);
 
 	spin_lock(&ls->async_lock);
-	list_add(&lp->all_list, &ls->all_locks);
 	ls->all_locks_count++;
 	spin_unlock(&ls->async_lock);
 
@@ -710,22 +706,3 @@ void gdlm_submit_delayed(struct gdlm_ls *ls)
 	wake_up(&ls->thread_wait);
 }
 
-int gdlm_release_all_locks(struct gdlm_ls *ls)
-{
-	struct gdlm_lock *lp, *safe;
-	int count = 0;
-
-	spin_lock(&ls->async_lock);
-	list_for_each_entry_safe(lp, safe, &ls->all_locks, all_list) {
-		list_del_init(&lp->all_list);
-
-		if (lp->lvb && lp->lvb != junk_lvb)
-			kfree(lp->lvb);
-		kfree(lp);
-		count++;
-	}
-	spin_unlock(&ls->async_lock);
-
-	return count;
-}
-
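
Note that the diffstat above is filtered to lock.c, so any caller-side
changes in the same commit are not shown here. One detail of the removed
gdlm_release_all_locks() worth spelling out: it walks the list with
list_for_each_entry_safe() because each entry is freed mid-walk, and the
plain iterator would have to read the next pointer out of already-freed
memory. A minimal userspace analogue of that "safe" iteration, using a
hand-rolled list rather than the kernel's list.h:

#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
};

/* Free every node; "safe" iteration caches the next pointer before
 * the current node is freed, mirroring list_for_each_entry_safe(). */
static int release_all(struct node **head)
{
	struct node *n = *head, *safe;
	int count = 0;

	while (n) {
		safe = n->next;		/* must be read before free(n) */
		free(n);
		n = safe;
		count++;
	}
	*head = NULL;
	return count;
}

int main(void)
{
	struct node *head = NULL;
	int i;

	for (i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));

		if (!n)
			break;
		n->next = head;
		head = n;
	}
	printf("released %d nodes\n", release_all(&head));
	return 0;
}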