 fs/dcache.c | 32 ++++++++++++++++++++++++++++++++
 1 file changed, 32 insertions(+)

diff --git a/fs/dcache.c b/fs/dcache.c
index 41000305d716..c988806b941e 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -603,6 +603,9 @@ relock:
  * Real recursion would eat up our stack space.
  */
 
+#define is_simple_dput(dentry) \
+	(((dentry)->d_flags & (DCACHE_REFERENCED | DCACHE_LRU_LIST)) == (DCACHE_REFERENCED | DCACHE_LRU_LIST))
+
 /*
  * dput - release a dentry
  * @dentry: dentry to release
@@ -617,6 +620,35 @@ void dput(struct dentry *dentry)
 	if (unlikely(!dentry))
 		return;
 
+	/*
+	 * Try RTM for the trivial - and common - case.
+	 *
+	 * We don't do this for DCACHE_OP_DELETE (which is a static flag,
+	 * so check it outside the transaction), and we require that the
+	 * dentry is already marked referenced and on the LRU list.
+	 *
+	 * If that is true, and the dentry is not locked, we can just
+	 * decrement the usage count.
+	 *
+	 * This is kind of a special super-case of lockref_put(), but
+	 * atomically testing the dentry flags to make sure that there
+	 * is nothing else we need to look at.
+	 */
+	if (unlikely(dentry->d_flags & DCACHE_OP_DELETE))
+		goto repeat;
+	asm goto("xbegin %l[repeat]": : :"memory","ax":repeat);
+	if (unlikely(d_unhashed(dentry)))
+		goto xabort;
+	if (unlikely(!is_simple_dput(dentry)))
+		goto xabort;
+	if (unlikely(!arch_spin_value_unlocked(dentry->d_lock.rlock.raw_lock)))
+		goto xabort;
+	dentry->d_lockref.count--;
+	asm volatile("xend");
+	return;
+
+xabort:
+	asm volatile("xabort $0");
 repeat:
 	if (lockref_put_or_lock(&dentry->d_lockref))
 		return;
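
In case the raw xbegin/xend/xabort asm is hard to follow, here is a minimal
stand-alone sketch of the same fast-path idea written with the RTM intrinsics
from <immintrin.h> (GCC/Clang, build with -mrtm). Everything in it (struct obj,
the FLAG_* names, lock()/unlock(), fast_put()/put()) is made up for
illustration and only mimics the dentry fields the patch touches; it is not the
kernel code, just the shape of it.

#include <immintrin.h>
#include <stdio.h>

struct obj {
	int lock;		/* 0 = unlocked; stand-in for d_lock   */
	int count;		/* stand-in for d_lockref.count        */
	unsigned int flags;	/* stand-in for d_flags                */
};

#define FLAG_REFERENCED	0x1u
#define FLAG_ON_LRU	0x2u
#define SIMPLE_PUT_MASK	(FLAG_REFERENCED | FLAG_ON_LRU)

static void lock_obj(struct obj *o)
{
	while (__atomic_exchange_n(&o->lock, 1, __ATOMIC_ACQUIRE))
		;	/* spin */
}

static void unlock_obj(struct obj *o)
{
	__atomic_store_n(&o->lock, 0, __ATOMIC_RELEASE);
}

/*
 * Fast path: decrement the count inside an RTM transaction.  Reading
 * o->lock inside the transaction puts the lock word in the read set,
 * so any concurrent locker aborts us; that is the role the
 * arch_spin_value_unlocked() check plays in the patch above.
 * Returns 1 on success, 0 if the caller must take the locked slow path.
 */
static int fast_put(struct obj *o)
{
	if (!__builtin_cpu_supports("rtm"))	/* skip on non-TSX hardware */
		return 0;
	if (_xbegin() != _XBEGIN_STARTED)
		return 0;			/* transaction aborted */
	if ((o->flags & SIMPLE_PUT_MASK) != SIMPLE_PUT_MASK || o->lock)
		_xabort(0);			/* not the simple case */
	o->count--;
	_xend();
	return 1;
}

static void put(struct obj *o)
{
	if (fast_put(o))
		return;
	/* Slow path: the equivalent of falling through to "repeat:". */
	lock_obj(o);
	o->count--;
	unlock_obj(o);
}

int main(void)
{
	struct obj o = { .lock = 0, .count = 2, .flags = SIMPLE_PUT_MASK };

	put(&o);
	printf("count = %d\n", o.count);	/* expect 1 */
	return 0;
}

An abort (or a CPU without RTM) just falls back to the plain locked decrement,
the same way the patch falls through to repeat: and lets
lockref_put_or_lock() do the work.
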