[PATCH] mm: __lock_page() dbg

From: Bart Van Assche
Date: Thu Aug 11 2016 - 19:38:32 EST

Debug patch: record in struct page which task holds a page's lock, set that
owner at the sites that take PG_locked (trylock_page(), add_to_page_cache(),
the __SetPageLocked() callers) and clear it in unlock_page(). Additionally,
make __lock_page() and __lock_page_killable() wait with a 30-second timeout
so that a task stuck waiting for a page lock periodically logs its own PID
and the PID of the task that currently holds the lock.

---
 include/linux/mm_types.h |  3 +++
 include/linux/pagemap.h  | 22 ++++++++++++++++++++--
 mm/filemap.c             | 44 ++++++++++++++++++++++++++++++++------------
 mm/ksm.c                 |  1 +
 mm/migrate.c             |  1 +
 mm/shmem.c               |  1 +
 mm/swap_state.c          |  2 ++
 mm/vmscan.c              |  1 +
 8 files changed, 61 insertions(+), 14 deletions(-)
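
With this applied, a task that has been waiting more than 30 seconds for a
page lock logs a line like the one below, produced by the new pr_info() in
__lock_page_impl(). The PIDs here are made up for illustration; "m 0x2" is
TASK_UNINTERRUPTIBLE:

  __lock_page_impl / pid 1372 / m 0x2: timeout - continuing to wait for 918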

diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index ca3e517..59fdfeb 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -23,6 +23,7 @@
 
 struct address_space;
 struct mem_cgroup;
+struct task_struct;
 
 #define USE_SPLIT_PTE_PTLOCKS	(NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
 #define USE_SPLIT_PMD_PTLOCKS	(USE_SPLIT_PTE_PTLOCKS && \
@@ -220,6 +221,8 @@ struct page {
 #ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
 	int _last_cpupid;
 #endif
+
+	struct task_struct *owner;
 }
 /*
  * The struct page can be forced to be double word aligned so that atomic ops
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 9735410..d332674 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -419,10 +419,25 @@ extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
 				unsigned int flags);
 extern void unlock_page(struct page *page);
 
+static inline struct task_struct *get_page_lock_owner(struct page *page)
+{
+	return page->owner;
+}
+
+static inline void set_page_lock_owner(struct page *page, struct task_struct *t)
+{
+	page->owner = t;
+}
+
 static inline int trylock_page(struct page *page)
 {
+	int res;
+
 	page = compound_head(page);
-	return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
+	res = !test_and_set_bit_lock(PG_locked, &page->flags);
+	if (likely(res))
+		set_page_lock_owner(page, current);
+	return res;
 }
 
 /*
@@ -641,9 +656,12 @@ static inline int add_to_page_cache(struct page *page,
 	int error;
 
 	__SetPageLocked(page);
+	set_page_lock_owner(page, current);
 	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
-	if (unlikely(error))
+	if (unlikely(error)) {
+		set_page_lock_owner(page, NULL);
 		__ClearPageLocked(page);
+	}
 	return error;
 }
 
diff --git a/mm/filemap.c b/mm/filemap.c
index 530e75a..0ad8bf6 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -699,11 +699,13 @@ int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
 	int ret;
 
 	__SetPageLocked(page);
+	set_page_lock_owner(page, current);
 	ret = __add_to_page_cache_locked(page, mapping, offset,
 					 gfp_mask, &shadow);
-	if (unlikely(ret))
+	if (unlikely(ret)) {
+		set_page_lock_owner(page, NULL);
 		__ClearPageLocked(page);
-	else {
+	} else {
 		/*
 		 * The page might have been evicted from cache only
 		 * recently, in which case it should be activated like
@@ -831,6 +833,7 @@ void unlock_page(struct page *page)
 {
 	page = compound_head(page);
 	VM_BUG_ON_PAGE(!PageLocked(page), page);
+	set_page_lock_owner(page, NULL);
 	clear_bit_unlock(PG_locked, &page->flags);
 	smp_mb__after_atomic();
 	wake_up_page(page, PG_locked);
@@ -925,27 +928,44 @@ void page_endio(struct page *page, int rw, int err)
 }
 EXPORT_SYMBOL_GPL(page_endio);
 
+int __lock_page_impl(struct page *page, int mode)
+{
+	struct page *page_head = compound_head(page);
+	DEFINE_WAIT_BIT(wait, &page_head->flags, PG_locked);
+	struct task_struct *owner;
+	int res;
+
+	for (;;) {
+		wait.key.timeout = jiffies + 30 * HZ;
+		res = __wait_on_bit_lock(page_waitqueue(page_head),
+					 &wait, bit_wait_io_timeout, mode);
+		if (res == 0) {
+			set_page_lock_owner(page_head, current);
+			break;
+		}
+		if (res == -EINTR)
+			break;
+		owner = get_page_lock_owner(page_head);
+		pr_info("%s / pid %d / m %#x: %s - continuing to wait for %d\n",
+			__func__, task_pid_nr(current), mode, res == -EAGAIN ?
+			"timeout" : "interrupted",
+			owner ? task_pid_nr(owner) : 0);
+	}
+	return res;
+}
 /**
  * __lock_page - get a lock on the page, assuming we need to sleep to get it
  * @page: the page to lock
  */
 void __lock_page(struct page *page)
 {
-	struct page *page_head = compound_head(page);
-	DEFINE_WAIT_BIT(wait, &page_head->flags, PG_locked);
-
-	__wait_on_bit_lock(page_waitqueue(page_head), &wait, bit_wait_io,
-							TASK_UNINTERRUPTIBLE);
+	__lock_page_impl(page, TASK_UNINTERRUPTIBLE);
 }
 EXPORT_SYMBOL(__lock_page);
 
 int __lock_page_killable(struct page *page)
 {
-	struct page *page_head = compound_head(page);
-	DEFINE_WAIT_BIT(wait, &page_head->flags, PG_locked);
-
-	return __wait_on_bit_lock(page_waitqueue(page_head), &wait,
-					bit_wait_io, TASK_KILLABLE);
+	return __lock_page_impl(page, TASK_KILLABLE);
 }
 EXPORT_SYMBOL_GPL(__lock_page_killable);
 
diff --git a/mm/ksm.c b/mm/ksm.c
index 4786b41..20ca878 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1880,6 +1880,7 @@ struct page *ksm_might_need_to_copy(struct page *page,
 		SetPageDirty(new_page);
 		__SetPageUptodate(new_page);
 		__SetPageLocked(new_page);
+		set_page_lock_owner(new_page, current);
 	}
 
 	return new_page;
diff --git a/mm/migrate.c b/mm/migrate.c
index bd3fdc2..50e5bc1 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1794,6 +1794,7 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
 
 	/* Prepare a page as a migration target */
 	__SetPageLocked(new_page);
+	set_page_lock_owner(new_page, current);
 	__SetPageSwapBacked(new_page);
 
 	/* anon mapping, we can simply copy page->mapping to the new page: */
diff --git a/mm/shmem.c b/mm/shmem.c
index 171dee7..0af6bf7 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1021,6 +1021,7 @@ static struct page *shmem_alloc_page(gfp_t gfp,
 	page = alloc_pages_vma(gfp, 0, &pvma, 0, numa_node_id(), false);
 	if (page) {
 		__SetPageLocked(page);
+		set_page_lock_owner(page, current);
 		__SetPageSwapBacked(page);
 	}
 
diff --git a/mm/swap_state.c b/mm/swap_state.c
index c99463a..8522a8c 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -361,6 +361,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 
 		/* May fail (-ENOMEM) if radix-tree node allocation failed. */
 		__SetPageLocked(new_page);
+		set_page_lock_owner(new_page, current);
 		__SetPageSwapBacked(new_page);
 		err = __add_to_swap_cache(new_page, entry);
 		if (likely(!err)) {
@@ -373,6 +374,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 			return new_page;
 		}
 		radix_tree_preload_end();
+		set_page_lock_owner(new_page, NULL);
 		__ClearPageLocked(new_page);
 		/*
 		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
diff --git a/mm/vmscan.c b/mm/vmscan.c
index c4a2f45..67d7496 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1190,6 +1190,7 @@ lazyfree:
 		 * we obviously don't have to worry about waking up a process
 		 * waiting on the page lock, because there are no references.
 		 */
+		set_page_lock_owner(page, NULL);
 		__ClearPageLocked(page);
 free_it:
 		if (ret == SWAP_LZFREE)
--
2.9.2
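
For reference, a minimal sketch (not part of the patch above) of how other
debug code could consult the new owner field; dump_page_lock_owner() is a
hypothetical helper, and reading owner->comm without taking a reference is
racy, which is assumed to be acceptable for diagnostics:

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/printk.h>
#include <linux/sched.h>

/* Hypothetical debug-only helper: report which task holds a page's lock. */
static void dump_page_lock_owner(struct page *page)
{
	struct page *head = compound_head(page);
	struct task_struct *owner = get_page_lock_owner(head);

	if (!PageLocked(head))
		return;

	pr_info("page %p locked by %s (pid %d)\n",
		page, owner ? owner->comm : "<unknown>",
		owner ? task_pid_nr(owner) : 0);
}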


[Attachment: 0001-do_generic_file_read-Fail-immediately-if-killed.patch]