Re: [PATCH 5/5] mm:swap: skip swapcache for swapin of synchronous device

From: kbuild test robot
Date: Tue Sep 12 2017 - 16:23:38 EST


Hi Minchan,

[auto build test ERROR on mmotm/master]
[also build test ERROR on next-20170912]
[cannot apply to linus/master v4.13]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system]

url: https://github.com/0day-ci/linux/commits/Minchan-Kim/zram-set-BDI_CAP_STABLE_WRITES-once/20170913-025838
base: git://git.cmpxchg.org/linux-mmotm.git master
config: x86_64-randconfig-x016-201737 (attached as .config)
compiler: gcc-6 (Debian 6.2.0-3) 6.2.0 20160901
reproduce:
        # save the attached .config to linux build tree
        make ARCH=x86_64

All errors/warnings (new ones prefixed by >>):

mm/memory.c: In function 'do_swap_page':
>> mm/memory.c:2891:33: error: implicit declaration of function 'swp_swap_info' [-Werror=implicit-function-declaration]
                struct swap_info_struct *si = swp_swap_info(entry);
                                              ^~~~~~~~~~~~~
>> mm/memory.c:2891:33: warning: initialization makes pointer from integer without a cast [-Wint-conversion]
>> mm/memory.c:2908:4: error: implicit declaration of function 'swap_readpage' [-Werror=implicit-function-declaration]
                        swap_readpage(page, true);
                        ^~~~~~~~~~~~~
In file included from include/uapi/linux/stddef.h:1:0,
                 from include/linux/stddef.h:4,
                 from include/uapi/linux/posix_types.h:4,
                 from include/uapi/linux/types.h:13,
                 from include/linux/types.h:5,
                 from include/linux/smp.h:10,
                 from include/linux/kernel_stat.h:4,
                 from mm/memory.c:41:
mm/memory.c: At top level:
include/linux/compiler.h:162:4: warning: '______f' is static but declared in inline function 'memcpy_and_pad' which is not static
                        ______f = {                                     \
                        ^
include/linux/compiler.h:154:23: note: in expansion of macro '__trace_if'
#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
                      ^~~~~~~~~~
include/linux/string.h:451:2: note: in expansion of macro 'if'
        if (dest_len > count) {
        ^~
include/linux/compiler.h:162:4: warning: '______f' is static but declared in inline function 'memcpy_and_pad' which is not static
                        ______f = {                                     \
                        ^
include/linux/compiler.h:154:23: note: in expansion of macro '__trace_if'
#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
                      ^~~~~~~~~~
include/linux/string.h:449:2: note: in expansion of macro 'if'
        if (dest_size < dest_len)
        ^~
include/linux/compiler.h:162:4: warning: '______f' is static but declared in inline function 'memcpy_and_pad' which is not static
                        ______f = {                                     \
                        ^
include/linux/compiler.h:154:23: note: in expansion of macro '__trace_if'
#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
                      ^~~~~~~~~~
include/linux/string.h:446:8: note: in expansion of macro 'if'
                else if (src_size < dest_len && src_size < count)
                     ^~
include/linux/compiler.h:162:4: warning: '______f' is static but declared in inline function 'memcpy_and_pad' which is not static
                        ______f = {                                     \
                        ^
include/linux/compiler.h:154:23: note: in expansion of macro '__trace_if'
#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
                      ^~~~~~~~~~
include/linux/string.h:444:3: note: in expansion of macro 'if'
                if (dest_size < dest_len && dest_size < count)
                ^~
include/linux/compiler.h:162:4: warning: '______f' is static but declared in inline function 'memcpy_and_pad' which is not static
                        ______f = {                                     \
                        ^
include/linux/compiler.h:154:23: note: in expansion of macro '__trace_if'
#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
                      ^~~~~~~~~~
include/linux/string.h:443:2: note: in expansion of macro 'if'
        if (__builtin_constant_p(dest_len) && __builtin_constant_p(count)) {
        ^~
cc1: some warnings being treated as errors
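
For what it's worth, the '______f' warnings above look unrelated to this series. This randconfig enables CONFIG_PROFILE_ALL_BRANCHES (that is what redefines if() via __trace_if() in include/linux/compiler.h), which plants a static struct ftrace_branch_data named ______f inside every if(); memcpy_and_pad() in include/linux/string.h is declared plain `inline`, i.e. with external linkage, and C99 (6.7.4p3) does not allow such an inline definition to contain a modifiable object with static storage duration. A minimal sketch of the pattern gcc is objecting to, with made-up names for illustration:

/* sketch.c - illustrative only; try: gcc -std=gnu99 -c sketch.c
 * A non-static inline function that defines a static object draws the
 * same "... is static but declared in inline function ... which is
 * not static" diagnostic seen in the log above.
 */
inline int count_calls(void)
{
        static int n;   /* the offending static-storage object */

        return ++n;
}

Declaring such helpers `static inline` gives them internal linkage and avoids the warning, which is presumably the eventual fix for memcpy_and_pad() as well.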

vim +/swp_swap_info +2891 mm/memory.c

  2833  
  2834  /*
  2835   * We enter with non-exclusive mmap_sem (to exclude vma changes,
  2836   * but allow concurrent faults), and pte mapped but not yet locked.
  2837   * We return with pte unmapped and unlocked.
  2838   *
  2839   * We return with the mmap_sem locked or unlocked in the same cases
  2840   * as does filemap_fault().
  2841   */
  2842  int do_swap_page(struct vm_fault *vmf)
  2843  {
  2844          struct vm_area_struct *vma = vmf->vma;
  2845          struct page *page = NULL, *swapcache = NULL;
  2846          struct mem_cgroup *memcg;
  2847          struct vma_swap_readahead swap_ra;
  2848          swp_entry_t entry;
  2849          pte_t pte;
  2850          int locked;
  2851          int exclusive = 0;
  2852          int ret = 0;
  2853          bool vma_readahead = swap_use_vma_readahead();
  2854  
  2855          if (vma_readahead)
  2856                  page = swap_readahead_detect(vmf, &swap_ra);
  2857          if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, vmf->orig_pte)) {
  2858                  if (page)
  2859                          put_page(page);
  2860                  goto out;
  2861          }
  2862  
  2863          entry = pte_to_swp_entry(vmf->orig_pte);
  2864          if (unlikely(non_swap_entry(entry))) {
  2865                  if (is_migration_entry(entry)) {
  2866                          migration_entry_wait(vma->vm_mm, vmf->pmd,
  2867                                               vmf->address);
  2868                  } else if (is_device_private_entry(entry)) {
  2869                          /*
  2870                           * For un-addressable device memory we call the pgmap
  2871                           * fault handler callback. The callback must migrate
  2872                           * the page back to some CPU accessible page.
  2873                           */
  2874                          ret = device_private_entry_fault(vma, vmf->address, entry,
  2875                                                           vmf->flags, vmf->pmd);
  2876                  } else if (is_hwpoison_entry(entry)) {
  2877                          ret = VM_FAULT_HWPOISON;
  2878                  } else {
  2879                          print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL);
  2880                          ret = VM_FAULT_SIGBUS;
  2881                  }
  2882                  goto out;
  2883          }
  2884  
  2885  
  2886          delayacct_set_flag(DELAYACCT_PF_SWAPIN);
  2887          if (!page)
  2888                  page = lookup_swap_cache(entry, vma_readahead ? vma : NULL,
  2889                                           vmf->address);
  2890          if (!page) {
> 2891                  struct swap_info_struct *si = swp_swap_info(entry);
  2892  
  2893                  if (!(si->flags & SWP_SYNCHRONOUS_IO)) {
  2894                          if (vma_readahead)
  2895                                  page = do_swap_page_readahead(entry,
  2896                                          GFP_HIGHUSER_MOVABLE, vmf, &swap_ra);
  2897                          else
  2898                                  page = swapin_readahead(entry,
  2899                                          GFP_HIGHUSER_MOVABLE, vma, vmf->address);
  2900                          swapcache = page;
  2901                  } else {
  2902                          /* skip swapcache */
  2903                          page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address);
  2904                          __SetPageLocked(page);
  2905                          __SetPageSwapBacked(page);
  2906                          set_page_private(page, entry.val);
  2907                          lru_cache_add_anon(page);
> 2908                          swap_readpage(page, true);
  2909                  }
  2910  
  2911                  if (!page) {
  2912                          /*
  2913                           * Back out if somebody else faulted in this pte
  2914                           * while we released the pte lock.
  2915                           */
  2916                          vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
  2917                                          vmf->address, &vmf->ptl);
  2918                          if (likely(pte_same(*vmf->pte, vmf->orig_pte)))
  2919                                  ret = VM_FAULT_OOM;
  2920                          delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
  2921                          goto unlock;
  2922                  }
  2923  
  2924                  /* Had to read the page from swap area: Major fault */
  2925                  ret = VM_FAULT_MAJOR;
  2926                  count_vm_event(PGMAJFAULT);
  2927                  count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
  2928          } else if (PageHWPoison(page)) {
  2929                  /*
  2930                   * hwpoisoned dirty swapcache pages are kept for killing
  2931                   * owner processes (which may be unknown at hwpoison time)
  2932                   */
  2933                  ret = VM_FAULT_HWPOISON;
  2934                  delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
  2935                  swapcache = page;
  2936                  goto out_release;
  2937          }
  2938  
  2939          locked = lock_page_or_retry(page, vma->vm_mm, vmf->flags);
  2940  
  2941          delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
  2942          if (!locked) {
  2943                  ret |= VM_FAULT_RETRY;
  2944                  goto out_release;
  2945          }
  2946  
  2947          /*
  2948           * Make sure try_to_free_swap or reuse_swap_page or swapoff did not
  2949           * release the swapcache from under us. The page pin, and pte_same
  2950           * test below, are not enough to exclude that. Even if it is still
  2951           * swapcache, we need to check that the page's swap has not changed.
  2952           */
  2953          if (unlikely((!PageSwapCache(page) ||
  2954                          page_private(page) != entry.val)) && swapcache)
  2955                  goto out_page;
  2956  
  2957          page = ksm_might_need_to_copy(page, vma, vmf->address);
  2958          if (unlikely(!page)) {
  2959                  ret = VM_FAULT_OOM;
  2960                  page = swapcache;
  2961                  goto out_page;
  2962          }
  2963  
  2964          if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL,
  2965                                  &memcg, false)) {
  2966                  ret = VM_FAULT_OOM;
  2967                  goto out_page;
  2968          }
  2969  
  2970          /*
  2971           * Back out if somebody else already faulted in this pte.
  2972           */
  2973          vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
  2974                          &vmf->ptl);
  2975          if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte)))
  2976                  goto out_nomap;
  2977  
  2978          if (unlikely(!PageUptodate(page))) {
  2979                  ret = VM_FAULT_SIGBUS;
  2980                  goto out_nomap;
  2981          }
  2982  
  2983          /*
  2984           * The page isn't present yet, go ahead with the fault.
  2985           *
  2986           * Be careful about the sequence of operations here.
  2987           * To get its accounting right, reuse_swap_page() must be called
  2988           * while the page is counted on swap but not yet in mapcount i.e.
  2989           * before page_add_anon_rmap() and swap_free(); try_to_free_swap()
  2990           * must be called after the swap_free(), or it will never succeed.
  2991           */
  2992  
  2993          inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
  2994          dec_mm_counter_fast(vma->vm_mm, MM_SWAPENTS);
  2995          pte = mk_pte(page, vma->vm_page_prot);
  2996          if ((vmf->flags & FAULT_FLAG_WRITE) && reuse_swap_page(page, NULL)) {
  2997                  pte = maybe_mkwrite(pte_mkdirty(pte), vma);
  2998                  vmf->flags &= ~FAULT_FLAG_WRITE;
  2999                  ret |= VM_FAULT_WRITE;
  3000                  exclusive = RMAP_EXCLUSIVE;
  3001          }
  3002          flush_icache_page(vma, page);
  3003          if (pte_swp_soft_dirty(vmf->orig_pte))
  3004                  pte = pte_mksoft_dirty(pte);
  3005          set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
  3006          vmf->orig_pte = pte;
  3007  
  3008          /* ksm created a completely new copy */
  3009          if (unlikely(page != swapcache && swapcache)) {
  3010                  page_add_new_anon_rmap(page, vma, vmf->address, false);
  3011                  mem_cgroup_commit_charge(page, memcg, false, false);
  3012                  lru_cache_add_active_or_unevictable(page, vma);
  3013          } else {
  3014                  do_page_add_anon_rmap(page, vma, vmf->address, exclusive);
  3015                  mem_cgroup_commit_charge(page, memcg, true, false);
  3016                  activate_page(page);
  3017          }
  3018  
  3019          swap_free(entry);
  3020          if (mem_cgroup_swap_full(page) ||
  3021              (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
  3022                  try_to_free_swap(page);
  3023          unlock_page(page);
  3024          if (page != swapcache && swapcache) {
  3025                  /*
  3026                   * Hold the lock to avoid the swap entry to be reused
  3027                   * until we take the PT lock for the pte_same() check
  3028                   * (to avoid false positives from pte_same). For
  3029                   * further safety release the lock after the swap_free
  3030                   * so that the swap count won't change under a
  3031                   * parallel locked swapcache.
  3032                   */
  3033                  unlock_page(swapcache);
  3034                  put_page(swapcache);
  3035          }
  3036  
  3037          if (vmf->flags & FAULT_FLAG_WRITE) {
  3038                  ret |= do_wp_page(vmf);
  3039                  if (ret & VM_FAULT_ERROR)
  3040                          ret &= VM_FAULT_ERROR;
  3041                  goto out;
  3042          }
  3043  
  3044          /* No need to invalidate - it was non-present before */
  3045          update_mmu_cache(vma, vmf->address, vmf->pte);
  3046  unlock:
  3047          pte_unmap_unlock(vmf->pte, vmf->ptl);
  3048  out:
  3049          return ret;
  3050  out_nomap:
  3051          mem_cgroup_cancel_charge(page, memcg, false);
  3052          pte_unmap_unlock(vmf->pte, vmf->ptl);
  3053  out_page:
  3054          unlock_page(page);
  3055  out_release:
  3056          put_page(page);
  3057          if (page != swapcache) {
  3058                  unlock_page(swapcache);
  3059                  put_page(swapcache);
  3060          }
  3061          return ret;
  3062  }
  3063  
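
Both errors point the same way: do_swap_page() now calls swp_swap_info() and swap_readpage() unconditionally (lines 2891 and 2908 above), but those are declared only in the CONFIG_SWAP half of include/linux/swap.h; the attached randconfig presumably has CONFIG_SWAP disabled (the .config would confirm). The -Wint-conversion warning at 2891:33 is just fallout from the first error: an implicitly declared function defaults to returning int, which is then used to initialize the struct swap_info_struct pointer. A sketch of one possible fix, guessing at what the author may do rather than quoting an actual patch: add no-op stubs to the !CONFIG_SWAP branch of include/linux/swap.h, next to the existing stubs for lookup_swap_cache(), swapin_readahead() and friends.

/* Sketch for the !CONFIG_SWAP branch of include/linux/swap.h.
 * With CONFIG_SWAP=n these paths are unreachable (no valid swap
 * entries can exist), so inert stubs are enough to let
 * do_swap_page() compile.
 */
static inline int swap_readpage(struct page *page, bool do_poll)
{
        return 0;
}

static inline struct swap_info_struct *swp_swap_info(swp_entry_t entry)
{
        return NULL;
}

The NULL return relies on the assumption above that the si->flags test at line 2893 cannot be reached when swap is compiled out.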

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation

Attachment: .config.gz
Description: application/gzip