mm/gup.c:559:18: sparse: sparse: cast to non-scalar

From: kernel test robot
Date: Sun Jan 31 2021 - 21:30:13 EST


tree: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git master
head: 1048ba83fb1c00cd24172e23e8263972f6b5d9ac
commit: dee081bf8f824cabeb7c7495367d5dad0a444eb1 READ_ONCE: Drop pointer qualifiers when reading from scalar types
date: 10 months ago
config: alpha-randconfig-s031-20210201 (attached as .config)
compiler: alpha-linux-gcc (GCC) 9.3.0
reproduce:
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # apt-get install sparse
        # sparse version: v0.6.3-215-g0fb77bb6-dirty
        # https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=dee081bf8f824cabeb7c7495367d5dad0a444eb1
        git remote add linus https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
        git fetch --no-tags linus master
        git checkout dee081bf8f824cabeb7c7495367d5dad0a444eb1
        # save the attached .config to linux build tree
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-9.3.0 make.cross C=1 CF='-fdiagnostic-prefix -D__CHECK_ENDIAN__' ARCH=alpha

If you fix the issue, kindly add the following tag as appropriate:
Reported-by: kernel test robot <lkp@xxxxxxxxx>


"sparse warnings: (new ones prefixed by >>)"
>> mm/gup.c:559:18: sparse: sparse: cast to non-scalar
>> mm/gup.c:559:18: sparse: sparse: cast from non-scalar
mm/gup.c:584:26: sparse: sparse: cast to non-scalar
mm/gup.c:584:26: sparse: sparse: cast from non-scalar
--
>> mm/page_vma_mapped.c:186:16: sparse: sparse: cast to non-scalar
>> mm/page_vma_mapped.c:186:16: sparse: sparse: cast from non-scalar
mm/page_vma_mapped.c:16:13: sparse: sparse: context imbalance in 'map_pte' - wrong count at exit
mm/page_vma_mapped.c: note: in included file:
include/linux/rmap.h:220:28: sparse: sparse: context imbalance in 'page_vma_mapped_walk' - unexpected unlock
include/linux/rmap.h:220:28: sparse: sparse: context imbalance in 'page_mapped_in_vma' - unexpected unlock
--
>> mm/migrate.c:236:33: sparse: sparse: cast to non-scalar
>> mm/migrate.c:236:33: sparse: sparse: cast from non-scalar
mm/migrate.c:824:9: sparse: sparse: context imbalance in '__buffer_migrate_page' - different lock contexts for basic block

vim +559 mm/gup.c

69e68b4f03135d Kirill A. Shutemov 2014-06-04  543  
080dbb618b4bc2 Aneesh Kumar K.V   2017-07-06  544  static struct page *follow_pmd_mask(struct vm_area_struct *vma,
080dbb618b4bc2 Aneesh Kumar K.V   2017-07-06  545  				    unsigned long address, pud_t *pudp,
df06b37ffe5a44 Keith Busch        2018-10-26  546  				    unsigned int flags,
df06b37ffe5a44 Keith Busch        2018-10-26  547  				    struct follow_page_context *ctx)
69e68b4f03135d Kirill A. Shutemov 2014-06-04  548  {
688272809fcce5 Huang Ying         2018-06-07  549  	pmd_t *pmd, pmdval;
69e68b4f03135d Kirill A. Shutemov 2014-06-04  550  	spinlock_t *ptl;
69e68b4f03135d Kirill A. Shutemov 2014-06-04  551  	struct page *page;
69e68b4f03135d Kirill A. Shutemov 2014-06-04  552  	struct mm_struct *mm = vma->vm_mm;
69e68b4f03135d Kirill A. Shutemov 2014-06-04  553  
080dbb618b4bc2 Aneesh Kumar K.V   2017-07-06  554  	pmd = pmd_offset(pudp, address);
688272809fcce5 Huang Ying         2018-06-07  555  	/*
688272809fcce5 Huang Ying         2018-06-07  556  	 * The READ_ONCE() will stabilize the pmdval in a register or
688272809fcce5 Huang Ying         2018-06-07  557  	 * on the stack so that it will stop changing under the code.
688272809fcce5 Huang Ying         2018-06-07  558  	 */
688272809fcce5 Huang Ying         2018-06-07 @559  	pmdval = READ_ONCE(*pmd);
688272809fcce5 Huang Ying         2018-06-07  560  	if (pmd_none(pmdval))
69e68b4f03135d Kirill A. Shutemov 2014-06-04  561  		return no_page_table(vma, flags);
be9d30458913f7 Wei Yang           2020-01-30  562  	if (pmd_huge(pmdval) && is_vm_hugetlb_page(vma)) {
e66f17ff71772b Naoya Horiguchi    2015-02-11  563  		page = follow_huge_pmd(mm, address, pmd, flags);
e66f17ff71772b Naoya Horiguchi    2015-02-11  564  		if (page)
4bbd4c776a63a0 Kirill A. Shutemov 2014-06-04  565  			return page;
e66f17ff71772b Naoya Horiguchi    2015-02-11  566  		return no_page_table(vma, flags);
4bbd4c776a63a0 Kirill A. Shutemov 2014-06-04  567  	}
688272809fcce5 Huang Ying         2018-06-07  568  	if (is_hugepd(__hugepd(pmd_val(pmdval)))) {
4dc71451a2078e Aneesh Kumar K.V   2017-07-06  569  		page = follow_huge_pd(vma, address,
688272809fcce5 Huang Ying         2018-06-07  570  				      __hugepd(pmd_val(pmdval)), flags,
4dc71451a2078e Aneesh Kumar K.V   2017-07-06  571  				      PMD_SHIFT);
4dc71451a2078e Aneesh Kumar K.V   2017-07-06  572  		if (page)
4dc71451a2078e Aneesh Kumar K.V   2017-07-06  573  			return page;
4dc71451a2078e Aneesh Kumar K.V   2017-07-06  574  		return no_page_table(vma, flags);
4dc71451a2078e Aneesh Kumar K.V   2017-07-06  575  	}
84c3fc4e9c563d Zi Yan             2017-09-08  576  retry:
688272809fcce5 Huang Ying         2018-06-07  577  	if (!pmd_present(pmdval)) {
84c3fc4e9c563d Zi Yan             2017-09-08  578  		if (likely(!(flags & FOLL_MIGRATION)))
84c3fc4e9c563d Zi Yan             2017-09-08  579  			return no_page_table(vma, flags);
84c3fc4e9c563d Zi Yan             2017-09-08  580  		VM_BUG_ON(thp_migration_supported() &&
688272809fcce5 Huang Ying         2018-06-07  581  			  !is_pmd_migration_entry(pmdval));
688272809fcce5 Huang Ying         2018-06-07  582  		if (is_pmd_migration_entry(pmdval))
84c3fc4e9c563d Zi Yan             2017-09-08  583  			pmd_migration_entry_wait(mm, pmd);
688272809fcce5 Huang Ying         2018-06-07  584  		pmdval = READ_ONCE(*pmd);
688272809fcce5 Huang Ying         2018-06-07  585  		/*
688272809fcce5 Huang Ying         2018-06-07  586  		 * MADV_DONTNEED may convert the pmd to null because
688272809fcce5 Huang Ying         2018-06-07  587  		 * mmap_sem is held in read mode
688272809fcce5 Huang Ying         2018-06-07  588  		 */
688272809fcce5 Huang Ying         2018-06-07  589  		if (pmd_none(pmdval))
688272809fcce5 Huang Ying         2018-06-07  590  			return no_page_table(vma, flags);
84c3fc4e9c563d Zi Yan             2017-09-08  591  		goto retry;
84c3fc4e9c563d Zi Yan             2017-09-08  592  	}
688272809fcce5 Huang Ying         2018-06-07  593  	if (pmd_devmap(pmdval)) {
3565fce3a6597e Dan Williams       2016-01-15  594  		ptl = pmd_lock(mm, pmd);
df06b37ffe5a44 Keith Busch        2018-10-26  595  		page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap);
3565fce3a6597e Dan Williams       2016-01-15  596  		spin_unlock(ptl);
3565fce3a6597e Dan Williams       2016-01-15  597  		if (page)
3565fce3a6597e Dan Williams       2016-01-15  598  			return page;
3565fce3a6597e Dan Williams       2016-01-15  599  	}
688272809fcce5 Huang Ying         2018-06-07  600  	if (likely(!pmd_trans_huge(pmdval)))
df06b37ffe5a44 Keith Busch        2018-10-26  601  		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
6742d293cbe01d Kirill A. Shutemov 2016-01-15  602  
688272809fcce5 Huang Ying         2018-06-07  603  	if ((flags & FOLL_NUMA) && pmd_protnone(pmdval))
db08f2030a173f Aneesh Kumar K.V   2017-02-24  604  		return no_page_table(vma, flags);
db08f2030a173f Aneesh Kumar K.V   2017-02-24  605  
84c3fc4e9c563d Zi Yan             2017-09-08  606  retry_locked:
69e68b4f03135d Kirill A. Shutemov 2014-06-04  607  	ptl = pmd_lock(mm, pmd);
688272809fcce5 Huang Ying         2018-06-07  608  	if (unlikely(pmd_none(*pmd))) {
688272809fcce5 Huang Ying         2018-06-07  609  		spin_unlock(ptl);
688272809fcce5 Huang Ying         2018-06-07  610  		return no_page_table(vma, flags);
688272809fcce5 Huang Ying         2018-06-07  611  	}
84c3fc4e9c563d Zi Yan             2017-09-08  612  	if (unlikely(!pmd_present(*pmd))) {
84c3fc4e9c563d Zi Yan             2017-09-08  613  		spin_unlock(ptl);
84c3fc4e9c563d Zi Yan             2017-09-08  614  		if (likely(!(flags & FOLL_MIGRATION)))
84c3fc4e9c563d Zi Yan             2017-09-08  615  			return no_page_table(vma, flags);
84c3fc4e9c563d Zi Yan             2017-09-08  616  		pmd_migration_entry_wait(mm, pmd);
84c3fc4e9c563d Zi Yan             2017-09-08  617  		goto retry_locked;
84c3fc4e9c563d Zi Yan             2017-09-08  618  	}
6742d293cbe01d Kirill A. Shutemov 2016-01-15  619  	if (unlikely(!pmd_trans_huge(*pmd))) {
6742d293cbe01d Kirill A. Shutemov 2016-01-15  620  		spin_unlock(ptl);
df06b37ffe5a44 Keith Busch        2018-10-26  621  		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
6742d293cbe01d Kirill A. Shutemov 2016-01-15  622  	}
bfe7b00de6d1e2 Song Liu           2019-09-23  623  	if (flags & (FOLL_SPLIT | FOLL_SPLIT_PMD)) {
6742d293cbe01d Kirill A. Shutemov 2016-01-15  624  		int ret;
6742d293cbe01d Kirill A. Shutemov 2016-01-15  625  		page = pmd_page(*pmd);
6742d293cbe01d Kirill A. Shutemov 2016-01-15  626  		if (is_huge_zero_page(page)) {
6742d293cbe01d Kirill A. Shutemov 2016-01-15  627  			spin_unlock(ptl);
6742d293cbe01d Kirill A. Shutemov 2016-01-15  628  			ret = 0;
78ddc534734190 Kirill A. Shutemov 2016-01-15  629  			split_huge_pmd(vma, pmd, address);
337d9abf1cd1a5 Naoya Horiguchi    2016-07-26  630  			if (pmd_trans_unstable(pmd))
337d9abf1cd1a5 Naoya Horiguchi    2016-07-26  631  				ret = -EBUSY;
bfe7b00de6d1e2 Song Liu           2019-09-23  632  		} else if (flags & FOLL_SPLIT) {
8fde12ca79aff9 Linus Torvalds     2019-04-11  633  			if (unlikely(!try_get_page(page))) {
8fde12ca79aff9 Linus Torvalds     2019-04-11  634  				spin_unlock(ptl);
8fde12ca79aff9 Linus Torvalds     2019-04-11  635  				return ERR_PTR(-ENOMEM);
8fde12ca79aff9 Linus Torvalds     2019-04-11  636  			}
69e68b4f03135d Kirill A. Shutemov 2014-06-04  637  			spin_unlock(ptl);
6742d293cbe01d Kirill A. Shutemov 2016-01-15  638  			lock_page(page);
6742d293cbe01d Kirill A. Shutemov 2016-01-15  639  			ret = split_huge_page(page);
6742d293cbe01d Kirill A. Shutemov 2016-01-15  640  			unlock_page(page);
6742d293cbe01d Kirill A. Shutemov 2016-01-15  641  			put_page(page);
baa355fd331424 Kirill A. Shutemov 2016-07-26  642  			if (pmd_none(*pmd))
baa355fd331424 Kirill A. Shutemov 2016-07-26  643  				return no_page_table(vma, flags);
bfe7b00de6d1e2 Song Liu           2019-09-23  644  		} else {	/* flags & FOLL_SPLIT_PMD */
bfe7b00de6d1e2 Song Liu           2019-09-23  645  			spin_unlock(ptl);
bfe7b00de6d1e2 Song Liu           2019-09-23  646  			split_huge_pmd(vma, pmd, address);
bfe7b00de6d1e2 Song Liu           2019-09-23  647  			ret = pte_alloc(mm, pmd) ? -ENOMEM : 0;
69e68b4f03135d Kirill A. Shutemov 2014-06-04  648  		}
6742d293cbe01d Kirill A. Shutemov 2016-01-15  649  
6742d293cbe01d Kirill A. Shutemov 2016-01-15  650  		return ret ? ERR_PTR(ret) :
df06b37ffe5a44 Keith Busch        2018-10-26  651  			follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
69e68b4f03135d Kirill A. Shutemov 2014-06-04  652  	}
6742d293cbe01d Kirill A. Shutemov 2016-01-15  653  	page = follow_trans_huge_pmd(vma, address, pmd, flags);
6742d293cbe01d Kirill A. Shutemov 2016-01-15  654  	spin_unlock(ptl);
df06b37ffe5a44 Keith Busch        2018-10-26  655  	ctx->page_mask = HPAGE_PMD_NR - 1;
6742d293cbe01d Kirill A. Shutemov 2016-01-15  656  	return page;
69e68b4f03135d Kirill A. Shutemov 2014-06-04  657  }
4bbd4c776a63a0 Kirill A. Shutemov 2014-06-04  658  

:::::: The code at line 559 was first introduced by commit
:::::: 688272809fcce5b17fcefd5892b59f3788efb144 mm, gup: prevent pmd checking race in follow_pmd_mask()

:::::: TO: Huang Ying <ying.huang@xxxxxxxxx>
:::::: CC: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
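
Both new warnings point at the READ_ONCE(*pmd) load on line 559. One plausible
reading, offered as an assumption rather than a diagnosis verified against this
.config: on this alpha randconfig pmd_t is a structure wrapper rather than a
bare integer, and the READ_ONCE() rework that this report bisects to appears to
end its expansion by casting the loaded value back to typeof(x). For a structure
type such as pmd_t that final cast is an identity cast: gcc accepts it as a
no-op, but sparse reports it as both "cast to non-scalar" and "cast from
non-scalar". A reduced sketch of that pattern (read_once_sketch() and load_pmd()
are made-up names, not the kernel macros):

/* pmd_t as a struct wrapper is an assumption about this configuration. */
typedef struct { unsigned long pmd; } pmd_t;

#define read_once_sketch(x)						\
({									\
	/* volatile load, roughly what READ_ONCE() does */		\
	typeof(x) __x = *(const volatile typeof(x) *)&(x);		\
	/* the cast back to typeof(x) is an identity cast here; */	\
	/* sparse flags it as "cast to/from non-scalar" for structs */	\
	(typeof(x))__x;							\
})

pmd_t load_pmd(pmd_t *pmdp)
{
	return read_once_sketch(*pmdp);
}

If this reading is correct, running sparse directly on the sketch
(sparse sketch.c) should show the same pair of warnings at the (typeof(x))__x
cast, while gcc accepts the no-op cast to the operand's own type.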

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@xxxxxxxxxxxx

Attachment: .config.gz
Description: application/gzip