Linux 3.16.57

From: Ben Hutchings
Date: Sun Jun 17 2018 - 09:42:09 EST


I'm announcing the release of the 3.16.57 kernel.

All users of the 3.16 kernel series should upgrade.

The updated 3.16.y git tree can be found at:
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git linux-3.16.y
and can be browsed through the normal kernel.org git web interface:
https://git.kernel.org/?p=linux/kernel/git/stable/linux-stable.git
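
For example, with an existing clone of the linux-stable repository, the
release can be fetched and checked out with something like:

  git fetch https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git linux-3.16.y
  git checkout -b linux-3.16.y FETCH_HEAD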

The diff from 3.16.56 is attached to this message.
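
For example, if the attached diff is saved as patch-3.16.56-57 (the file
and directory names here are only illustrative), it can be applied on top
of a 3.16.56 source tree with:

  cd linux-3.16.56
  patch -p1 < ../patch-3.16.56-57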

Ben.

------------

Documentation/device-mapper/thin-provisioning.txt | 8 +-
Documentation/devicetree/bindings/dma/snps-dma.txt | 2 +-
Documentation/filesystems/ext4.txt | 2 +-
Makefile | 2 +-
arch/alpha/kernel/pci_impl.h | 3 +-
arch/alpha/kernel/process.c | 3 +-
arch/arm/boot/dts/exynos4412-trats2.dts | 2 +-
arch/arm/boot/dts/spear1310-evb.dts | 2 +-
arch/arm/boot/dts/spear1340.dtsi | 4 +-
arch/arm/boot/dts/spear13xx.dtsi | 6 +-
arch/arm/boot/dts/spear600.dtsi | 1 +
arch/arm/kvm/handle_exit.c | 13 +-
arch/arm/mach-mvebu/Kconfig | 4 +-
arch/arm/xen/enlighten.c | 1 +
arch/arm64/kernel/process.c | 16 +-
arch/arm64/kernel/traps.c | 58 +-
arch/arm64/kvm/handle_exit.c | 9 +
arch/ia64/scripts/unwcheck.py | 16 +-
arch/mips/boot/compressed/Makefile | 6 +-
arch/mips/kernel/smp-bmips.c | 8 +-
arch/mips/kernel/traps.c | 15 +-
arch/mips/ralink/reset.c | 8 -
arch/mips/txx9/rbtx4939/setup.c | 4 +-
arch/mn10300/mm/misalignment.c | 2 +-
arch/openrisc/kernel/traps.c | 10 +-
arch/powerpc/include/asm/kvm_book3s.h | 6 +-
arch/powerpc/include/asm/topology.h | 8 +
arch/powerpc/kernel/entry_64.S | 10 +-
arch/powerpc/kvm/book3s_interrupts.S | 4 +-
arch/powerpc/kvm/book3s_pr.c | 20 +-
arch/powerpc/mm/numa.c | 5 -
arch/powerpc/platforms/pseries/hotplug-cpu.c | 2 +
arch/s390/kernel/compat_linux.c | 8 +-
arch/s390/kvm/kvm-s390.c | 1 +
arch/sh/kernel/traps_32.c | 3 +-
arch/sparc/crypto/crc32c_glue.c | 1 +
arch/x86/crypto/crc32-pclmul_glue.c | 1 +
arch/x86/crypto/crc32c-intel_glue.c | 1 +
arch/x86/include/asm/apm.h | 6 +
arch/x86/include/asm/cpufeature.h | 15 +-
arch/x86/include/asm/efi.h | 8 +
arch/x86/include/asm/intel-family.h | 11 +-
arch/x86/include/asm/kvm_host.h | 8 +-
arch/x86/include/asm/nospec-branch.h | 37 ++
arch/x86/include/asm/pgtable.h | 4 +-
arch/x86/include/asm/pgtable_types.h | 5 +
arch/x86/include/asm/vmx.h | 1 +
arch/x86/include/uapi/asm/mce.h | 4 +
arch/x86/include/uapi/asm/msr-index.h | 12 +
arch/x86/kernel/aperture_64.c | 46 +-
arch/x86/kernel/cpu/bugs.c | 19 +-
arch/x86/kernel/cpu/common.c | 75 ++-
arch/x86/kernel/cpu/intel.c | 71 +++
arch/x86/kernel/cpu/mcheck/mce.c | 26 +-
arch/x86/kernel/cpu/microcode/core.c | 2 +-
arch/x86/kernel/cpu/microcode/core_early.c | 29 +-
arch/x86/kernel/entry_64.S | 2 +-
arch/x86/kernel/traps.c | 27 +-
arch/x86/kvm/cpuid.c | 24 +-
arch/x86/kvm/cpuid.h | 31 ++
arch/x86/kvm/svm.c | 171 +++++-
arch/x86/kvm/vmx.c | 619 +++++++++++----------
arch/x86/kvm/x86.c | 111 ++--
arch/x86/mm/tlb.c | 19 +
arch/x86/oprofile/nmi_int.c | 2 +-
arch/x86/xen/mmu.c | 2 +-
arch/x86/xen/suspend.c | 24 +
arch/x86/xen/xen-head.S | 15 +
arch/xtensa/include/asm/futex.h | 23 +-
crypto/af_alg.c | 5 +
crypto/ahash.c | 33 +-
crypto/algif_hash.c | 54 +-
crypto/crc32.c | 1 +
crypto/crc32c_generic.c | 1 +
crypto/cryptd.c | 6 +-
crypto/shash.c | 25 +-
drivers/ata/ahci.c | 24 +-
drivers/ata/libata-core.c | 21 +-
drivers/ata/libata-eh.c | 3 +-
drivers/ata/libata-scsi.c | 4 +-
drivers/block/pktcdvd.c | 4 +-
drivers/block/rbd.c | 7 +-
drivers/cdrom/cdrom.c | 2 +-
drivers/char/tpm/tpm-interface.c | 4 +
drivers/char/tpm/tpm_i2c_infineon.c | 5 +-
drivers/char/tpm/tpm_i2c_nuvoton.c | 8 +-
drivers/char/tpm/tpm_tis.c | 5 +-
drivers/clocksource/fsl_ftm_timer.c | 2 +-
drivers/cpufreq/s3c24xx-cpufreq.c | 8 +-
drivers/crypto/bfin_crc.c | 3 +-
drivers/crypto/caam/ctrl.c | 8 +-
drivers/crypto/s5p-sss.c | 12 +-
drivers/devfreq/devfreq.c | 2 +-
drivers/edac/octeon_edac-lmc.c | 1 +
drivers/firmware/dmi_scan.c | 22 +-
drivers/gpu/drm/drm_edid.c | 3 +
drivers/gpu/drm/drm_probe_helper.c | 20 +
drivers/gpu/drm/nouveau/nouveau_connector.c | 18 +-
drivers/gpu/drm/radeon/cik.c | 31 +-
drivers/gpu/drm/radeon/radeon_connectors.c | 105 ++--
drivers/gpu/drm/radeon/radeon_device.c | 4 +
drivers/gpu/drm/radeon/radeon_gem.c | 2 -
drivers/gpu/drm/radeon/radeon_object.c | 2 +
drivers/gpu/drm/radeon/radeon_uvd.c | 2 +-
drivers/gpu/drm/radeon/si_dpm.c | 5 +
drivers/gpu/drm/ttm/ttm_bo.c | 3 +-
drivers/gpu/drm/udl/udl_fb.c | 9 +-
drivers/hid/hid-core.c | 3 +
drivers/hid/hid-ids.h | 4 +
drivers/hid/hid-roccat-kovaplus.c | 2 +
drivers/hid/usbhid/hid-quirks.c | 1 +
drivers/iio/imu/adis_trigger.c | 7 +-
drivers/iio/industrialio-buffer.c | 2 +-
drivers/infiniband/core/cma.c | 5 +-
drivers/infiniband/core/iwpm_util.c | 1 +
drivers/infiniband/core/ucma.c | 51 +-
drivers/infiniband/hw/mlx4/main.c | 13 +-
drivers/infiniband/hw/mlx5/cq.c | 7 +-
drivers/infiniband/hw/mlx5/qp.c | 5 +-
drivers/infiniband/hw/mlx5/srq.c | 15 +-
drivers/infiniband/ulp/ipoib/ipoib_fs.c | 2 -
drivers/input/keyboard/matrix_keypad.c | 4 +-
drivers/input/touchscreen/edt-ft5x06.c | 14 +-
drivers/input/touchscreen/mms114.c | 2 +-
drivers/md/bcache/super.c | 27 +-
drivers/md/raid10.c | 6 +-
drivers/media/pci/bt8xx/bt878.c | 3 +-
drivers/media/platform/exynos4-is/fimc-isp.c | 14 +-
drivers/media/usb/cpia2/cpia2_v4l.c | 4 +-
drivers/media/usb/dvb-usb-v2/lmedm04.c | 39 +-
drivers/media/usb/dvb-usb/cxusb.c | 2 +
drivers/media/usb/dvb-usb/dib0700_devices.c | 1 +
drivers/misc/lkdtm.c | 2 +-
drivers/mmc/card/block.c | 21 +
drivers/mmc/host/dw_mmc-exynos.c | 1 +
drivers/mmc/host/dw_mmc.c | 68 ++-
drivers/mmc/host/dw_mmc.h | 2 +
drivers/mmc/host/sdhci-pci.c | 27 +
drivers/mmc/host/sdhci.c | 7 +-
drivers/mmc/host/sdhci.h | 1 +
drivers/mtd/chips/jedec_probe.c | 2 +
drivers/mtd/nand/nand_base.c | 5 +-
drivers/mtd/ubi/vmt.c | 15 +-
drivers/mtd/ubi/wl.c | 8 +-
drivers/net/bonding/bond_main.c | 73 +--
drivers/net/can/cc770/cc770.c | 100 ++--
drivers/net/can/cc770/cc770.h | 2 +
drivers/net/ethernet/broadcom/bcmsysport.c | 33 +-
drivers/net/ethernet/broadcom/bcmsysport.h | 2 +-
drivers/net/ethernet/intel/e1000e/ich8lan.c | 2 +-
drivers/net/ethernet/intel/e1000e/mac.c | 2 +-
drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c | 23 +-
drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 27 +-
drivers/net/ethernet/mellanox/mlx4/en_main.c | 4 +-
drivers/net/ethernet/mellanox/mlx4/mr.c | 40 +-
drivers/net/ethernet/mellanox/mlx4/qp.c | 3 +
drivers/net/slip/slip.c | 4 +-
drivers/net/team/team.c | 4 +-
drivers/net/tun.c | 2 +-
drivers/net/wireless/ath/ath9k/htc_drv_main.c | 4 +
drivers/net/wireless/brcm80211/brcmfmac/p2p.c | 24 +-
drivers/net/wireless/ti/wl1251/main.c | 3 +-
drivers/pci/quirks.c | 2 +
drivers/pinctrl/core.c | 24 +-
drivers/platform/x86/apple-gmux.c | 48 +-
drivers/power/ab8500_charger.c | 6 +-
drivers/s390/net/qeth_core.h | 5 +
drivers/s390/net/qeth_core_main.c | 16 +-
drivers/s390/net/qeth_l2_main.c | 2 +-
drivers/s390/net/qeth_l3_main.c | 2 +-
drivers/scsi/aacraid/aachba.c | 22 +-
drivers/scsi/arm/fas216.c | 2 +-
drivers/scsi/ibmvscsi/ibmvfc.h | 2 +-
drivers/scsi/ipr.c | 3 +-
drivers/scsi/libsas/sas_ata.c | 3 +-
drivers/scsi/libsas/sas_expander.c | 3 +-
drivers/spi/spi-imx.c | 15 +-
drivers/spi/spi-sun6i.c | 2 +-
drivers/staging/android/ashmem.c | 32 +-
drivers/staging/android/binder.c | 14 +-
drivers/staging/iio/adc/ad7192.c | 29 +-
drivers/staging/iio/adc/ad7192.h | 2 +-
drivers/staging/iio/adc/ad7280a.c | 4 +-
.../lustre/libcfs/linux/linux-crypto-adler.c | 1 +
drivers/staging/lustre/lustre/libcfs/tracefile.c | 2 +-
drivers/staging/rts5208/ms.c | 3 +-
drivers/staging/usbip/stub_dev.c | 3 +
drivers/staging/usbip/userspace/src/usbip_bind.c | 9 +
drivers/staging/usbip/userspace/src/usbip_list.c | 9 +
drivers/staging/usbip/vhci_hcd.c | 2 +
drivers/tty/n_tty.c | 6 +
drivers/tty/serial/8250/8250_pci.c | 11 +
drivers/tty/serial/atmel_serial.c | 1 +
drivers/tty/serial/sh-sci.c | 2 +
drivers/tty/vt/vt.c | 8 +-
drivers/usb/class/cdc-acm.c | 5 +-
drivers/usb/core/message.c | 4 +
drivers/usb/core/quirks.c | 6 +-
drivers/usb/dwc3/gadget.c | 2 +
drivers/usb/gadget/f_fs.c | 9 +-
drivers/usb/host/ohci-q.c | 17 +-
drivers/usb/host/xhci-pci.c | 3 +
drivers/usb/host/xhci.c | 3 +
drivers/usb/host/xhci.h | 1 +
drivers/usb/misc/ldusb.c | 6 +
drivers/usb/mon/mon_text.c | 124 +++--
drivers/usb/serial/Kconfig | 3 +
drivers/usb/serial/io_edgeport.c | 1 -
drivers/usb/serial/option.c | 5 +
drivers/usb/serial/pl2303.c | 1 +
drivers/usb/serial/pl2303.h | 1 +
drivers/usb/serial/usb-serial-simple.c | 26 +-
drivers/usb/storage/uas.c | 22 +-
drivers/usb/storage/unusual_devs.h | 7 +
drivers/vhost/net.c | 1 +
drivers/video/console/dummycon.c | 1 -
drivers/video/fbdev/atmel_lcdfb.c | 10 +-
drivers/video/fbdev/sbuslib.c | 4 +-
drivers/xen/events/events_base.c | 4 +-
drivers/xen/manage.c | 9 +-
fs/aio.c | 134 +++--
fs/btrfs/backref.c | 11 +-
fs/btrfs/inode.c | 44 +-
fs/btrfs/tree-log.c | 14 +-
fs/btrfs/volumes.c | 11 +-
fs/cifs/cifsencrypt.c | 3 +-
fs/cifs/cifssmb.c | 4 +-
fs/cifs/connect.c | 4 +-
fs/cifs/file.c | 26 +-
fs/cifs/misc.c | 14 +-
fs/cifs/smb2pdu.c | 6 +-
fs/dcache.c | 11 +-
fs/ext4/balloc.c | 17 +-
fs/ext4/ialloc.c | 6 +
fs/ext4/inode.c | 6 +
fs/ext4/super.c | 1 +
fs/f2fs/segment.c | 5 +-
fs/hugetlbfs/inode.c | 26 +-
fs/jffs2/fs.c | 1 -
fs/kernfs/file.c | 2 +-
fs/namei.c | 5 +-
fs/ncpfs/ncplib_kernel.c | 4 +
fs/nfs/direct.c | 4 +-
fs/nfs/idmap.c | 6 +-
fs/nfs/internal.h | 1 -
fs/nfs/nfs4sysctl.c | 2 +-
fs/nfs/pagelist.c | 26 +-
fs/nfs/pnfs.c | 6 +-
fs/nfs/super.c | 2 +
fs/nfs/write.c | 2 +
fs/ocfs2/cluster/nodemanager.c | 63 ++-
fs/pipe.c | 198 ++++---
include/crypto/hash.h | 34 +-
include/crypto/internal/hash.h | 2 +
include/drm/drm_crtc_helper.h | 1 +
include/linux/crypto.h | 8 +
include/linux/fs.h | 4 +
include/linux/libata.h | 1 +
include/linux/mlx5/driver.h | 4 +-
include/linux/mmc/sdhci.h | 1 +
include/linux/nospec.h | 3 +-
include/linux/pipe_fs_i.h | 4 +-
include/linux/skbuff.h | 17 +
include/linux/usb/quirks.h | 3 +
include/linux/workqueue.h | 1 +
include/net/ip.h | 11 +-
include/net/ip_fib.h | 1 +
include/net/regulatory.h | 2 +-
include/net/route.h | 3 +-
include/net/sch_generic.h | 8 +
include/net/sctp/sctp.h | 7 +-
include/net/udplite.h | 1 +
include/scsi/libsas.h | 34 +-
include/uapi/linux/if_ether.h | 3 +
include/uapi/linux/usb/audio.h | 4 +-
include/xen/xen-ops.h | 1 +
kernel/async.c | 20 +-
kernel/events/hw_breakpoint.c | 30 +-
kernel/hrtimer.c | 7 +-
kernel/posix-timers.c | 15 +-
kernel/relay.c | 2 +-
kernel/sysctl.c | 33 +-
kernel/trace/trace_kprobe.c | 4 +-
kernel/trace/trace_probe.c | 8 +-
kernel/trace/trace_probe.h | 2 +-
kernel/workqueue.c | 16 +
mm/hugetlb.c | 9 +
mm/madvise.c | 3 +-
mm/memory.c | 2 +-
mm/mempolicy.c | 3 +
mm/vmscan.c | 14 +-
net/9p/trans_virtio.c | 3 +-
net/batman-adv/bat_iv_ogm.c | 16 +-
net/batman-adv/distributed-arp-table.c | 2 +-
net/batman-adv/fragmentation.c | 3 +-
net/batman-adv/gateway_client.c | 3 +
net/batman-adv/hard-interface.c | 9 +-
net/batman-adv/multicast.c | 4 +-
net/batman-adv/originator.c | 4 +-
net/batman-adv/originator.h | 4 +-
net/batman-adv/routing.c | 21 +-
net/batman-adv/soft-interface.c | 8 +-
net/batman-adv/types.h | 9 +-
net/bluetooth/hidp/core.c | 3 +-
net/bridge/br_sysfs_if.c | 3 +
net/bridge/netfilter/ebt_among.c | 55 +-
net/bridge/netfilter/ebtables.c | 17 +-
net/core/dev.c | 13 +-
net/core/skbuff.c | 11 +-
net/dccp/proto.c | 5 +
net/decnet/af_decnet.c | 62 ++-
net/ipv4/igmp.c | 4 +
net/ipv4/ip_sockglue.c | 21 +-
net/ipv4/ip_tunnel.c | 30 +-
net/ipv4/ip_vti.c | 2 -
net/ipv4/netfilter/ipt_CLUSTERIP.c | 24 +-
net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c | 6 +-
net/ipv4/route.c | 114 ++--
net/ipv4/udp.c | 5 +
net/ipv4/xfrm4_policy.c | 1 +
net/ipv6/ip6_checksum.c | 5 +
net/ipv6/ip6_output.c | 13 +-
net/ipv6/ipv6_sockglue.c | 27 +-
net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c | 18 +-
net/ipv6/netfilter/nf_nat_l3proto_ipv6.c | 4 +
net/l2tp/l2tp_core.c | 202 +++----
net/l2tp/l2tp_core.h | 26 +-
net/l2tp/l2tp_ip.c | 10 +-
net/l2tp/l2tp_ip6.c | 8 +-
net/l2tp/l2tp_ppp.c | 126 ++---
net/mac80211/cfg.c | 2 +-
net/netfilter/nf_nat_proto_common.c | 7 +-
net/netfilter/xt_IDLETIMER.c | 9 +-
net/netfilter/xt_LED.c | 12 +-
net/netfilter/xt_RATEEST.c | 22 +-
net/netlink/af_netlink.c | 3 +
net/netlink/genetlink.c | 12 +-
net/nfc/llcp_commands.c | 4 +
net/nfc/netlink.c | 3 +-
net/sched/sch_netem.c | 6 +-
net/sctp/sm_make_chunk.c | 8 +-
net/xfrm/xfrm_user.c | 21 +-
security/integrity/ima/ima_appraise.c | 3 +-
sound/core/oss/pcm_oss.c | 4 +-
sound/core/pcm_native.c | 2 +-
sound/core/seq/seq_clientmgr.c | 29 +-
sound/core/seq/seq_fifo.c | 2 +-
sound/core/seq/seq_memory.c | 14 +-
sound/core/seq/seq_memory.h | 3 +-
sound/core/seq/seq_prioq.c | 28 +-
sound/core/seq/seq_prioq.h | 6 +-
sound/core/seq/seq_queue.c | 28 +-
sound/drivers/aloop.c | 17 +-
sound/pci/hda/patch_realtek.c | 25 +-
sound/soc/au1x/ac97c.c | 6 +-
sound/soc/codecs/rt5651.c | 1 +
sound/soc/nuc900/nuc900-ac97.c | 4 +-
sound/usb/pcm.c | 9 +
sound/usb/quirks-table.h | 47 ++
tools/perf/builtin-record.c | 13 +
tools/perf/util/annotate.c | 8 +-
tools/perf/util/evlist.c | 28 +
tools/perf/util/evlist.h | 3 +
tools/perf/util/session.c | 3 +-
.../testing/selftests/rcutorture/bin/configinit.sh | 2 +-
tools/testing/selftests/rcutorture/bin/kvm.sh | 4 +-
virt/kvm/kvm_main.c | 3 +-
367 files changed, 3696 insertions(+), 2110 deletions(-)

AMAN DEEP (1):
usb: ohci: Proper handling of ed_rm_list to handle race condition between usb_kill_urb() and finish_unlinks()

Adrian Hunter (2):
mmc: sdhci: Allow override of mmc host operations
mmc: sdhci-pci: Fix S0i3 for Intel BYT-based controllers

Al Viro (2):
Bluetooth: hidp_connection_add() unsafe use of l2cap_pi()
lock_parent() needs to recheck if dentry got __dentry_kill'ed under it

Alaa Hleihel (1):
IB/ipoib: Do not warn if IPoIB debugfs doesn't exist

Alex Deucher (2):
drm/radeon: Add dpm quirk for Jet PRO (v2)
drm/radeon: fix KV harvesting

Alexander Graf (1):
KVM: PPC: Book3S PR: Fix svcpu copying with preemption enabled

Alexander Potapenko (1):
netlink: make sure nladdr has correct size in netlink_connect()

Alexandra Yates (3):
ahci: add new Intel device IDs
ahci: Order SATA device IDs for codename Lewisburg
Adding Intel Lewisburg device IDs for SATA

Alexandru Ardelean (1):
staging: iio: adc: ad7192: fix external frequency setting

Alexey Kodanev (4):
dccp: check sk for closed state in dccp_sendmsg()
sctp: verify size of a new chunk in _sctp_make_chunk()
udplite: fix partial checksum initialization
sch_netem: fix skb leak in netem_enqueue()

Andi Shyti (1):
Input: mms114 - fix license module information

Andri Yngvason (3):
can: cc770: Fix stalls on rt-linux, remove redundant IRQ ack
can: cc770: Fix queue stall & dropped RTR reply
can: cc770: Fix use after free in cc770_tx_interrupt()

Andy Lutomirski (1):
x86/entry/64: Don't use IST entry for #BP stack

Andy Shevchenko (1):
x86/cpu: Rename Merrifield2 to Moorefield

Anna-Maria Gleixner (1):
hrtimer: Ensure POSIX compliance (relative CLOCK_REALTIME hrtimers)

Arend Van Spriel (1):
brcmfmac: fix P2P_DEVICE ethernet address generation

Arkadi Sharshevsky (1):
team: Fix double free in error path

Arnaldo Carvalho de Melo (3):
perf evlist: Introduce perf_evlist__new_dummy constructor
perf record: Generate PERF_RECORD_{MMAP,COMM,EXEC} with --delay
perf report: Fix -D output for user metadata events

Arnd Bergmann (7):
x86/pti: Mark constant arrays as __initconst
media: exynos4-is: properly initialize frame format
scsi: fas216: fix sense buffer initialization
cifs: silence compiler warnings showing up with gcc-8.0.0
mm: hide a #warning for COMPILE_TEST
cfg80211: fix cfg80211_beacon_dup
x86/oprofile: Fix bogus GCC-8 warning in nmi_setup()

Ashok Raj (1):
KVM/x86: Add IBPB support

Aurelien Aptel (1):
CIFS: zero sensitive data when freeing

Baolin Wang (1):
usb: gadget: f_fs: Fix possibe deadlock

Bart Van Assche (1):
pktcdvd: Fix pkt_setup_dev() error path

Bastian Stender (1):
mmc: block: fix updating ext_csd caches on ioctl call

Ben Crocker (1):
drm/radeon: insist on 32-bit DMA for Cedar on PPC64/PPC64LE

Ben Hutchings (4):
staging: android: ashmem: Fix a race condition in pin ioctls
xen: Add xen_arch_suspend()
skb: Add skb_postpush_rcsum()
Linux 3.16.57

Benjamin Poirier (1):
e1000e: Fix check_for_link return value with autoneg off

Bjorn Andersson (1):
PM / devfreq: Propagate error from devfreq_add_device()

Boris Ostrovsky (1):
xen/arm: Define xen_arch_suspend()

Boris Pismenny (1):
IB/mlx5: Fix integer overflows in mlx5_ib_create_srq

Borislav Petkov (2):
x86, microcode: Fix accessing dis_ucode_ldr on 32-bit
x86/microcode/AMD: Do not load when running on a hypervisor

Charles_Rose@xxxxxxxx (1):
ahci: Add Device ID for Intel Sunrise Point PCH

Chien Tin Tung (1):
RDMA/ucma: Correct option size check using optlen

Christian Borntraeger (1):
KVM: s390: provide io interrupt kvm_stat

Christian König (2):
drm/ttm: fix adding foreign BOs to the swap LRU
drm/radeon: fix prime teardown order

Christophe JAILLET (3):
power: supply: ab8500_charger: Fix an error handling path
power: supply: ab8500_charger: Bail out in case of error in 'ab8500_charger_init_hw_registers()'
media: bt8xx: Fix err 'bt878_probe()'

Clay McClure (1):
ubi: Fix race condition between ubi volume creation and udev

Colin Ian King (3):
wl1251: check return from call to wl1251_acx_arp_ip_filter
scsi: aacraid: remove redundant setting of variable c
clocksource/drivers/fsl_ftm_timer: Fix error return checking

Cong Wang (2):
netfilter: xt_RATEEST: acquire xt_rateest_mutex for hash insert
netfilter: ipt_CLUSTERIP: fix a refcount bug in clusterip_config_find_get()

Corentin Labbe (2):
powerpc/pseries: Add empty update_numa_cpu_lookup_table() for NUMA=n
ia64: convert unwcheck.py to python3

Dan Aloni (1):
cifs: empty TargetInfo leads to crash on recovery

Dan Carpenter (10):
staging: ncpfs: memory corruption in ncp_read_kernel()
cdrom: information leak in cdrom_ioctl_media_changed()
media: cpia2: Fix a couple off by one bugs
ASoC: nuc900: Fix a loop timeout test
ath9k_htc: Add a sanity check in ath9k_htc_ampdu_action()
ASoC: au1x: Fix timeout tests in au1xac97c_ac97_read()
staging: lustre: libcfs: Prevent harmless read underflow
staging: rts5208: Fix "seg_no" calculation in reset_ms_card()
HID: roccat: prevent an out of bounds read in kovaplus_profile_activated()
ALSA: pcm: potential uninitialized return values

Daniel N Pettersson (1):
cifs: Fix autonegotiate security settings mismatch

Danilo Krummrich (1):
usb: quirks: add control message delay for 1b1c:1b20

Dave Hansen (1):
x86/cpu: Rename "WESTMERE2" family to "NEHALEM_G"

Dave Young (1):
HID: add quirk for another PIXART OEM mouse used by HP

David Ahern (1):
net: Refactor rtable initialization

David Matlack (1):
KVM: nVMX: mark vmcs12 pages dirty on L2 exit

David Rientjes (1):
kernel/relay.c: limit kmalloc size to KMALLOC_MAX_SIZE

David Woodhouse (11):
x86/cpufeatures: Add Intel feature bits for Speculation Control
x86/cpufeatures: Add AMD feature bits for Speculation Control
x86/msr: Add definitions for new speculation control MSRs
x86/pti: Do not enable PTI on CPUs which are not vulnerable to Meltdown
x86/cpufeature: Blacklist SPEC_CTRL/PRED_CMD on early Spectre v2 microcodes
x86/speculation: Add basic IBPB (Indirect Branch Prediction Barrier) support
x86/cpufeatures: Clean up Spectre v2 related CPUID flags
x86/cpuid: Fix up "virtual" IBRS/IBPB/STIBP feature bits on Intel
x86/speculation: Use IBRS if available before calling into firmware
x86/speculation: Update Speculation Control microcode blacklist
x86/speculation: Correct Speculation Control microcode blacklist again

Dmitry Torokhov (1):
Input: edt-ft5x06 - fix error handling for factory mode on non-M06

Dmitry Vyukov (1):
netfilter: ipt_CLUSTERIP: fix out-of-bounds accesses in clusterip_tg_check()

Eran Ben Elisha (1):
net/mlx4_en: Fix mixed PFC and Global pause user control requests

Eric Biggers (15):
crypto: hash - introduce crypto_hash_alg_has_setkey()
crypto: cryptd - pass through absence of ->setkey()
crypto: hash - annotate algorithms taking optional key
crypto: hash - prevent using keyed hashes without setting key
NFS: reject request for id_legacy key without auxdata
pipe, sysctl: drop 'min' parameter from pipe-max-size converter
pipe, sysctl: remove pipe_proc_fn()
pipe: actually allow root to exceed the pipe buffer limits
pipe: fix off-by-one error when checking buffer limits
pipe: reject F_SETPIPE_SZ with size over UINT_MAX
pipe: simplify round_pipe_size()
pipe: read buffer limits atomically
libata: fix length validation of ATAPI-relayed SCSI commands
libata: remove WARN() for DMA or PIO command without data
binder: check for binder_thread allocation failure in binder_poll()

Eric Dumazet (4):
net: igmp: add a missing rcu locking section
netfilter: IDLETIMER: be syzkaller friendly
l2tp: do not accept arbitrary sockets
net: fix possible out-of-bound read in skb_network_protocol()

Eric W. Biederman (4):
signal/sh: Ensure si_signo is initialized in do_divide_error
signal/openrisc: Fix do_unaligned_access to send the proper signal
mn10300/misalignment: Use SIGSEGV SEGV_MAPERR to report a failed user copy
fs: Teach path_connected to handle nfs filesystems with multiple roots.

Erik Veijola (1):
ALSA: usb-audio: Add a quirck for B&W PX headphones

Ernesto A. Fernández (1):
ext4: correct documentation for grpid mount option

Eugene Syromiatnikov (1):
s390: fix handling of -1 in set{,fs}[gu]id16 syscalls

Felix Kuehling (1):
drm/ttm: Don't add swapped BOs to swap-LRU list

Florian Fainelli (2):
pinctrl: Really force states during suspend/resume
net: systemport: Rewrite __bcm_sysport_tx_reclaim()

Florian Westphal (6):
netfilter: ebtables: CONFIG_COMPAT: don't trust userland offsets
netfilter: ebtables: fix erroneous reject of last rule
xfrm_user: uncoditionally validate esn replay attribute struct
netfilter: ipv6: fix use-after-free Write in nf_nat_ipv6_manip_pkt
netfilter: bridge: ebt_among: add missing match size checks
netfilter: bridge: ebt_among: add more missing match size checks

Ganesh Mahendran (1):
android: binder: use VM_ALLOC to get vm area

Geert Uytterhoeven (1):
RDMA/iwpm: Fix uninitialized error code in iwpm_send_mapinfo()

Greg Kroah-Hartman (2):
drm: udl: Properly check framebuffer mmap offsets
USB: serial: pl2303: new device id for Chilitag

Greg Kurz (1):
9p/trans_virtio: discard zero-length reply

Guillaume Nault (3):
l2tp: remove l2tp_tunnel_count and l2tp_session_count
l2tp: don't close sessions in l2tp_tunnel_destruct()
l2tp: avoid using ->tunnel_sock for getting session's parent tunnel

Hans de Goede (10):
ahci: Add PCI ids for Intel Bay Trail, Cherry Trail and Apollo Lake AHCI
USB: cdc-acm: Do not log urb submission errors on disconnect
uas: Log error codes when logging errors
libata: Apply NOLPM quirk to Crucial MX100 512GB SSDs
ASoC: rt5651: Fix regcache sync errors on resume
ahci: Add PCI-id for the Highpoint Rocketraid 644L card
PCI: Add function 1 DMA alias quirk for Highpoint RocketRAID 644L
libata: Apply NOLPM quirk to Crucial M500 480 and 960GB SSDs
libata: Make Crucial BX100 500GB LPM quirk apply to all firmware versions
libata: Modify quirks for MX100 to limit NCQ_TRIM quirk to MU01 version

Hans van Kranenburg (1):
btrfs: alloc_chunk: fix DUP stripe size handling

Hemant Kumar (1):
usb: f_fs: Prevent gadget unbind if it is already unbound

Horia Geantă (1):
crypto: caam - fix endless loop when DECO acquire fails

Hugh Dickins (1):
mm: fix the NULL mapping case in __isolate_lru_page()

Ilya Dryomov (1):
rbd: whitelist RBD_FEATURE_OPERATIONS feature bit

Ingo Molnar (1):
x86/speculation: Move firmware_restrict_branch_speculation_*() from C to CPP

Ioana Ciornei (1):
staging: iio: adc: remove the use of CamelCase

Ivan Delalande (1):
lkdtm: fix handle_irq_event symbol for INT_HW_IRQ_EN

Ivan Vecera (2):
kernfs: fix regression in kernfs_fop_write caused by wrong type
net/mlx4_en: do not ignore autoneg in mlx4_en_set_pauseparam()

J. Bruce Fields (1):
NFS: commit direct writes even if they fail partially

Jack Morgenstein (1):
IB/mlx4: Fix incorrectly releasing steerable UD QPs when have only ETH ports

Jack Stocker (1):
Add delay-init quirk for Corsair K70 RGB keyboards

Jake Daryll Obina (1):
jffs2: Fix use-after-free bug in jffs2_iget()'s error handling path

Jakub Kicinski (1):
net: fix race on decreasing number of TX queues

James Chapman (5):
l2tp: don't use inet_shutdown on tunnel destroy
l2tp: don't use inet_shutdown on ppp session destroy
l2tp: fix races with tunnel socket close
l2tp: fix race in pppol2tp_release with session object destroy
l2tp: fix tunnel lookup use-after-free race

James Hogan (2):
EDAC, octeon: Fix an uninitialized variable warning
MIPS: Fix clean of vmlinuz.{32,ecoff,bin,srec}

James Ralston (1):
ahci: Remove Device ID for Intel Sunrise Point PCH

Jan Beulich (1):
x86/mm: Fix {pmd,pud}_{set,clear}_flags()

Jan Chochol (1):
nfs: Do not convert nfs_idmap_cache_timeout to jiffies

Jan-Marek Glogowski (1):
ALSA: hda/realtek: PCI quirk for Fujitsu U7x7

Jason Gunthorpe (1):
sctp: Fix mangled IPv4 addresses on a IPv6 listening socket

Jason Wang (1):
vhost_net: stop device during reset owner

Jason Yan (4):
scsi: libsas: remove the numbering for each event enum
scsi: libsas: fix memory leak in sas_smp_get_phy_events()
scsi: libsas: fix error when getting phy events
ata: do not schedule hot plug if it is a sas host

Jean Delvare (1):
firmware: dmi_scan: Fix handling of empty DMI strings

Jens Axboe (1):
aio: fix serial draining in exit_aio()

Jeremy Boone (4):
tpm_tis: fix potential buffer overruns caused by bit glitches on the bus
tpm_i2c_nuvoton: fix potential buffer overruns caused by bit glitches on the bus
tpm_i2c_infineon: fix potential buffer overruns caused by bit glitches on the bus
tpm: fix potential buffer overruns caused by bit glitches on the bus

Jia-Ju Bai (1):
USB: serial: io_edgeport: fix possible sleep-in-atomic

Jim Mattson (1):
KVM: nVMX: Eliminate vmcs02 pool

Jiri Bohac (1):
x86/gart: Exclude GART aperture from vmcore

Joe Lawrence (3):
pipe: avoid round_pipe_size() nr_pages overflow on 32-bit
pipe: add proc_dopipe_max_size() to safely assign pipe_max_size
sysctl: check for UINT_MAX before unsigned int min/max

Joel Fernandes (1):
staging: android: ashmem: Fix lockdep issue during llseek

Johan Hovold (5):
video: fbdev: atmel_lcdfb: fix display-timings lookup
USB: serial: add support for multi-port simple drivers
USB: serial: add Novatel Wireless GPS driver
USB: serial: add Medtronic CareLink USB driver
USB: serial: simple: add Motorola Tetra driver

Johannes Berg (1):
regulatory: add NUL to request alpha2

John Crispin (1):
MIPS: ralink: Don't set pm_power_off

Jonas Danielsson (1):
tty/serial: atmel: add new version check for usart

Ju Hyung Park (1):
libata: Enable queued TRIM for Samsung SSD 860

Juergen Gross (2):
x86/xen: init %gs very early to avoid page faults with stack protector
x86/xen: Zero MSR_IA32_SPEC_CTRL before suspend

Julia Lawall (3):
drivers: video: fbdev: atmel_lcdfb.c: fix error return code
drm/radeon: adjust tested variable
USB: usbmon: remove assignment from IS_ERR argument

Julian Wiedmann (2):
s390/qeth: fix SETIP command handling
s390/qeth: free netdevice when removing a card

Julien Gomes (1):
tun: allow positive return values on dev_get_valid_name() call

Justin Chen (1):
MIPS: BMIPS: Do not mask IPIs during suspend

Kai-Heng Feng (3):
drm/edid: Add 6 bpc quirk for CPT panel in Asus UX303LA
libata: disable LPM for Crucial BX100 SSD 500GB drive
xhci: Fix front USB ports on ASUS PRIME B350M-A

Kamil Konieczny (1):
crypto: s5p-sss - Fix kernel Oops in AES-ECB mode

KarimAllah Ahmed (3):
KVM/VMX: Emulate MSR_IA32_ARCH_CAPABILITIES
KVM/VMX: Allow direct access to MSR_IA32_SPEC_CTRL
KVM/SVM: Allow direct access to MSR_IA32_SPEC_CTRL

Karsten Koop (1):
usb: ldusb: add PIDs for new CASSY devices supported by this driver

Kees Cook (1):
NFC: llcp: Limit size of SDP URI

Kirill Marinushkin (1):
ALSA: usb-audio: Fix parsing descriptor of UAC2 processing unit

Konrad Rzeszutek Wilk (1):
x86/spectre_v2: Don't check microcode versions when running under hypervisors

Lars-Peter Clausen (1):
iio: adis_lib: Initialize trigger before requesting interrupt

Lassi Ylikojola (1):
ALSA: usb-audio: add implicit fb quirk for Behringer UFX1204

Leon Romanovsky (11):
RDMA/mlx5: Avoid memory leak in case of XRCD dealloc failure
RDMA/ucma: Limit possible option size
RDMA/ucma: Check that user doesn't overflow QP state
RDMA/mlx5: Fix integer overflow while resizing CQ
RDMA/ucma: Fix access to non-initialized CM_ID object
RDMA/ucma: Don't allow join attempts for unsupported AF family
RDMA/ucma: Check AF family prior resolving address
RDMA/ucma: Fix use-after-free access in ucma_close
RDMA/ucma: Ensure that CM_ID exists prior to access it
RDMA/ucma: Check that device is connected prior to access it
RDMA/ucma: Check that device exists prior to accessing it

Linus Lüssing (2):
batman-adv: fix multicast-via-unicast transmission with AP isolation
batman-adv: fix packet loss for broadcasted DHCP packets to a server

Linus Torvalds (3):
kvm/x86: fix icebp instruction handling
perf/hwbp: Simplify the perf-hwbp code, fix documentation
tty: vt: fix up tabstops properly

Linus Walleij (1):
mtd: jedec_probe: Fix crash in jedec_read_mfr()

Liu Bo (4):
Btrfs: fix deadlock in run_delalloc_nocow
Btrfs: fix crash due to not cleaning up tree log block's dirty bits
Btrfs: fix extent state leak from tree log
Btrfs: fix use-after-free on root->orphan_block_rsv

Lukas Czerner (1):
ext4: fix bitmap position validation

Lukas Wunner (5):
Revert "apple-gmux: lock iGP IO to protect from vgaarb changes"
workqueue: Allow retrieval of current task's work struct
drm: Allow determining if current task is output poll worker
drm/nouveau: Fix deadlock on runtime suspend
drm/radeon: Fix deadlock on runtime suspend

Maciej W. Rozycki (1):
MIPS: Normalise code flow in the CpU exception handler

Malcolm Priestley (2):
media: dvb-usb-v2: lmedm04: Improve logic checking of warm start
media: dvb-usb-v2: lmedm04: move ts2020 attach to dm04_lme2510_tuner

Marc Kleine-Budde (1):
slip: sl_alloc(): remove unused parameter "dev_t line"

Marc Zyngier (2):
arm: KVM: Fix SMCCC handling of unimplemented SMC/HVC calls
arm64: KVM: Increment PC after handling an SMC trap

Mark Rutland (1):
arm64: remove __die()'s stack dump

Masahiro Yamada (1):
mmc: sdhci: export sdhci_execute_tuning()

Masami Hiramatsu (1):
tracing: probeevent: Fix to support minus offset from symbol

Masatake YAMATO (1):
route: remove unsed variable in __mkroute_input

Matt Redfearn (1):
MIPS: TXx9: use IS_BUILTIN() for CONFIG_LEDS_CLASS

Matthew Wilcox (1):
cifs: Fix missing put_xid in cifs_file_strict_mmap

Matthias Schiffer (4):
batman-adv: fix packet checksum in receive path
batman-adv: invalidate checksum on fragment reassembly
batman-adv: update data pointers after skb_cow()
batman-adv: fix header size check in batadv_dbg_arp()

Mauro Carvalho Chehab (1):
media: cxusb, dib0700: ignore XC2028_I2C_FLUSH

Max Filippov (1):
xtensa: fix futex_atomic_cmpxchg_inatomic

Mel Gorman (1):
mm: pin address_space before dereferencing it while isolating an LRU page

Michael Kerrisk (man-pages) (8):
pipe: relocate round_pipe_size() above pipe_set_size()
pipe: move limit checking logic into pipe_set_size()
pipe: refactor argument for account_pipe_buffers()
pipe: fix limit checking in pipe_set_size()
pipe: simplify logic in alloc_pipe_info()
pipe: fix limit checking in alloc_pipe_info()
pipe: make account_pipe_buffers() return a value, and use it
pipe: cap initial pipe capacity according to pipe-max-size limit

Michael Lyle (1):
bcache: don't attach backing with duplicate UUID

Michael Weiser (2):
arm64: Disable unhandled signal log messages by default
arm64: Remove unimplemented syscall log message

Michel Dänzer (1):
drm/radeon: Don't turn off DP sink when disconnected

Mika Westerberg (1):
ahci: Add Intel Cannon Lake PCH-H PCI ID

Mike Kravetz (2):
hugetlbfs: fix offset overflow in hugetlbfs mmap
hugetlbfs: check for pgoff value overflow

Mikulas Patocka (2):
alpha: fix reboot on Avanti platform
alpha: fix crash if pthread_create races with signal delivery

Mimi Zohar (1):
ima: relax requiring a file signature for new files with zero length

Miquel Raynal (1):
mtd: nand: Fix nand_do_read_oob() return value

Namjae Jeon (1):
cifs: fix memory leak when password is supplied multiple times

Nathan Fontenot (1):
powerpc/numa: Invalidate numa_cpu_lookup_table on cpu remove

NeilBrown (1):
MIPS: ralink: Remove ralink_halt()

Nicholas Piggin (1):
powerpc/64: Don't trace irqs-off at interrupt return to soft-disabled context

Nicolas Dichtel (2):
netlink: ensure to loop over all netns in genlmsg_multicast_allns()
netlink: avoid a double skb free in genlmsg_mcast()

Nicolas Pitre (1):
console/dummy: leave .con_font_get set to NULL

Nikola Ciprich (1):
serial: 8250_pci: Add Brainboxes UC-260 4 port serial device

Nikolay Borisov (1):
btrfs: Handle btrfs_set_extent_delalloc failure in fixup worker

OKAMOTO Yoshiaki (1):
usb: option: Add support for FS040U modem

Oleg Nesterov (2):
aio: change exit_aio() to load mm->ioctx_table once and avoid rcu_read_lock()
aio: kill the misleading rcu read locks in ioctx_add_table() and kill_ioctx()

Oliver Neukum (3):
usb: uas: unconditionally bring back host after reset
CDC-ACM: apply quirk for card reader
uas: fix comparison for error code

Paolo Abeni (7):
netfilter: on sockopt() acquire sock lock only in the required scope
netfilter: drop outermost socket lock in getsockopt()
netfilter: x_tables: fix missing timer initialization in xt_LED
netfilter: nat: cope with negative port range
dn_getsockoptdecnet: move nf_{get/set}sockopt outside sock lock
l2tp: fix races with ipv4-mapped ipv6 addresses
ipv6: the entire IPv6 header chain must fit the first fragment

Paolo Bonzini (6):
KVM: x86: rename update_db_bp_intercept to update_bp_intercept
KVM: x86: pass host_initiated to functions that read MSRs
KVM: VMX: introduce alloc_loaded_vmcs
KVM: VMX: make MSR bitmaps per-VCPU
KVM/x86: Remove indirect MSR op calls from SPEC_CTRL
KVM/VMX: Optimize vmx_vcpu_run() and svm_vcpu_run() by marking the RDMSR path as unlikely()

Parav Pandit (1):
RDMA/cma: Use correct size when writing netlink stats

Pete Zaitcev (1):
usb: usbmon: Read text within supplied buffer size

Peter Malone (1):
fbdev: Fixing arbitrary kernel leak in case FBIOGETCMAP_SPARC in sbusfb_ioctl_helper().

Peter Zijlstra (1):
x86/speculation: Add <asm/msr-index.h> dependency

Petr Machata (1):
ip_tunnel: Emit events for post-register MTU changes

Raghava Aditya Renukunta (1):
scsi: aacraid: Fix udev inquiry race condition

Rasmus Villemoes (2):
kernel/async.c: revert "async: simplify lowest_in_progress()"
nospec: Allow index argument to have const-qualified type

Roger Pau Monne (1):
xen/pirq: fix error path cleanup when binding MSIs

Sabrina Dubroca (1):
ipv4: lock mtu in fnhe when received PMTU < net.ipv4.route.min_pmtu

Scott Lawson (1):
AHCI: Remove obsolete Intel Lewisburg SATA RAID device IDs

Scott Mayhew (1):
nfs/pnfs: fix nfs_direct_req ref leak when i/o falls back to the mds

SeongJae Park (2):
rcutorture/configinit: Fix build directory error message
rcutorture/kvm.sh: Use consistent help text for --qemu-args

Sergey Senozhatsky (1):
arm64: do not use print_symbol()

Seunghun Han (1):
x86/MCE: Serialize sysfs changes

Shaohua Li (1):
ata: Add a new flag to destinguish sas controller

Shawn Lin (2):
mmc: dw_mmc: Factor out dw_mci_init_slot_caps
mmc: dw_mmc: Fix out-of-bounds access for slot's caps

Shuah Khan (3):
usbip: prevent bind loops on devices attached to vhci_hcd
usbip: list: don't list devices attached to vhci_hcd
usbip: keep usbip_device sockfd state in sync with tcp_socket

Simon Shields (1):
ARM: dts: exynos: Correct Trats2 panel reset line

Stefan Agner (1):
spi: imx: do not access registers while clocks disabled

Stefan Roese (1):
ALSA: pcm: Use dma_bytes as size parameter in dma_mmap_coherent()

Stefan Windfeldt-Prytz (1):
iio: buffer: check if a buffer has been set up when poll is called

Stefano Brivio (3):
vti4: Don't count header length twice on tunnel setup
ip_tunnel: Clamp MTU to bounds on new link
vti4: Don't override MTU passed on link creation via IFLA_MTU

Stephan Mueller (1):
crypto: af_alg - whitelist mask and type

Sven Eckelmann (2):
batman-adv: Fix internal interface indices types
batman-adv: Fix skbuff rcsum on packet reroute

Takashi Iwai (8):
ALSA: seq: Fix racy pool initializations
ALSA: seq: Don't allow resizing pool in use
ALSA: seq: More protection for concurrent write and ioctl races
ALSA: seq: Fix possible UAF in snd_seq_check_queue()
ALSA: seq: Clear client entry before deleting else at closing
ALSA: hda/realtek - Always immediately update mute LED with pin VREF
ALSA: aloop: Sync stale timer before release
ALSA: aloop: Fix access to not-yet-ready substream via cable

Tang Junhui (1):
bcache: fix crashes in duplicate cache device register

Tariq Toukan (1):
net/mlx4_core: Cleanup FMR unmapping flow

Teijo Kinnunen (1):
USB: storage: Add JMicron bridge 152d:2567 to unusual_devs.h

Tejun Heo (3):
tty: make n_tty_read() always abort if hangup is in progress
fs/aio: Add explicit RCU grace period when freeing kioctx
fs/aio: Use RCU accessors for kioctx_table->table[]

Theodore Ts'o (2):
ext4: fail ext4_iget for root directory if unallocated
ext4: add validity checks for bitmap block numbers

Thinh Nguyen (1):
usb: dwc3: gadget: Set maxpacket size for ep0 IN

Thomas Gleixner (1):
posix-timers: Protect posix clock array access against speculation

Thomas Richter (1):
perf annotate: Fix objdump comment parsing for Intel mov dissassembly

Tim Chen (1):
x86/speculation: Use Indirect Branch Prediction Barrier in context switch

Tobias Jordan (1):
spi: sun6i: disable/unprepare clocks on remove

Todd Kjos (1):
binder: replace "%p" with "%pK"

Tony Luck (1):
x86/MCE: Save microcode revision in machine check records

Toshiaki Makita (2):
net: Fix vlan untag for bridge and vlan_dev with reorder_hdr off
net: Fix untag for vlan packets without ethernet header

Trond Myklebust (2):
NFS: Add a cond_resched() to nfs_commit_release_pages()
NFS: Fix 2 use after free issues in the I/O code

Tyrel Datwyler (1):
scsi: ibmvfc: fix misdefined reserved field in ibmvfc_fcp_rsp_info

Ulf Magnusson (1):
ARM: mvebu: Fix broken PL310_ERRATA_753970 selects

Ulrich Hecht (1):
serial: sh-sci: prevent lockup on full TTY buffers

Vinicius Costa Gomes (1):
skbuff: Fix not waking applications when errors are enqueued

Viresh Kumar (4):
arm: spear600: Add missing interrupt-parent of rtc
arm: spear13xx: Fix dmas cells
arm: spear13xx: Fix spics gpio controller's warning
cpufreq: s3c24xx: Fix broken s3c_cpufreq_init()

Wang Nan (1):
x86/traps: Enable DEBUG_STACK after cpu_init() for TRAP_DB/BP

Wanpeng Li (1):
KVM: mmu: Fix overlap between public and private memslots

Wei Yongjun (1):
mtd: ubi: wl: Fix error return code in ubi_wl_init()

Will Deacon (2):
arm64: traps: Don't print stack or raw PC/LR values in backtraces
arm64: __show_regs: Only resolve kernel symbols when running at EL1

Xin Long (4):
bridge: check brport attr show in brport_show
bonding: fix the err path for dev hwaddr sync in bond_enslave
bonding: move dev_mc_sync after master_upper_dev_link in bond_enslave
bonding: process the err returned by dev_set_allmulti properly in bond_enslave

Yisheng Xie (2):
staging: android: ashmem: Fix possible deadlock in ashmem_ioctl
mm/mempolicy.c: avoid use uninitialized preferred_node

Yufen Yu (1):
md raid10: fix NULL deference in handle_write_completed()

Yunlei He (1):
f2fs: fix a panic caused by NULL flush_cmd_control

Zhang Bo (1):
Input: matrix_keypad - fix race when disabling interrupts

Zhouyi Zhou (1):
ext4: save error to disk in __ext4_grp_locked_error()

Zygo Blaxell (1):
btrfs: remove spurious WARN_ON(ref->count < 0) in find_parent_nodes

alex chen (1):
ocfs2: subsystem.su_mutex is required while accessing the item->ci_parent

chenjie (1):
mm/madvise.c: fix madvise() infinite loop under special circumstances

mulhern (1):
dm thin: fix documentation relative to low water mark threshold

diff --git a/Documentation/device-mapper/thin-provisioning.txt b/Documentation/device-mapper/thin-provisioning.txt
index 2f5173500bd9..2800b014a619 100644
--- a/Documentation/device-mapper/thin-provisioning.txt
+++ b/Documentation/device-mapper/thin-provisioning.txt
@@ -112,9 +112,11 @@ $low_water_mark is expressed in blocks of size $data_block_size. If
free space on the data device drops below this level then a dm event
will be triggered which a userspace daemon should catch allowing it to
extend the pool device. Only one such event will be sent.
-Resuming a device with a new table itself triggers an event so the
-userspace daemon can use this to detect a situation where a new table
-already exceeds the threshold.
+
+No special event is triggered if a just resumed device's free space is below
+the low water mark. However, resuming a device always triggers an
+event; a userspace daemon should verify that free space exceeds the low
+water mark when handling this event.

A low water mark for the metadata device is maintained in the kernel and
will trigger a dm event if free space on the metadata device drops below
diff --git a/Documentation/devicetree/bindings/dma/snps-dma.txt b/Documentation/devicetree/bindings/dma/snps-dma.txt
index d58675ea1abf..f4bdc9d62130 100644
--- a/Documentation/devicetree/bindings/dma/snps-dma.txt
+++ b/Documentation/devicetree/bindings/dma/snps-dma.txt
@@ -58,6 +58,6 @@ described in the dma.txt file, using a four-cell specifier for each channel.
interrupts = <0 35 0x4>;
status = "disabled";
dmas = <&dmahost 12 0 1>,
- <&dmahost 13 0 1 0>;
+ <&dmahost 13 1 0>;
dma-names = "rx", "rx";
};
diff --git a/Documentation/filesystems/ext4.txt b/Documentation/filesystems/ext4.txt
index 919a3293aaa4..04f7e4ad070b 100644
--- a/Documentation/filesystems/ext4.txt
+++ b/Documentation/filesystems/ext4.txt
@@ -233,7 +233,7 @@ data_err=ignore(*) Just print an error message if an error occurs
data_err=abort Abort the journal if an error occurs in a file
data buffer in ordered mode.

-grpid Give objects the same group ID as their creator.
+grpid New objects have the group ID of their parent.
bsdgroups

nogrpid (*) New objects have the group ID of their creator.
diff --git a/Makefile b/Makefile
index 1c632c11f398..4cf98f68a826 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 3
PATCHLEVEL = 16
-SUBLEVEL = 56
+SUBLEVEL = 57
EXTRAVERSION =
NAME = Museum of Fishiegoodies

diff --git a/arch/alpha/kernel/pci_impl.h b/arch/alpha/kernel/pci_impl.h
index 2b0ac429f5eb..412bb3c24f36 100644
--- a/arch/alpha/kernel/pci_impl.h
+++ b/arch/alpha/kernel/pci_impl.h
@@ -143,7 +143,8 @@ struct pci_iommu_arena
};

#if defined(CONFIG_ALPHA_SRM) && \
- (defined(CONFIG_ALPHA_CIA) || defined(CONFIG_ALPHA_LCA))
+ (defined(CONFIG_ALPHA_CIA) || defined(CONFIG_ALPHA_LCA) || \
+ defined(CONFIG_ALPHA_AVANTI))
# define NEED_SRM_SAVE_RESTORE
#else
# undef NEED_SRM_SAVE_RESTORE
diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
index 1941a07b5811..86c1c4fd5246 100644
--- a/arch/alpha/kernel/process.c
+++ b/arch/alpha/kernel/process.c
@@ -274,12 +274,13 @@ copy_thread(unsigned long clone_flags, unsigned long usp,
application calling fork. */
if (clone_flags & CLONE_SETTLS)
childti->pcb.unique = regs->r20;
+ else
+ regs->r20 = 0; /* OSF/1 has some strange fork() semantics. */
childti->pcb.usp = usp ?: rdusp();
*childregs = *regs;
childregs->r0 = 0;
childregs->r19 = 0;
childregs->r20 = 1; /* OSF/1 has some strange fork() semantics. */
- regs->r20 = 0;
stack = ((struct switch_stack *) regs) - 1;
*childstack = *stack;
childstack->r26 = (unsigned long) ret_from_fork;
diff --git a/arch/arm/boot/dts/exynos4412-trats2.dts b/arch/arm/boot/dts/exynos4412-trats2.dts
index 0cee640ea9c9..44d58f30146d 100644
--- a/arch/arm/boot/dts/exynos4412-trats2.dts
+++ b/arch/arm/boot/dts/exynos4412-trats2.dts
@@ -628,7 +628,7 @@
reg = <0>;
vdd3-supply = <&lcd_vdd3_reg>;
vci-supply = <&ldo25_reg>;
- reset-gpios = <&gpy4 5 0>;
+ reset-gpios = <&gpf2 1 0>;
power-on-delay= <50>;
reset-delay = <100>;
init-delay = <100>;
diff --git a/arch/arm/boot/dts/spear1310-evb.dts b/arch/arm/boot/dts/spear1310-evb.dts
index b56a801e42a2..28d5d145ea07 100644
--- a/arch/arm/boot/dts/spear1310-evb.dts
+++ b/arch/arm/boot/dts/spear1310-evb.dts
@@ -345,7 +345,7 @@
spi0: spi@e0100000 {
status = "okay";
num-cs = <3>;
- cs-gpios = <&gpio1 7 0>, <&spics 0>, <&spics 1>;
+ cs-gpios = <&gpio1 7 0>, <&spics 0 0>, <&spics 1 0>;

stmpe610@0 {
compatible = "st,stmpe610";
diff --git a/arch/arm/boot/dts/spear1340.dtsi b/arch/arm/boot/dts/spear1340.dtsi
index 54d128d35681..dcb326353d3c 100644
--- a/arch/arm/boot/dts/spear1340.dtsi
+++ b/arch/arm/boot/dts/spear1340.dtsi
@@ -113,8 +113,8 @@
reg = <0xb4100000 0x1000>;
interrupts = <0 105 0x4>;
status = "disabled";
- dmas = <&dwdma0 0x600 0 0 1>, /* 0xC << 11 */
- <&dwdma0 0x680 0 1 0>; /* 0xD << 7 */
+ dmas = <&dwdma0 12 0 1>,
+ <&dwdma0 13 1 0>;
dma-names = "tx", "rx";
};

diff --git a/arch/arm/boot/dts/spear13xx.dtsi b/arch/arm/boot/dts/spear13xx.dtsi
index 4382547df58a..c6199fdad8d8 100644
--- a/arch/arm/boot/dts/spear13xx.dtsi
+++ b/arch/arm/boot/dts/spear13xx.dtsi
@@ -100,7 +100,7 @@
reg = <0xb2800000 0x1000>;
interrupts = <0 29 0x4>;
status = "disabled";
- dmas = <&dwdma0 0 0 0 0>;
+ dmas = <&dwdma0 0 0 0>;
dma-names = "data";
};

@@ -283,8 +283,8 @@
#size-cells = <0>;
interrupts = <0 31 0x4>;
status = "disabled";
- dmas = <&dwdma0 0x2000 0 0 0>, /* 0x4 << 11 */
- <&dwdma0 0x0280 0 0 0>; /* 0x5 << 7 */
+ dmas = <&dwdma0 4 0 0>,
+ <&dwdma0 5 0 0>;
dma-names = "tx", "rx";
};

diff --git a/arch/arm/boot/dts/spear600.dtsi b/arch/arm/boot/dts/spear600.dtsi
index 9f60a7b6a42b..bd379034993c 100644
--- a/arch/arm/boot/dts/spear600.dtsi
+++ b/arch/arm/boot/dts/spear600.dtsi
@@ -194,6 +194,7 @@
rtc@fc900000 {
compatible = "st,spear600-rtc";
reg = <0xfc900000 0x1000>;
+ interrupt-parent = <&vic0>;
interrupts = <10>;
status = "disabled";
};
diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c
index a96a8043277c..c8b56c70d7b9 100644
--- a/arch/arm/kvm/handle_exit.c
+++ b/arch/arm/kvm/handle_exit.c
@@ -45,7 +45,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)

ret = kvm_psci_call(vcpu);
if (ret < 0) {
- kvm_inject_undefined(vcpu);
+ *vcpu_reg(vcpu, 0) = ~0UL;
return 1;
}

@@ -54,7 +54,16 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)

static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
- kvm_inject_undefined(vcpu);
+ /*
+ * "If an SMC instruction executed at Non-secure EL1 is
+ * trapped to EL2 because HCR_EL2.TSC is 1, the exception is a
+ * Trap exception, not a Secure Monitor Call exception [...]"
+ *
+ * We need to advance the PC after the trap, as it would
+ * otherwise return to the same address...
+ */
+ *vcpu_reg(vcpu, 0) = ~0UL;
+ kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
return 1;
}

diff --git a/arch/arm/mach-mvebu/Kconfig b/arch/arm/mach-mvebu/Kconfig
index b9bc599a5fd0..67aee3467696 100644
--- a/arch/arm/mach-mvebu/Kconfig
+++ b/arch/arm/mach-mvebu/Kconfig
@@ -33,7 +33,7 @@ config MACH_ARMADA_370
config MACH_ARMADA_375
bool "Marvell Armada 375 boards" if ARCH_MULTI_V7
select ARM_ERRATA_720789
- select ARM_ERRATA_753970
+ select PL310_ERRATA_753970
select ARM_GIC
select ARMADA_375_CLK
select HAVE_ARM_SCU
@@ -48,7 +48,7 @@ config MACH_ARMADA_375
config MACH_ARMADA_38X
bool "Marvell Armada 380/385 boards" if ARCH_MULTI_V7
select ARM_ERRATA_720789
- select ARM_ERRATA_753970
+ select PL310_ERRATA_753970
select ARM_GIC
select ARMADA_38X_CLK
select HAVE_ARM_SCU
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index e4d719ff71f6..3252347c3140 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -344,6 +344,7 @@ void xen_arch_pre_suspend(void) { }
void xen_arch_post_suspend(int suspend_cancelled) { }
void xen_timer_resume(void) { }
void xen_arch_resume(void) { }
+void xen_arch_suspend(void) { }


/* In the hypervisor.S file. */
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 291fcb57299d..0bfb5fe5fdc7 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -31,7 +31,6 @@
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/interrupt.h>
-#include <linux/kallsyms.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/elfcore.h>
@@ -198,11 +197,16 @@ void __show_regs(struct pt_regs *regs)
}

show_regs_print_info(KERN_DEFAULT);
- print_symbol("PC is at %s\n", instruction_pointer(regs));
- print_symbol("LR is at %s\n", lr);
- printk("pc : [<%016llx>] lr : [<%016llx>] pstate: %08llx\n",
- regs->pc, lr, regs->pstate);
- printk("sp : %016llx\n", sp);
+
+ if (!user_mode(regs)) {
+ printk("pc : %pS\n", (void *)regs->pc);
+ printk("lr : %pS\n", (void *)lr);
+ } else {
+ printk("pc : %016llx\n", regs->pc);
+ printk("lr : %016llx\n", lr);
+ }
+
+ printk("sp : %016llx pstate : %08llx\n", sp, regs->pstate);
for (i = top_reg; i >= 0; i--) {
printk("x%-2d: %016llx ", i, regs->regs[i]);
if (i % 2 == 0)
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index ec1e323060e0..d368cc98bcdb 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -45,55 +45,11 @@ static const char *handler[]= {
"Error"
};

-int show_unhandled_signals = 1;
-
-/*
- * Dump out the contents of some memory nicely...
- */
-static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
- unsigned long top)
-{
- unsigned long first;
- mm_segment_t fs;
- int i;
-
- /*
- * We need to switch to kernel mode so that we can use __get_user
- * to safely read from kernel space.
- */
- fs = get_fs();
- set_fs(KERNEL_DS);
-
- printk("%s%s(0x%016lx to 0x%016lx)\n", lvl, str, bottom, top);
-
- for (first = bottom & ~31; first < top; first += 32) {
- unsigned long p;
- char str[sizeof(" 12345678") * 8 + 1];
-
- memset(str, ' ', sizeof(str));
- str[sizeof(str) - 1] = '\0';
-
- for (p = first, i = 0; i < 8 && p < top; i++, p += 4) {
- if (p >= bottom && p < top) {
- unsigned int val;
- if (__get_user(val, (unsigned int *)p) == 0)
- sprintf(str + i * 9, " %08x", val);
- else
- sprintf(str + i * 9, " ????????");
- }
- }
- printk("%s%04lx:%s\n", lvl, first & 0xffff, str);
- }
-
- set_fs(fs);
-}
+int show_unhandled_signals = 0;

static void dump_backtrace_entry(unsigned long where, unsigned long stack)
{
- print_ip_sym(where);
- if (in_exception_text(where))
- dump_mem("", "Exception stack", stack,
- stack + sizeof(struct pt_regs));
+ printk(" %pS\n", (void *)where);
}

static void __dump_instr(const char *lvl, struct pt_regs *regs)
@@ -206,8 +162,6 @@ static int __die(const char *str, int err, struct thread_info *thread,
TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), thread + 1);

if (!user_mode(regs) || in_interrupt()) {
- dump_mem(KERN_EMERG, "Stack: ", regs->sp,
- THREAD_SIZE + (unsigned long)task_stack_page(tsk));
dump_backtrace(regs, tsk);
dump_instr(KERN_EMERG, regs);
}
@@ -297,14 +251,6 @@ asmlinkage long do_ni_syscall(struct pt_regs *regs)
}
#endif

- if (show_unhandled_signals && printk_ratelimit()) {
- pr_info("%s[%d]: syscall %d\n", current->comm,
- task_pid_nr(current), (int)regs->syscallno);
- dump_instr("", regs);
- if (user_mode(regs))
- __show_regs(regs);
- }
-
return sys_ni_syscall();
}

diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 096824bedab6..fcc86311db73 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -43,7 +43,16 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)

static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
+ /*
+ * "If an SMC instruction executed at Non-secure EL1 is
+ * trapped to EL2 because HCR_EL2.TSC is 1, the exception is a
+ * Trap exception, not a Secure Monitor Call exception [...]"
+ *
+ * We need to advance the PC after the trap, as it would
+ * otherwise return to the same address...
+ */
*vcpu_reg(vcpu, 0) = ~0UL;
+ kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
return 1;
}

diff --git a/arch/ia64/scripts/unwcheck.py b/arch/ia64/scripts/unwcheck.py
index 2bfd941ff7c7..23e2e6ad5f6f 100644
--- a/arch/ia64/scripts/unwcheck.py
+++ b/arch/ia64/scripts/unwcheck.py
@@ -15,7 +15,7 @@ import re
import sys

if len(sys.argv) != 2:
- print "Usage: %s FILE" % sys.argv[0]
+ print("Usage: %s FILE" % sys.argv[0])
sys.exit(2)

readelf = os.getenv("READELF", "readelf")
@@ -28,7 +28,7 @@ rlen_pattern = re.compile(".*rlen=([0-9]+)")
global num_errors
num_errors += 1
if not func: func = "[%#x-%#x]" % (start, end)
- print "ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum)
+ print("ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum))
return

num_funcs = 0
@@ -42,23 +42,23 @@ rlen_sum = 0
check_func(func, slots, rlen_sum)

func = m.group(1)
- start = long(m.group(2), 16)
- end = long(m.group(3), 16)
+ start = int(m.group(2), 16)
+ end = int(m.group(3), 16)
slots = 3 * (end - start) / 16
- rlen_sum = 0L
+ rlen_sum = 0
num_funcs += 1
else:
m = rlen_pattern.match(line)
if m:
- rlen_sum += long(m.group(1))
+ rlen_sum += int(m.group(1))
check_func(func, slots, rlen_sum)

if num_errors == 0:
- print "No errors detected in %u functions." % num_funcs
+ print("No errors detected in %u functions." % num_funcs)
else:
if num_errors > 1:
err="errors"
else:
err="error"
- print "%u %s detected in %u functions." % (num_errors, err, num_funcs)
+ print("%u %s detected in %u functions." % (num_errors, err, num_funcs))
sys.exit(1)
diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile
index 61af6b6ab13d..1c0e5e61fcdc 100644
--- a/arch/mips/boot/compressed/Makefile
+++ b/arch/mips/boot/compressed/Makefile
@@ -117,4 +117,8 @@ OBJCOPYFLAGS_vmlinuz.srec := $(OBJCOPYFLAGS) -S -O srec
vmlinuz.srec: vmlinuz
$(call cmd,objcopy)

-clean-files := $(objtree)/vmlinuz $(objtree)/vmlinuz.{32,ecoff,bin,srec}
+clean-files += $(objtree)/vmlinuz
+clean-files += $(objtree)/vmlinuz.32
+clean-files += $(objtree)/vmlinuz.ecoff
+clean-files += $(objtree)/vmlinuz.bin
+clean-files += $(objtree)/vmlinuz.srec
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
index df9e2bd9b2c2..c142b1ff089d 100644
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -159,11 +159,11 @@ static void bmips_prepare_cpus(unsigned int max_cpus)
return;
}

- if (request_irq(IPI0_IRQ, bmips_ipi_interrupt, IRQF_PERCPU,
- "smp_ipi0", NULL))
+ if (request_irq(IPI0_IRQ, bmips_ipi_interrupt,
+ IRQF_PERCPU | IRQF_NO_SUSPEND, "smp_ipi0", NULL))
panic("Can't request IPI0 interrupt");
- if (request_irq(IPI1_IRQ, bmips_ipi_interrupt, IRQF_PERCPU,
- "smp_ipi1", NULL))
+ if (request_irq(IPI1_IRQ, bmips_ipi_interrupt,
+ IRQF_PERCPU | IRQF_NO_SUSPEND, "smp_ipi1", NULL))
panic("Can't request IPI1 interrupt");
}

diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index b9f58ce99bc8..d34c45cae5b9 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -1247,7 +1247,7 @@ asmlinkage void do_cpu(struct pt_regs *regs)
status = -1;

if (unlikely(compute_return_epc(regs) < 0))
- goto out;
+ break;

if (get_isa16_mode(regs->cp0_epc)) {
unsigned short mmop[2] = { 0 };
@@ -1280,7 +1280,7 @@ asmlinkage void do_cpu(struct pt_regs *regs)
force_sig(status, current);
}

- goto out;
+ break;

case 3:
/*
@@ -1296,8 +1296,10 @@ asmlinkage void do_cpu(struct pt_regs *regs)
* erroneously too, so they are covered by this choice
* as well.
*/
- if (raw_cpu_has_fpu)
+ if (raw_cpu_has_fpu) {
+ force_sig(SIGILL, current);
break;
+ }
/* Fall through. */

case 1:
@@ -1320,16 +1322,13 @@ asmlinkage void do_cpu(struct pt_regs *regs)
if (!process_fpemu_return(sig, fault_addr, fcr31) && !err)
mt_ase_fp_affinity();

- goto out;
+ break;

case 2:
raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs);
- goto out;
+ break;
}

- force_sig(SIGILL, current);
-
-out:
exception_exit(prev_state);
}

diff --git a/arch/mips/ralink/reset.c b/arch/mips/ralink/reset.c
index 55c7ec59df3c..7dbf92b5370c 100644
--- a/arch/mips/ralink/reset.c
+++ b/arch/mips/ralink/reset.c
@@ -88,17 +88,9 @@ static void ralink_restart(char *command)
unreachable();
}

-static void ralink_halt(void)
-{
- local_irq_disable();
- unreachable();
-}
-
static int __init mips_reboot_setup(void)
{
_machine_restart = ralink_restart;
- _machine_halt = ralink_halt;
- pm_power_off = ralink_halt;

return 0;
}
diff --git a/arch/mips/txx9/rbtx4939/setup.c b/arch/mips/txx9/rbtx4939/setup.c
index 2da5f25f98bc..e802259b2a59 100644
--- a/arch/mips/txx9/rbtx4939/setup.c
+++ b/arch/mips/txx9/rbtx4939/setup.c
@@ -186,7 +186,7 @@ static void __init rbtx4939_update_ioc_pen(void)

#define RBTX4939_MAX_7SEGLEDS 8

-#if IS_ENABLED(CONFIG_LEDS_CLASS)
+#if IS_BUILTIN(CONFIG_LEDS_CLASS)
static u8 led_val[RBTX4939_MAX_7SEGLEDS];
struct rbtx4939_led_data {
struct led_classdev cdev;
@@ -262,7 +262,7 @@ static inline void rbtx4939_led_setup(void)

static void __rbtx4939_7segled_putc(unsigned int pos, unsigned char val)
{
-#if IS_ENABLED(CONFIG_LEDS_CLASS)
+#if IS_BUILTIN(CONFIG_LEDS_CLASS)
unsigned long flags;
local_irq_save(flags);
/* bit7: reserved for LED class */
diff --git a/arch/mn10300/mm/misalignment.c b/arch/mn10300/mm/misalignment.c
index b9920b1edd5a..70cef54dc40f 100644
--- a/arch/mn10300/mm/misalignment.c
+++ b/arch/mn10300/mm/misalignment.c
@@ -437,7 +437,7 @@ asmlinkage void misalignment(struct pt_regs *regs, enum exception_code code)

info.si_signo = SIGSEGV;
info.si_errno = 0;
- info.si_code = 0;
+ info.si_code = SEGV_MAPERR;
info.si_addr = (void *) regs->pc;
force_sig_info(SIGSEGV, &info, current);
return;
diff --git a/arch/openrisc/kernel/traps.c b/arch/openrisc/kernel/traps.c
index 3d3f6062f49c..605a284922fb 100644
--- a/arch/openrisc/kernel/traps.c
+++ b/arch/openrisc/kernel/traps.c
@@ -302,12 +302,12 @@ asmlinkage void do_unaligned_access(struct pt_regs *regs, unsigned long address)
siginfo_t info;

if (user_mode(regs)) {
- /* Send a SIGSEGV */
- info.si_signo = SIGSEGV;
+ /* Send a SIGBUS */
+ info.si_signo = SIGBUS;
info.si_errno = 0;
- /* info.si_code has been set above */
- info.si_addr = (void *)address;
- force_sig_info(SIGSEGV, &info, current);
+ info.si_code = BUS_ADRALN;
+ info.si_addr = (void __user *)address;
+ force_sig_info(SIGBUS, &info, current);
} else {
printk("KERNEL: Unaligned Access 0x%.8lx\n", address);
show_registers(regs);
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 0689091f9505..15d2307dbdd0 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -190,10 +190,8 @@ extern void kvmppc_hv_entry_trampoline(void);
extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst);
extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst);
extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd);
-extern void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
- struct kvm_vcpu *vcpu);
-extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
- struct kvmppc_book3s_shadow_vcpu *svcpu);
+extern void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu);
+extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu);

static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
{
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index a69d091d31ff..7b350f12e4a0 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -44,6 +44,11 @@ extern void __init dump_numa_cpu_topology(void);
extern int sysfs_add_device_to_node(struct device *dev, int nid);
extern void sysfs_remove_device_from_node(struct device *dev, int nid);

+static inline void update_numa_cpu_lookup_table(unsigned int cpu, int node)
+{
+ numa_cpu_lookup_table[cpu] = node;
+}
+
static inline int early_cpu_to_node(int cpu)
{
int nid;
@@ -71,6 +76,9 @@ static inline void sysfs_remove_device_from_node(struct device *dev,
int nid)
{
}
+
+static inline void update_numa_cpu_lookup_table(unsigned int cpu, int node) {}
+
#endif /* CONFIG_NUMA */

#if defined(CONFIG_NUMA) && defined(CONFIG_PPC_SPLPAR)
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 42c8e2d635e8..2e90c9b6d9c6 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -885,9 +885,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
beq 1f
rlwinm r7,r7,0,~PACA_IRQ_HARD_DIS
stb r7,PACAIRQHAPPENED(r13)
-1: li r0,0
- stb r0,PACASOFTIRQEN(r13);
- TRACE_DISABLE_INTS
+1:
+#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_BUG)
+ /* The interrupt should not have soft enabled. */
+ lbz r7,PACASOFTIRQEN(r13)
+1: tdnei r7,0
+ EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
+#endif
b do_restore

/*
diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S
index d044b8b7c69d..b053a0b1f853 100644
--- a/arch/powerpc/kvm/book3s_interrupts.S
+++ b/arch/powerpc/kvm/book3s_interrupts.S
@@ -96,7 +96,7 @@ _GLOBAL(__kvmppc_vcpu_run)

kvm_start_lightweight:
/* Copy registers into shadow vcpu so we can access them in real mode */
- GET_SHADOW_VCPU(r3)
+ mr r3, r4
bl FUNC(kvmppc_copy_to_svcpu)
nop
REST_GPR(4, r1)
@@ -165,9 +165,7 @@ _GLOBAL(__kvmppc_vcpu_run)
stw r12, VCPU_TRAP(r3)

/* Transfer reg values from shadow vcpu back to vcpu struct */
- /* On 64-bit, interrupts are still off at this point */

- GET_SHADOW_VCPU(r4)
bl FUNC(kvmppc_copy_from_svcpu)
nop

diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index e587264c2e8c..1d9c536e6b16 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -82,7 +82,7 @@ static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
#ifdef CONFIG_PPC_BOOK3S_64
struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
if (svcpu->in_use) {
- kvmppc_copy_from_svcpu(vcpu, svcpu);
+ kvmppc_copy_from_svcpu(vcpu);
}
memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
@@ -95,9 +95,10 @@ static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
}

/* Copy data needed by real-mode code from vcpu to shadow vcpu */
-void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
- struct kvm_vcpu *vcpu)
+void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu)
{
+ struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+
svcpu->gpr[0] = vcpu->arch.gpr[0];
svcpu->gpr[1] = vcpu->arch.gpr[1];
svcpu->gpr[2] = vcpu->arch.gpr[2];
@@ -121,17 +122,14 @@ void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
svcpu->shadow_fscr = vcpu->arch.shadow_fscr;
#endif
svcpu->in_use = true;
+
+ svcpu_put(svcpu);
}

/* Copy data touched by real-mode code from shadow vcpu back to vcpu */
-void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
- struct kvmppc_book3s_shadow_vcpu *svcpu)
+void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu)
{
- /*
- * vcpu_put would just call us again because in_use hasn't
- * been updated yet.
- */
- preempt_disable();
+ struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);

/*
* Maybe we were already preempted and synced the svcpu from
@@ -169,7 +167,7 @@ void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
svcpu->in_use = false;

out:
- preempt_enable();
+ svcpu_put(svcpu);
}

static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index d3e9a78eaed3..99960b0540e1 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -162,11 +162,6 @@ static void reset_numa_cpu_lookup_table(void)
numa_cpu_lookup_table[cpu] = -1;
}

-static void update_numa_cpu_lookup_table(unsigned int cpu, int node)
-{
- numa_cpu_lookup_table[cpu] = node;
-}
-
static void map_cpu_to_node(int cpu, int node)
{
update_numa_cpu_lookup_table(cpu, node);
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
index 20d62975856f..a5d287dfd7b3 100644
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -31,6 +31,7 @@
#include <asm/vdso_datapage.h>
#include <asm/xics.h>
#include <asm/plpar_wrappers.h>
+#include <asm/topology.h>

#include "offline_states.h"

@@ -328,6 +329,7 @@ static void pseries_remove_processor(struct device_node *np)
BUG_ON(cpu_online(cpu));
set_cpu_present(cpu, false);
set_hard_smp_processor_id(cpu, -1);
+ update_numa_cpu_lookup_table(cpu, -1);
break;
}
if (cpu >= nr_cpu_ids)
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index 0176ebc97bfd..86f934255eb6 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -110,7 +110,7 @@ COMPAT_SYSCALL_DEFINE2(s390_setregid16, u16, rgid, u16, egid)

COMPAT_SYSCALL_DEFINE1(s390_setgid16, u16, gid)
{
- return sys_setgid((gid_t)gid);
+ return sys_setgid(low2highgid(gid));
}

COMPAT_SYSCALL_DEFINE2(s390_setreuid16, u16, ruid, u16, euid)
@@ -120,7 +120,7 @@ COMPAT_SYSCALL_DEFINE2(s390_setreuid16, u16, ruid, u16, euid)

COMPAT_SYSCALL_DEFINE1(s390_setuid16, u16, uid)
{
- return sys_setuid((uid_t)uid);
+ return sys_setuid(low2highuid(uid));
}

COMPAT_SYSCALL_DEFINE3(s390_setresuid16, u16, ruid, u16, euid, u16, suid)
@@ -173,12 +173,12 @@ COMPAT_SYSCALL_DEFINE3(s390_getresgid16, u16 __user *, rgidp,

COMPAT_SYSCALL_DEFINE1(s390_setfsuid16, u16, uid)
{
- return sys_setfsuid((uid_t)uid);
+ return sys_setfsuid(low2highuid(uid));
}

COMPAT_SYSCALL_DEFINE1(s390_setfsgid16, u16, gid)
{
- return sys_setfsgid((gid_t)gid);
+ return sys_setfsgid(low2highgid(gid));
}

static int groups16_to_user(u16 __user *grouplist, struct group_info *group_info)
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index dcf75dfd4583..b4858f4efbc6 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -62,6 +62,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
+ { "deliver_io_interrupt", VCPU_STAT(deliver_io_int) },
{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c
index ff639342a8be..c5b997757988 100644
--- a/arch/sh/kernel/traps_32.c
+++ b/arch/sh/kernel/traps_32.c
@@ -607,7 +607,8 @@ asmlinkage void do_divide_error(unsigned long r4)
break;
}

- force_sig_info(SIGFPE, &info, current);
+ info.si_signo = SIGFPE;
+ force_sig_info(info.si_signo, &info, current);
}
#endif

diff --git a/arch/sparc/crypto/crc32c_glue.c b/arch/sparc/crypto/crc32c_glue.c
index d1064e46efe8..8aa664638c3c 100644
--- a/arch/sparc/crypto/crc32c_glue.c
+++ b/arch/sparc/crypto/crc32c_glue.c
@@ -133,6 +133,7 @@ static struct shash_alg alg = {
.cra_name = "crc32c",
.cra_driver_name = "crc32c-sparc64",
.cra_priority = SPARC_CR_OPCODE_PRIORITY,
+ .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.cra_blocksize = CHKSUM_BLOCK_SIZE,
.cra_ctxsize = sizeof(u32),
.cra_alignmask = 7,
diff --git a/arch/x86/crypto/crc32-pclmul_glue.c b/arch/x86/crypto/crc32-pclmul_glue.c
index 1937fc1d8763..0072dd30dd12 100644
--- a/arch/x86/crypto/crc32-pclmul_glue.c
+++ b/arch/x86/crypto/crc32-pclmul_glue.c
@@ -162,6 +162,7 @@ static struct shash_alg alg = {
.cra_name = "crc32",
.cra_driver_name = "crc32-pclmul",
.cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.cra_blocksize = CHKSUM_BLOCK_SIZE,
.cra_ctxsize = sizeof(u32),
.cra_module = THIS_MODULE,
diff --git a/arch/x86/crypto/crc32c-intel_glue.c b/arch/x86/crypto/crc32c-intel_glue.c
index 28640c3d6af7..c743444b9274 100644
--- a/arch/x86/crypto/crc32c-intel_glue.c
+++ b/arch/x86/crypto/crc32c-intel_glue.c
@@ -240,6 +240,7 @@ static struct shash_alg alg = {
.cra_name = "crc32c",
.cra_driver_name = "crc32c-intel",
.cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.cra_blocksize = CHKSUM_BLOCK_SIZE,
.cra_ctxsize = sizeof(u32),
.cra_module = THIS_MODULE,
diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
index 20370c6db74b..3d1ec41ae09a 100644
--- a/arch/x86/include/asm/apm.h
+++ b/arch/x86/include/asm/apm.h
@@ -6,6 +6,8 @@
#ifndef _ASM_X86_MACH_DEFAULT_APM_H
#define _ASM_X86_MACH_DEFAULT_APM_H

+#include <asm/nospec-branch.h>
+
#ifdef APM_ZERO_SEGS
# define APM_DO_ZERO_SEGS \
"pushl %%ds\n\t" \
@@ -31,6 +33,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
* N.B. We do NOT need a cld after the BIOS call
* because we always save and restore the flags.
*/
+ firmware_restrict_branch_speculation_start();
__asm__ __volatile__(APM_DO_ZERO_SEGS
"pushl %%edi\n\t"
"pushl %%ebp\n\t"
@@ -43,6 +46,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
"=S" (*esi)
: "a" (func), "b" (ebx_in), "c" (ecx_in)
: "memory", "cc");
+ firmware_restrict_branch_speculation_end();
}

static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
@@ -55,6 +59,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
* N.B. We do NOT need a cld after the BIOS call
* because we always save and restore the flags.
*/
+ firmware_restrict_branch_speculation_start();
__asm__ __volatile__(APM_DO_ZERO_SEGS
"pushl %%edi\n\t"
"pushl %%ebp\n\t"
@@ -67,6 +72,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
"=S" (si)
: "a" (func), "b" (ebx_in), "c" (ecx_in)
: "memory", "cc");
+ firmware_restrict_branch_speculation_end();
return error;
}

diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 8722c8f7405d..8f45ed429ba0 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -8,7 +8,7 @@
#include <asm/required-features.h>
#endif

-#define NCAPINTS 10 /* N 32-bit words worth of info */
+#define NCAPINTS 12 /* N 32-bit words worth of info */
#define NBUGINTS 1 /* N 32-bit bug flags */

/*
@@ -189,6 +189,9 @@
#define X86_FEATURE_INVPCID_SINGLE (7*32+10) /* Effectively INVPCID && CR4.PCIDE=1 */
#define X86_FEATURE_RSB_CTXSW (7*32+11) /* "" Fill RSB on context switches */

+#define X86_FEATURE_USE_IBPB (7*32+12) /* "" Indirect Branch Prediction Barrier enabled */
+#define X86_FEATURE_USE_IBRS_FW (7*32+13) /* "" Use IBRS during runtime firmware calls */
+
#define X86_FEATURE_RETPOLINE (7*32+29) /* "" Generic Retpoline mitigation for Spectre variant 2 */
#define X86_FEATURE_RETPOLINE_AMD (7*32+30) /* "" AMD Retpoline mitigation for Spectre variant 2 */
/* Because the ALTERNATIVE scheme is for members of the X86_FEATURE club... */
@@ -234,6 +237,16 @@
#define X86_FEATURE_AVX512ER (9*32+27) /* AVX-512 Exponential and Reciprocal */
#define X86_FEATURE_AVX512CD (9*32+28) /* AVX-512 Conflict Detection */

+/* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 10 */
+#define X86_FEATURE_SPEC_CTRL (10*32+26) /* "" Speculation Control (IBRS + IBPB) */
+#define X86_FEATURE_INTEL_STIBP (10*32+27) /* "" Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_ARCH_CAPABILITIES (10*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
+
+/* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 11 */
+#define X86_FEATURE_IBPB (11*32+12) /* Indirect Branch Prediction Barrier */
+#define X86_FEATURE_IBRS (11*32+14) /* Indirect Branch Restricted Speculation */
+#define X86_FEATURE_STIBP (11*32+15) /* Single Thread Indirect Branch Predictors */
+
/*
* BUG word(s)
*/
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index 81396a9a9277..69effe9f0380 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -2,6 +2,8 @@
#define _ASM_X86_EFI_H

#include <asm/i387.h>
+#include <asm/nospec-branch.h>
+
/*
* We map the EFI regions needed for runtime services non-contiguously,
* with preserved alignment on virtual addresses starting from -4G down
@@ -37,8 +39,10 @@ extern unsigned long asmlinkage efi_call_phys(void *, ...);
({ \
efi_status_t __s; \
kernel_fpu_begin(); \
+ firmware_restrict_branch_speculation_start(); \
__s = ((efi_##f##_t __attribute__((regparm(0)))*) \
efi.systab->runtime->f)(args); \
+ firmware_restrict_branch_speculation_end(); \
kernel_fpu_end(); \
__s; \
})
@@ -47,8 +51,10 @@ extern unsigned long asmlinkage efi_call_phys(void *, ...);
#define __efi_call_virt(f, args...) \
({ \
kernel_fpu_begin(); \
+ firmware_restrict_branch_speculation_start(); \
((efi_##f##_t __attribute__((regparm(0)))*) \
efi.systab->runtime->f)(args); \
+ firmware_restrict_branch_speculation_end(); \
kernel_fpu_end(); \
})

@@ -69,7 +75,9 @@ extern u64 asmlinkage efi_call(void *fp, ...);
efi_sync_low_kernel_mappings(); \
preempt_disable(); \
__kernel_fpu_begin(); \
+ firmware_restrict_branch_speculation_start(); \
__s = efi_call((void *)efi.systab->runtime->f, __VA_ARGS__); \
+ firmware_restrict_branch_speculation_end(); \
__kernel_fpu_end(); \
preempt_enable(); \
__s; \
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index 6999f7d01a0d..caa8ccb7587b 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -12,16 +12,18 @@
*/

#define INTEL_FAM6_CORE_YONAH 0x0E
+
#define INTEL_FAM6_CORE2_MEROM 0x0F
#define INTEL_FAM6_CORE2_MEROM_L 0x16
#define INTEL_FAM6_CORE2_PENRYN 0x17
#define INTEL_FAM6_CORE2_DUNNINGTON 0x1D

#define INTEL_FAM6_NEHALEM 0x1E
+#define INTEL_FAM6_NEHALEM_G 0x1F /* Auburndale / Havendale */
#define INTEL_FAM6_NEHALEM_EP 0x1A
#define INTEL_FAM6_NEHALEM_EX 0x2E
+
#define INTEL_FAM6_WESTMERE 0x25
-#define INTEL_FAM6_WESTMERE2 0x1F
#define INTEL_FAM6_WESTMERE_EP 0x2C
#define INTEL_FAM6_WESTMERE_EX 0x2F

@@ -36,9 +38,9 @@
#define INTEL_FAM6_HASWELL_GT3E 0x46

#define INTEL_FAM6_BROADWELL_CORE 0x3D
-#define INTEL_FAM6_BROADWELL_XEON_D 0x56
#define INTEL_FAM6_BROADWELL_GT3E 0x47
#define INTEL_FAM6_BROADWELL_X 0x4F
+#define INTEL_FAM6_BROADWELL_XEON_D 0x56

#define INTEL_FAM6_SKYLAKE_MOBILE 0x4E
#define INTEL_FAM6_SKYLAKE_DESKTOP 0x5E
@@ -56,10 +58,11 @@
#define INTEL_FAM6_ATOM_SILVERMONT1 0x37 /* BayTrail/BYT / Valleyview */
#define INTEL_FAM6_ATOM_SILVERMONT2 0x4D /* Avaton/Rangely */
#define INTEL_FAM6_ATOM_AIRMONT 0x4C /* CherryTrail / Braswell */
-#define INTEL_FAM6_ATOM_MERRIFIELD1 0x4A /* Tangier */
-#define INTEL_FAM6_ATOM_MERRIFIELD2 0x5A /* Annidale */
+#define INTEL_FAM6_ATOM_MERRIFIELD 0x4A /* Tangier */
+#define INTEL_FAM6_ATOM_MOOREFIELD 0x5A /* Anniedale */
#define INTEL_FAM6_ATOM_GOLDMONT 0x5C
#define INTEL_FAM6_ATOM_DENVERTON 0x5F /* Goldmont Microserver */
+#define INTEL_FAM6_ATOM_GEMINI_LAKE 0x7A

/* Xeon Phi */

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 188428cfe3d3..4af016445a05 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -682,8 +682,8 @@ struct kvm_x86_ops {
void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
void (*vcpu_put)(struct kvm_vcpu *vcpu);

- void (*update_db_bp_intercept)(struct kvm_vcpu *vcpu);
- int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
+ void (*update_bp_intercept)(struct kvm_vcpu *vcpu);
+ int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
void (*get_segment)(struct kvm_vcpu *vcpu,
@@ -853,7 +853,7 @@ static inline int emulate_instruction(struct kvm_vcpu *vcpu,

void kvm_enable_efer_bits(u64);
bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer);
-int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data);
+int kvm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);
int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);

struct x86_emulate_ctxt;
@@ -881,7 +881,7 @@ void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr);

-int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
+int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);
int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);

unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 66094a0473a8..b87c91d5458e 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -6,6 +6,7 @@
#include <asm/alternative.h>
#include <asm/alternative-asm.h>
#include <asm/cpufeature.h>
+#include <asm/msr-index.h>

/*
* Fill the CPU return stack buffer.
@@ -194,5 +195,41 @@ static inline void vmexit_fill_RSB(void)
#endif
}

+#define alternative_msr_write(_msr, _val, _feature) \
+ asm volatile(ALTERNATIVE("", \
+ "movl %[msr], %%ecx\n\t" \
+ "movl %[val], %%eax\n\t" \
+ "movl $0, %%edx\n\t" \
+ "wrmsr", \
+ _feature) \
+ : : [msr] "i" (_msr), [val] "i" (_val) \
+ : "eax", "ecx", "edx", "memory")
+
+static inline void indirect_branch_prediction_barrier(void)
+{
+ alternative_msr_write(MSR_IA32_PRED_CMD, PRED_CMD_IBPB,
+ X86_FEATURE_USE_IBPB);
+}
+
+/*
+ * With retpoline, we must use IBRS to restrict branch prediction
+ * before calling into firmware.
+ *
+ * (Implemented as CPP macros due to header hell.)
+ */
+#define firmware_restrict_branch_speculation_start() \
+do { \
+ preempt_disable(); \
+ alternative_msr_write(MSR_IA32_SPEC_CTRL, SPEC_CTRL_IBRS, \
+ X86_FEATURE_USE_IBRS_FW); \
+} while (0)
+
+#define firmware_restrict_branch_speculation_end() \
+do { \
+ alternative_msr_write(MSR_IA32_SPEC_CTRL, 0, \
+ X86_FEATURE_USE_IBRS_FW); \
+ preempt_enable(); \
+} while (0)
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_NOSPEC_BRANCH_H_ */
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 1b5b34cba964..56bae3eedc70 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -263,14 +263,14 @@ static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
{
pmdval_t v = native_pmd_val(pmd);

- return __pmd(v | set);
+ return native_make_pmd(v | set);
}

static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
{
pmdval_t v = native_pmd_val(pmd);

- return __pmd(v & ~clear);
+ return native_make_pmd(v & ~clear);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index c5d7703dc591..90aaa6f39d61 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -321,6 +321,11 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
#else
#include <asm-generic/pgtable-nopmd.h>

+static inline pmd_t native_make_pmd(pmdval_t val)
+{
+ return (pmd_t) { .pud.pgd = native_make_pgd(val) };
+}
+
static inline pmdval_t native_pmd_val(pmd_t pmd)
{
return native_pgd_val(pmd.pud.pgd);
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 7004d21e6219..b4348fb451ab 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -296,6 +296,7 @@ enum vmcs_field {
#define INTR_TYPE_NMI_INTR (2 << 8) /* NMI */
#define INTR_TYPE_HARD_EXCEPTION (3 << 8) /* processor exception */
#define INTR_TYPE_SOFT_INTR (4 << 8) /* software interrupt */
+#define INTR_TYPE_PRIV_SW_EXCEPTION (5 << 8) /* ICE breakpoint - undocumented */
#define INTR_TYPE_SOFT_EXCEPTION (6 << 8) /* software exception */

/* GUEST_INTERRUPTIBILITY_INFO flags. */
diff --git a/arch/x86/include/uapi/asm/mce.h b/arch/x86/include/uapi/asm/mce.h
index a0eab85ce7b8..c9872a4585fe 100644
--- a/arch/x86/include/uapi/asm/mce.h
+++ b/arch/x86/include/uapi/asm/mce.h
@@ -25,6 +25,10 @@ struct mce {
__u32 socketid; /* CPU socket ID */
__u32 apicid; /* CPU initial apic ID */
__u64 mcgcap; /* MCGCAP MSR: machine check capabilities of CPU */
+ __u64 synd; /* MCA_SYND MSR: only valid on SMCA systems */
+ __u64 ipid; /* MCA_IPID MSR: only valid on SMCA systems */
+ __u64 ppin; /* Protected Processor Inventory Number */
+ __u32 microcode;/* Microcode revision */
};

#define MCE_GET_RECORD_LEN _IOR('M', 1, int)
diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h
index 1d3811d1506e..6544e99baa1c 100644
--- a/arch/x86/include/uapi/asm/msr-index.h
+++ b/arch/x86/include/uapi/asm/msr-index.h
@@ -32,6 +32,13 @@
#define EFER_FFXSR (1<<_EFER_FFXSR)

/* Intel MSRs. Some also available on other CPUs */
+#define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */
+#define SPEC_CTRL_IBRS (1 << 0) /* Indirect Branch Restricted Speculation */
+#define SPEC_CTRL_STIBP (1 << 1) /* Single Thread Indirect Branch Predictors */
+
+#define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */
+#define PRED_CMD_IBPB (1 << 0) /* Indirect Branch Prediction Barrier */
+
#define MSR_IA32_PERFCTR0 0x000000c1
#define MSR_IA32_PERFCTR1 0x000000c2
#define MSR_FSB_FREQ 0x000000cd
@@ -46,6 +53,11 @@

#define MSR_PLATFORM_INFO 0x000000ce
#define MSR_MTRRcap 0x000000fe
+
+#define MSR_IA32_ARCH_CAPABILITIES 0x0000010a
+#define ARCH_CAP_RDCL_NO (1 << 0) /* Not susceptible to Meltdown */
+#define ARCH_CAP_IBRS_ALL (1 << 1) /* Enhanced IBRS support */
+
#define MSR_IA32_BBL_CR_CTL 0x00000119
#define MSR_IA32_BBL_CR_CTL3 0x0000011e

diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index 76164e173a24..0213ba8b4262 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -29,6 +29,7 @@
#include <asm/dma.h>
#include <asm/amd_nb.h>
#include <asm/x86_init.h>
+#include <linux/crash_dump.h>

/*
* Using 512M as goal, in case kexec will load kernel_big
@@ -55,6 +56,33 @@ int fallback_aper_force __initdata;

int fix_aperture __initdata = 1;

+#ifdef CONFIG_PROC_VMCORE
+/*
+ * If the first kernel maps the aperture over e820 RAM, the kdump kernel will
+ * use the same range because it will remain configured in the northbridge.
+ * Trying to dump this area via /proc/vmcore may crash the machine, so exclude
+ * it from vmcore.
+ */
+static unsigned long aperture_pfn_start, aperture_page_count;
+
+static int gart_oldmem_pfn_is_ram(unsigned long pfn)
+{
+ return likely((pfn < aperture_pfn_start) ||
+ (pfn >= aperture_pfn_start + aperture_page_count));
+}
+
+static void exclude_from_vmcore(u64 aper_base, u32 aper_order)
+{
+ aperture_pfn_start = aper_base >> PAGE_SHIFT;
+ aperture_page_count = (32 * 1024 * 1024) << aper_order >> PAGE_SHIFT;
+ WARN_ON(register_oldmem_pfn_is_ram(&gart_oldmem_pfn_is_ram));
+}
+#else
+static void exclude_from_vmcore(u64 aper_base, u32 aper_order)
+{
+}
+#endif
+
/* This code runs before the PCI subsystem is initialized, so just
access the northbridge directly. */

@@ -436,8 +464,16 @@ int __init gart_iommu_hole_init(void)

out:
if (!fix && !fallback_aper_force) {
- if (last_aper_base)
+ if (last_aper_base) {
+ /*
+ * If this is the kdump kernel, the first kernel
+ * may have allocated the range over its e820 RAM
+ * and fixed up the northbridge
+ */
+ exclude_from_vmcore(last_aper_base, last_aper_order);
+
return 1;
+ }
return 0;
}

@@ -474,6 +510,14 @@ int __init gart_iommu_hole_init(void)
return 0;
}

+ /*
+ * If this is the kdump kernel _and_ the first kernel did not
+ * configure the aperture in the northbridge, this range may
+ * overlap with the first kernel's memory. We can't access the
+ * range through vmcore even though it should be part of the dump.
+ */
+ exclude_from_vmcore(aper_alloc, aper_order);
+
/* Fix up the north bridges */
for (i = 0; i < amd_nb_bus_dev_ranges[i].dev_limit; i++) {
int bus, dev_base, dev_limit;
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index db2fc61ba99a..c3000969dd46 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -358,6 +358,21 @@ static void __init spectre_v2_select_mitigation(void)
setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
pr_info("Filling RSB on context switch\n");
}
+
+ /* Initialize Indirect Branch Prediction Barrier if supported */
+ if (boot_cpu_has(X86_FEATURE_IBPB)) {
+ setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
+ pr_info("Enabling Indirect Branch Prediction Barrier\n");
+ }
+
+ /*
+ * Retpoline means the kernel is safe because it has no indirect
+ * branches. But firmware isn't, so use IBRS to protect that.
+ */
+ if (boot_cpu_has(X86_FEATURE_IBRS)) {
+ setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
+ pr_info("Enabling Restricted Speculation for firmware calls\n");
+ }
}

#undef pr_fmt
@@ -387,7 +402,9 @@ ssize_t cpu_show_spectre_v2(struct device *dev,
if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
return sprintf(buf, "Not affected\n");

- return sprintf(buf, "%s%s\n", spectre_v2_strings[spectre_v2_enabled],
+ return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
+ boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
+ boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
spectre_v2_module_string());
}
#endif
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 44fd2ecb9859..6cfcffe66652 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -41,6 +41,8 @@
#include <asm/pat.h>
#include <asm/microcode.h>
#include <asm/microcode_intel.h>
+#include <asm/intel-family.h>
+#include <asm/cpu_device_id.h>

#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/uv/uv.h>
@@ -680,6 +682,26 @@ static void apply_forced_caps(struct cpuinfo_x86 *c)
}
}

+static void init_speculation_control(struct cpuinfo_x86 *c)
+{
+ /*
+ * The Intel SPEC_CTRL CPUID bit implies IBRS and IBPB support,
+ * and they also have a different bit for STIBP support. Also,
+ * a hypervisor might have set the individual AMD bits even on
+ * Intel CPUs, for finer-grained selection of what's available.
+ *
+ * We use the AMD bits in 0x8000_0008 EBX as the generic hardware
+ * features, which are visible in /proc/cpuinfo and used by the
+ * kernel. So set those accordingly from the Intel bits.
+ */
+ if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) {
+ set_cpu_cap(c, X86_FEATURE_IBRS);
+ set_cpu_cap(c, X86_FEATURE_IBPB);
+ }
+ if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
+ set_cpu_cap(c, X86_FEATURE_STIBP);
+}
+
void get_cpu_cap(struct cpuinfo_x86 *c)
{
u32 tfms, xlvl;
@@ -701,6 +723,7 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx);

c->x86_capability[9] = ebx;
+ c->x86_capability[10] = edx;
}

/* AMD-defined flags: level 0x80000001 */
@@ -715,10 +738,13 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
}

if (c->extended_cpuid_level >= 0x80000008) {
- u32 eax = cpuid_eax(0x80000008);
+ u32 eax, ebx, ecx, edx;
+
+ cpuid(0x80000008, &eax, &ebx, &ecx, &edx);

c->x86_virt_bits = (eax >> 8) & 0xff;
c->x86_phys_bits = eax & 0xff;
+ c->x86_capability[11] = ebx;
}
#ifdef CONFIG_X86_32
else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
@@ -729,6 +755,7 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
c->x86_power = cpuid_edx(0x80000007);

init_scattered_cpuid_features(c);
+ init_speculation_control(c);
}

static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
@@ -757,6 +784,41 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
#endif
}

+static const __initconst struct x86_cpu_id cpu_no_speculation[] = {
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CEDARVIEW, X86_FEATURE_ANY },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CLOVERVIEW, X86_FEATURE_ANY },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_LINCROFT, X86_FEATURE_ANY },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PENWELL, X86_FEATURE_ANY },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PINEVIEW, X86_FEATURE_ANY },
+ { X86_VENDOR_CENTAUR, 5 },
+ { X86_VENDOR_INTEL, 5 },
+ { X86_VENDOR_NSC, 5 },
+ { X86_VENDOR_ANY, 4 },
+ {}
+};
+
+static const __initconst struct x86_cpu_id cpu_no_meltdown[] = {
+ { X86_VENDOR_AMD },
+ {}
+};
+
+static bool __init cpu_vulnerable_to_meltdown(struct cpuinfo_x86 *c)
+{
+ u64 ia32_cap = 0;
+
+ if (x86_match_cpu(cpu_no_meltdown))
+ return false;
+
+ if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
+ rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
+
+ /* Rogue Data Cache Load? No! */
+ if (ia32_cap & ARCH_CAP_RDCL_NO)
+ return false;
+
+ return true;
+}
+
/*
* Do minimum CPU detection early.
* Fields really needed: vendor, cpuid_level, family, model, mask,
@@ -805,11 +867,12 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)

setup_force_cpu_cap(X86_FEATURE_ALWAYS);

- if (c->x86_vendor != X86_VENDOR_AMD)
- setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
-
- setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
- setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
+ if (!x86_match_cpu(cpu_no_speculation)) {
+ if (cpu_vulnerable_to_meltdown(c))
+ setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
+ setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
+ setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
+ }
}

void __init early_cpu_init(void)
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index cfbdac5c601e..aa3d005ad648 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -13,6 +13,7 @@
#include <asm/msr.h>
#include <asm/bugs.h>
#include <asm/cpu.h>
+#include <asm/intel-family.h>

#ifdef CONFIG_X86_64
#include <linux/topology.h>
@@ -25,6 +26,63 @@
#include <asm/apic.h>
#endif

+/*
+ * Early microcode releases for the Spectre v2 mitigation were broken.
+ * Information taken from:
+ * - https://newsroom.intel.com/wp-content/uploads/sites/11/2018/01/microcode-update-guidance.pdf
+ * - https://kb.vmware.com/s/article/52345
+ * - Microcode revisions observed in the wild
+ * - Release note from 20180108 microcode release
+ */
+struct sku_microcode {
+ u8 model;
+ u8 stepping;
+ u32 microcode;
+};
+static const struct sku_microcode spectre_bad_microcodes[] = {
+ { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0B, 0x80 },
+ { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0A, 0x80 },
+ { INTEL_FAM6_KABYLAKE_DESKTOP, 0x09, 0x80 },
+ { INTEL_FAM6_KABYLAKE_MOBILE, 0x0A, 0x80 },
+ { INTEL_FAM6_KABYLAKE_MOBILE, 0x09, 0x80 },
+ { INTEL_FAM6_SKYLAKE_X, 0x03, 0x0100013e },
+ { INTEL_FAM6_SKYLAKE_X, 0x04, 0x0200003c },
+ { INTEL_FAM6_SKYLAKE_DESKTOP, 0x03, 0xc2 },
+ { INTEL_FAM6_BROADWELL_CORE, 0x04, 0x28 },
+ { INTEL_FAM6_BROADWELL_GT3E, 0x01, 0x1b },
+ { INTEL_FAM6_BROADWELL_XEON_D, 0x02, 0x14 },
+ { INTEL_FAM6_BROADWELL_XEON_D, 0x03, 0x07000011 },
+ { INTEL_FAM6_BROADWELL_X, 0x01, 0x0b000025 },
+ { INTEL_FAM6_HASWELL_ULT, 0x01, 0x21 },
+ { INTEL_FAM6_HASWELL_GT3E, 0x01, 0x18 },
+ { INTEL_FAM6_HASWELL_CORE, 0x03, 0x23 },
+ { INTEL_FAM6_HASWELL_X, 0x02, 0x3b },
+ { INTEL_FAM6_HASWELL_X, 0x04, 0x10 },
+ { INTEL_FAM6_IVYBRIDGE_X, 0x04, 0x42a },
+ /* Observed in the wild */
+ { INTEL_FAM6_SANDYBRIDGE_X, 0x06, 0x61b },
+ { INTEL_FAM6_SANDYBRIDGE_X, 0x07, 0x712 },
+};
+
+static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
+{
+ int i;
+
+ /*
+ * We know that hypervisors lie to us about the microcode version, so
+ * we may as well hope that they are running the correct version.
+ */
+ if (cpu_has(c, X86_FEATURE_HYPERVISOR))
+ return false;
+
+ for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
+ if (c->x86_model == spectre_bad_microcodes[i].model &&
+ c->x86_mask == spectre_bad_microcodes[i].stepping)
+ return (c->microcode <= spectre_bad_microcodes[i].microcode);
+ }
+ return false;
+}
+
static void early_init_intel(struct cpuinfo_x86 *c)
{
u64 misc_enable;
@@ -51,6 +109,19 @@ static void early_init_intel(struct cpuinfo_x86 *c)
rdmsr(MSR_IA32_UCODE_REV, lower_word, c->microcode);
}

+ /* Now if any of them are set, check the blacklist and clear the lot */
+ if ((cpu_has(c, X86_FEATURE_SPEC_CTRL) ||
+ cpu_has(c, X86_FEATURE_INTEL_STIBP) ||
+ cpu_has(c, X86_FEATURE_IBRS) || cpu_has(c, X86_FEATURE_IBPB) ||
+ cpu_has(c, X86_FEATURE_STIBP)) && bad_spectre_microcode(c)) {
+ pr_warn("Intel Spectre v2 broken microcode detected; disabling Speculation Control\n");
+ setup_clear_cpu_cap(X86_FEATURE_IBRS);
+ setup_clear_cpu_cap(X86_FEATURE_IBPB);
+ setup_clear_cpu_cap(X86_FEATURE_STIBP);
+ setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL);
+ setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP);
+ }
+
/*
* Atom erratum AAE44/AAF40/AAG38/AAH41:
*
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 86f98cbb411e..e040258fd4a0 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -57,6 +57,9 @@ static DEFINE_MUTEX(mce_chrdev_read_mutex);
rcu_read_lock_sched_held() || \
lockdep_is_held(&mce_chrdev_read_mutex))

+/* sysfs synchronization */
+static DEFINE_MUTEX(mce_sysfs_mutex);
+
#define CREATE_TRACE_POINTS
#include <trace/events/mce.h>

@@ -132,6 +135,8 @@ void mce_setup(struct mce *m)
m->socketid = cpu_data(m->extcpu).phys_proc_id;
m->apicid = cpu_data(m->extcpu).initial_apicid;
rdmsrl(MSR_IA32_MCG_CAP, m->mcgcap);
+
+ m->microcode = boot_cpu_data.microcode;
}

DEFINE_PER_CPU(struct mce, injectm);
@@ -279,7 +284,7 @@ static void print_mce(struct mce *m)
*/
pr_emerg(HW_ERR "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x microcode %x\n",
m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid,
- cpu_data(m->extcpu).microcode);
+ m->microcode);

/*
* Print out human-readable details about the MCE error,
@@ -2199,6 +2204,7 @@ static ssize_t set_ignore_ce(struct device *s,
if (strict_strtoull(buf, 0, &new) < 0)
return -EINVAL;

+ mutex_lock(&mce_sysfs_mutex);
if (mca_cfg.ignore_ce ^ !!new) {
if (new) {
/* disable ce features */
@@ -2211,6 +2217,8 @@ static ssize_t set_ignore_ce(struct device *s,
on_each_cpu(mce_enable_ce, (void *)1, 1);
}
}
+ mutex_unlock(&mce_sysfs_mutex);
+
return size;
}

@@ -2223,6 +2231,7 @@ static ssize_t set_cmci_disabled(struct device *s,
if (strict_strtoull(buf, 0, &new) < 0)
return -EINVAL;

+ mutex_lock(&mce_sysfs_mutex);
if (mca_cfg.cmci_disabled ^ !!new) {
if (new) {
/* disable cmci */
@@ -2234,6 +2243,8 @@ static ssize_t set_cmci_disabled(struct device *s,
on_each_cpu(mce_enable_ce, NULL, 1);
}
}
+ mutex_unlock(&mce_sysfs_mutex);
+
return size;
}

@@ -2241,8 +2252,19 @@ static ssize_t store_int_with_restart(struct device *s,
struct device_attribute *attr,
const char *buf, size_t size)
{
- ssize_t ret = device_store_int(s, attr, buf, size);
+ unsigned long old_check_interval = check_interval;
+ ssize_t ret = device_store_ulong(s, attr, buf, size);
+
+ if (check_interval == old_check_interval)
+ return ret;
+
+ if (check_interval < 1)
+ check_interval = 1;
+
+ mutex_lock(&mce_sysfs_mutex);
mce_restart();
+ mutex_unlock(&mce_sysfs_mutex);
+
return ret;
}

diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index dd9d6190b08d..5bdb418f25c9 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -97,7 +97,7 @@ MODULE_LICENSE("GPL");

static struct microcode_ops *microcode_ops;

-bool dis_ucode_ldr;
+bool dis_ucode_ldr = IS_ENABLED(CONFIG_MICROCODE_EARLY);
module_param(dis_ucode_ldr, bool, 0);

/*
diff --git a/arch/x86/kernel/cpu/microcode/core_early.c b/arch/x86/kernel/cpu/microcode/core_early.c
index 5f28a64e71ea..0321ad198037 100644
--- a/arch/x86/kernel/cpu/microcode/core_early.c
+++ b/arch/x86/kernel/cpu/microcode/core_early.c
@@ -76,6 +76,8 @@ static int x86_family(void)

static bool __init check_loader_disabled_bsp(void)
{
+ u32 a, b, c, d;
+
#ifdef CONFIG_X86_32
const char *cmdline = (const char *)__pa_nodebug(boot_command_line);
const char *opt = "dis_ucode_ldr";
@@ -88,8 +90,23 @@ static bool __init check_loader_disabled_bsp(void)
bool *res = &dis_ucode_ldr;
#endif

- if (cmdline_find_option_bool(cmdline, option))
- *res = true;
+ if (!have_cpuid_p())
+ return *res;
+
+ a = 1;
+ c = 0;
+ native_cpuid(&a, &b, &c, &d);
+
+ /*
+ * CPUID(1).ECX[31]: reserved for hypervisor use. This is still not
+ * completely accurate as xen pv guests don't see that CPUID bit set but
+ * that's good enough as they don't land on the BSP path anyway.
+ */
+ if (c & BIT(31))
+ return *res;
+
+ if (cmdline_find_option_bool(cmdline, option) <= 0)
+ *res = false;

return *res;
}
@@ -101,9 +118,6 @@ void __init load_ucode_bsp(void)
if (check_loader_disabled_bsp())
return;

- if (!have_cpuid_p())
- return;
-
vendor = x86_vendor();
x86 = x86_family();

@@ -124,7 +138,7 @@ void __init load_ucode_bsp(void)
static bool check_loader_disabled_ap(void)
{
#ifdef CONFIG_X86_32
- return __pa_nodebug(dis_ucode_ldr);
+ return *((bool *)__pa_nodebug(&dis_ucode_ldr));
#else
return dis_ucode_ldr;
#endif
@@ -137,9 +151,6 @@ void load_ucode_ap(void)
if (check_loader_disabled_ap())
return;

- if (!have_cpuid_p())
- return;
-
vendor = x86_vendor();
x86 = x86_family();

diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 3ae017f16930..1c4c26cfa778 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -1322,7 +1322,7 @@ apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
#endif /* CONFIG_HYPERV */

idtentry debug do_debug has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK
-idtentry int3 do_int3 has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK
+idtentry int3 do_int3 has_error_code=0
idtentry stack_segment do_stack_segment has_error_code=1
#ifdef CONFIG_XEN
idtentry xen_debug do_debug has_error_code=0
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 871f43ae47ef..26ae4d7f8c45 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -334,7 +334,6 @@ do_general_protection(struct pt_regs *regs, long error_code)
}
NOKPROBE_SYMBOL(do_general_protection);

-/* May run on IST stack. */
dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
{
enum ctx_state prev_state;
@@ -367,15 +366,9 @@ dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
SIGTRAP) == NOTIFY_STOP)
goto exit;

- /*
- * Let others (NMI) know that the debug stack is in use
- * as we may switch to the interrupt stack.
- */
- debug_stack_usage_inc();
preempt_conditional_sti(regs);
do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL);
preempt_conditional_cli(regs);
- debug_stack_usage_dec();
exit:
exception_exit(prev_state);
}
@@ -773,9 +766,17 @@ dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
/* Set of traps needed for early debugging. */
void __init early_trap_init(void)
{
- set_intr_gate_ist(X86_TRAP_DB, &debug, DEBUG_STACK);
+ /*
+ * Don't set ist to DEBUG_STACK as it doesn't work until TSS is
+ * ready in cpu_init() <-- trap_init(). Before trap_init(), CPU
+ * runs at ring 0 so it is impossible to hit an invalid stack.
+ * Using the original stack works well enough at this early
+ * stage. DEBUG_STACK will be equipped after cpu_init() in
+ * trap_init().
+ */
+ set_intr_gate_ist(X86_TRAP_DB, &debug, 0);
/* int3 can be called from all */
- set_system_intr_gate_ist(X86_TRAP_BP, &int3, DEBUG_STACK);
+ set_system_intr_gate_ist(X86_TRAP_BP, &int3, 0);
#ifdef CONFIG_X86_32
set_intr_gate(X86_TRAP_PF, page_fault);
#endif
@@ -853,11 +854,17 @@ void __init trap_init(void)
*/
cpu_init();

+ /*
+ * X86_TRAP_DB was installed in early_trap_init(). However,
+ * DEBUG_STACK works only after cpu_init() loads TSS. See comments
+ * in early_trap_init().
+ */
+ set_intr_gate_ist(X86_TRAP_DB, &debug, DEBUG_STACK);
+
x86_init.irqs.trap_init();

#ifdef CONFIG_X86_64
memcpy(&debug_idt_table, &idt_table, IDT_ENTRIES * 16);
set_nmi_gate(X86_TRAP_DB, &debug);
- set_nmi_gate(X86_TRAP_BP, &int3);
#endif
}
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index d77d76936a6d..c911af5d0cf5 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -300,6 +300,10 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
F(3DNOWPREFETCH) | F(OSVW) | 0 /* IBS */ | F(XOP) |
0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM);

+ /* cpuid 0x80000008.ebx */
+ const u32 kvm_cpuid_8000_0008_ebx_x86_features =
+ F(IBPB) | F(IBRS);
+
/* cpuid 0xC0000001.edx */
const u32 kvm_supported_word5_x86_features =
F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) |
@@ -312,6 +316,10 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
F(BMI2) | F(ERMS) | f_invpcid | F(RTM) | f_mpx | F(RDSEED) |
F(ADX) | F(SMAP);

+ /* cpuid 7.0.edx*/
+ const u32 kvm_cpuid_7_0_edx_x86_features =
+ F(SPEC_CTRL) | F(ARCH_CAPABILITIES);
+
/* all calls to cpuid_count() should be made on the same cpu */
get_cpu();

@@ -383,11 +391,14 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
cpuid_mask(&entry->ebx, 9);
// TSC_ADJUST is emulated
entry->ebx |= F(TSC_ADJUST);
- } else
+ entry->edx &= kvm_cpuid_7_0_edx_x86_features;
+ cpuid_mask(&entry->edx, 10);
+ } else {
entry->ebx = 0;
+ entry->edx = 0;
+ }
entry->eax = 0;
entry->ecx = 0;
- entry->edx = 0;
break;
}
case 9:
@@ -512,7 +523,14 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
if (!g_phys_as)
g_phys_as = phys_as;
entry->eax = g_phys_as | (virt_as << 8);
- entry->ebx = entry->edx = 0;
+ entry->edx = 0;
+ /* IBRS and IBPB aren't necessarily present in hardware cpuid */
+ if (boot_cpu_has(X86_FEATURE_IBPB))
+ entry->ebx |= F(IBPB);
+ if (boot_cpu_has(X86_FEATURE_IBRS))
+ entry->ebx |= F(IBRS);
+ entry->ebx &= kvm_cpuid_8000_0008_ebx_x86_features;
+ cpuid_mask(&entry->ebx, 11);
break;
}
case 0x80000019:
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index 06fa616e3f69..aff98b185e87 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -104,4 +104,35 @@ static inline bool guest_cpuid_has_mpx(struct kvm_vcpu *vcpu)
return best && (best->ebx & bit(X86_FEATURE_MPX));
}

+static inline bool guest_cpuid_has_ibpb(struct kvm_vcpu *vcpu)
+{
+ struct kvm_cpuid_entry2 *best;
+
+ best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
+ if (best && (best->ebx & bit(X86_FEATURE_IBPB)))
+ return true;
+ best = kvm_find_cpuid_entry(vcpu, 7, 0);
+ return best && (best->edx & bit(X86_FEATURE_SPEC_CTRL));
+}
+
+static inline bool guest_cpuid_has_ibrs(struct kvm_vcpu *vcpu)
+{
+ struct kvm_cpuid_entry2 *best;
+
+ best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
+ if (best && (best->ebx & bit(X86_FEATURE_IBRS)))
+ return true;
+ best = kvm_find_cpuid_entry(vcpu, 7, 0);
+ return best && (best->edx & bit(X86_FEATURE_SPEC_CTRL));
+}
+
+static inline bool guest_cpuid_has_arch_capabilities(struct kvm_vcpu *vcpu)
+{
+ struct kvm_cpuid_entry2 *best;
+
+ best = kvm_find_cpuid_entry(vcpu, 7, 0);
+ return best && (best->edx & bit(X86_FEATURE_ARCH_CAPABILITIES));
+}
+
+
#endif
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 80207eb90102..374d7c293740 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -36,6 +36,7 @@
#include <asm/desc.h>
#include <asm/debugreg.h>
#include <asm/kvm_para.h>
+#include <asm/microcode.h>
#include <asm/nospec-branch.h>

#include <asm/virtext.h>
@@ -146,6 +147,8 @@ struct vcpu_svm {
u64 gs_base;
} host;

+ u64 spec_ctrl;
+
u32 *msrpm;

ulong nmi_iret_rip;
@@ -180,6 +183,8 @@ static const struct svm_direct_access_msrs {
{ .index = MSR_CSTAR, .always = true },
{ .index = MSR_SYSCALL_MASK, .always = true },
#endif
+ { .index = MSR_IA32_SPEC_CTRL, .always = false },
+ { .index = MSR_IA32_PRED_CMD, .always = false },
{ .index = MSR_IA32_LASTBRANCHFROMIP, .always = false },
{ .index = MSR_IA32_LASTBRANCHTOIP, .always = false },
{ .index = MSR_IA32_LASTINTFROMIP, .always = false },
@@ -409,6 +414,7 @@ struct svm_cpu_data {
struct kvm_ldttss_desc *tss_desc;

struct page *save_area;
+ struct vmcb *current_vmcb;
};

static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
@@ -760,6 +766,25 @@ static bool valid_msr_intercept(u32 index)
return false;
}

+static bool msr_write_intercepted(struct kvm_vcpu *vcpu, unsigned msr)
+{
+ u8 bit_write;
+ unsigned long tmp;
+ u32 offset;
+ u32 *msrpm;
+
+ msrpm = is_guest_mode(vcpu) ? to_svm(vcpu)->nested.msrpm:
+ to_svm(vcpu)->msrpm;
+
+ offset = svm_msrpm_offset(msr);
+ bit_write = 2 * (msr & 0x0f) + 1;
+ tmp = msrpm[offset];
+
+ BUG_ON(offset == MSR_INVALID);
+
+ return !!test_bit(bit_write, &tmp);
+}
+
static void set_msr_interception(u32 *msrpm, unsigned msr,
int read, int write)
{
@@ -1204,6 +1229,8 @@ static void svm_vcpu_reset(struct kvm_vcpu *vcpu)
u32 dummy;
u32 eax = 1;

+ svm->spec_ctrl = 0;
+
init_vmcb(svm);

kvm_cpuid(vcpu, &eax, &dummy, &dummy, &dummy);
@@ -1294,11 +1321,17 @@ static void svm_free_vcpu(struct kvm_vcpu *vcpu)
__free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
kvm_vcpu_uninit(vcpu);
kmem_cache_free(kvm_vcpu_cache, svm);
+ /*
+ * The vmcb page can be recycled, causing a false negative in
+ * svm_vcpu_load(). So do a full IBPB now.
+ */
+ indirect_branch_prediction_barrier();
}

static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
+ struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
int i;

if (unlikely(cpu != vcpu->cpu)) {
@@ -1321,6 +1354,10 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
__get_cpu_var(current_tsc_ratio) = svm->tsc_ratio;
wrmsrl(MSR_AMD64_TSC_RATIO, svm->tsc_ratio);
}
+ if (sd->current_vmcb != svm->vmcb) {
+ sd->current_vmcb = svm->vmcb;
+ indirect_branch_prediction_barrier();
+ }
}

static void svm_vcpu_put(struct kvm_vcpu *vcpu)
@@ -3037,42 +3074,42 @@ u64 svm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
svm_scale_tsc(vcpu, host_tsc);
}

-static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
+static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
struct vcpu_svm *svm = to_svm(vcpu);

- switch (ecx) {
+ switch (msr_info->index) {
case MSR_IA32_TSC: {
- *data = svm->vmcb->control.tsc_offset +
+ msr_info->data = svm->vmcb->control.tsc_offset +
svm_scale_tsc(vcpu, native_read_tsc());

break;
}
case MSR_STAR:
- *data = svm->vmcb->save.star;
+ msr_info->data = svm->vmcb->save.star;
break;
#ifdef CONFIG_X86_64
case MSR_LSTAR:
- *data = svm->vmcb->save.lstar;
+ msr_info->data = svm->vmcb->save.lstar;
break;
case MSR_CSTAR:
- *data = svm->vmcb->save.cstar;
+ msr_info->data = svm->vmcb->save.cstar;
break;
case MSR_KERNEL_GS_BASE:
- *data = svm->vmcb->save.kernel_gs_base;
+ msr_info->data = svm->vmcb->save.kernel_gs_base;
break;
case MSR_SYSCALL_MASK:
- *data = svm->vmcb->save.sfmask;
+ msr_info->data = svm->vmcb->save.sfmask;
break;
#endif
case MSR_IA32_SYSENTER_CS:
- *data = svm->vmcb->save.sysenter_cs;
+ msr_info->data = svm->vmcb->save.sysenter_cs;
break;
case MSR_IA32_SYSENTER_EIP:
- *data = svm->sysenter_eip;
+ msr_info->data = svm->sysenter_eip;
break;
case MSR_IA32_SYSENTER_ESP:
- *data = svm->sysenter_esp;
+ msr_info->data = svm->sysenter_esp;
break;
/*
* Nobody will change the following 5 values in the VMCB so we can
@@ -3080,31 +3117,38 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
* implemented.
*/
case MSR_IA32_DEBUGCTLMSR:
- *data = svm->vmcb->save.dbgctl;
+ msr_info->data = svm->vmcb->save.dbgctl;
break;
case MSR_IA32_LASTBRANCHFROMIP:
- *data = svm->vmcb->save.br_from;
+ msr_info->data = svm->vmcb->save.br_from;
break;
case MSR_IA32_LASTBRANCHTOIP:
- *data = svm->vmcb->save.br_to;
+ msr_info->data = svm->vmcb->save.br_to;
break;
case MSR_IA32_LASTINTFROMIP:
- *data = svm->vmcb->save.last_excp_from;
+ msr_info->data = svm->vmcb->save.last_excp_from;
break;
case MSR_IA32_LASTINTTOIP:
- *data = svm->vmcb->save.last_excp_to;
+ msr_info->data = svm->vmcb->save.last_excp_to;
break;
case MSR_VM_HSAVE_PA:
- *data = svm->nested.hsave_msr;
+ msr_info->data = svm->nested.hsave_msr;
break;
case MSR_VM_CR:
- *data = svm->nested.vm_cr_msr;
+ msr_info->data = svm->nested.vm_cr_msr;
+ break;
+ case MSR_IA32_SPEC_CTRL:
+ if (!msr_info->host_initiated &&
+ !guest_cpuid_has_ibrs(vcpu))
+ return 1;
+
+ msr_info->data = svm->spec_ctrl;
break;
case MSR_IA32_UCODE_REV:
- *data = 0x01000065;
+ msr_info->data = 0x01000065;
break;
default:
- return kvm_get_msr_common(vcpu, ecx, data);
+ return kvm_get_msr_common(vcpu, msr_info);
}
return 0;
}
@@ -3112,16 +3156,18 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
static int rdmsr_interception(struct vcpu_svm *svm)
{
u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
- u64 data;
+ struct msr_data msr_info;

- if (svm_get_msr(&svm->vcpu, ecx, &data)) {
+ msr_info.index = ecx;
+ msr_info.host_initiated = false;
+ if (svm_get_msr(&svm->vcpu, &msr_info)) {
trace_kvm_msr_read_ex(ecx);
kvm_inject_gp(&svm->vcpu, 0);
} else {
- trace_kvm_msr_read(ecx, data);
+ trace_kvm_msr_read(ecx, msr_info.data);

- svm->vcpu.arch.regs[VCPU_REGS_RAX] = data & 0xffffffff;
- svm->vcpu.arch.regs[VCPU_REGS_RDX] = data >> 32;
+ svm->vcpu.arch.regs[VCPU_REGS_RAX] = msr_info.data & 0xffffffff;
+ svm->vcpu.arch.regs[VCPU_REGS_RDX] = msr_info.data >> 32;
svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
skip_emulated_instruction(&svm->vcpu);
}
@@ -3170,6 +3216,49 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
case MSR_IA32_TSC:
kvm_write_tsc(vcpu, msr);
break;
+ case MSR_IA32_SPEC_CTRL:
+ if (!msr->host_initiated &&
+ !guest_cpuid_has_ibrs(vcpu))
+ return 1;
+
+ /* The STIBP bit doesn't fault even if it's not advertised */
+ if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP))
+ return 1;
+
+ svm->spec_ctrl = data;
+
+ if (!data)
+ break;
+
+ /*
+ * For non-nested:
+ * When it's written (to non-zero) for the first time, pass
+ * it through.
+ *
+ * For nested:
+ * The handling of the MSR bitmap for L2 guests is done in
+ * nested_svm_vmrun_msrpm.
+ * We update the L1 MSR bit as well since it will end up
+ * touching the MSR anyway now.
+ */
+ set_msr_interception(svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1);
+ break;
+ case MSR_IA32_PRED_CMD:
+ if (!msr->host_initiated &&
+ !guest_cpuid_has_ibpb(vcpu))
+ return 1;
+
+ if (data & ~PRED_CMD_IBPB)
+ return 1;
+
+ if (!data)
+ break;
+
+ wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
+ if (is_guest_mode(vcpu))
+ break;
+ set_msr_interception(svm->msrpm, MSR_IA32_PRED_CMD, 0, 1);
+ break;
case MSR_STAR:
svm->vmcb->save.star = data;
break;
@@ -3872,6 +3961,15 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)

local_irq_enable();

+ /*
+ * If this vCPU has touched SPEC_CTRL, restore the guest's value if
+ * it's non-zero. Since vmentry is serialising on affected CPUs, there
+ * is no need to worry about the conditional branch over the wrmsr
+ * being speculatively taken.
+ */
+ if (svm->spec_ctrl)
+ native_wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
+
asm volatile (
"push %%" _ASM_BP "; \n\t"
"mov %c[rbx](%[svm]), %%" _ASM_BX " \n\t"
@@ -3964,6 +4062,27 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
#endif
);

+ /*
+ * We do not use IBRS in the kernel. If this vCPU has used the
+ * SPEC_CTRL MSR it may have left it on; save the value and
+ * turn it off. This is much more efficient than blindly adding
+ * it to the atomic save/restore list, especially as the former
+ * (saving guest MSRs on vmexit) doesn't even exist in KVM.
+ *
+ * For non-nested case:
+ * If the L01 MSR bitmap does not intercept the MSR, then we need to
+ * save it.
+ *
+ * For nested case:
+ * If the L02 MSR bitmap does not intercept the MSR, then we need to
+ * save it.
+ */
+ if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
+ svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
+
+ if (svm->spec_ctrl)
+ native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+
/* Eliminate branch target predictions from guest mode */
vmexit_fill_RSB();

@@ -4353,7 +4472,7 @@ static struct kvm_x86_ops svm_x86_ops = {
.vcpu_load = svm_vcpu_load,
.vcpu_put = svm_vcpu_put,

- .update_db_bp_intercept = update_bp_intercept,
+ .update_bp_intercept = update_bp_intercept,
.get_msr = svm_get_msr,
.set_msr = svm_set_msr,
.get_segment_base = svm_get_segment_base,
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 300ca8d07d9c..f37819266e81 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -46,6 +46,7 @@
#include <asm/perf_event.h>
#include <asm/debugreg.h>
#include <asm/kexec.h>
+#include <asm/microcode.h>
#include <asm/nospec-branch.h>

#include "trace.h"
@@ -101,6 +102,14 @@ module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO);
static bool __read_mostly nested = 0;
module_param(nested, bool, S_IRUGO);

+#define MSR_TYPE_R 1
+#define MSR_TYPE_W 2
+#define MSR_TYPE_RW 3
+
+#define MSR_BITMAP_MODE_X2APIC 1
+#define MSR_BITMAP_MODE_X2APIC_APICV 2
+#define MSR_BITMAP_MODE_LM 4
+
#define KVM_GUEST_CR0_MASK (X86_CR0_NW | X86_CR0_CD)
#define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST (X86_CR0_WP | X86_CR0_NE)
#define KVM_VM_CR0_ALWAYS_ON \
@@ -138,7 +147,6 @@ module_param(ple_window, int, S_IRUGO);
extern const ulong vmx_return;

#define NR_AUTOLOAD_MSRS 8
-#define VMCS02_POOL_SIZE 1

struct vmcs {
u32 revision_id;
@@ -155,6 +163,7 @@ struct loaded_vmcs {
struct vmcs *vmcs;
int cpu;
int launched;
+ unsigned long *msr_bitmap;
struct list_head loaded_vmcss_on_cpu_link;
};

@@ -171,7 +180,7 @@ struct shared_msr_entry {
* stored in guest memory specified by VMPTRLD, but is opaque to the guest,
* which must access it using VMREAD/VMWRITE/VMCLEAR instructions.
* More than one of these structures may exist, if L1 runs multiple L2 guests.
- * nested_vmx_run() will use the data here to build a vmcs02: a VMCS for the
+ * nested_vmx_run() will use the data here to build the vmcs02: a VMCS for the
* underlying hardware which will be used to run L2.
* This structure is packed to ensure that its layout is identical across
* machines (necessary for live migration).
@@ -342,13 +351,6 @@ struct __packed vmcs12 {
*/
#define VMCS12_SIZE 0x1000

-/* Used to remember the last vmcs02 used for some recently used vmcs12s */
-struct vmcs02_list {
- struct list_head list;
- gpa_t vmptr;
- struct loaded_vmcs vmcs02;
-};
-
/*
* The nested_vmx structure is part of vcpu_vmx, and holds information we need
* for correct emulation of VMX (i.e., nested VMX) on this vcpu.
@@ -370,16 +372,16 @@ struct nested_vmx {
*/
bool sync_shadow_vmcs;

- /* vmcs02_list cache of VMCSs recently used to run L2 guests */
- struct list_head vmcs02_pool;
- int vmcs02_num;
u64 vmcs01_tsc_offset;
bool change_vmcs01_virtual_x2apic_mode;
/* L2 must run next, and mustn't decide to exit to L1. */
bool nested_run_pending;
+
+ struct loaded_vmcs vmcs02;
+
/*
- * Guest pages referred to in vmcs02 with host-physical pointers, so
- * we must keep them pinned while L2 runs.
+ * Guest pages referred to in the vmcs02 with host-physical
+ * pointers, so we must keep them pinned while L2 runs.
*/
struct page *apic_access_page;
u64 msr_ia32_feature_control;
@@ -418,6 +420,7 @@ struct vcpu_vmx {
unsigned long host_rsp;
u8 fail;
bool nmi_known_unmasked;
+ u8 msr_bitmap_mode;
u32 exit_intr_info;
u32 idt_vectoring_info;
ulong rflags;
@@ -429,6 +432,10 @@ struct vcpu_vmx {
u64 msr_host_kernel_gs_base;
u64 msr_guest_kernel_gs_base;
#endif
+
+ u64 arch_capabilities;
+ u64 spec_ctrl;
+
u32 vm_entry_controls_shadow;
u32 vm_exit_controls_shadow;
/*
@@ -753,6 +760,9 @@ static void vmx_sync_pir_to_irr_dummy(struct kvm_vcpu *vcpu);
static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx);
static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx);
static bool vmx_mpx_supported(void);
+static void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu);
+static void __always_inline vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
+ u32 msr, int type);

static DEFINE_PER_CPU(struct vmcs *, vmxarea);
static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
@@ -765,10 +775,6 @@ static DEFINE_PER_CPU(struct desc_ptr, host_gdt);

static unsigned long *vmx_io_bitmap_a;
static unsigned long *vmx_io_bitmap_b;
-static unsigned long *vmx_msr_bitmap_legacy;
-static unsigned long *vmx_msr_bitmap_longmode;
-static unsigned long *vmx_msr_bitmap_legacy_x2apic;
-static unsigned long *vmx_msr_bitmap_longmode_x2apic;
static unsigned long *vmx_vmread_bitmap;
static unsigned long *vmx_vmwrite_bitmap;

@@ -868,6 +874,13 @@ static inline bool is_machine_check(u32 intr_info)
(INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK);
}

+/* Undocumented: icebp/int1 */
+static inline bool is_icebp(u32 intr_info)
+{
+ return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
+ == (INTR_TYPE_PRIV_SW_EXCEPTION | INTR_INFO_VALID_MASK);
+}
+
static inline bool cpu_has_vmx_msr_bitmap(void)
{
return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS;
@@ -1501,6 +1514,29 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
vmcs_write32(EXCEPTION_BITMAP, eb);
}

+/*
+ * Check if MSR is intercepted for L01 MSR bitmap.
+ */
+static bool msr_write_intercepted_l01(struct kvm_vcpu *vcpu, u32 msr)
+{
+ unsigned long *msr_bitmap;
+ int f = sizeof(unsigned long);
+
+ if (!cpu_has_vmx_msr_bitmap())
+ return true;
+
+ msr_bitmap = to_vmx(vcpu)->vmcs01.msr_bitmap;
+
+ if (msr <= 0x1fff) {
+ return !!test_bit(msr, msr_bitmap + 0x800 / f);
+ } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
+ msr &= 0x1fff;
+ return !!test_bit(msr, msr_bitmap + 0xc00 / f);
+ }
+
+ return true;
+}
+
static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx,
unsigned long entry, unsigned long exit)
{
@@ -1822,6 +1858,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) {
per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
vmcs_load(vmx->loaded_vmcs->vmcs);
+ indirect_branch_prediction_barrier();
}

if (vmx->loaded_vmcs->cpu != cpu) {
@@ -2074,25 +2111,6 @@ static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
vmx->guest_msrs[from] = tmp;
}

-static void vmx_set_msr_bitmap(struct kvm_vcpu *vcpu)
-{
- unsigned long *msr_bitmap;
-
- if (irqchip_in_kernel(vcpu->kvm) && apic_x2apic_mode(vcpu->arch.apic)) {
- if (is_long_mode(vcpu))
- msr_bitmap = vmx_msr_bitmap_longmode_x2apic;
- else
- msr_bitmap = vmx_msr_bitmap_legacy_x2apic;
- } else {
- if (is_long_mode(vcpu))
- msr_bitmap = vmx_msr_bitmap_longmode;
- else
- msr_bitmap = vmx_msr_bitmap_legacy;
- }
-
- vmcs_write64(MSR_BITMAP, __pa(msr_bitmap));
-}
-
/*
* Set up the vmcs to automatically save and restore system
* msrs. Don't touch the 64-bit msrs if the guest is in legacy
@@ -2133,7 +2151,7 @@ static void setup_msrs(struct vcpu_vmx *vmx)
vmx->save_nmsrs = save_nmsrs;

if (cpu_has_vmx_msr_bitmap())
- vmx_set_msr_bitmap(&vmx->vcpu);
+ vmx_update_msr_bitmap(&vmx->vcpu);
}

/*
@@ -2486,71 +2504,77 @@ static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
* Returns 0 on success, non-0 otherwise.
* Assumes vcpu_load() was already called.
*/
-static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
+static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
- u64 data;
struct shared_msr_entry *msr;

- if (!pdata) {
- printk(KERN_ERR "BUG: get_msr called with NULL pdata\n");
- return -EINVAL;
- }
-
- switch (msr_index) {
+ switch (msr_info->index) {
#ifdef CONFIG_X86_64
case MSR_FS_BASE:
- data = vmcs_readl(GUEST_FS_BASE);
+ msr_info->data = vmcs_readl(GUEST_FS_BASE);
break;
case MSR_GS_BASE:
- data = vmcs_readl(GUEST_GS_BASE);
+ msr_info->data = vmcs_readl(GUEST_GS_BASE);
break;
case MSR_KERNEL_GS_BASE:
vmx_load_host_state(to_vmx(vcpu));
- data = to_vmx(vcpu)->msr_guest_kernel_gs_base;
+ msr_info->data = to_vmx(vcpu)->msr_guest_kernel_gs_base;
break;
#endif
case MSR_EFER:
- return kvm_get_msr_common(vcpu, msr_index, pdata);
+ return kvm_get_msr_common(vcpu, msr_info);
case MSR_IA32_TSC:
- data = guest_read_tsc();
+ msr_info->data = guest_read_tsc();
+ break;
+ case MSR_IA32_SPEC_CTRL:
+ if (!msr_info->host_initiated &&
+ !guest_cpuid_has_ibrs(vcpu))
+ return 1;
+
+ msr_info->data = to_vmx(vcpu)->spec_ctrl;
+ break;
+ case MSR_IA32_ARCH_CAPABILITIES:
+ if (!msr_info->host_initiated &&
+ !guest_cpuid_has_arch_capabilities(vcpu))
+ return 1;
+ msr_info->data = to_vmx(vcpu)->arch_capabilities;
break;
case MSR_IA32_SYSENTER_CS:
- data = vmcs_read32(GUEST_SYSENTER_CS);
+ msr_info->data = vmcs_read32(GUEST_SYSENTER_CS);
break;
case MSR_IA32_SYSENTER_EIP:
- data = vmcs_readl(GUEST_SYSENTER_EIP);
+ msr_info->data = vmcs_readl(GUEST_SYSENTER_EIP);
break;
case MSR_IA32_SYSENTER_ESP:
- data = vmcs_readl(GUEST_SYSENTER_ESP);
+ msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP);
break;
case MSR_IA32_BNDCFGS:
if (!vmx_mpx_supported() || !guest_cpuid_has_mpx(vcpu))
return 1;
- data = vmcs_read64(GUEST_BNDCFGS);
+ msr_info->data = vmcs_read64(GUEST_BNDCFGS);
break;
case MSR_IA32_FEATURE_CONTROL:
if (!nested_vmx_allowed(vcpu))
return 1;
- data = to_vmx(vcpu)->nested.msr_ia32_feature_control;
+ msr_info->data = to_vmx(vcpu)->nested.msr_ia32_feature_control;
break;
case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
if (!nested_vmx_allowed(vcpu))
return 1;
- return vmx_get_vmx_msr(vcpu, msr_index, pdata);
+ return vmx_get_vmx_msr(vcpu, msr_info->index, &msr_info->data);
case MSR_TSC_AUX:
if (!to_vmx(vcpu)->rdtscp_enabled)
return 1;
/* Otherwise falls through */
default:
- msr = find_msr_entry(to_vmx(vcpu), msr_index);
+ msr = find_msr_entry(to_vmx(vcpu), msr_info->index);
if (msr) {
- data = msr->data;
+ msr_info->data = msr->data;
break;
}
- return kvm_get_msr_common(vcpu, msr_index, pdata);
+ return kvm_get_msr_common(vcpu, msr_info);
}

- *pdata = data;
return 0;
}

@@ -2607,6 +2631,68 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_IA32_TSC:
kvm_write_tsc(vcpu, msr_info);
break;
+ case MSR_IA32_SPEC_CTRL:
+ if (!msr_info->host_initiated &&
+ !guest_cpuid_has_ibrs(vcpu))
+ return 1;
+
+ /* The STIBP bit doesn't fault even if it's not advertised */
+ if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP))
+ return 1;
+
+ vmx->spec_ctrl = data;
+
+ if (!data)
+ break;
+
+ /*
+ * For non-nested:
+ * When it's written (to non-zero) for the first time, pass
+ * it through.
+ *
+ * For nested:
+ * The handling of the MSR bitmap for L2 guests is done in
+ * nested_vmx_merge_msr_bitmap. We should not touch the
+ * vmcs02.msr_bitmap here since it gets completely overwritten
+ * in the merging. We update the vmcs01 here for L1 as well
+ * since it will end up touching the MSR anyway now.
+ */
+ vmx_disable_intercept_for_msr(vmx->vmcs01.msr_bitmap,
+ MSR_IA32_SPEC_CTRL,
+ MSR_TYPE_RW);
+ break;
+ case MSR_IA32_PRED_CMD:
+ if (!msr_info->host_initiated &&
+ !guest_cpuid_has_ibpb(vcpu))
+ return 1;
+
+ if (data & ~PRED_CMD_IBPB)
+ return 1;
+
+ if (!data)
+ break;
+
+ wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
+
+ /*
+ * For non-nested:
+ * When it's written (to non-zero) for the first time, pass
+ * it through.
+ *
+ * For nested:
+ * The handling of the MSR bitmap for L2 guests is done in
+ * nested_vmx_merge_msr_bitmap. We should not touch the
+ * vmcs02.msr_bitmap here since it gets completely overwritten
+ * in the merging.
+ */
+ vmx_disable_intercept_for_msr(vmx->vmcs01.msr_bitmap, MSR_IA32_PRED_CMD,
+ MSR_TYPE_W);
+ break;
+ case MSR_IA32_ARCH_CAPABILITIES:
+ if (!msr_info->host_initiated)
+ return 1;
+ vmx->arch_capabilities = data;
+ break;
case MSR_IA32_CR_PAT:
if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
@@ -3007,11 +3093,6 @@ static struct vmcs *alloc_vmcs_cpu(int cpu)
return vmcs;
}

-static struct vmcs *alloc_vmcs(void)
-{
- return alloc_vmcs_cpu(raw_smp_processor_id());
-}
-
static void free_vmcs(struct vmcs *vmcs)
{
free_pages((unsigned long)vmcs, vmcs_config.order);
@@ -3027,6 +3108,34 @@ static void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
loaded_vmcs_clear(loaded_vmcs);
free_vmcs(loaded_vmcs->vmcs);
loaded_vmcs->vmcs = NULL;
+ if (loaded_vmcs->msr_bitmap)
+ free_page((unsigned long)loaded_vmcs->msr_bitmap);
+}
+
+static struct vmcs *alloc_vmcs(void)
+{
+ return alloc_vmcs_cpu(raw_smp_processor_id());
+}
+
+static int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
+{
+ loaded_vmcs->vmcs = alloc_vmcs();
+ if (!loaded_vmcs->vmcs)
+ return -ENOMEM;
+
+ loaded_vmcs_init(loaded_vmcs);
+
+ if (cpu_has_vmx_msr_bitmap()) {
+ loaded_vmcs->msr_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
+ if (!loaded_vmcs->msr_bitmap)
+ goto out_vmcs;
+ memset(loaded_vmcs->msr_bitmap, 0xff, PAGE_SIZE);
+ }
+ return 0;
+
+out_vmcs:
+ free_loaded_vmcs(loaded_vmcs);
+ return -ENOMEM;
}

static void free_kvm_area(void)
@@ -4087,10 +4196,8 @@ static void free_vpid(struct vcpu_vmx *vmx)
spin_unlock(&vmx_vpid_lock);
}

-#define MSR_TYPE_R 1
-#define MSR_TYPE_W 2
-static void __vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
- u32 msr, int type)
+static void __always_inline vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
+ u32 msr, int type)
{
int f = sizeof(unsigned long);

@@ -4124,8 +4231,8 @@ static void __vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
}
}

-static void __vmx_enable_intercept_for_msr(unsigned long *msr_bitmap,
- u32 msr, int type)
+static void __always_inline vmx_enable_intercept_for_msr(unsigned long *msr_bitmap,
+ u32 msr, int type)
{
int f = sizeof(unsigned long);

@@ -4159,37 +4266,76 @@ static void __vmx_enable_intercept_for_msr(unsigned long *msr_bitmap,
}
}

-static void vmx_disable_intercept_for_msr(u32 msr, bool longmode_only)
+static void __always_inline vmx_set_intercept_for_msr(unsigned long *msr_bitmap,
+ u32 msr, int type, bool value)
{
- if (!longmode_only)
- __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy,
- msr, MSR_TYPE_R | MSR_TYPE_W);
- __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode,
- msr, MSR_TYPE_R | MSR_TYPE_W);
+ if (value)
+ vmx_enable_intercept_for_msr(msr_bitmap, msr, type);
+ else
+ vmx_disable_intercept_for_msr(msr_bitmap, msr, type);
}

-static void vmx_enable_intercept_msr_read_x2apic(u32 msr)
+static u8 vmx_msr_bitmap_mode(struct kvm_vcpu *vcpu)
{
- __vmx_enable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic,
- msr, MSR_TYPE_R);
- __vmx_enable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic,
- msr, MSR_TYPE_R);
+ u8 mode = 0;
+
+ if (irqchip_in_kernel(vcpu->kvm) && apic_x2apic_mode(vcpu->arch.apic)) {
+ mode |= MSR_BITMAP_MODE_X2APIC;
+ if (enable_apicv)
+ mode |= MSR_BITMAP_MODE_X2APIC_APICV;
+ }
+
+ if (is_long_mode(vcpu))
+ mode |= MSR_BITMAP_MODE_LM;
+
+ return mode;
}

-static void vmx_disable_intercept_msr_read_x2apic(u32 msr)
+#define X2APIC_MSR(r) (APIC_BASE_MSR + ((r) >> 4))
+
+static void vmx_update_msr_bitmap_x2apic(unsigned long *msr_bitmap,
+ u8 mode)
{
- __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic,
- msr, MSR_TYPE_R);
- __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic,
- msr, MSR_TYPE_R);
+ int msr;
+
+ for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
+ unsigned word = msr / BITS_PER_LONG;
+ msr_bitmap[word] = (mode & MSR_BITMAP_MODE_X2APIC_APICV) ? 0 : ~0;
+ msr_bitmap[word + (0x800 / sizeof(long))] = ~0;
+ }
+
+ if (mode & MSR_BITMAP_MODE_X2APIC) {
+ /*
+ * TPR reads and writes can be virtualized even if virtual interrupt
+ * delivery is not in use.
+ */
+ vmx_disable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_TASKPRI), MSR_TYPE_RW);
+ if (mode & MSR_BITMAP_MODE_X2APIC_APICV) {
+ vmx_enable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_ID), MSR_TYPE_R);
+ vmx_enable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_TMCCT), MSR_TYPE_R);
+ vmx_disable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_EOI), MSR_TYPE_W);
+ vmx_disable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_SELF_IPI), MSR_TYPE_W);
+ }
+ }
}

-static void vmx_disable_intercept_msr_write_x2apic(u32 msr)
+static void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu)
{
- __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic,
- msr, MSR_TYPE_W);
- __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic,
- msr, MSR_TYPE_W);
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap;
+ u8 mode = vmx_msr_bitmap_mode(vcpu);
+ u8 changed = mode ^ vmx->msr_bitmap_mode;
+
+ if (!changed)
+ return;
+
+ vmx_set_intercept_for_msr(msr_bitmap, MSR_KERNEL_GS_BASE, MSR_TYPE_RW,
+ !(mode & MSR_BITMAP_MODE_LM));
+
+ if (changed & (MSR_BITMAP_MODE_X2APIC | MSR_BITMAP_MODE_X2APIC_APICV))
+ vmx_update_msr_bitmap_x2apic(msr_bitmap, mode);
+
+ vmx->msr_bitmap_mode = mode;
}

static int vmx_vm_has_apicv(struct kvm *kvm)
@@ -4197,6 +4343,23 @@ static int vmx_vm_has_apicv(struct kvm *kvm)
return enable_apicv && irqchip_in_kernel(kvm);
}

+static void nested_mark_vmcs12_pages_dirty(struct kvm_vcpu *vcpu)
+{
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+ gfn_t gfn;
+
+ /*
+ * Don't need to mark the APIC access page dirty; it is never
+ * written to by the CPU during APIC virtualization.
+ */
+
+ if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
+ gfn = vmcs12->virtual_apic_page_addr >> PAGE_SHIFT;
+ mark_page_dirty(vcpu->kvm, gfn);
+ }
+}
+
+
/*
* Send interrupt to vcpu via posted interrupt way.
* 1. If target vcpu is running(non-root mode), send posted interrupt
@@ -4393,7 +4556,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmwrite_bitmap));
}
if (cpu_has_vmx_msr_bitmap())
- vmcs_write64(MSR_BITMAP, __pa(vmx_msr_bitmap_legacy));
+ vmcs_write64(MSR_BITMAP, __pa(vmx->vmcs01.msr_bitmap));

vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */

@@ -4473,6 +4636,8 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
++vmx->nmsrs;
}

+ if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
+ rdmsrl(MSR_IA32_ARCH_CAPABILITIES, vmx->arch_capabilities);

vm_exit_controls_init(vmx, vmcs_config.vmexit_ctrl);

@@ -4491,6 +4656,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu)
struct msr_data apic_base_msr;

vmx->rmode.vm86_active = 0;
+ vmx->spec_ctrl = 0;

vmx->soft_vnmi_blocked = 0;

@@ -4915,7 +5081,7 @@ static int handle_exception(struct kvm_vcpu *vcpu)
(KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) {
vcpu->arch.dr6 &= ~15;
vcpu->arch.dr6 |= dr6;
- if (!(dr6 & ~DR6_RESERVED)) /* icebp */
+ if (is_icebp(intr_info))
skip_emulated_instruction(vcpu);

kvm_queue_exception(vcpu, DB_VECTOR);
@@ -5254,19 +5420,21 @@ static int handle_cpuid(struct kvm_vcpu *vcpu)
static int handle_rdmsr(struct kvm_vcpu *vcpu)
{
u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
- u64 data;
+ struct msr_data msr_info;

- if (vmx_get_msr(vcpu, ecx, &data)) {
+ msr_info.index = ecx;
+ msr_info.host_initiated = false;
+ if (vmx_get_msr(vcpu, &msr_info)) {
trace_kvm_msr_read_ex(ecx);
kvm_inject_gp(vcpu, 0);
return 1;
}

- trace_kvm_msr_read(ecx, data);
+ trace_kvm_msr_read(ecx, msr_info.data);

/* FIXME: handling of bits 32:63 of rax, rdx */
- vcpu->arch.regs[VCPU_REGS_RAX] = data & -1u;
- vcpu->arch.regs[VCPU_REGS_RDX] = (data >> 32) & -1u;
+ vcpu->arch.regs[VCPU_REGS_RAX] = msr_info.data & -1u;
+ vcpu->arch.regs[VCPU_REGS_RDX] = (msr_info.data >> 32) & -1u;
skip_emulated_instruction(vcpu);
return 1;
}
@@ -5731,93 +5899,6 @@ static int handle_monitor(struct kvm_vcpu *vcpu)
return handle_nop(vcpu);
}

-/*
- * To run an L2 guest, we need a vmcs02 based on the L1-specified vmcs12.
- * We could reuse a single VMCS for all the L2 guests, but we also want the
- * option to allocate a separate vmcs02 for each separate loaded vmcs12 - this
- * allows keeping them loaded on the processor, and in the future will allow
- * optimizations where prepare_vmcs02 doesn't need to set all the fields on
- * every entry if they never change.
- * So we keep, in vmx->nested.vmcs02_pool, a cache of size VMCS02_POOL_SIZE
- * (>=0) with a vmcs02 for each recently loaded vmcs12s, most recent first.
- *
- * The following functions allocate and free a vmcs02 in this pool.
- */
-
-/* Get a VMCS from the pool to use as vmcs02 for the current vmcs12. */
-static struct loaded_vmcs *nested_get_current_vmcs02(struct vcpu_vmx *vmx)
-{
- struct vmcs02_list *item;
- list_for_each_entry(item, &vmx->nested.vmcs02_pool, list)
- if (item->vmptr == vmx->nested.current_vmptr) {
- list_move(&item->list, &vmx->nested.vmcs02_pool);
- return &item->vmcs02;
- }
-
- if (vmx->nested.vmcs02_num >= max(VMCS02_POOL_SIZE, 1)) {
- /* Recycle the least recently used VMCS. */
- item = list_entry(vmx->nested.vmcs02_pool.prev,
- struct vmcs02_list, list);
- item->vmptr = vmx->nested.current_vmptr;
- list_move(&item->list, &vmx->nested.vmcs02_pool);
- return &item->vmcs02;
- }
-
- /* Create a new VMCS */
- item = kmalloc(sizeof(struct vmcs02_list), GFP_KERNEL);
- if (!item)
- return NULL;
- item->vmcs02.vmcs = alloc_vmcs();
- if (!item->vmcs02.vmcs) {
- kfree(item);
- return NULL;
- }
- loaded_vmcs_init(&item->vmcs02);
- item->vmptr = vmx->nested.current_vmptr;
- list_add(&(item->list), &(vmx->nested.vmcs02_pool));
- vmx->nested.vmcs02_num++;
- return &item->vmcs02;
-}
-
-/* Free and remove from pool a vmcs02 saved for a vmcs12 (if there is one) */
-static void nested_free_vmcs02(struct vcpu_vmx *vmx, gpa_t vmptr)
-{
- struct vmcs02_list *item;
- list_for_each_entry(item, &vmx->nested.vmcs02_pool, list)
- if (item->vmptr == vmptr) {
- free_loaded_vmcs(&item->vmcs02);
- list_del(&item->list);
- kfree(item);
- vmx->nested.vmcs02_num--;
- return;
- }
-}
-
-/*
- * Free all VMCSs saved for this vcpu, except the one pointed by
- * vmx->loaded_vmcs. We must be running L1, so vmx->loaded_vmcs
- * must be &vmx->vmcs01.
- */
-static void nested_free_all_saved_vmcss(struct vcpu_vmx *vmx)
-{
- struct vmcs02_list *item, *n;
-
- WARN_ON(vmx->loaded_vmcs != &vmx->vmcs01);
- list_for_each_entry_safe(item, n, &vmx->nested.vmcs02_pool, list) {
- /*
- * Something will leak if the above WARN triggers. Better than
- * a use-after-free.
- */
- if (vmx->loaded_vmcs == &item->vmcs02)
- continue;
-
- free_loaded_vmcs(&item->vmcs02);
- list_del(&item->list);
- kfree(item);
- vmx->nested.vmcs02_num--;
- }
-}
-
/*
* The following 3 functions, nested_vmx_succeed()/failValid()/failInvalid(),
* set the success or error code of an emulated VMX instruction, as specified
@@ -6041,6 +6122,7 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
struct vmcs *shadow_vmcs;
const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED
| FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
+ int r;

/* The Intel VMX Instruction Reference lists a bunch of bits that
* are prerequisite to running VMXON, most notably cr4.VMXE must be
@@ -6080,10 +6162,16 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
return 1;
}

+ r = alloc_loaded_vmcs(&vmx->nested.vmcs02);
+ if (r < 0)
+ return -ENOMEM;
+
if (enable_shadow_vmcs) {
shadow_vmcs = alloc_vmcs();
- if (!shadow_vmcs)
+ if (!shadow_vmcs) {
+ free_loaded_vmcs(&vmx->nested.vmcs02);
return -ENOMEM;
+ }
/* mark vmcs as shadow */
shadow_vmcs->revision_id |= (1u << 31);
/* init shadow vmcs */
@@ -6091,9 +6179,6 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
vmx->nested.current_shadow_vmcs = shadow_vmcs;
}

- INIT_LIST_HEAD(&(vmx->nested.vmcs02_pool));
- vmx->nested.vmcs02_num = 0;
-
hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC,
HRTIMER_MODE_REL);
vmx->nested.preemption_timer.function = vmx_preemption_timer_fn;
@@ -6170,13 +6255,13 @@ static void free_nested(struct vcpu_vmx *vmx)
}
if (enable_shadow_vmcs)
free_vmcs(vmx->nested.current_shadow_vmcs);
- /* Unpin physical memory we referred to in current vmcs02 */
+ /* Unpin physical memory we referred to in the vmcs02 */
if (vmx->nested.apic_access_page) {
nested_release_page(vmx->nested.apic_access_page);
vmx->nested.apic_access_page = 0;
}

- nested_free_all_saved_vmcss(vmx);
+ free_loaded_vmcs(&vmx->nested.vmcs02);
}

/* Emulate the VMXOFF instruction */
@@ -6227,8 +6312,6 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
kunmap(page);
nested_release_page(page);

- nested_free_vmcs02(vmx, vmptr);
-
skip_emulated_instruction(vcpu);
nested_vmx_succeed(vcpu);
return 1;
@@ -6900,6 +6983,19 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
vmcs_read32(VM_EXIT_INTR_ERROR_CODE),
KVM_ISA_VMX);

+ /*
+ * The host physical addresses of some pages of guest memory
+ * are loaded into the vmcs02 (e.g. vmcs12's Virtual APIC
+ * Page). The CPU may write to these pages via their host
+ * physical address while L2 is running, bypassing any
+ * address-translation-based dirty tracking (e.g. EPT write
+ * protection).
+ *
+ * Mark them dirty on every exit from L2 to prevent them from
+ * getting out of sync with dirty tracking.
+ */
+ nested_mark_vmcs12_pages_dirty(vcpu);
+
if (vmx->nested.nested_run_pending)
return 0;

@@ -7136,7 +7232,7 @@ static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
}
vmcs_write32(SECONDARY_VM_EXEC_CONTROL, sec_exec_control);

- vmx_set_msr_bitmap(vcpu);
+ vmx_update_msr_bitmap(vcpu);
}

static void vmx_hwapic_isr_update(struct kvm *kvm, int isr)
@@ -7440,6 +7536,15 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
atomic_switch_perf_msrs(vmx);
debugctlmsr = get_debugctlmsr();

+ /*
+ * If this vCPU has touched SPEC_CTRL, restore the guest's value if
+ * it's non-zero. Since vmentry is serialising on affected CPUs, there
+ * is no need to worry about the conditional branch over the wrmsr
+ * being speculatively taken.
+ */
+ if (vmx->spec_ctrl)
+ native_wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
+
vmx->__launched = vmx->loaded_vmcs->launched;
asm(
/* Store host registers */
@@ -7558,6 +7663,22 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
#endif
);

+ /*
+ * We do not use IBRS in the kernel. If this vCPU has used the
+ * SPEC_CTRL MSR it may have left it on; save the value and
+ * turn it off. This is much more efficient than blindly adding
+ * it to the atomic save/restore list. Especially as the former
+ * (Saving guest MSRs on vmexit) doesn't even exist in KVM.
+ *
+ * If the L01 MSR bitmap does not intercept the MSR, then we need to
+ * save it.
+ */
+ if (unlikely(!msr_write_intercepted_l01(vcpu, MSR_IA32_SPEC_CTRL)))
+ vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
+
+ if (vmx->spec_ctrl)
+ native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+
/* Eliminate branch target predictions from guest mode */
vmexit_fill_RSB();

@@ -7656,6 +7777,7 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
{
int err;
struct vcpu_vmx *vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
+ unsigned long *msr_bitmap;
int cpu;

if (!vmx)
@@ -7673,16 +7795,24 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
goto uninit_vcpu;
}

- vmx->loaded_vmcs = &vmx->vmcs01;
- vmx->loaded_vmcs->vmcs = alloc_vmcs();
- if (!vmx->loaded_vmcs->vmcs)
- goto free_msrs;
if (!vmm_exclusive)
kvm_cpu_vmxon(__pa(per_cpu(vmxarea, raw_smp_processor_id())));
- loaded_vmcs_init(vmx->loaded_vmcs);
+ err = alloc_loaded_vmcs(&vmx->vmcs01);
if (!vmm_exclusive)
kvm_cpu_vmxoff();
+ if (err < 0)
+ goto free_msrs;
+
+ msr_bitmap = vmx->vmcs01.msr_bitmap;
+ vmx_disable_intercept_for_msr(msr_bitmap, MSR_FS_BASE, MSR_TYPE_RW);
+ vmx_disable_intercept_for_msr(msr_bitmap, MSR_GS_BASE, MSR_TYPE_RW);
+ vmx_disable_intercept_for_msr(msr_bitmap, MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
+ vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_CS, MSR_TYPE_RW);
+ vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_ESP, MSR_TYPE_RW);
+ vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_EIP, MSR_TYPE_RW);
+ vmx->msr_bitmap_mode = 0;

+ vmx->loaded_vmcs = &vmx->vmcs01;
cpu = get_cpu();
vmx_vcpu_load(&vmx->vcpu, cpu);
vmx->vcpu.cpu = cpu;
@@ -8204,7 +8334,6 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
struct vmcs12 *vmcs12;
struct vcpu_vmx *vmx = to_vmx(vcpu);
int cpu;
- struct loaded_vmcs *vmcs02;
bool ia32e;

if (!nested_vmx_check_permission(vcpu) ||
@@ -8341,16 +8470,12 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
* the nested entry.
*/

- vmcs02 = nested_get_current_vmcs02(vmx);
- if (!vmcs02)
- return -ENOMEM;
-
enter_guest_mode(vcpu);

vmx->nested.vmcs01_tsc_offset = vmcs_read64(TSC_OFFSET);

cpu = get_cpu();
- vmx->loaded_vmcs = vmcs02;
+ vmx->loaded_vmcs = &vmx->nested.vmcs02;
vmx_vcpu_put(vcpu);
vmx_vcpu_load(vcpu, cpu);
vcpu->cpu = cpu;
@@ -8830,10 +8955,6 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
vm_exit_controls_init(vmx, vmcs_read32(VM_EXIT_CONTROLS));
vmx_segment_cache_clear(vmx);

- /* if no vmcs02 cache requested, remove the one we used */
- if (VMCS02_POOL_SIZE == 0)
- nested_free_vmcs02(vmx, vmx->nested.current_vmptr);
-
load_vmcs12_host_state(vcpu, vmcs12);

/* Update TSC_OFFSET if TSC was changed while L2 ran */
@@ -8925,7 +9046,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
.vcpu_load = vmx_vcpu_load,
.vcpu_put = vmx_vcpu_put,

- .update_db_bp_intercept = update_exception_bitmap,
+ .update_bp_intercept = update_exception_bitmap,
.get_msr = vmx_get_msr,
.set_msr = vmx_set_msr,
.get_segment_base = vmx_get_segment_base,
@@ -9016,7 +9137,7 @@ static struct kvm_x86_ops vmx_x86_ops = {

static int __init vmx_init(void)
{
- int r, i, msr;
+ int r, i;

rdmsrl_safe(MSR_EFER, &host_efer);

@@ -9033,30 +9154,13 @@ static int __init vmx_init(void)
if (!vmx_io_bitmap_b)
goto out;

- vmx_msr_bitmap_legacy = (unsigned long *)__get_free_page(GFP_KERNEL);
- if (!vmx_msr_bitmap_legacy)
- goto out1;
-
- vmx_msr_bitmap_legacy_x2apic =
- (unsigned long *)__get_free_page(GFP_KERNEL);
- if (!vmx_msr_bitmap_legacy_x2apic)
- goto out2;
-
- vmx_msr_bitmap_longmode = (unsigned long *)__get_free_page(GFP_KERNEL);
- if (!vmx_msr_bitmap_longmode)
- goto out3;
-
- vmx_msr_bitmap_longmode_x2apic =
- (unsigned long *)__get_free_page(GFP_KERNEL);
- if (!vmx_msr_bitmap_longmode_x2apic)
- goto out4;
vmx_vmread_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
if (!vmx_vmread_bitmap)
- goto out5;
+ goto out1;

vmx_vmwrite_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
if (!vmx_vmwrite_bitmap)
- goto out6;
+ goto out2;

memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);
@@ -9065,51 +9169,18 @@ static int __init vmx_init(void)

memset(vmx_io_bitmap_b, 0xff, PAGE_SIZE);

- memset(vmx_msr_bitmap_legacy, 0xff, PAGE_SIZE);
- memset(vmx_msr_bitmap_longmode, 0xff, PAGE_SIZE);
-
set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */

r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
__alignof__(struct vcpu_vmx), THIS_MODULE);
if (r)
- goto out7;
+ goto out3;

#ifdef CONFIG_KEXEC
rcu_assign_pointer(crash_vmclear_loaded_vmcss,
crash_vmclear_local_loaded_vmcss);
#endif

- vmx_disable_intercept_for_msr(MSR_FS_BASE, false);
- vmx_disable_intercept_for_msr(MSR_GS_BASE, false);
- vmx_disable_intercept_for_msr(MSR_KERNEL_GS_BASE, true);
- vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false);
- vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false);
- vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
-
- memcpy(vmx_msr_bitmap_legacy_x2apic,
- vmx_msr_bitmap_legacy, PAGE_SIZE);
- memcpy(vmx_msr_bitmap_longmode_x2apic,
- vmx_msr_bitmap_longmode, PAGE_SIZE);
-
- if (enable_apicv) {
- for (msr = 0x800; msr <= 0x8ff; msr++)
- vmx_disable_intercept_msr_read_x2apic(msr);
-
- /* According SDM, in x2apic mode, the whole id reg is used.
- * But in KVM, it only use the highest eight bits. Need to
- * intercept it */
- vmx_enable_intercept_msr_read_x2apic(0x802);
- /* TMCCT */
- vmx_enable_intercept_msr_read_x2apic(0x839);
- /* TPR */
- vmx_disable_intercept_msr_write_x2apic(0x808);
- /* EOI */
- vmx_disable_intercept_msr_write_x2apic(0x80b);
- /* SELF-IPI */
- vmx_disable_intercept_msr_write_x2apic(0x83f);
- }
-
if (enable_ept) {
kvm_mmu_set_mask_ptes(0ull,
(enable_ept_ad_bits) ? VMX_EPT_ACCESS_BIT : 0ull,
@@ -9122,18 +9193,10 @@ static int __init vmx_init(void)

return 0;

-out7:
- free_page((unsigned long)vmx_vmwrite_bitmap);
-out6:
- free_page((unsigned long)vmx_vmread_bitmap);
-out5:
- free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic);
-out4:
- free_page((unsigned long)vmx_msr_bitmap_longmode);
out3:
- free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic);
+ free_page((unsigned long)vmx_vmwrite_bitmap);
out2:
- free_page((unsigned long)vmx_msr_bitmap_legacy);
+ free_page((unsigned long)vmx_vmread_bitmap);
out1:
free_page((unsigned long)vmx_io_bitmap_b);
out:
@@ -9143,10 +9206,6 @@ static int __init vmx_init(void)

static void __exit vmx_exit(void)
{
- free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic);
- free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic);
- free_page((unsigned long)vmx_msr_bitmap_legacy);
- free_page((unsigned long)vmx_msr_bitmap_longmode);
free_page((unsigned long)vmx_io_bitmap_b);
free_page((unsigned long)vmx_io_bitmap_a);
free_page((unsigned long)vmx_vmwrite_bitmap);
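
[Editorial sketch] msr_write_intercepted_l01() and vmx_disable_intercept_for_msr() in the vmx.c
changes above both depend on the layout of the 4 KiB VMX MSR bitmap: write-intercept bits for MSRs
0x0000-0x1fff start at byte offset 0x800, and those for 0xc0000000-0xc0001fff at 0xc00. The sketch
below is a userspace model of that lookup, assuming MSR_IA32_SPEC_CTRL is 0x48; it is illustrative
only, not the kernel code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BITMAP_BYTES 4096

static bool write_intercepted(const uint8_t *bitmap, uint32_t msr)
{
        uint32_t base;

        if (msr <= 0x1fff) {
                base = 0x800;                   /* write-low region */
        } else if (msr >= 0xc0000000 && msr <= 0xc0001fff) {
                base = 0xc00;                   /* write-high region */
                msr &= 0x1fff;
        } else {
                return true;                    /* outside the bitmap: always intercepted */
        }

        return bitmap[base + msr / 8] & (1u << (msr % 8));
}

int main(void)
{
        uint8_t bitmap[BITMAP_BYTES];
        uint32_t spec_ctrl = 0x48;              /* MSR_IA32_SPEC_CTRL */

        memset(bitmap, 0xff, sizeof(bitmap));   /* start out intercepting everything */
        printf("intercepted: %d\n", write_intercepted(bitmap, spec_ctrl));

        /* Pass the MSR through, as the first guest write does in the patch. */
        bitmap[0x800 + spec_ctrl / 8] &= (uint8_t)~(1u << (spec_ctrl % 8));
        printf("intercepted: %d\n", write_intercepted(bitmap, spec_ctrl));
        return 0;
}
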
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index b293c9570477..e959897a0f44 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -893,7 +893,8 @@ static u32 msrs_to_save[] = {
MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
#endif
MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA,
- MSR_IA32_FEATURE_CONTROL, MSR_IA32_BNDCFGS
+ MSR_IA32_FEATURE_CONTROL, MSR_IA32_BNDCFGS,
+ MSR_IA32_SPEC_CTRL, MSR_IA32_ARCH_CAPABILITIES
};

static unsigned num_msrs_to_save;
@@ -999,6 +1000,21 @@ EXPORT_SYMBOL_GPL(kvm_set_msr);
/*
* Adapt set_msr() to msr_io()'s calling convention
*/
+static int do_get_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
+{
+ struct msr_data msr;
+ int r;
+
+ msr.index = index;
+ msr.host_initiated = true;
+ r = kvm_get_msr(vcpu, &msr);
+ if (r)
+ return r;
+
+ *data = msr.data;
+ return 0;
+}
+
static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
struct msr_data msr;
@@ -2280,9 +2296,9 @@ EXPORT_SYMBOL_GPL(kvm_set_msr_common);
* Returns 0 on success, non-0 otherwise.
* Assumes vcpu_load() was already called.
*/
-int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
+int kvm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
{
- return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
+ return kvm_x86_ops->get_msr(vcpu, msr);
}

static int get_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
@@ -2418,11 +2434,11 @@ static int get_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
return 0;
}

-int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
+int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
u64 data;

- switch (msr) {
+ switch (msr_info->index) {
case MSR_IA32_PLATFORM_ID:
case MSR_IA32_EBL_CR_POWERON:
case MSR_IA32_DEBUGCTLMSR:
@@ -2441,26 +2457,26 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
case MSR_AMD64_NB_CFG:
case MSR_FAM10H_MMIO_CONF_BASE:
case MSR_AMD64_BU_CFG2:
- data = 0;
+ msr_info->data = 0;
break;
case MSR_P6_PERFCTR0:
case MSR_P6_PERFCTR1:
case MSR_P6_EVNTSEL0:
case MSR_P6_EVNTSEL1:
- if (kvm_pmu_msr(vcpu, msr))
- return kvm_pmu_get_msr(vcpu, msr, pdata);
- data = 0;
+ if (kvm_pmu_msr(vcpu, msr_info->index))
+ return kvm_pmu_get_msr(vcpu, msr_info->index, &msr_info->data);
+ msr_info->data = 0;
break;
case MSR_IA32_UCODE_REV:
- data = 0x100000000ULL;
+ msr_info->data = 0x100000000ULL;
break;
case MSR_MTRRcap:
- data = 0x500 | KVM_NR_VAR_MTRR;
+ msr_info->data = 0x500 | KVM_NR_VAR_MTRR;
break;
case 0x200 ... 0x2ff:
- return get_msr_mtrr(vcpu, msr, pdata);
+ return get_msr_mtrr(vcpu, msr_info->index, &msr_info->data);
case 0xcd: /* fsb frequency */
- data = 3;
+ msr_info->data = 3;
break;
/*
* MSR_EBC_FREQUENCY_ID
@@ -2474,48 +2490,48 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
* multiplying by zero otherwise.
*/
case MSR_EBC_FREQUENCY_ID:
- data = 1 << 24;
+ msr_info->data = 1 << 24;
break;
case MSR_IA32_APICBASE:
- data = kvm_get_apic_base(vcpu);
+ msr_info->data = kvm_get_apic_base(vcpu);
break;
case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
- return kvm_x2apic_msr_read(vcpu, msr, pdata);
+ return kvm_x2apic_msr_read(vcpu, msr_info->index, &msr_info->data);
break;
case MSR_IA32_TSCDEADLINE:
- data = kvm_get_lapic_tscdeadline_msr(vcpu);
+ msr_info->data = kvm_get_lapic_tscdeadline_msr(vcpu);
break;
case MSR_IA32_TSC_ADJUST:
- data = (u64)vcpu->arch.ia32_tsc_adjust_msr;
+ msr_info->data = (u64)vcpu->arch.ia32_tsc_adjust_msr;
break;
case MSR_IA32_MISC_ENABLE:
- data = vcpu->arch.ia32_misc_enable_msr;
+ msr_info->data = vcpu->arch.ia32_misc_enable_msr;
break;
case MSR_IA32_PERF_STATUS:
/* TSC increment by tick */
- data = 1000ULL;
+ msr_info->data = 1000ULL;
/* CPU multiplier */
data |= (((uint64_t)4ULL) << 40);
break;
case MSR_EFER:
- data = vcpu->arch.efer;
+ msr_info->data = vcpu->arch.efer;
break;
case MSR_KVM_WALL_CLOCK:
case MSR_KVM_WALL_CLOCK_NEW:
- data = vcpu->kvm->arch.wall_clock;
+ msr_info->data = vcpu->kvm->arch.wall_clock;
break;
case MSR_KVM_SYSTEM_TIME:
case MSR_KVM_SYSTEM_TIME_NEW:
- data = vcpu->arch.time;
+ msr_info->data = vcpu->arch.time;
break;
case MSR_KVM_ASYNC_PF_EN:
- data = vcpu->arch.apf.msr_val;
+ msr_info->data = vcpu->arch.apf.msr_val;
break;
case MSR_KVM_STEAL_TIME:
- data = vcpu->arch.st.msr_val;
+ msr_info->data = vcpu->arch.st.msr_val;
break;
case MSR_KVM_PV_EOI_EN:
- data = vcpu->arch.pv_eoi.msr_val;
+ msr_info->data = vcpu->arch.pv_eoi.msr_val;
break;
case MSR_IA32_P5_MC_ADDR:
case MSR_IA32_P5_MC_TYPE:
@@ -2523,7 +2539,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
case MSR_IA32_MCG_CTL:
case MSR_IA32_MCG_STATUS:
case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
- return get_msr_mce(vcpu, msr, pdata);
+ return get_msr_mce(vcpu, msr_info->index, &msr_info->data);
case MSR_K7_CLK_CTL:
/*
* Provide expected ramp-up count for K7. All other
@@ -2534,17 +2550,17 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
* type 6, model 8 and higher from exploding due to
* the rdmsr failing.
*/
- data = 0x20000000;
+ msr_info->data = 0x20000000;
break;
case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
- if (kvm_hv_msr_partition_wide(msr)) {
+ if (kvm_hv_msr_partition_wide(msr_info->index)) {
int r;
mutex_lock(&vcpu->kvm->lock);
- r = get_msr_hyperv_pw(vcpu, msr, pdata);
+ r = get_msr_hyperv_pw(vcpu, msr_info->index, &msr_info->data);
mutex_unlock(&vcpu->kvm->lock);
return r;
} else
- return get_msr_hyperv(vcpu, msr, pdata);
+ return get_msr_hyperv(vcpu, msr_info->index, &msr_info->data);
break;
case MSR_IA32_BBL_CR_CTL3:
/* This legacy MSR exists but isn't fully documented in current
@@ -2557,31 +2573,30 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
* L2 cache control register 3: 64GB range, 256KB size,
* enabled, latency 0x1, configured
*/
- data = 0xbe702111;
+ msr_info->data = 0xbe702111;
break;
case MSR_AMD64_OSVW_ID_LENGTH:
if (!guest_cpuid_has_osvw(vcpu))
return 1;
- data = vcpu->arch.osvw.length;
+ msr_info->data = vcpu->arch.osvw.length;
break;
case MSR_AMD64_OSVW_STATUS:
if (!guest_cpuid_has_osvw(vcpu))
return 1;
- data = vcpu->arch.osvw.status;
+ msr_info->data = vcpu->arch.osvw.status;
break;
default:
- if (kvm_pmu_msr(vcpu, msr))
- return kvm_pmu_get_msr(vcpu, msr, pdata);
+ if (kvm_pmu_msr(vcpu, msr_info->index))
+ return kvm_pmu_get_msr(vcpu, msr_info->index, &msr_info->data);
if (!ignore_msrs) {
- vcpu_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
+ vcpu_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr_info->index);
return 1;
} else {
- vcpu_unimpl(vcpu, "ignored rdmsr: 0x%x\n", msr);
- data = 0;
+ vcpu_unimpl(vcpu, "ignored rdmsr: 0x%x\n", msr_info->index);
+ msr_info->data = 0;
}
break;
}
- *pdata = data;
return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_msr_common);
@@ -3290,7 +3305,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
break;
}
case KVM_GET_MSRS:
- r = msr_io(vcpu, argp, kvm_get_msr, 1);
+ r = msr_io(vcpu, argp, do_get_msr, 1);
break;
case KVM_SET_MSRS:
r = msr_io(vcpu, argp, do_set_msr, 0);
@@ -4810,7 +4825,17 @@ static void emulator_set_segment(struct x86_emulate_ctxt *ctxt, u16 selector,
static int emulator_get_msr(struct x86_emulate_ctxt *ctxt,
u32 msr_index, u64 *pdata)
{
- return kvm_get_msr(emul_to_vcpu(ctxt), msr_index, pdata);
+ struct msr_data msr;
+ int r;
+
+ msr.index = msr_index;
+ msr.host_initiated = false;
+ r = kvm_get_msr(emul_to_vcpu(ctxt), &msr);
+ if (r)
+ return r;
+
+ *pdata = msr.data;
+ return 0;
}

static int emulator_set_msr(struct x86_emulate_ctxt *ctxt,
@@ -6732,7 +6757,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
*/
kvm_set_rflags(vcpu, rflags);

- kvm_x86_ops->update_db_bp_intercept(vcpu);
+ kvm_x86_ops->update_bp_intercept(vcpu);

r = 0;
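
[Editorial sketch] The x86.c conversion above threads a struct msr_data through the MSR read path
so callers can say whether the access is host-initiated (ioctl) or guest-initiated (rdmsr or the
emulator); only guest accesses are gated on guest CPUID bits such as IBRS. A minimal userspace
model of that distinction follows; the names and the #GP-as-return-1 convention mirror the patch,
everything else is invented for illustration.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct msr_data {
        bool     host_initiated;
        uint32_t index;
        uint64_t data;
};

static bool guest_has_ibrs;                     /* stands in for guest_cpuid_has_ibrs() */
static uint64_t vcpu_spec_ctrl = 0x1;

/* Returns 0 on success, 1 to signal #GP to the guest. */
static int model_get_msr(struct msr_data *msr)
{
        switch (msr->index) {
        case 0x48:                              /* MSR_IA32_SPEC_CTRL */
                if (!msr->host_initiated && !guest_has_ibrs)
                        return 1;
                msr->data = vcpu_spec_ctrl;
                return 0;
        default:
                return 1;
        }
}

int main(void)
{
        struct msr_data host  = { .host_initiated = true,  .index = 0x48 };
        struct msr_data guest = { .host_initiated = false, .index = 0x48 };

        printf("host-initiated read:  %d\n", model_get_msr(&host));  /* 0: allowed */
        printf("guest-initiated read: %d\n", model_get_msr(&guest)); /* 1: #GP, no IBRS in CPUID */
        return 0;
}
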

diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index efa4839c3b59..71b450232c7a 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -10,6 +10,7 @@

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
+#include <asm/nospec-branch.h>
#include <asm/cache.h>
#include <asm/apic.h>
#include <asm/uv/uv.h>
@@ -100,6 +101,24 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
unsigned cpu = smp_processor_id();

if (likely(prev != next)) {
+ /*
+ * Avoid user/user BTB poisoning by flushing the branch
+ * predictor when switching between processes. This stops
+ * one process from doing Spectre-v2 attacks on another.
+ *
+ * As an optimization, flush indirect branches only when
+ * switching into processes that disable dumping. This
+ * protects high value processes like gpg, without having
+ * too high performance overhead. IBPB is *expensive*!
+ *
+ * This will not flush branches when switching into kernel
+ * threads. It will flush if we switch to a different non-
+ * dumpable process.
+ */
+ if (tsk && tsk->mm &&
+ get_dumpable(tsk->mm) != SUID_DUMP_USER)
+ indirect_branch_prediction_barrier();
+
this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
this_cpu_write(cpu_tlbstate.active_mm, next);
cpumask_set_cpu(cpu, mm_cpumask(next));
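
[Editorial sketch] The switch_mm_irqs_off() change above issues IBPB only when switching to a
different mm whose owner has disabled core dumps, trading a small loss of coverage for much lower
overhead. The sketch below models that policy in plain C; the types are illustrative stand-ins for
the kernel's task and mm structures.

#include <stdbool.h>
#include <stdio.h>

#define SUID_DUMP_USER 1

struct mm   { int id; };
struct task { struct mm *mm; int dumpable; };

static int ibpb_count;

static void switch_mm_model(struct mm *prev, struct mm *next, struct task *tsk)
{
        if (prev == next)
                return;                         /* same address space: nothing to do */

        /* Kernel threads have no mm and never trigger the barrier. */
        if (tsk && tsk->mm && tsk->dumpable != SUID_DUMP_USER)
                ibpb_count++;                   /* indirect_branch_prediction_barrier() */
}

int main(void)
{
        struct mm a = { 1 }, b = { 2 };
        struct task ordinary = { .mm = &b, .dumpable = SUID_DUMP_USER };
        struct task gpg      = { .mm = &b, .dumpable = 0 };

        switch_mm_model(&a, &b, &ordinary);     /* dumpable: no barrier */
        switch_mm_model(&a, &b, &gpg);          /* non-dumpable: barrier */
        printf("IBPBs issued: %d\n", ibpb_count);
        return 0;
}
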
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index 379e8bd0deea..9f5246293ae7 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -471,7 +471,7 @@ static int nmi_setup(void)
goto fail;

for_each_possible_cpu(cpu) {
- if (!cpu)
+ if (!IS_ENABLED(CONFIG_SMP) || !cpu)
continue;

memcpy(per_cpu(cpu_msrs, cpu).counters,
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index f05f2d897a67..85003e57b1a3 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -2500,7 +2500,7 @@ void __init xen_hvm_init_mmu_ops(void)
if (is_pagetable_dying_supported())
pv_mmu_ops.exit_mmap = xen_hvm_exit_mmap;
#ifdef CONFIG_PROC_VMCORE
- register_oldmem_pfn_is_ram(&xen_oldmem_pfn_is_ram);
+ WARN_ON(register_oldmem_pfn_is_ram(&xen_oldmem_pfn_is_ram));
#endif
}
#endif
diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c
index 2277cc8965fe..e37ae1c74d2b 100644
--- a/arch/x86/xen/suspend.c
+++ b/arch/x86/xen/suspend.c
@@ -1,10 +1,13 @@
#include <linux/types.h>
#include <linux/clockchips.h>
+#include <linux/percpu-defs.h>

#include <xen/interface/xen.h>
#include <xen/grant_table.h>
#include <xen/events.h>

+#include <asm/cpufeature.h>
+#include <asm/msr-index.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/page.h>
#include <asm/fixmap.h>
@@ -12,6 +15,8 @@
#include "xen-ops.h"
#include "mmu.h"

+static DEFINE_PER_CPU(u64, spec_ctrl);
+
static void xen_pv_pre_suspend(void)
{
xen_mm_pin_all();
@@ -84,6 +89,9 @@ static void xen_vcpu_notify_restore(void *data)
{
unsigned long reason = (unsigned long)data;

+ if (xen_pv_domain() && boot_cpu_has(X86_FEATURE_SPEC_CTRL))
+ wrmsrl(MSR_IA32_SPEC_CTRL, this_cpu_read(spec_ctrl));
+
/* Boot processor notified via generic timekeeping_resume() */
if ( smp_processor_id() == 0)
return;
@@ -91,8 +99,24 @@ static void xen_vcpu_notify_restore(void *data)
clockevents_notify(reason, NULL);
}

+static void xen_vcpu_notify_suspend(void *data)
+{
+ u64 tmp;
+
+ if (xen_pv_domain() && boot_cpu_has(X86_FEATURE_SPEC_CTRL)) {
+ rdmsrl(MSR_IA32_SPEC_CTRL, tmp);
+ this_cpu_write(spec_ctrl, tmp);
+ wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+ }
+}
+
void xen_arch_resume(void)
{
on_each_cpu(xen_vcpu_notify_restore,
(void *)CLOCK_EVT_NOTIFY_RESUME, 1);
}
+
+void xen_arch_suspend(void)
+{
+ on_each_cpu(xen_vcpu_notify_suspend, NULL, 1);
+}
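
[Editorial sketch] xen_vcpu_notify_suspend()/restore() above stash each CPU's SPEC_CTRL value in a
per-CPU variable, clear the MSR before suspend, and write the saved value back on resume. A
userspace model of that round trip, with the per-CPU variable and the MSR replaced by arrays, is
sketched below; it is illustrative only.

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 2

static uint64_t hw_spec_ctrl[NR_CPUS];          /* stands in for the real MSR */
static uint64_t saved_spec_ctrl[NR_CPUS];       /* DEFINE_PER_CPU(u64, spec_ctrl) */

static void notify_suspend(int cpu)
{
        saved_spec_ctrl[cpu] = hw_spec_ctrl[cpu];       /* rdmsrl() */
        hw_spec_ctrl[cpu] = 0;                          /* wrmsrl(..., 0) */
}

static void notify_restore(int cpu)
{
        hw_spec_ctrl[cpu] = saved_spec_ctrl[cpu];       /* wrmsrl(saved value) */
}

int main(void)
{
        int cpu;

        hw_spec_ctrl[0] = 1;                            /* IBRS was on here */
        for (cpu = 0; cpu < NR_CPUS; cpu++)
                notify_suspend(cpu);
        for (cpu = 0; cpu < NR_CPUS; cpu++)
                notify_restore(cpu);
        printf("cpu0 SPEC_CTRL after resume: %llu\n",
               (unsigned long long)hw_spec_ctrl[0]);
        return 0;
}
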
diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
index 485b69585540..3c2465aa5951 100644
--- a/arch/x86/xen/xen-head.S
+++ b/arch/x86/xen/xen-head.S
@@ -8,7 +8,9 @@

#include <asm/boot.h>
#include <asm/asm.h>
+#include <asm/msr.h>
#include <asm/page_types.h>
+#include <asm/percpu.h>

#include <xen/interface/elfnote.h>
#include <xen/interface/features.h>
@@ -42,7 +44,20 @@ ENTRY(startup_xen)
#else
mov %rsi,xen_start_info
mov $init_thread_union+THREAD_SIZE,%rsp
+
+ /* Set up %gs.
+ *
+ * The base of %gs always points to the bottom of the irqstack
+ * union. If the stack protector canary is enabled, it is
+ * located at %gs:40. Note that, on SMP, the boot cpu uses
+ * init data section till per cpu areas are set up.
+ */
+ movl $MSR_GS_BASE,%ecx
+ movq $INIT_PER_CPU_VAR(irq_stack_union),%rax
+ cdq
+ wrmsr
#endif
+
jmp xen_start_kernel

__FINIT
diff --git a/arch/xtensa/include/asm/futex.h b/arch/xtensa/include/asm/futex.h
index b39531babec0..72bfc1cbc2b5 100644
--- a/arch/xtensa/include/asm/futex.h
+++ b/arch/xtensa/include/asm/futex.h
@@ -109,7 +109,6 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval)
{
int ret = 0;
- u32 prev;

if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT;
@@ -120,26 +119,24 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,

__asm__ __volatile__ (
" # futex_atomic_cmpxchg_inatomic\n"
- "1: l32i %1, %3, 0\n"
- " mov %0, %5\n"
- " wsr %1, scompare1\n"
- "2: s32c1i %0, %3, 0\n"
- "3:\n"
+ " wsr %5, scompare1\n"
+ "1: s32c1i %1, %4, 0\n"
+ " s32i %1, %6, 0\n"
+ "2:\n"
" .section .fixup,\"ax\"\n"
" .align 4\n"
- "4: .long 3b\n"
- "5: l32r %1, 4b\n"
- " movi %0, %6\n"
+ "3: .long 2b\n"
+ "4: l32r %1, 3b\n"
+ " movi %0, %7\n"
" jx %1\n"
" .previous\n"
" .section __ex_table,\"a\"\n"
- " .long 1b,5b,2b,5b\n"
+ " .long 1b,4b\n"
" .previous\n"
- : "+r" (ret), "=&r" (prev), "+m" (*uaddr)
- : "r" (uaddr), "r" (oldval), "r" (newval), "I" (-EFAULT)
+ : "+r" (ret), "+r" (newval), "+m" (*uaddr), "+m" (*uval)
+ : "r" (uaddr), "r" (oldval), "r" (uval), "I" (-EFAULT)
: "memory");

- *uval = prev;
return ret;
}
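
[Editorial sketch] The xtensa rewrite above makes the s32c1i sequence follow the generic
futex_atomic_cmpxchg_inatomic() contract: install newval only if *uaddr still holds oldval, and
always report the value that was actually observed through *uval. The C11 sketch below models that
contract in userspace (fault handling for bad user pointers, which the fixup section provides in
the assembly, is omitted).

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static int model_cmpxchg(uint32_t *uval, _Atomic uint32_t *uaddr,
                         uint32_t oldval, uint32_t newval)
{
        uint32_t expected = oldval;

        atomic_compare_exchange_strong(uaddr, &expected, newval);
        *uval = expected;               /* observed value, matched or not */
        return 0;                       /* the kernel returns -EFAULT on a faulting pointer */
}

int main(void)
{
        _Atomic uint32_t futex_word = 0;
        uint32_t seen;

        model_cmpxchg(&seen, &futex_word, 0, 42);       /* matches: 0 -> 42 */
        printf("seen=%u word=%u\n", seen, (unsigned)futex_word);

        model_cmpxchg(&seen, &futex_word, 0, 7);        /* no match: word stays 42 */
        printf("seen=%u word=%u\n", seen, (unsigned)futex_word);
        return 0;
}
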

diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 30c1ae491fd4..9821a116047e 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -149,6 +149,7 @@ EXPORT_SYMBOL_GPL(af_alg_release_parent);

static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
+ const u32 allowed = CRYPTO_ALG_KERN_DRIVER_ONLY;
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
struct sockaddr_alg *sa = (void *)uaddr;
@@ -156,6 +157,10 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
void *private;
int err;

+ /* If caller uses non-allowed flag, return error. */
+ if ((sa->salg_feat & ~allowed) || (sa->salg_mask & ~allowed))
+ return -EINVAL;
+
if (sock->state == SS_CONNECTED)
return -EINVAL;

diff --git a/crypto/ahash.c b/crypto/ahash.c
index c2982958a2a0..d5f9c1586c42 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -203,11 +203,18 @@ int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
unsigned long alignmask = crypto_ahash_alignmask(tfm);
+ int err;

if ((unsigned long)key & alignmask)
- return ahash_setkey_unaligned(tfm, key, keylen);
+ err = ahash_setkey_unaligned(tfm, key, keylen);
+ else
+ err = tfm->setkey(tfm, key, keylen);

- return tfm->setkey(tfm, key, keylen);
+ if (err)
+ return err;
+
+ crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
+ return 0;
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);

@@ -380,7 +387,12 @@ EXPORT_SYMBOL_GPL(crypto_ahash_finup);

int crypto_ahash_digest(struct ahash_request *req)
{
- return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->digest);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+
+ if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+ return -ENOKEY;
+
+ return crypto_ahash_op(req, tfm->digest);
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);

@@ -466,7 +478,6 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
struct ahash_alg *alg = crypto_ahash_alg(hash);

hash->setkey = ahash_nosetkey;
- hash->has_setkey = false;
hash->export = ahash_no_export;
hash->import = ahash_no_import;

@@ -481,7 +492,8 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)

if (alg->setkey) {
hash->setkey = alg->setkey;
- hash->has_setkey = true;
+ if (!(alg->halg.base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
+ crypto_ahash_set_flags(hash, CRYPTO_TFM_NEED_KEY);
}
if (alg->export)
hash->export = alg->export;
@@ -630,5 +642,16 @@ struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
}
EXPORT_SYMBOL_GPL(ahash_attr_alg);

+bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg)
+{
+ struct crypto_alg *alg = &halg->base;
+
+ if (alg->cra_type != &crypto_ahash_type)
+ return crypto_shash_alg_has_setkey(__crypto_shash_alg(alg));
+
+ return __crypto_ahash_alg(alg)->setkey != NULL;
+}
+EXPORT_SYMBOL_GPL(crypto_hash_alg_has_setkey);
+
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");
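
[Editorial sketch] The ahash/shash changes above replace the old has_setkey bookkeeping with a
CRYPTO_TFM_NEED_KEY flag: a transform whose algorithm requires a key starts with the flag set,
digest fails with -ENOKEY, and only a successful setkey clears it; algorithms marked
CRYPTO_ALG_OPTIONAL_KEY (crc32, crc32c) never set it. The userspace sketch below models that state
machine with invented names; it is not the kernel crypto API.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define ENOKEY 126
#define EINVAL 22

struct model_tfm {
        bool need_key;                  /* CRYPTO_TFM_NEED_KEY */
        bool optional_key;              /* CRYPTO_ALG_OPTIONAL_KEY */
};

static void model_init(struct model_tfm *tfm, bool has_setkey, bool optional)
{
        tfm->optional_key = optional;
        tfm->need_key = has_setkey && !optional;
}

static int model_setkey(struct model_tfm *tfm, const void *key, size_t len)
{
        if (!key || !len)
                return -EINVAL;
        tfm->need_key = false;          /* only cleared on success */
        return 0;
}

static int model_digest(struct model_tfm *tfm)
{
        if (tfm->need_key)
                return -ENOKEY;
        return 0;
}

int main(void)
{
        struct model_tfm keyed_hash;

        model_init(&keyed_hash, true, false);
        printf("digest before setkey: %d\n", model_digest(&keyed_hash)); /* -ENOKEY */
        model_setkey(&keyed_hash, "secret", 6);
        printf("digest after setkey:  %d\n", model_digest(&keyed_hash)); /* 0 */
        return 0;
}
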
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index d11d431251f7..3aa8890feef6 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -34,11 +34,6 @@ struct hash_ctx {
struct ahash_request req;
};

-struct algif_hash_tfm {
- struct crypto_ahash *hash;
- bool has_key;
-};
-
static int hash_sendmsg(struct kiocb *unused, struct socket *sock,
struct msghdr *msg, size_t ignored)
{
@@ -258,7 +253,7 @@ static int hash_check_key(struct socket *sock)
int err = 0;
struct sock *psk;
struct alg_sock *pask;
- struct algif_hash_tfm *tfm;
+ struct crypto_ahash *tfm;
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);

@@ -272,7 +267,7 @@ static int hash_check_key(struct socket *sock)

err = -ENOKEY;
lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
- if (!tfm->has_key)
+ if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
goto unlock;

if (!pask->refcnt++)
@@ -363,41 +358,17 @@ static struct proto_ops algif_hash_ops_nokey = {

static void *hash_bind(const char *name, u32 type, u32 mask)
{
- struct algif_hash_tfm *tfm;
- struct crypto_ahash *hash;
-
- tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
- if (!tfm)
- return ERR_PTR(-ENOMEM);
-
- hash = crypto_alloc_ahash(name, type, mask);
- if (IS_ERR(hash)) {
- kfree(tfm);
- return ERR_CAST(hash);
- }
-
- tfm->hash = hash;
-
- return tfm;
+ return crypto_alloc_ahash(name, type, mask);
}

static void hash_release(void *private)
{
- struct algif_hash_tfm *tfm = private;
-
- crypto_free_ahash(tfm->hash);
- kfree(tfm);
+ crypto_free_ahash(private);
}

static int hash_setkey(void *private, const u8 *key, unsigned int keylen)
{
- struct algif_hash_tfm *tfm = private;
- int err;
-
- err = crypto_ahash_setkey(tfm->hash, key, keylen);
- tfm->has_key = !err;
-
- return err;
+ return crypto_ahash_setkey(private, key, keylen);
}

static void hash_sock_destruct(struct sock *sk)
@@ -413,12 +384,11 @@ static void hash_sock_destruct(struct sock *sk)

static int hash_accept_parent_nokey(void *private, struct sock *sk)
{
- struct hash_ctx *ctx;
+ struct crypto_ahash *tfm = private;
struct alg_sock *ask = alg_sk(sk);
- struct algif_hash_tfm *tfm = private;
- struct crypto_ahash *hash = tfm->hash;
- unsigned len = sizeof(*ctx) + crypto_ahash_reqsize(hash);
- unsigned ds = crypto_ahash_digestsize(hash);
+ struct hash_ctx *ctx;
+ unsigned int len = sizeof(*ctx) + crypto_ahash_reqsize(tfm);
+ unsigned ds = crypto_ahash_digestsize(tfm);

ctx = sock_kmalloc(sk, len, GFP_KERNEL);
if (!ctx)
@@ -438,7 +408,7 @@ static int hash_accept_parent_nokey(void *private, struct sock *sk)

ask->private = ctx;

- ahash_request_set_tfm(&ctx->req, hash);
+ ahash_request_set_tfm(&ctx->req, tfm);
ahash_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
af_alg_complete, &ctx->completion);

@@ -449,9 +419,9 @@ static int hash_accept_parent_nokey(void *private, struct sock *sk)

static int hash_accept_parent(void *private, struct sock *sk)
{
- struct algif_hash_tfm *tfm = private;
+ struct crypto_ahash *tfm = private;

- if (!tfm->has_key && crypto_ahash_has_setkey(tfm->hash))
+ if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
return -ENOKEY;

return hash_accept_parent_nokey(private, sk);
diff --git a/crypto/crc32.c b/crypto/crc32.c
index 187ded28cb0b..06666302f382 100644
--- a/crypto/crc32.c
+++ b/crypto/crc32.c
@@ -133,6 +133,7 @@ static struct shash_alg alg = {
.cra_name = "crc32",
.cra_driver_name = "crc32-table",
.cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.cra_blocksize = CHKSUM_BLOCK_SIZE,
.cra_ctxsize = sizeof(u32),
.cra_module = THIS_MODULE,
diff --git a/crypto/crc32c_generic.c b/crypto/crc32c_generic.c
index 4c0a0e271876..372320399622 100644
--- a/crypto/crc32c_generic.c
+++ b/crypto/crc32c_generic.c
@@ -146,6 +146,7 @@ static struct shash_alg alg = {
.cra_name = "crc32c",
.cra_driver_name = "crc32c-generic",
.cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.cra_blocksize = CHKSUM_BLOCK_SIZE,
.cra_alignmask = 3,
.cra_ctxsize = sizeof(struct chksum_ctx),
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index d85fab975514..3443c1145a1f 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -603,7 +603,8 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
if (err)
goto out_free_inst;

- inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC;
+ inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC |
+ (alg->cra_flags & CRYPTO_ALG_OPTIONAL_KEY);

inst->alg.halg.digestsize = salg->digestsize;
inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);
@@ -617,7 +618,8 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
inst->alg.finup = cryptd_hash_finup_enqueue;
inst->alg.export = cryptd_hash_export;
inst->alg.import = cryptd_hash_import;
- inst->alg.setkey = cryptd_hash_setkey;
+ if (crypto_shash_alg_has_setkey(salg))
+ inst->alg.setkey = cryptd_hash_setkey;
inst->alg.digest = cryptd_hash_digest_enqueue;

err = ahash_register_instance(tmpl, inst);
diff --git a/crypto/shash.c b/crypto/shash.c
index 73c065321867..194f7b1ff5cb 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -57,11 +57,18 @@ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
{
struct shash_alg *shash = crypto_shash_alg(tfm);
unsigned long alignmask = crypto_shash_alignmask(tfm);
+ int err;

if ((unsigned long)key & alignmask)
- return shash_setkey_unaligned(tfm, key, keylen);
+ err = shash_setkey_unaligned(tfm, key, keylen);
+ else
+ err = shash->setkey(tfm, key, keylen);
+
+ if (err)
+ return err;

- return shash->setkey(tfm, key, keylen);
+ crypto_shash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
+ return 0;
}
EXPORT_SYMBOL_GPL(crypto_shash_setkey);

@@ -180,6 +187,9 @@ int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
struct shash_alg *shash = crypto_shash_alg(tfm);
unsigned long alignmask = crypto_shash_alignmask(tfm);

+ if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+ return -ENOKEY;
+
if (((unsigned long)data | (unsigned long)out) & alignmask)
return shash_digest_unaligned(desc, data, len, out);

@@ -359,7 +369,8 @@ int crypto_init_shash_ops_async(struct crypto_tfm *tfm)
crt->digest = shash_async_digest;
crt->setkey = shash_async_setkey;

- crt->has_setkey = alg->setkey != shash_no_setkey;
+ crypto_ahash_set_flags(crt, crypto_shash_get_flags(shash) &
+ CRYPTO_TFM_NEED_KEY);

if (alg->export)
crt->export = shash_async_export;
@@ -519,8 +530,14 @@ static unsigned int crypto_shash_ctxsize(struct crypto_alg *alg, u32 type,
static int crypto_shash_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_shash *hash = __crypto_shash_cast(tfm);
+ struct shash_alg *alg = crypto_shash_alg(hash);
+
+ hash->descsize = alg->descsize;
+
+ if (crypto_shash_alg_has_setkey(alg) &&
+ !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
+ crypto_shash_set_flags(hash, CRYPTO_TFM_NEED_KEY);

- hash->descsize = crypto_shash_alg(hash)->descsize;
return 0;
}

diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 631646e13119..bc4ff68c9816 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -358,11 +358,29 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x9d03), board_ahci }, /* Sunrise Point-LP AHCI */
{ PCI_VDEVICE(INTEL, 0x9d05), board_ahci }, /* Sunrise Point-LP RAID */
{ PCI_VDEVICE(INTEL, 0x9d07), board_ahci }, /* Sunrise Point-LP RAID */
+ { PCI_VDEVICE(INTEL, 0xa102), board_ahci }, /* Sunrise Point-H AHCI */
{ PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H AHCI */
- { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H RAID */
{ PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */
+ { PCI_VDEVICE(INTEL, 0xa106), board_ahci }, /* Sunrise Point-H RAID */
{ PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */
{ PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */
+ { PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* Lewisburg RAID*/
+ { PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Lewisburg AHCI*/
+ { PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* Lewisburg RAID*/
+ { PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Lewisburg RAID*/
+ { PCI_VDEVICE(INTEL, 0xa182), board_ahci }, /* Lewisburg AHCI*/
+ { PCI_VDEVICE(INTEL, 0xa186), board_ahci }, /* Lewisburg RAID*/
+ { PCI_VDEVICE(INTEL, 0xa1d2), board_ahci }, /* Lewisburg RAID*/
+ { PCI_VDEVICE(INTEL, 0xa1d6), board_ahci }, /* Lewisburg RAID*/
+ { PCI_VDEVICE(INTEL, 0xa202), board_ahci }, /* Lewisburg AHCI*/
+ { PCI_VDEVICE(INTEL, 0xa206), board_ahci }, /* Lewisburg RAID*/
+ { PCI_VDEVICE(INTEL, 0xa252), board_ahci }, /* Lewisburg RAID*/
+ { PCI_VDEVICE(INTEL, 0xa256), board_ahci }, /* Lewisburg RAID*/
+ { PCI_VDEVICE(INTEL, 0xa356), board_ahci }, /* Cannon Lake PCH-H RAID */
+ { PCI_VDEVICE(INTEL, 0x0f22), board_ahci }, /* Bay Trail AHCI */
+ { PCI_VDEVICE(INTEL, 0x0f23), board_ahci }, /* Bay Trail AHCI */
+ { PCI_VDEVICE(INTEL, 0x22a3), board_ahci }, /* Cherry Trail AHCI */
+ { PCI_VDEVICE(INTEL, 0x5ae3), board_ahci }, /* Apollo Lake AHCI */

/* JMicron 360/1/3/5/6, match class to avoid IDE function */
{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@@ -511,7 +529,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
.driver_data = board_ahci_yes_fbs },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9230),
.driver_data = board_ahci_yes_fbs },
- { PCI_DEVICE(PCI_VENDOR_ID_TTI, 0x0642),
+ { PCI_DEVICE(PCI_VENDOR_ID_TTI, 0x0642), /* highpoint rocketraid 642L */
+ .driver_data = board_ahci_yes_fbs },
+ { PCI_DEVICE(PCI_VENDOR_ID_TTI, 0x0645), /* highpoint rocketraid 644L */
.driver_data = board_ahci_yes_fbs },

/* Promise */
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index cfa2982d7286..d5fb981acec9 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4224,13 +4224,29 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER },
{ "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },

+ /* Crucial BX100 SSD 500GB has broken LPM support */
+ { "CT500BX100SSD1", NULL, ATA_HORKAGE_NOLPM },
+
+ /* 512GB MX100 with MU01 firmware has both queued TRIM and LPM issues */
+ { "Crucial_CT512MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
+ ATA_HORKAGE_NOLPM, },
+ /* 512GB MX100 with newer firmware has only LPM issues */
+ { "Crucial_CT512MX100*", NULL, ATA_HORKAGE_NOLPM, },
+
+ /* 480GB+ M500 SSDs have both queued TRIM and LPM issues */
+ { "Crucial_CT480M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
+ ATA_HORKAGE_NOLPM, },
+ { "Crucial_CT960M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
+ ATA_HORKAGE_NOLPM, },
+
/* devices that don't properly handle queued TRIM commands */
{ "Micron_M500_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
{ "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
{ "Micron_M5[15]0_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM, },
{ "Crucial_CT*M550*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM, },
{ "Crucial_CT*MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM, },
- { "Samsung SSD 8*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
+ { "Samsung SSD 840*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
+ { "Samsung SSD 850*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
{ "FCCT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },

/* devices that don't properly handle TRIM commands */
@@ -5139,8 +5155,7 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
* We guarantee to LLDs that they will have at least one
* non-zero sg if the command is a data command.
*/
- if (WARN_ON_ONCE(ata_is_data(prot) &&
- (!qc->sg || !qc->n_elem || !qc->nbytes)))
+ if (ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes))
goto sys_err;

if (ata_is_dma(prot) || (ata_is_pio(prot) &&
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 0550c76f4e6c..2cb7cd9b9e29 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -815,7 +815,8 @@ void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap)

if (ap->pflags & ATA_PFLAG_LOADING)
ap->pflags &= ~ATA_PFLAG_LOADING;
- else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
+ else if ((ap->pflags & ATA_PFLAG_SCSI_HOTPLUG) &&
+ !(ap->flags & ATA_FLAG_SAS_HOST))
schedule_delayed_work(&ap->hotplug_task, 0);

if (ap->pflags & ATA_PFLAG_RECOVERED)
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index b297d43e1121..e03b0b40fcac 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -3435,7 +3435,9 @@ static inline int __ata_scsi_queuecmd(struct scsi_cmnd *scmd,
if (likely((scsi_op != ATA_16) || !atapi_passthru16)) {
/* relay SCSI command to ATAPI device */
int len = COMMAND_SIZE(scsi_op);
- if (unlikely(len > scmd->cmd_len || len > dev->cdb_len))
+ if (unlikely(len > scmd->cmd_len ||
+ len > dev->cdb_len ||
+ scmd->cmd_len > ATAPI_CDB_LEN))
goto bad_cdb_len;

xlat_func = atapi_xlat;
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 758ac442c5b5..89d513975992 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2796,7 +2796,7 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
pd->pkt_dev = MKDEV(pktdev_major, idx);
ret = pkt_new_dev(pd, dev);
if (ret)
- goto out_new_dev;
+ goto out_mem2;

/* inherit events of the host device */
disk->events = pd->bdev->bd_disk->events;
@@ -2814,8 +2814,6 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
mutex_unlock(&ctl_mutex);
return 0;

-out_new_dev:
- blk_cleanup_queue(disk->queue);
out_mem2:
put_disk(disk);
out_mem:
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index d1c04f0f79b6..cebc758da98f 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -117,8 +117,11 @@ static int atomic_dec_return_safe(atomic_t *v)

#define RBD_FEATURE_LAYERING (1<<0)
#define RBD_FEATURE_STRIPINGV2 (1<<1)
-#define RBD_FEATURES_ALL \
- (RBD_FEATURE_LAYERING | RBD_FEATURE_STRIPINGV2)
+#define RBD_FEATURE_OPERATIONS (1<<8)
+
+#define RBD_FEATURES_ALL (RBD_FEATURE_LAYERING | \
+ RBD_FEATURE_STRIPINGV2 | \
+ RBD_FEATURE_OPERATIONS)

/* Features supported by this (client software) implementation. */

diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 898b84bba28a..aac259d6e248 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -2357,7 +2357,7 @@ static int cdrom_ioctl_media_changed(struct cdrom_device_info *cdi,
if (!CDROM_CAN(CDC_SELECT_DISC) || arg == CDSL_CURRENT)
return media_changed(cdi, 1);

- if ((unsigned int)arg >= cdi->capacity)
+ if (arg >= cdi->capacity)
return -EINVAL;

info = kmalloc(sizeof(*info), GFP_KERNEL);
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index cfb9089887bd..780e3879e13e 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -1029,6 +1029,10 @@ int tpm_get_random(u32 chip_num, u8 *out, size_t max)
break;

recd = be32_to_cpu(tpm_cmd.params.getrandom_out.rng_data_len);
+ if (recd > num_bytes) {
+ total = -EFAULT;
+ break;
+ }
memcpy(dest, tpm_cmd.params.getrandom_out.rng_data, recd);

dest += recd;
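
The hunk above stops trusting the length the TPM reports back ("recd") and
bails out before the memcpy if it exceeds the space the caller asked for. As
a rough illustration only (hypothetical names, plain userspace C, not part of
the patch), the same "bound a device-reported length before copying" pattern
looks like this:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical response whose length field comes from hardware (untrusted). */
struct response {
	uint32_t len;		/* device-reported payload length */
	uint8_t data[64];	/* payload buffer */
};

/*
 * Copy at most 'space' bytes of the payload into 'dest'.  Returns the number
 * of bytes copied, or -1 when the reported length is not plausible
 * (mirroring the -EFAULT bail-out above).
 */
static int copy_bounded(uint8_t *dest, size_t space, const struct response *r)
{
	if (r->len > sizeof(r->data) || r->len > space)
		return -1;
	memcpy(dest, r->data, r->len);
	return (int)r->len;
}

int main(void)
{
	struct response r = { .len = 8, .data = "ABCDEFGH" };
	uint8_t out[16];

	printf("copied %d bytes\n", copy_bounded(out, sizeof(out), &r));
	r.len = 1000;	/* lie about the length */
	printf("bogus copy -> %d\n", copy_bounded(out, sizeof(out), &r));
	return 0;
}
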
diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c
index 472af4bb1b61..ce41780a4213 100644
--- a/drivers/char/tpm/tpm_i2c_infineon.c
+++ b/drivers/char/tpm/tpm_i2c_infineon.c
@@ -436,7 +436,8 @@ static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
int size = 0;
- int expected, status;
+ int status;
+ u32 expected;

if (count < TPM_HEADER_SIZE) {
size = -EIO;
@@ -451,7 +452,7 @@ static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count)
}

expected = be32_to_cpu(*(__be32 *)(buf + 2));
- if ((size_t) expected > count) {
+ if (((size_t) expected > count) || (expected < TPM_HEADER_SIZE)) {
size = -EIO;
goto out;
}
diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c
index 23c7b137a7fd..39635429ea3f 100644
--- a/drivers/char/tpm/tpm_i2c_nuvoton.c
+++ b/drivers/char/tpm/tpm_i2c_nuvoton.c
@@ -267,7 +267,11 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count)
struct device *dev = chip->dev;
struct i2c_client *client = to_i2c_client(dev);
s32 rc;
- int expected, status, burst_count, retries, size = 0;
+ int status;
+ int burst_count;
+ int retries;
+ int size = 0;
+ u32 expected;

if (count < TPM_HEADER_SIZE) {
i2c_nuvoton_ready(chip); /* return to idle */
@@ -309,7 +313,7 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count)
* to machine native
*/
expected = be32_to_cpu(*(__be32 *) (buf + 2));
- if (expected > count) {
+ if (expected > count || expected < size) {
dev_err(dev, "%s() expected > count\n", __func__);
size = -EIO;
continue;
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index 415e47d79a46..27efd4211cc3 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -234,7 +234,8 @@ static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
int size = 0;
- int expected, status;
+ int status;
+ u32 expected;

if (count < TPM_HEADER_SIZE) {
size = -EIO;
@@ -249,7 +250,7 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
}

expected = be32_to_cpu(*(__be32 *) (buf + 2));
- if (expected > count) {
+ if (expected > count || expected < TPM_HEADER_SIZE) {
size = -EIO;
goto out;
}
diff --git a/drivers/clocksource/fsl_ftm_timer.c b/drivers/clocksource/fsl_ftm_timer.c
index 454227d4f895..de38acab9602 100644
--- a/drivers/clocksource/fsl_ftm_timer.c
+++ b/drivers/clocksource/fsl_ftm_timer.c
@@ -282,7 +282,7 @@ static int __init __ftm_clk_init(struct device_node *np, char *cnt_name,

static unsigned long __init ftm_clk_init(struct device_node *np)
{
- unsigned long freq;
+ long freq;

freq = __ftm_clk_init(np, "ftm-evt-counter-en", "ftm-evt");
if (freq <= 0)
diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c
index 176e5da16829..d719c7e04a75 100644
--- a/drivers/cpufreq/s3c24xx-cpufreq.c
+++ b/drivers/cpufreq/s3c24xx-cpufreq.c
@@ -370,7 +370,13 @@ struct clk *s3c_cpufreq_clk_get(struct device *dev, const char *name)
static int s3c_cpufreq_init(struct cpufreq_policy *policy)
{
policy->clk = clk_arm;
- return cpufreq_generic_init(policy, ftab, cpu_cur.info->latency);
+
+ policy->cpuinfo.transition_latency = cpu_cur.info->latency;
+
+ if (ftab)
+ return cpufreq_table_validate_and_show(policy, ftab);
+
+ return 0;
}

static int __init s3c_cpufreq_initclks(void)
diff --git a/drivers/crypto/bfin_crc.c b/drivers/crypto/bfin_crc.c
index b099e33cb073..d1fc02db0436 100644
--- a/drivers/crypto/bfin_crc.c
+++ b/drivers/crypto/bfin_crc.c
@@ -514,7 +514,8 @@ static struct ahash_alg algs = {
.cra_driver_name = DRIVER_NAME,
.cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC,
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_OPTIONAL_KEY,
.cra_blocksize = CHKSUM_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct bfin_crypto_crc_ctx),
.cra_alignmask = 3,
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index dcc8bcf625c4..841b494ec011 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -196,12 +196,16 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
* without any error (HW optimizations for later
* CAAM eras), then try again.
*/
+ if (ret)
+ break;
+
rdsta_val =
rd_reg32(&topregs->ctrl.r4tst[0].rdsta) & RDSTA_IFMASK;
- if (status || !(rdsta_val & (1 << sh_idx)))
+ if (status || !(rdsta_val & (1 << sh_idx))) {
ret = -EAGAIN;
- if (ret)
break;
+ }
+

dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx);
/* Clear the contents before recreating the descriptor */
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index cb86d4487605..367a53f38cd6 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -426,15 +426,21 @@ static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
uint32_t aes_control;
int err;
unsigned long flags;
+ u8 *iv;

aes_control = SSS_AES_KEY_CHANGE_MODE;
if (mode & FLAGS_AES_DECRYPT)
aes_control |= SSS_AES_MODE_DECRYPT;

- if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CBC)
+ if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CBC) {
aes_control |= SSS_AES_CHAIN_MODE_CBC;
- else if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CTR)
+ iv = req->info;
+ } else if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CTR) {
aes_control |= SSS_AES_CHAIN_MODE_CTR;
+ iv = req->info;
+ } else {
+ iv = NULL; /* AES_ECB */
+ }

if (dev->ctx->keylen == AES_KEYSIZE_192)
aes_control |= SSS_AES_KEY_SIZE_192;
@@ -465,7 +471,7 @@ static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
goto outdata_error;

SSS_AES_WRITE(dev, AES_CONTROL, aes_control);
- s5p_set_aes(dev, dev->ctx->aes_key, req->info, dev->ctx->keylen);
+ s5p_set_aes(dev, dev->ctx->aes_key, iv, dev->ctx->keylen);

s5p_set_dma_indata(dev, req->src);
s5p_set_dma_outdata(dev, req->dst);
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 3ec5509b1433..decaa3698e12 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -592,7 +592,7 @@ struct devfreq *devm_devfreq_add_device(struct device *dev,
devfreq = devfreq_add_device(dev, profile, governor_name, data);
if (IS_ERR(devfreq)) {
devres_free(ptr);
- return ERR_PTR(-ENOMEM);
+ return devfreq;
}

*ptr = devfreq;
diff --git a/drivers/edac/octeon_edac-lmc.c b/drivers/edac/octeon_edac-lmc.c
index 4bd10f94f068..c0c80ae8c47c 100644
--- a/drivers/edac/octeon_edac-lmc.c
+++ b/drivers/edac/octeon_edac-lmc.c
@@ -79,6 +79,7 @@ static void octeon_lmc_edac_poll_o2(struct mem_ctl_info *mci)
if (!pvt->inject)
int_reg.u64 = cvmx_read_csr(CVMX_LMCX_INT(mci->mc_idx));
else {
+ int_reg.u64 = 0;
if (pvt->error_type == 1)
int_reg.s.sec_err = 1;
if (pvt->error_type == 2)
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index 35286fe52823..4939e48c3517 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -15,7 +15,7 @@
* of and an antecedent to, SMBIOS, which stands for System
* Management BIOS. See further: http://www.dmtf.org/standards
*/
-static const char dmi_empty_string[] = " ";
+static const char dmi_empty_string[] = "";

static u16 __initdata dmi_ver;
/*
@@ -36,25 +36,21 @@ static int dmi_memdev_nr;
static const char * __init dmi_string_nosave(const struct dmi_header *dm, u8 s)
{
const u8 *bp = ((u8 *) dm) + dm->length;
+ const u8 *nsp;

if (s) {
- s--;
- while (s > 0 && *bp) {
+ while (--s > 0 && *bp)
bp += strlen(bp) + 1;
- s--;
- }
-
- if (*bp != 0) {
- size_t len = strlen(bp)+1;
- size_t cmp_len = len > 8 ? 8 : len;

- if (!memcmp(bp, dmi_empty_string, cmp_len))
- return dmi_empty_string;
+ /* Strings containing only spaces are considered empty */
+ nsp = bp;
+ while (*nsp == ' ')
+ nsp++;
+ if (*nsp != '\0')
return bp;
- }
}

- return "";
+ return dmi_empty_string;
}

static const char * __init dmi_string(const struct dmi_header *dm, u8 s)
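
For reference, the rewritten lookup above treats a DMI string that contains
nothing but spaces as empty and hands back the shared empty string.  A
stand-alone sketch of just that test (hypothetical helper name, illustration
only, not part of the patch):

#include <stdio.h>

static const char empty_string[] = "";

/*
 * Return the canonical empty string when 's' holds nothing but spaces,
 * otherwise return 's' itself -- the convention dmi_string_nosave() now
 * applies to DMI string fields.
 */
static const char *skip_space_only(const char *s)
{
	const char *nsp = s;

	while (*nsp == ' ')
		nsp++;
	return (*nsp != '\0') ? s : empty_string;
}

int main(void)
{
	printf("[%s]\n", skip_space_only("   "));		/* [] */
	printf("[%s]\n", skip_space_only("  vendor"));	/* [  vendor] */
	return 0;
}
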
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 1db601e109d0..80d76349da8a 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -105,6 +105,9 @@ static struct edid_quirk {
/* AEO model 0 reports 8 bpc, but is a 6 bpc panel */
{ "AEO", 0, EDID_QUIRK_FORCE_6BPC },

+ /* CPT panel of Asus UX303LA reports 8 bpc, but is a 6 bpc panel */
+ { "CPT", 0x17df, EDID_QUIRK_FORCE_6BPC },
+
/* Belinea 10 15 55 */
{ "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 },
{ "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 },
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index d22676b89cbb..79bc67cda504 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -302,6 +302,26 @@ static void output_poll_execute(struct work_struct *work)
schedule_delayed_work(delayed_work, DRM_OUTPUT_POLL_PERIOD);
}

+/**
+ * drm_kms_helper_is_poll_worker - is %current task an output poll worker?
+ *
+ * Determine if %current task is an output poll worker. This can be used
+ * to select distinct code paths for output polling versus other contexts.
+ *
+ * One use case is to avoid a deadlock between the output poll worker and
+ * the autosuspend worker wherein the latter waits for polling to finish
+ * upon calling drm_kms_helper_poll_disable(), while the former waits for
+ * runtime suspend to finish upon calling pm_runtime_get_sync() in a
+ * connector ->detect hook.
+ */
+bool drm_kms_helper_is_poll_worker(void)
+{
+ struct work_struct *work = current_work();
+
+ return work && work->func == output_poll_execute;
+}
+EXPORT_SYMBOL(drm_kms_helper_is_poll_worker);
+
/**
* drm_kms_helper_poll_disable - disable output polling
* @dev: drm_device
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 6b2057dbbcd6..5f8e9711c313 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -256,9 +256,15 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
nv_connector->edid = NULL;
}

- ret = pm_runtime_get_sync(connector->dev->dev);
- if (ret < 0 && ret != -EACCES)
- return conn_status;
+ /* Outputs are only polled while runtime active, so acquiring a
+ * runtime PM ref here is unnecessary (and would deadlock upon
+ * runtime suspend because it waits for polling to finish).
+ */
+ if (!drm_kms_helper_is_poll_worker()) {
+ ret = pm_runtime_get_sync(connector->dev->dev);
+ if (ret < 0 && ret != -EACCES)
+ return conn_status;
+ }

nv_encoder = nouveau_connector_ddc_detect(connector);
if (nv_encoder && (i2c = nv_encoder->i2c) != NULL) {
@@ -326,8 +332,10 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)

out:

- pm_runtime_mark_last_busy(connector->dev->dev);
- pm_runtime_put_autosuspend(connector->dev->dev);
+ if (!drm_kms_helper_is_poll_worker()) {
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
+ }

return conn_status;
}
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 10b6329f9ba1..06c62ad349da 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -3299,35 +3299,8 @@ static void cik_gpu_init(struct radeon_device *rdev)
case CHIP_KAVERI:
rdev->config.cik.max_shader_engines = 1;
rdev->config.cik.max_tile_pipes = 4;
- if ((rdev->pdev->device == 0x1304) ||
- (rdev->pdev->device == 0x1305) ||
- (rdev->pdev->device == 0x130C) ||
- (rdev->pdev->device == 0x130F) ||
- (rdev->pdev->device == 0x1310) ||
- (rdev->pdev->device == 0x1311) ||
- (rdev->pdev->device == 0x131C)) {
- rdev->config.cik.max_cu_per_sh = 8;
- rdev->config.cik.max_backends_per_se = 2;
- } else if ((rdev->pdev->device == 0x1309) ||
- (rdev->pdev->device == 0x130A) ||
- (rdev->pdev->device == 0x130D) ||
- (rdev->pdev->device == 0x1313) ||
- (rdev->pdev->device == 0x131D)) {
- rdev->config.cik.max_cu_per_sh = 6;
- rdev->config.cik.max_backends_per_se = 2;
- } else if ((rdev->pdev->device == 0x1306) ||
- (rdev->pdev->device == 0x1307) ||
- (rdev->pdev->device == 0x130B) ||
- (rdev->pdev->device == 0x130E) ||
- (rdev->pdev->device == 0x1315) ||
- (rdev->pdev->device == 0x1318) ||
- (rdev->pdev->device == 0x131B)) {
- rdev->config.cik.max_cu_per_sh = 4;
- rdev->config.cik.max_backends_per_se = 1;
- } else {
- rdev->config.cik.max_cu_per_sh = 3;
- rdev->config.cik.max_backends_per_se = 1;
- }
+ rdev->config.cik.max_cu_per_sh = 8;
+ rdev->config.cik.max_backends_per_se = 2;
rdev->config.cik.max_sh_per_se = 1;
rdev->config.cik.max_texture_channel_caches = 4;
rdev->config.cik.max_gprs = 256;
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index e77ddbcc9d56..60bedd500941 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -66,25 +66,18 @@ void radeon_connector_hotplug(struct drm_connector *connector)
/* don't do anything if sink is not display port, i.e.,
* passive dp->(dvi|hdmi) adaptor
*/
- if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
- int saved_dpms = connector->dpms;
- /* Only turn off the display if it's physically disconnected */
- if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
- } else if (radeon_dp_needs_link_train(radeon_connector)) {
- /* Don't try to start link training before we
- * have the dpcd */
- if (!radeon_dp_getdpcd(radeon_connector))
- return;
-
- /* set it to OFF so that drm_helper_connector_dpms()
- * won't return immediately since the current state
- * is ON at this point.
- */
- connector->dpms = DRM_MODE_DPMS_OFF;
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
- }
- connector->dpms = saved_dpms;
+ if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT &&
+ radeon_hpd_sense(rdev, radeon_connector->hpd.hpd) &&
+ radeon_dp_needs_link_train(radeon_connector)) {
+ /* Don't start link training before we have the DPCD */
+ if (!radeon_dp_getdpcd(radeon_connector))
+ return;
+
+ /* Turn the connector off and back on immediately, which
+ * will trigger link training
+ */
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
}
}
}
@@ -707,9 +700,11 @@ radeon_lvds_detect(struct drm_connector *connector, bool force)
enum drm_connector_status ret = connector_status_disconnected;
int r;

- r = pm_runtime_get_sync(connector->dev->dev);
- if (r < 0)
- return connector_status_disconnected;
+ if (!drm_kms_helper_is_poll_worker()) {
+ r = pm_runtime_get_sync(connector->dev->dev);
+ if (r < 0)
+ return connector_status_disconnected;
+ }

if (encoder) {
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
@@ -739,8 +734,12 @@ radeon_lvds_detect(struct drm_connector *connector, bool force)
/* check acpi lid status ??? */

radeon_connector_update_scratch_regs(connector, ret);
- pm_runtime_mark_last_busy(connector->dev->dev);
- pm_runtime_put_autosuspend(connector->dev->dev);
+
+ if (!drm_kms_helper_is_poll_worker()) {
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
+ }
+
return ret;
}

@@ -842,9 +841,11 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
enum drm_connector_status ret = connector_status_disconnected;
int r;

- r = pm_runtime_get_sync(connector->dev->dev);
- if (r < 0)
- return connector_status_disconnected;
+ if (!drm_kms_helper_is_poll_worker()) {
+ r = pm_runtime_get_sync(connector->dev->dev);
+ if (r < 0)
+ return connector_status_disconnected;
+ }

encoder = radeon_best_single_encoder(connector);
if (!encoder)
@@ -913,8 +914,10 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
radeon_connector_update_scratch_regs(connector, ret);

out:
- pm_runtime_mark_last_busy(connector->dev->dev);
- pm_runtime_put_autosuspend(connector->dev->dev);
+ if (!drm_kms_helper_is_poll_worker()) {
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
+ }

return ret;
}
@@ -977,9 +980,11 @@ radeon_tv_detect(struct drm_connector *connector, bool force)
if (!radeon_connector->dac_load_detect)
return ret;

- r = pm_runtime_get_sync(connector->dev->dev);
- if (r < 0)
- return connector_status_disconnected;
+ if (!drm_kms_helper_is_poll_worker()) {
+ r = pm_runtime_get_sync(connector->dev->dev);
+ if (r < 0)
+ return connector_status_disconnected;
+ }

encoder = radeon_best_single_encoder(connector);
if (!encoder)
@@ -991,8 +996,12 @@ radeon_tv_detect(struct drm_connector *connector, bool force)
if (ret == connector_status_connected)
ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, false);
radeon_connector_update_scratch_regs(connector, ret);
- pm_runtime_mark_last_busy(connector->dev->dev);
- pm_runtime_put_autosuspend(connector->dev->dev);
+
+ if (!drm_kms_helper_is_poll_worker()) {
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
+ }
+
return ret;
}

@@ -1064,9 +1073,11 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
enum drm_connector_status ret = connector_status_disconnected;
bool dret = false, broken_edid = false;

- r = pm_runtime_get_sync(connector->dev->dev);
- if (r < 0)
- return connector_status_disconnected;
+ if (!drm_kms_helper_is_poll_worker()) {
+ r = pm_runtime_get_sync(connector->dev->dev);
+ if (r < 0)
+ return connector_status_disconnected;
+ }

if (!force && radeon_check_hpd_status_unchanged(connector)) {
ret = connector->status;
@@ -1225,8 +1236,10 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
radeon_connector_update_scratch_regs(connector, ret);

exit:
- pm_runtime_mark_last_busy(connector->dev->dev);
- pm_runtime_put_autosuspend(connector->dev->dev);
+ if (!drm_kms_helper_is_poll_worker()) {
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
+ }

return ret;
}
@@ -1480,9 +1493,11 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
struct drm_encoder *encoder = radeon_best_single_encoder(connector);
int r;

- r = pm_runtime_get_sync(connector->dev->dev);
- if (r < 0)
- return connector_status_disconnected;
+ if (!drm_kms_helper_is_poll_worker()) {
+ r = pm_runtime_get_sync(connector->dev->dev);
+ if (r < 0)
+ return connector_status_disconnected;
+ }

if (!force && radeon_check_hpd_status_unchanged(connector)) {
ret = connector->status;
@@ -1557,8 +1572,10 @@ radeon_dp_detect(struct drm_connector *connector, bool force)

radeon_connector_update_scratch_regs(connector, ret);
out:
- pm_runtime_mark_last_busy(connector->dev->dev);
- pm_runtime_put_autosuspend(connector->dev->dev);
+ if (!drm_kms_helper_is_poll_worker()) {
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
+ }

return ret;
}
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 1ba38687a85e..edff18760031 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1339,6 +1339,10 @@ int radeon_device_init(struct radeon_device *rdev,
if ((rdev->flags & RADEON_IS_PCI) &&
(rdev->family <= CHIP_RS740))
rdev->need_dma32 = true;
+#ifdef CONFIG_PPC64
+ if (rdev->family == CHIP_CEDAR)
+ rdev->need_dma32 = true;
+#endif

dma_bits = rdev->need_dma32 ? 32 : 40;
r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index d41505cb35f1..216f9a433f0c 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -34,8 +34,6 @@ void radeon_gem_object_free(struct drm_gem_object *gobj)
struct radeon_bo *robj = gem_to_radeon_bo(gobj);

if (robj) {
- if (robj->gem_base.import_attach)
- drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
radeon_bo_unref(&robj);
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 6c717b257d6d..6b1762e0879d 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -91,6 +91,8 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
mutex_unlock(&bo->rdev->gem.mutex);
radeon_bo_clear_surface_reg(bo);
radeon_bo_clear_va(bo);
+ if (bo->gem_base.import_attach)
+ drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg);
drm_gem_object_release(&bo->gem_base);
kfree(bo);
}
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index a86cc490c35f..6d43fd615a61 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -950,7 +950,7 @@ int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev,
/* calc dclk divider with current vco freq */
dclk_div = radeon_uvd_calc_upll_post_div(vco_freq, dclk,
pd_min, pd_even);
- if (vclk_div > pd_max)
+ if (dclk_div > pd_max)
break; /* vco is too big, it has to stop */

/* calc score with current vco freq */
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 2b04749ad314..b5ef48fdef14 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2974,6 +2974,11 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
max_sclk = 75000;
max_mclk = 80000;
}
+ if ((rdev->pdev->revision == 0xC3) ||
+ (rdev->pdev->device == 0x6665)) {
+ max_sclk = 60000;
+ max_mclk = 80000;
+ }
}
/* Apply dpm quirks */
while (p && p->chip_device != 0) {
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 4f99f8084973..7c1153ccb1ed 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -175,7 +175,8 @@ void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
list_add_tail(&bo->lru, &man->lru);
kref_get(&bo->list_kref);

- if (bo->ttm != NULL) {
+ if (bo->ttm && !(bo->ttm->page_flags &
+ (TTM_PAGE_FLAG_SG | TTM_PAGE_FLAG_SWAPPED))) {
list_add_tail(&bo->swap, &bo->glob->swap_lru);
kref_get(&bo->list_kref);
}
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index 8cd50da22c7e..e9342d0483db 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -256,10 +256,15 @@ static int udl_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
unsigned long start = vma->vm_start;
unsigned long size = vma->vm_end - vma->vm_start;
- unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+ unsigned long offset;
unsigned long page, pos;

- if (offset + size > info->fix.smem_len)
+ if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
+ return -EINVAL;
+
+ offset = vma->vm_pgoff << PAGE_SHIFT;
+
+ if (offset > info->fix.smem_len || size > info->fix.smem_len - offset)
return -EINVAL;

pos = (unsigned long)info->fix.smem_start + offset;
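
The reworked check above avoids the wrap-around that the old
"offset + size > smem_len" form allowed: a huge page offset could overflow
the addition and slip past the test.  Written as a small stand-alone sketch
(hypothetical name range_ok(), illustration only), the overflow-safe form is:

#include <stdbool.h>
#include <stdio.h>

/*
 * Overflow-safe check that [offset, offset + size) lies within a region of
 * 'limit' bytes.  Testing "size <= limit - offset" after "offset <= limit"
 * cannot wrap, unlike the naive "offset + size > limit" form.
 */
static bool range_ok(unsigned long offset, unsigned long size,
		     unsigned long limit)
{
	return offset <= limit && size <= limit - offset;
}

int main(void)
{
	unsigned long limit = 4096;

	printf("%d\n", range_ok(0, 4096, limit));		/* 1 */
	printf("%d\n", range_ok(4000, 200, limit));		/* 0 */
	/* the naive form would wrap here and wrongly accept it */
	printf("%d\n", range_ok(16, (unsigned long)-8, limit));	/* 0 */
	return 0;
}
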
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 8df8344c1098..631622fecaa8 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -2309,6 +2309,9 @@ static const struct hid_device_id hid_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTIME) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYPH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POWERANALYSERCASSY) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_CONVERTERCONTROLLERCASSY) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MACHINETESTCASSY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_JWM) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_DMMP) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIP) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 2b6f0bbaa28b..f6b36b7e70ca 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -488,6 +488,7 @@
#define USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0A4A 0x0a4a
#define USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A 0x0b4a
#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE 0x134a
+#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A 0x094a

#define USB_VENDOR_ID_HUION 0x256c
#define USB_DEVICE_ID_HUION_580 0x006e
@@ -581,6 +582,9 @@
#define USB_DEVICE_ID_LD_MICROCASSYTIME 0x1033
#define USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE 0x1035
#define USB_DEVICE_ID_LD_MICROCASSYPH 0x1038
+#define USB_DEVICE_ID_LD_POWERANALYSERCASSY 0x1040
+#define USB_DEVICE_ID_LD_CONVERTERCONTROLLERCASSY 0x1042
+#define USB_DEVICE_ID_LD_MACHINETESTCASSY 0x1043
#define USB_DEVICE_ID_LD_JWM 0x1080
#define USB_DEVICE_ID_LD_DMMP 0x1081
#define USB_DEVICE_ID_LD_UMIP 0x1090
diff --git a/drivers/hid/hid-roccat-kovaplus.c b/drivers/hid/hid-roccat-kovaplus.c
index 966047711fbf..1073c0d1fae5 100644
--- a/drivers/hid/hid-roccat-kovaplus.c
+++ b/drivers/hid/hid-roccat-kovaplus.c
@@ -37,6 +37,8 @@ static uint kovaplus_convert_event_cpi(uint value)
static void kovaplus_profile_activated(struct kovaplus_device *kovaplus,
uint new_profile_index)
{
+ if (new_profile_index >= ARRAY_SIZE(kovaplus->profile_settings))
+ return;
kovaplus->actual_profile = new_profile_index;
kovaplus->actual_cpi = kovaplus->profile_settings[new_profile_index].cpi_startup_level;
kovaplus->actual_x_sensitivity = kovaplus->profile_settings[new_profile_index].sensitivity_x;
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 07b7e30c1b0e..ba1d9ab5fc97 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -99,6 +99,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0A4A, HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A, HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
+ { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A, HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C077, HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_KEYBOARD_G710_PLUS, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C01A, HID_QUIRK_ALWAYS_POLL },
diff --git a/drivers/iio/imu/adis_trigger.c b/drivers/iio/imu/adis_trigger.c
index f53e9a803a0e..93b99bd93738 100644
--- a/drivers/iio/imu/adis_trigger.c
+++ b/drivers/iio/imu/adis_trigger.c
@@ -47,6 +47,10 @@ int adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev)
if (adis->trig == NULL)
return -ENOMEM;

+ adis->trig->dev.parent = &adis->spi->dev;
+ adis->trig->ops = &adis_trigger_ops;
+ iio_trigger_set_drvdata(adis->trig, adis);
+
ret = request_irq(adis->spi->irq,
&iio_trigger_generic_data_rdy_poll,
IRQF_TRIGGER_RISING,
@@ -55,9 +59,6 @@ int adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev)
if (ret)
goto error_free_trig;

- adis->trig->dev.parent = &adis->spi->dev;
- adis->trig->ops = &adis_trigger_ops;
- iio_trigger_set_drvdata(adis->trig, adis);
ret = iio_trigger_register(adis->trig);

indio_dev->trig = iio_trigger_get(adis->trig);
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index d495b2e82810..7562531ebf0e 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -95,7 +95,7 @@ unsigned int iio_buffer_poll(struct file *filp,
struct iio_dev *indio_dev = filp->private_data;
struct iio_buffer *rb = indio_dev->buffer;

- if (!indio_dev->info)
+ if (!indio_dev->info || rb == NULL)
return 0;

poll_wait(filp, &rb->pollq, wait);
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index bb20bc543a29..0c856b380f48 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -3355,6 +3355,9 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
struct cma_multicast *mc;
int ret;

+ if (!id->device)
+ return -EINVAL;
+
id_priv = container_of(id, struct rdma_id_private, id);
if (!cma_comp(id_priv, RDMA_CM_ADDR_BOUND) &&
!cma_comp(id_priv, RDMA_CM_ADDR_RESOLVED))
@@ -3637,7 +3640,7 @@ static int cma_get_id_stats(struct sk_buff *skb, struct netlink_callback *cb)
RDMA_NL_RDMA_CM_ATTR_SRC_ADDR))
goto out;
if (ibnl_put_attr(skb, nlh,
- rdma_addr_size(cma_src_addr(id_priv)),
+ rdma_addr_size(cma_dst_addr(id_priv)),
cma_dst_addr(id_priv),
RDMA_NL_RDMA_CM_ATTR_DST_ADDR))
goto out;
diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c
index e532ba6d3627..140209c52571 100644
--- a/drivers/infiniband/core/iwpm_util.c
+++ b/drivers/infiniband/core/iwpm_util.c
@@ -513,6 +513,7 @@ int iwpm_send_mapinfo(u8 nl_client, int iwpm_pid)
}
skb_num++;
spin_lock_irqsave(&iwpm_mapinfo_lock, flags);
+ ret = -EINVAL;
for (i = 0; i < IWPM_HASH_BUCKET_SIZE; i++) {
hlist_for_each_entry(map_info, &iwpm_hash_bucket[i],
hlist_node) {
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 81dd84d0b68b..29e646836cde 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -121,7 +121,7 @@ static inline struct ucma_context *_ucma_find_context(int id,
ctx = idr_find(&ctx_idr, id);
if (!ctx)
ctx = ERR_PTR(-ENOENT);
- else if (ctx->file != file)
+ else if (ctx->file != file || !ctx->cm_id)
ctx = ERR_PTR(-EINVAL);
return ctx;
}
@@ -371,6 +371,7 @@ static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
struct rdma_ucm_create_id cmd;
struct rdma_ucm_create_id_resp resp;
struct ucma_context *ctx;
+ struct rdma_cm_id *cm_id;
enum ib_qp_type qp_type;
int ret;

@@ -391,9 +392,9 @@ static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
return -ENOMEM;

ctx->uid = cmd.uid;
- ctx->cm_id = rdma_create_id(ucma_event_handler, ctx, cmd.ps, qp_type);
- if (IS_ERR(ctx->cm_id)) {
- ret = PTR_ERR(ctx->cm_id);
+ cm_id = rdma_create_id(ucma_event_handler, ctx, cmd.ps, qp_type);
+ if (IS_ERR(cm_id)) {
+ ret = PTR_ERR(cm_id);
goto err1;
}

@@ -403,14 +404,19 @@ static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
ret = -EFAULT;
goto err2;
}
+
+ ctx->cm_id = cm_id;
return 0;

err2:
- rdma_destroy_id(ctx->cm_id);
+ rdma_destroy_id(cm_id);
err1:
mutex_lock(&mut);
idr_remove(&ctx_idr, ctx->id);
mutex_unlock(&mut);
+ mutex_lock(&file->mut);
+ list_del(&ctx->list);
+ mutex_unlock(&file->mut);
kfree(ctx);
return ret;
}
@@ -560,19 +566,23 @@ static ssize_t ucma_resolve_ip(struct ucma_file *file,
int in_len, int out_len)
{
struct rdma_ucm_resolve_ip cmd;
+ struct sockaddr *src, *dst;
struct ucma_context *ctx;
int ret;

if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
return -EFAULT;

+ src = (struct sockaddr *) &cmd.src_addr;
+ dst = (struct sockaddr *) &cmd.dst_addr;
+ if (!rdma_addr_size(src) || !rdma_addr_size(dst))
+ return -EINVAL;
+
ctx = ucma_get_ctx(file, cmd.id);
if (IS_ERR(ctx))
return PTR_ERR(ctx);

- ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
- (struct sockaddr *) &cmd.dst_addr,
- cmd.timeout_ms);
+ ret = rdma_resolve_addr(ctx->cm_id, src, dst, cmd.timeout_ms);
ucma_put_ctx(ctx);
return ret;
}
@@ -1050,10 +1060,18 @@ static ssize_t ucma_init_qp_attr(struct ucma_file *file,
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
return -EFAULT;

+ if (cmd.qp_state > IB_QPS_ERR)
+ return -EINVAL;
+
ctx = ucma_get_ctx(file, cmd.id);
if (IS_ERR(ctx))
return PTR_ERR(ctx);

+ if (!ctx->cm_id->device) {
+ ret = -EINVAL;
+ goto out;
+ }
+
resp.qp_attr_mask = 0;
memset(&qp_attr, 0, sizeof qp_attr);
qp_attr.qp_state = cmd.qp_state;
@@ -1187,6 +1205,9 @@ static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
if (IS_ERR(ctx))
return PTR_ERR(ctx);

+ if (unlikely(cmd.optlen > KMALLOC_MAX_SIZE))
+ return -EINVAL;
+
optval = memdup_user((void __user *) (unsigned long) cmd.optval,
cmd.optlen);
if (IS_ERR(optval)) {
@@ -1208,7 +1229,7 @@ static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
{
struct rdma_ucm_notify cmd;
struct ucma_context *ctx;
- int ret;
+ int ret = -EINVAL;

if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
return -EFAULT;
@@ -1217,7 +1238,9 @@ static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
if (IS_ERR(ctx))
return PTR_ERR(ctx);

- ret = rdma_notify(ctx->cm_id, (enum ib_event_type) cmd.event);
+ if (ctx->cm_id->device)
+ ret = rdma_notify(ctx->cm_id, (enum ib_event_type)cmd.event);
+
ucma_put_ctx(ctx);
return ret;
}
@@ -1235,7 +1258,7 @@ static ssize_t ucma_process_join(struct ucma_file *file,
return -ENOSPC;

addr = (struct sockaddr *) &cmd->addr;
- if (cmd->reserved || !cmd->addr_size || (cmd->addr_size != rdma_addr_size(addr)))
+ if (cmd->reserved || cmd->addr_size != rdma_addr_size(addr))
return -EINVAL;

ctx = ucma_get_ctx(file, cmd->id);
@@ -1295,6 +1318,9 @@ static ssize_t ucma_join_ip_multicast(struct ucma_file *file,
join_cmd.uid = cmd.uid;
join_cmd.id = cmd.id;
join_cmd.addr_size = rdma_addr_size((struct sockaddr *) &cmd.addr);
+ if (!join_cmd.addr_size)
+ return -EINVAL;
+
join_cmd.reserved = 0;
memcpy(&join_cmd.addr, &cmd.addr, join_cmd.addr_size);

@@ -1310,6 +1336,9 @@ static ssize_t ucma_join_multicast(struct ucma_file *file,
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
return -EFAULT;

+ if (!rdma_addr_size((struct sockaddr *)&cmd.addr))
+ return -EINVAL;
+
return ucma_process_join(file, &cmd, out_len);
}

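The ucma changes above share one theme: values copied in from userspace
(address sizes, qp_state, optlen) are validated before they are used to size
or index anything.  A minimal illustration of that kind of up-front check,
with made-up names and outside any RDMA context (illustration only):

#include <stdio.h>

/* Hypothetical request taken verbatim from an untrusted caller. */
struct request {
	unsigned int state;	/* caller-chosen index */
};

enum { STATE_RESET, STATE_INIT, STATE_RTR, STATE_RTS, STATE_ERR, STATE_MAX };

static const char *const state_name[STATE_MAX] = {
	"RESET", "INIT", "RTR", "RTS", "ERR",
};

/* Reject out-of-range values up front, as the ucma hunks above do. */
static int handle_request(const struct request *req)
{
	if (req->state >= STATE_MAX)
		return -22;	/* -EINVAL */
	printf("state %s\n", state_name[req->state]);
	return 0;
}

int main(void)
{
	struct request ok = { .state = STATE_RTS };
	struct request bad = { .state = 1000 };

	printf("%d\n", handle_request(&ok));
	printf("%d\n", handle_request(&bad));
	return 0;
}
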
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 775c3cb87097..79a8accc0801 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -2272,9 +2272,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
kfree(ibdev->ib_uc_qpns_bitmap);

err_steer_qp_release:
- if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
- mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
- ibdev->steer_qpn_count);
+ mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
+ ibdev->steer_qpn_count);
err_counter:
for (; i; --i)
if (ibdev->counters[i - 1] != -1)
@@ -2373,11 +2372,9 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
ibdev->iboe.nb.notifier_call = NULL;
}

- if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) {
- mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
- ibdev->steer_qpn_count);
- kfree(ibdev->ib_uc_qpns_bitmap);
- }
+ mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
+ ibdev->steer_qpn_count);
+ kfree(ibdev->ib_uc_qpns_bitmap);

if (ibdev->iboe.nb_inet.notifier_call) {
if (unregister_inetaddr_notifier(&ibdev->iboe.nb_inet))
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index a3395e4cc721..3629081c3c05 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -956,7 +956,12 @@ static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
if (ucmd.reserved0 || ucmd.reserved1)
return -EINVAL;

- umem = ib_umem_get(context, ucmd.buf_addr, entries * ucmd.cqe_size,
+ /* check multiplication overflow */
+ if (ucmd.cqe_size && SIZE_MAX / ucmd.cqe_size <= entries - 1)
+ return -EINVAL;
+
+ umem = ib_umem_get(context, ucmd.buf_addr,
+ (size_t)ucmd.cqe_size * entries,
IB_ACCESS_LOCAL_WRITE, 1);
if (IS_ERR(umem)) {
err = PTR_ERR(umem);
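
The new check above catches the case where ucmd.cqe_size * entries overflows
before the product is handed to ib_umem_get().  The division trick it relies
on, shown as a tiny stand-alone sketch (hypothetical helper, illustration
only):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * True if a * b would overflow size_t.  With a != 0, the product fits
 * exactly when b <= SIZE_MAX / a -- the same test the cq.c hunk applies
 * to cqe_size and entries.
 */
static bool mul_overflows(size_t a, size_t b)
{
	return a != 0 && b > SIZE_MAX / a;
}

int main(void)
{
	printf("%d\n", mul_overflows(64, 1024));		/* 0 */
	printf("%d\n", mul_overflows(SIZE_MAX / 2, 3));		/* 1 */
	return 0;
}
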
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 74cb286e6012..8761cf4c003f 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -3050,12 +3050,9 @@ int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
int err;

err = mlx5_core_xrcd_dealloc(&dev->mdev, xrcdn);
- if (err) {
+ if (err)
mlx5_ib_warn(dev, "failed to dealloc xrcdn 0x%x\n", xrcdn);
- return err;
- }

kfree(xrcd);
-
return 0;
}
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index 7c2fe5235ae2..cbfed00189ad 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -234,8 +234,8 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
struct mlx5_ib_srq *srq;
- int desc_size;
- int buf_size;
+ size_t desc_size;
+ size_t buf_size;
int err;
struct mlx5_create_srq_mbox_in *uninitialized_var(in);
int uninitialized_var(inlen);
@@ -261,15 +261,18 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,

desc_size = sizeof(struct mlx5_wqe_srq_next_seg) +
srq->msrq.max_gs * sizeof(struct mlx5_wqe_data_seg);
+ if (desc_size == 0 || srq->msrq.max_gs > desc_size)
+ return ERR_PTR(-EINVAL);
desc_size = roundup_pow_of_two(desc_size);
- desc_size = max_t(int, 32, desc_size);
+ desc_size = max_t(size_t, 32, desc_size);
+ if (desc_size < sizeof(struct mlx5_wqe_srq_next_seg))
+ return ERR_PTR(-EINVAL);
srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) /
sizeof(struct mlx5_wqe_data_seg);
srq->msrq.wqe_shift = ilog2(desc_size);
buf_size = srq->msrq.max * desc_size;
- mlx5_ib_dbg(dev, "desc_size 0x%x, req wr 0x%x, srq size 0x%x, max_gs 0x%x, max_avail_gather 0x%x\n",
- desc_size, init_attr->attr.max_wr, srq->msrq.max, srq->msrq.max_gs,
- srq->msrq.max_avail_gather);
+ if (buf_size < desc_size)
+ return ERR_PTR(-EINVAL);

if (pd->uobject)
err = create_srq_user(pd, srq, &in, udata, buf_size, &inlen);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_fs.c b/drivers/infiniband/ulp/ipoib/ipoib_fs.c
index 09396bd7b02d..63be3bcdc0e3 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_fs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_fs.c
@@ -281,8 +281,6 @@ void ipoib_delete_debug_files(struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);

- WARN_ONCE(!priv->mcg_dentry, "null mcg debug file\n");
- WARN_ONCE(!priv->path_dentry, "null path debug file\n");
debugfs_remove(priv->mcg_dentry);
debugfs_remove(priv->path_dentry);
priv->mcg_dentry = priv->path_dentry = NULL;
diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c
index 8d2e19e81e1e..fde979216d58 100644
--- a/drivers/input/keyboard/matrix_keypad.c
+++ b/drivers/input/keyboard/matrix_keypad.c
@@ -216,8 +216,10 @@ static void matrix_keypad_stop(struct input_dev *dev)
{
struct matrix_keypad *keypad = input_get_drvdata(dev);

+ spin_lock_irq(&keypad->lock);
keypad->stopped = true;
- mb();
+ spin_unlock_irq(&keypad->lock);
+
flush_work(&keypad->work.work);
/*
* matrix_keypad_scan() will leave IRQs enabled;
diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
index d4f33992ad8c..62b4a329bfe4 100644
--- a/drivers/input/touchscreen/edt-ft5x06.c
+++ b/drivers/input/touchscreen/edt-ft5x06.c
@@ -491,6 +491,12 @@ static int edt_ft5x06_factory_mode(struct edt_ft5x06_ts_data *tsdata)
int ret;
int error;

+ if (tsdata->version != M06) {
+ dev_err(&client->dev,
+ "No factory mode support for non-M06 devices\n");
+ return -EINVAL;
+ }
+
disable_irq(client->irq);

if (!tsdata->raw_buffer) {
@@ -504,9 +510,6 @@ static int edt_ft5x06_factory_mode(struct edt_ft5x06_ts_data *tsdata)
}

/* mode register is 0x3c when in the work mode */
- if (tsdata->version == M09)
- goto m09_out;
-
error = edt_ft5x06_register_write(tsdata, WORK_REGISTER_OPMODE, 0x03);
if (error) {
dev_err(&client->dev,
@@ -539,11 +542,6 @@ static int edt_ft5x06_factory_mode(struct edt_ft5x06_ts_data *tsdata)
enable_irq(client->irq);

return error;
-
-m09_out:
- dev_err(&client->dev, "No factory mode support for M09\n");
- return -EINVAL;
-
}

static int edt_ft5x06_work_mode(struct edt_ft5x06_ts_data *tsdata)
diff --git a/drivers/input/touchscreen/mms114.c b/drivers/input/touchscreen/mms114.c
index 372bbf7658fe..1f26ac1c0134 100644
--- a/drivers/input/touchscreen/mms114.c
+++ b/drivers/input/touchscreen/mms114.c
@@ -592,4 +592,4 @@ module_i2c_driver(mms114_driver);
/* Module information */
MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@xxxxxxxxxxx>");
MODULE_DESCRIPTION("MELFAS mms114 Touchscreen driver");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 8498d9488bc0..9c574978c400 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -964,6 +964,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
uint32_t rtime = cpu_to_le32(get_seconds());
struct uuid_entry *u;
char buf[BDEVNAME_SIZE];
+ struct cached_dev *exist_dc, *t;

bdevname(dc->bdev, buf);

@@ -987,6 +988,16 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
return -EINVAL;
}

+ /* Check whether already attached */
+ list_for_each_entry_safe(exist_dc, t, &c->cached_devs, list) {
+ if (!memcmp(dc->sb.uuid, exist_dc->sb.uuid, 16)) {
+ pr_err("Tried to attach %s but duplicate UUID already attached",
+ buf);
+
+ return -EINVAL;
+ }
+ }
+
u = uuid_find(c, dc->sb.uuid);

if (u &&
@@ -1203,7 +1214,7 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page,

return;
err:
- pr_notice("error opening %s: %s", bdevname(bdev, name), err);
+ pr_notice("error %s: %s", bdevname(bdev, name), err);
bcache_device_stop(&dc->disk);
}

@@ -1861,6 +1872,8 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
const char *err = NULL; /* must be set for any error case */
int ret = 0;

+ bdevname(bdev, name);
+
memcpy(&ca->sb, sb, sizeof(struct cache_sb));
ca->bdev = bdev;
ca->bdev->bd_holder = ca;
@@ -1871,11 +1884,12 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
ca->sb_bio.bi_io_vec[0].bv_page = sb_page;
get_page(sb_page);

- if (blk_queue_discard(bdev_get_queue(ca->bdev)))
+ if (blk_queue_discard(bdev_get_queue(bdev)))
ca->discard = CACHE_DISCARD(&ca->sb);

ret = cache_alloc(sb, ca);
if (ret != 0) {
+ blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
if (ret == -ENOMEM)
err = "cache_alloc(): -ENOMEM";
else
@@ -1898,14 +1912,14 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
goto out;
}

- pr_info("registered cache device %s", bdevname(bdev, name));
+ pr_info("registered cache device %s", name);

out:
kobject_put(&ca->kobj);

err:
if (err)
- pr_notice("error opening %s: %s", bdevname(bdev, name), err);
+ pr_notice("error %s: %s", name, err);

return ret;
}
@@ -1994,6 +2008,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
if (err)
goto err_close;

+ err = "failed to register device";
if (SB_IS_BDEV(sb)) {
struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
if (!dc)
@@ -2008,7 +2023,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
goto err_close;

if (register_cache(sb, sb_page, bdev, ca) != 0)
- goto err_close;
+ goto err;
}
out:
if (sb_page)
@@ -2021,7 +2036,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
err_close:
blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
err:
- pr_info("error opening %s: %s", path, err);
+ pr_info("error %s: %s", path, err);
ret = -EINVAL;
goto out;
}
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 5af9a56e5018..18b0c80fc447 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -2746,7 +2746,8 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
for (m = 0; m < conf->copies; m++) {
int dev = r10_bio->devs[m].devnum;
rdev = conf->mirrors[dev].rdev;
- if (r10_bio->devs[m].bio == NULL)
+ if (r10_bio->devs[m].bio == NULL ||
+ r10_bio->devs[m].bio->bi_end_io == NULL)
continue;
if (test_bit(BIO_UPTODATE,
&r10_bio->devs[m].bio->bi_flags)) {
@@ -2762,7 +2763,8 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
md_error(conf->mddev, rdev);
}
rdev = conf->mirrors[dev].replacement;
- if (r10_bio->devs[m].repl_bio == NULL)
+ if (r10_bio->devs[m].repl_bio == NULL ||
+ r10_bio->devs[m].repl_bio->bi_end_io == NULL)
continue;
if (test_bit(BIO_UPTODATE,
&r10_bio->devs[m].repl_bio->bi_flags)) {
diff --git a/drivers/media/pci/bt8xx/bt878.c b/drivers/media/pci/bt8xx/bt878.c
index d0c281f41a0a..4cf01546afe0 100644
--- a/drivers/media/pci/bt8xx/bt878.c
+++ b/drivers/media/pci/bt8xx/bt878.c
@@ -433,8 +433,7 @@ static int bt878_probe(struct pci_dev *dev, const struct pci_device_id *pci_id)
bt878_num);
if (bt878_num >= BT878_MAX) {
printk(KERN_ERR "bt878: Too many devices inserted\n");
- result = -ENOMEM;
- goto fail0;
+ return -ENOMEM;
}
if (pci_enable_device(dev))
return -EIO;
diff --git a/drivers/media/platform/exynos4-is/fimc-isp.c b/drivers/media/platform/exynos4-is/fimc-isp.c
index be62d6b9ac48..9cf9d8484657 100644
--- a/drivers/media/platform/exynos4-is/fimc-isp.c
+++ b/drivers/media/platform/exynos4-is/fimc-isp.c
@@ -366,16 +366,16 @@ static int fimc_isp_subdev_s_power(struct v4l2_subdev *sd, int on)
static int fimc_isp_subdev_open(struct v4l2_subdev *sd,
struct v4l2_subdev_fh *fh)
{
- struct v4l2_mbus_framefmt fmt;
struct v4l2_mbus_framefmt *format;
+ struct v4l2_mbus_framefmt fmt = {
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .code = fimc_isp_formats[0].mbus_code,
+ .width = DEFAULT_PREVIEW_STILL_WIDTH + FIMC_ISP_CAC_MARGIN_WIDTH,
+ .height = DEFAULT_PREVIEW_STILL_HEIGHT + FIMC_ISP_CAC_MARGIN_HEIGHT,
+ .field = V4L2_FIELD_NONE,
+ };

format = v4l2_subdev_get_try_format(fh, FIMC_ISP_SD_PAD_SINK);
-
- fmt.colorspace = V4L2_COLORSPACE_SRGB;
- fmt.code = fimc_isp_formats[0].mbus_code;
- fmt.width = DEFAULT_PREVIEW_STILL_WIDTH + FIMC_ISP_CAC_MARGIN_WIDTH;
- fmt.height = DEFAULT_PREVIEW_STILL_HEIGHT + FIMC_ISP_CAC_MARGIN_HEIGHT;
- fmt.field = V4L2_FIELD_NONE;
*format = fmt;

format = v4l2_subdev_get_try_format(fh, FIMC_ISP_SD_PAD_SRC_FIFO);
diff --git a/drivers/media/usb/cpia2/cpia2_v4l.c b/drivers/media/usb/cpia2/cpia2_v4l.c
index d5d42b6e94be..82c9470833ac 100644
--- a/drivers/media/usb/cpia2/cpia2_v4l.c
+++ b/drivers/media/usb/cpia2/cpia2_v4l.c
@@ -812,7 +812,7 @@ static int cpia2_querybuf(struct file *file, void *fh, struct v4l2_buffer *buf)
struct camera_data *cam = video_drvdata(file);

if(buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
- buf->index > cam->num_frames)
+ buf->index >= cam->num_frames)
return -EINVAL;

buf->m.offset = cam->buffers[buf->index].data - cam->frame_buffer;
@@ -863,7 +863,7 @@ static int cpia2_qbuf(struct file *file, void *fh, struct v4l2_buffer *buf)

if(buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
buf->memory != V4L2_MEMORY_MMAP ||
- buf->index > cam->num_frames)
+ buf->index >= cam->num_frames)
return -EINVAL;

DBG("QBUF #%d\n", buf->index);
diff --git a/drivers/media/usb/dvb-usb-v2/lmedm04.c b/drivers/media/usb/dvb-usb-v2/lmedm04.c
index d2a4e6d40bf0..63d5f190b3e7 100644
--- a/drivers/media/usb/dvb-usb-v2/lmedm04.c
+++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c
@@ -434,18 +434,23 @@ static int lme2510_pid_filter(struct dvb_usb_adapter *adap, int index, u16 pid,

static int lme2510_return_status(struct dvb_usb_device *d)
{
- int ret = 0;
+ int ret;
u8 *data;

- data = kzalloc(10, GFP_KERNEL);
+ data = kzalloc(6, GFP_KERNEL);
if (!data)
return -ENOMEM;

- ret |= usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0),
- 0x06, 0x80, 0x0302, 0x00, data, 0x0006, 200);
- info("Firmware Status: %x (%x)", ret , data[2]);
+ ret = usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0),
+ 0x06, 0x80, 0x0302, 0x00,
+ data, 0x6, 200);
+ if (ret != 6)
+ ret = -EINVAL;
+ else
+ ret = data[2];
+
+ info("Firmware Status: %6ph", data);

- ret = (ret < 0) ? -ENODEV : data[2];
kfree(data);
return ret;
}
@@ -1110,8 +1115,6 @@ static int dm04_lme2510_frontend_attach(struct dvb_usb_adapter *adap)

if (adap->fe[0]) {
info("FE Found M88RS2000");
- dvb_attach(ts2020_attach, adap->fe[0], &ts2020_config,
- &d->i2c_adap);
st->i2c_tuner_gate_w = 5;
st->i2c_tuner_gate_r = 5;
st->i2c_tuner_addr = 0x60;
@@ -1174,17 +1177,18 @@ static int dm04_lme2510_tuner(struct dvb_usb_adapter *adap)
ret = st->tuner_config;
break;
case TUNER_RS2000:
- ret = st->tuner_config;
+ if (dvb_attach(ts2020_attach, adap->fe[0],
+ &ts2020_config, &d->i2c_adap))
+ ret = st->tuner_config;
break;
default:
break;
}

- if (ret)
+ if (ret) {
info("TUN Found %s tuner", tun_msg[ret]);
- else {
- info("TUN No tuner found --- resetting device");
- lme_coldreset(d);
+ } else {
+ info("TUN No tuner found");
return -ENODEV;
}

@@ -1228,6 +1232,7 @@ static int lme2510_get_adapter_count(struct dvb_usb_device *d)
static int lme2510_identify_state(struct dvb_usb_device *d, const char **name)
{
struct lme2510_state *st = d->priv;
+ int status;

usb_reset_configuration(d->udev);

@@ -1236,12 +1241,16 @@ static int lme2510_identify_state(struct dvb_usb_device *d, const char **name)

st->dvb_usb_lme2510_firmware = dvb_usb_lme2510_firmware;

- if (lme2510_return_status(d) == 0x44) {
+ status = lme2510_return_status(d);
+ if (status == 0x44) {
*name = lme_firmware_switch(d, 0);
return COLD;
}

- return 0;
+ if (status != 0x47)
+ return -EINVAL;
+
+ return WARM;
}

static int lme2510_get_stream_config(struct dvb_frontend *fe, u8 *ts_type,
diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
index a1c641e18362..6f4bf0ec43d3 100644
--- a/drivers/media/usb/dvb-usb/cxusb.c
+++ b/drivers/media/usb/dvb-usb/cxusb.c
@@ -816,6 +816,8 @@ static int dvico_bluebird_xc2028_callback(void *ptr, int component,
case XC2028_RESET_CLK:
deb_info("%s: XC2028_RESET_CLK %d\n", __func__, arg);
break;
+ case XC2028_I2C_FLUSH:
+ break;
default:
deb_info("%s: unknown command %d, arg %d\n", __func__,
command, arg);
diff --git a/drivers/media/usb/dvb-usb/dib0700_devices.c b/drivers/media/usb/dvb-usb/dib0700_devices.c
index 10e0db8d1850..4d99682a207f 100644
--- a/drivers/media/usb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/usb/dvb-usb/dib0700_devices.c
@@ -405,6 +405,7 @@ static int stk7700ph_xc3028_callback(void *ptr, int component,
dib7000p_set_gpio(adap->fe_adap[0].fe, 8, 0, 1);
break;
case XC2028_RESET_CLK:
+ case XC2028_I2C_FLUSH:
break;
default:
err("%s: unknown command %d, arg %d\n", __func__,
diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c
index d66a2f24f6b3..47eb9181b49f 100644
--- a/drivers/misc/lkdtm.c
+++ b/drivers/misc/lkdtm.c
@@ -566,7 +566,7 @@ static int lkdtm_register_cpoint(enum cname which)
lkdtm.entry = (kprobe_opcode_t*) jp_do_irq;
break;
case CN_INT_HW_IRQ_EN:
- lkdtm.kp.symbol_name = "handle_IRQ_event";
+ lkdtm.kp.symbol_name = "handle_irq_event";
lkdtm.entry = (kprobe_opcode_t*) jp_handle_irq_event;
break;
case CN_INT_TASKLET_ENTRY:
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 26cac35d1adc..20a32d14a636 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -67,6 +67,9 @@ MODULE_ALIAS("mmc:block");
#define PACKED_CMD_VER 0x01
#define PACKED_CMD_WR 0x02

+#define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
+#define MMC_EXTRACT_VALUE_FROM_ARG(x) ((x & 0x0000FF00) >> 8)
+
static DEFINE_MUTEX(block_mutex);

/*
@@ -568,6 +571,24 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
goto cmd_rel_host;
}

+ /*
+ * Make sure the cache of the PARTITION_CONFIG register and
+ * PARTITION_ACCESS bits is updated in case the ioctl ext_csd write
+ * changed it successfully.
+ */
+ if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_PART_CONFIG) &&
+ (cmd.opcode == MMC_SWITCH)) {
+ struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev);
+ u8 value = MMC_EXTRACT_VALUE_FROM_ARG(cmd.arg);
+
+ /*
+ * Update cache so the next mmc_blk_part_switch call operates
+ * on up-to-date data.
+ */
+ card->ext_csd.part_config = value;
+ main_md->part_curr = value & EXT_CSD_PART_CONFIG_ACC_MASK;
+ }
+
/*
* According to the SD specs, some commands require a delay after
* issuing the command.
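
The two macros added above decode the MMC_SWITCH argument: the EXT_CSD byte
index sits in bits 23:16 and the value to write in bits 15:8, which is what
lets the driver mirror a successful PARTITION_CONFIG write into its cached
copy.  A stand-alone sketch of the same decoding (userspace names,
illustration only):

#include <stdint.h>
#include <stdio.h>

#define EXTRACT_INDEX_FROM_ARG(x)	(((x) & 0x00FF0000u) >> 16)
#define EXTRACT_VALUE_FROM_ARG(x)	(((x) & 0x0000FF00u) >> 8)

int main(void)
{
	/* access mode 0x03, index 179 (PARTITION_CONFIG), value 0x48 */
	uint32_t arg = (0x03u << 24) | (179u << 16) | (0x48u << 8);

	printf("index %u\n", (unsigned)EXTRACT_INDEX_FROM_ARG(arg));	/* 179 */
	printf("value 0x%02x\n", (unsigned)EXTRACT_VALUE_FROM_ARG(arg));/* 0x48 */
	return 0;
}
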
diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c
index 0fbc53ac7eae..9bb46026b7da 100644
--- a/drivers/mmc/host/dw_mmc-exynos.c
+++ b/drivers/mmc/host/dw_mmc-exynos.c
@@ -394,6 +394,7 @@ static unsigned long exynos_dwmmc_caps[4] = {

static const struct dw_mci_drv_data exynos_drv_data = {
.caps = exynos_dwmmc_caps,
+ .num_caps = ARRAY_SIZE(exynos_dwmmc_caps),
.init = dw_mci_exynos_priv_init,
.setup_clock = dw_mci_exynos_setup_clock,
.prepare_command = dw_mci_exynos_prepare_command,
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 609f0ab6891f..6ddc7a187bf7 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -2044,12 +2044,47 @@ static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
}
#endif /* CONFIG_OF */

+static int dw_mci_init_slot_caps(struct dw_mci_slot *slot)
+{
+ struct dw_mci *host = slot->host;
+ const struct dw_mci_drv_data *drv_data = host->drv_data;
+ struct mmc_host *mmc = slot->mmc;
+ int ctrl_id;
+
+ if (host->pdata->caps)
+ mmc->caps = host->pdata->caps;
+
+ if (host->pdata->pm_caps)
+ mmc->pm_caps = host->pdata->pm_caps;
+
+ if (host->dev->of_node) {
+ ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
+ if (ctrl_id < 0)
+ ctrl_id = 0;
+ } else {
+ ctrl_id = to_platform_device(host->dev)->id;
+ }
+
+ if (drv_data && drv_data->caps) {
+ if (ctrl_id >= drv_data->num_caps) {
+ dev_err(host->dev, "invalid controller id %d\n",
+ ctrl_id);
+ return -EINVAL;
+ }
+ mmc->caps |= drv_data->caps[ctrl_id];
+ }
+
+ if (host->pdata->caps2)
+ mmc->caps2 = host->pdata->caps2;
+
+ return 0;
+}
+
static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
{
struct mmc_host *mmc;
struct dw_mci_slot *slot;
- const struct dw_mci_drv_data *drv_data = host->drv_data;
- int ctrl_id, ret;
+ int ret;
u32 freq[2];

mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
@@ -2076,27 +2111,12 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)

mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

- if (host->pdata->caps)
- mmc->caps = host->pdata->caps;
-
- if (host->pdata->pm_caps)
- mmc->pm_caps = host->pdata->pm_caps;
-
- if (host->dev->of_node) {
- ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
- if (ctrl_id < 0)
- ctrl_id = 0;
- } else {
- ctrl_id = to_platform_device(host->dev)->id;
- }
- if (drv_data && drv_data->caps)
- mmc->caps |= drv_data->caps[ctrl_id];
-
- if (host->pdata->caps2)
- mmc->caps2 = host->pdata->caps2;
-
mmc_of_parse(mmc);

+ ret = dw_mci_init_slot_caps(slot);
+ if (ret)
+ goto err_host_allocated;
+
if (host->pdata->blk_settings) {
mmc->max_segs = host->pdata->blk_settings->max_segs;
mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
@@ -2127,7 +2147,7 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)

ret = mmc_add_host(mmc);
if (ret)
- goto err_setup_bus;
+ goto err_host_allocated;

#if defined(CONFIG_DEBUG_FS)
dw_mci_init_debugfs(slot);
@@ -2138,9 +2158,9 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)

return 0;

-err_setup_bus:
+err_host_allocated:
mmc_free_host(mmc);
- return -EINVAL;
+ return ret;
}

static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
index 738fa241d058..8814666961c8 100644
--- a/drivers/mmc/host/dw_mmc.h
+++ b/drivers/mmc/host/dw_mmc.h
@@ -237,6 +237,7 @@ struct dw_mci_tuning_data {
/**
* dw_mci driver data - dw-mshc implementation specific driver data.
* @caps: mmc subsystem specified capabilities of the controller(s).
+ * @num_caps: number of capabilities specified by @caps.
* @init: early implementation specific initialization.
* @setup_clock: implementation specific clock configuration.
* @prepare_command: handle CMD register extensions.
@@ -250,6 +251,7 @@ struct dw_mci_tuning_data {
*/
struct dw_mci_drv_data {
unsigned long *caps;
+ u32 num_caps;
int (*init)(struct dw_mci *host);
int (*setup_clock)(struct dw_mci *host);
void (*prepare_command)(struct dw_mci *host, u32 *cmdr);
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index 56389ae09222..0a14dcb8e505 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -265,8 +265,34 @@ static void sdhci_pci_int_hw_reset(struct sdhci_host *host)
usleep_range(300, 1000);
}

+static int intel_execute_tuning(struct mmc_host *mmc, u32 opcode)
+{
+ int err = sdhci_execute_tuning(mmc, opcode);
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ if (err)
+ return err;
+
+ /*
+ * Tuning can leave the IP in an active state (Buffer Read Enable bit
+ * set) which prevents the entry to low power states (i.e. S0i3). Data
+ * reset will clear it.
+ */
+ sdhci_reset(host, SDHCI_RESET_DATA);
+
+ return 0;
+}
+
+static void byt_probe_slot(struct sdhci_pci_slot *slot)
+{
+ struct mmc_host_ops *ops = &slot->host->mmc_host_ops;
+
+ ops->execute_tuning = intel_execute_tuning;
+}
+
static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
{
+ byt_probe_slot(slot);
slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
MMC_CAP_HW_RESET;
slot->host->mmc->caps2 |= MMC_CAP2_HC_ERASE_SZ;
@@ -278,6 +304,7 @@ static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)

static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
{
+ byt_probe_slot(slot);
slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE;
return 0;
}
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index d79b8c324934..393444edc78e 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -52,7 +52,6 @@ static unsigned int debug_quirks2;
static void sdhci_finish_data(struct sdhci_host *);

static void sdhci_finish_command(struct sdhci_host *);
-static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode);
static void sdhci_tuning_timer(unsigned long data);
static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);

@@ -1840,7 +1839,7 @@ static int sdhci_card_busy(struct mmc_host *mmc)
return !(present_state & SDHCI_DATA_LVL_MASK);
}

-static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
+int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
struct sdhci_host *host = mmc_priv(mmc);
u16 ctrl;
@@ -2054,6 +2053,7 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)

return err;
}
+EXPORT_SYMBOL_GPL(sdhci_execute_tuning);


static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
@@ -2781,6 +2781,8 @@ struct sdhci_host *sdhci_alloc_host(struct device *dev,

host = mmc_priv(mmc);
host->mmc = mmc;
+ host->mmc_host_ops = sdhci_ops;
+ mmc->ops = &host->mmc_host_ops;

return host;
}
@@ -2939,7 +2941,6 @@ int sdhci_add_host(struct sdhci_host *host)
/*
* Set host parameters.
*/
- mmc->ops = &sdhci_ops;
mmc->f_max = host->max_clk;
if (host->ops->get_min_clock)
mmc->f_min = host->ops->get_min_clock(host);
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 4a5cd5e3fa3e..82cbb526baa5 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -402,6 +402,7 @@ void sdhci_set_clock(struct sdhci_host *host, unsigned int clock);
void sdhci_set_bus_width(struct sdhci_host *host, int width);
void sdhci_reset(struct sdhci_host *host, u8 mask);
void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing);
+int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode);

#ifdef CONFIG_PM
extern int sdhci_suspend_host(struct sdhci_host *host);
diff --git a/drivers/mtd/chips/jedec_probe.c b/drivers/mtd/chips/jedec_probe.c
index 7c0b27d132b1..b479bd81120b 100644
--- a/drivers/mtd/chips/jedec_probe.c
+++ b/drivers/mtd/chips/jedec_probe.c
@@ -1889,6 +1889,8 @@ static inline u32 jedec_read_mfr(struct map_info *map, uint32_t base,
do {
uint32_t ofs = cfi_build_cmd_addr(0 + (bank << 8), map, cfi);
mask = (1 << (cfi->device_type * 8)) - 1;
+ if (ofs >= map->size)
+ return 0;
result = map_read(map, base + ofs);
bank++;
} while ((result.x[0] & mask) == CFI_MFR_CONTINUATION);
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 857e6cc4a91e..fe3d8234b9ad 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -1837,6 +1837,7 @@ static int nand_write_oob_syndrome(struct mtd_info *mtd,
static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops)
{
+ unsigned int max_bitflips = 0;
int page, realpage, chipnr;
struct nand_chip *chip = mtd->priv;
struct mtd_ecc_stats stats;
@@ -1897,6 +1898,8 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
nand_wait_ready(mtd);
}

+ max_bitflips = max_t(unsigned int, max_bitflips, ret);
+
readlen -= len;
if (!readlen)
break;
@@ -1922,7 +1925,7 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
if (mtd->ecc_stats.failed - stats.failed)
return -EBADMSG;

- return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
+ return max_bitflips;
}

/**
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index 96131eb34c9f..ff5f1ac79581 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -308,6 +308,12 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
vol->last_eb_bytes = vol->usable_leb_size;
}

+ /* Make volume "available" before it becomes accessible via sysfs */
+ spin_lock(&ubi->volumes_lock);
+ ubi->volumes[vol_id] = vol;
+ ubi->vol_count += 1;
+ spin_unlock(&ubi->volumes_lock);
+
/* Register character device for the volume */
cdev_init(&vol->cdev, &ubi_vol_cdev_operations);
vol->cdev.owner = THIS_MODULE;
@@ -350,11 +356,6 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
if (err)
goto out_sysfs;

- spin_lock(&ubi->volumes_lock);
- ubi->volumes[vol_id] = vol;
- ubi->vol_count += 1;
- spin_unlock(&ubi->volumes_lock);
-
ubi_volume_notify(ubi, vol, UBI_VOLUME_ADDED);
self_check_volumes(ubi);
return err;
@@ -374,6 +375,10 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
out_cdev:
cdev_del(&vol->cdev);
out_mapping:
+ spin_lock(&ubi->volumes_lock);
+ ubi->volumes[vol_id] = NULL;
+ ubi->vol_count -= 1;
+ spin_unlock(&ubi->volumes_lock);
if (do_free)
kfree(vol->eba_tbl);
out_acc:
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 371a973e718e..2b8d1e46bc5e 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -1925,8 +1925,10 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
cond_resched();

e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
- if (!e)
+ if (!e) {
+ err = -ENOMEM;
goto out_free;
+ }

e->pnum = aeb->pnum;
e->ec = aeb->ec;
@@ -1966,8 +1968,10 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
cond_resched();

e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
- if (!e)
+ if (!e) {
+ err = -ENOMEM;
goto out_free;
+ }

e->pnum = aeb->pnum;
e->ec = aeb->ec;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 41503ce1e3b7..d87c0d275aba 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1414,39 +1414,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
goto err_close;
}

- /* If the mode uses primary, then the following is handled by
- * bond_change_active_slave().
- */
- if (!bond_uses_primary(bond)) {
- /* set promiscuity level to new slave */
- if (bond_dev->flags & IFF_PROMISC) {
- res = dev_set_promiscuity(slave_dev, 1);
- if (res)
- goto err_close;
- }
-
- /* set allmulti level to new slave */
- if (bond_dev->flags & IFF_ALLMULTI) {
- res = dev_set_allmulti(slave_dev, 1);
- if (res)
- goto err_close;
- }
-
- netif_addr_lock_bh(bond_dev);
-
- dev_mc_sync_multiple(slave_dev, bond_dev);
- dev_uc_sync_multiple(slave_dev, bond_dev);
-
- netif_addr_unlock_bh(bond_dev);
- }
-
- if (BOND_MODE(bond) == BOND_MODE_8023AD) {
- /* add lacpdu mc addr to mc list */
- u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
-
- dev_mc_add(slave_dev, lacpdu_multicast);
- }
-
res = vlan_vids_add_by_dev(slave_dev, bond_dev);
if (res) {
pr_err("%s: Error: Couldn't add bond vlan ids to %s\n",
@@ -1598,6 +1565,40 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
goto err_upper_unlink;
}

+ /* If the mode uses primary, then the following is handled by
+ * bond_change_active_slave().
+ */
+ if (!bond_uses_primary(bond)) {
+ /* set promiscuity level to new slave */
+ if (bond_dev->flags & IFF_PROMISC) {
+ res = dev_set_promiscuity(slave_dev, 1);
+ if (res)
+ goto err_sysfs_del;
+ }
+
+ /* set allmulti level to new slave */
+ if (bond_dev->flags & IFF_ALLMULTI) {
+ res = dev_set_allmulti(slave_dev, 1);
+ if (res) {
+ if (bond_dev->flags & IFF_PROMISC)
+ dev_set_promiscuity(slave_dev, -1);
+ goto err_sysfs_del;
+ }
+ }
+
+ netif_addr_lock_bh(bond_dev);
+ dev_mc_sync_multiple(slave_dev, bond_dev);
+ dev_uc_sync_multiple(slave_dev, bond_dev);
+ netif_addr_unlock_bh(bond_dev);
+
+ if (BOND_MODE(bond) == BOND_MODE_8023AD) {
+ /* add lacpdu mc addr to mc list */
+ u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
+
+ dev_mc_add(slave_dev, lacpdu_multicast);
+ }
+ }
+
bond->slave_cnt++;
bond_compute_features(bond);
bond_set_carrier(bond);
@@ -1619,6 +1620,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
return 0;

/* Undo stages on error */
+err_sysfs_del:
+ bond_sysfs_slave_del(new_slave);
+
err_upper_unlink:
bond_upper_dev_unlink(bond_dev, slave_dev);

@@ -1626,9 +1630,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
netdev_rx_handler_unregister(slave_dev);

err_detach:
- if (!bond_uses_primary(bond))
- bond_hw_addr_flush(bond_dev, slave_dev);
-
vlan_vids_del_by_dev(slave_dev, bond_dev);
if (bond->primary_slave == new_slave)
bond->primary_slave = NULL;
diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c
index d8379278d648..b8f4614fb02f 100644
--- a/drivers/net/can/cc770/cc770.c
+++ b/drivers/net/can/cc770/cc770.c
@@ -390,37 +390,23 @@ static int cc770_get_berr_counter(const struct net_device *dev,
return 0;
}

-static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static void cc770_tx(struct net_device *dev, int mo)
{
struct cc770_priv *priv = netdev_priv(dev);
- struct net_device_stats *stats = &dev->stats;
- struct can_frame *cf = (struct can_frame *)skb->data;
- unsigned int mo = obj2msgobj(CC770_OBJ_TX);
+ struct can_frame *cf = (struct can_frame *)priv->tx_skb->data;
u8 dlc, rtr;
u32 id;
int i;

- if (can_dropped_invalid_skb(dev, skb))
- return NETDEV_TX_OK;
-
- if ((cc770_read_reg(priv,
- msgobj[mo].ctrl1) & TXRQST_UNC) == TXRQST_SET) {
- netdev_err(dev, "TX register is still occupied!\n");
- return NETDEV_TX_BUSY;
- }
-
- netif_stop_queue(dev);
-
dlc = cf->can_dlc;
id = cf->can_id;
- if (cf->can_id & CAN_RTR_FLAG)
- rtr = 0;
- else
- rtr = MSGCFG_DIR;
+ rtr = cf->can_id & CAN_RTR_FLAG ? 0 : MSGCFG_DIR;
+
+ cc770_write_reg(priv, msgobj[mo].ctrl0,
+ MSGVAL_RES | TXIE_RES | RXIE_RES | INTPND_RES);
cc770_write_reg(priv, msgobj[mo].ctrl1,
RMTPND_RES | TXRQST_RES | CPUUPD_SET | NEWDAT_RES);
- cc770_write_reg(priv, msgobj[mo].ctrl0,
- MSGVAL_SET | TXIE_SET | RXIE_RES | INTPND_RES);
+
if (id & CAN_EFF_FLAG) {
id &= CAN_EFF_MASK;
cc770_write_reg(priv, msgobj[mo].config,
@@ -439,22 +425,30 @@ static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev)
for (i = 0; i < dlc; i++)
cc770_write_reg(priv, msgobj[mo].data[i], cf->data[i]);

- /* Store echo skb before starting the transfer */
- can_put_echo_skb(skb, dev, 0);
-
cc770_write_reg(priv, msgobj[mo].ctrl1,
- RMTPND_RES | TXRQST_SET | CPUUPD_RES | NEWDAT_UNC);
+ RMTPND_UNC | TXRQST_SET | CPUUPD_RES | NEWDAT_UNC);
+ cc770_write_reg(priv, msgobj[mo].ctrl0,
+ MSGVAL_SET | TXIE_SET | RXIE_SET | INTPND_UNC);
+}

- stats->tx_bytes += dlc;
+static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct cc770_priv *priv = netdev_priv(dev);
+ unsigned int mo = obj2msgobj(CC770_OBJ_TX);

+ if (can_dropped_invalid_skb(dev, skb))
+ return NETDEV_TX_OK;

- /*
- * HM: We had some cases of repeated IRQs so make sure the
- * INT is acknowledged I know it's already further up, but
- * doing again fixed the issue
- */
- cc770_write_reg(priv, msgobj[mo].ctrl0,
- MSGVAL_UNC | TXIE_UNC | RXIE_UNC | INTPND_RES);
+ netif_stop_queue(dev);
+
+ if ((cc770_read_reg(priv,
+ msgobj[mo].ctrl1) & TXRQST_UNC) == TXRQST_SET) {
+ netdev_err(dev, "TX register is still occupied!\n");
+ return NETDEV_TX_BUSY;
+ }
+
+ priv->tx_skb = skb;
+ cc770_tx(dev, mo);

return NETDEV_TX_OK;
}
@@ -679,19 +673,46 @@ static void cc770_tx_interrupt(struct net_device *dev, unsigned int o)
struct cc770_priv *priv = netdev_priv(dev);
struct net_device_stats *stats = &dev->stats;
unsigned int mo = obj2msgobj(o);
+ struct can_frame *cf;
+ u8 ctrl1;
+
+ ctrl1 = cc770_read_reg(priv, msgobj[mo].ctrl1);

- /* Nothing more to send, switch off interrupts */
cc770_write_reg(priv, msgobj[mo].ctrl0,
MSGVAL_RES | TXIE_RES | RXIE_RES | INTPND_RES);
- /*
- * We had some cases of repeated IRQ so make sure the
- * INT is acknowledged
+ cc770_write_reg(priv, msgobj[mo].ctrl1,
+ RMTPND_RES | TXRQST_RES | MSGLST_RES | NEWDAT_RES);
+
+ if (unlikely(!priv->tx_skb)) {
+ netdev_err(dev, "missing tx skb in tx interrupt\n");
+ return;
+ }
+
+ if (unlikely(ctrl1 & MSGLST_SET)) {
+ stats->rx_over_errors++;
+ stats->rx_errors++;
+ }
+
+ /* When the CC770 is sending an RTR message and it receives a regular
+ * message that matches the id of the RTR message, it will overwrite the
+ * outgoing message in the TX register. When this happens we must
+ * process the received message and try to transmit the outgoing skb
+ * again.
*/
- cc770_write_reg(priv, msgobj[mo].ctrl0,
- MSGVAL_UNC | TXIE_UNC | RXIE_UNC | INTPND_RES);
+ if (unlikely(ctrl1 & NEWDAT_SET)) {
+ cc770_rx(dev, mo, ctrl1);
+ cc770_tx(dev, mo);
+ return;
+ }

+ cf = (struct can_frame *)priv->tx_skb->data;
+ stats->tx_bytes += cf->can_dlc;
stats->tx_packets++;
+
+ can_put_echo_skb(priv->tx_skb, dev, 0);
can_get_echo_skb(dev, 0);
+ priv->tx_skb = NULL;
+
netif_wake_queue(dev);
}

@@ -803,6 +824,7 @@ struct net_device *alloc_cc770dev(int sizeof_priv)
priv->can.do_set_bittiming = cc770_set_bittiming;
priv->can.do_set_mode = cc770_set_mode;
priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
+ priv->tx_skb = NULL;

memcpy(priv->obj_flags, cc770_obj_flags, sizeof(cc770_obj_flags));

diff --git a/drivers/net/can/cc770/cc770.h b/drivers/net/can/cc770/cc770.h
index a1739db98d91..95752e1d1283 100644
--- a/drivers/net/can/cc770/cc770.h
+++ b/drivers/net/can/cc770/cc770.h
@@ -193,6 +193,8 @@ struct cc770_priv {
u8 cpu_interface; /* CPU interface register */
u8 clkout; /* Clock out register */
u8 bus_config; /* Bus conffiguration register */
+
+ struct sk_buff *tx_skb;
};

struct net_device *alloc_cc770dev(int sizeof_priv);
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index c2b5d3b74c5f..2d2c6703819e 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -584,37 +584,33 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
struct bcm_sysport_tx_ring *ring)
{
struct net_device *ndev = priv->netdev;
- unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs;
unsigned int pkts_compl = 0, bytes_compl = 0;
+ unsigned int txbds_processed = 0;
struct bcm_sysport_cb *cb;
+ unsigned int txbds_ready;
+ unsigned int c_index;
u32 hw_ind;

/* Compute how many descriptors have been processed since last call */
hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
- ring->p_index = (hw_ind & RING_PROD_INDEX_MASK);
-
- last_c_index = ring->c_index;
- num_tx_cbs = ring->size;
-
- c_index &= (num_tx_cbs - 1);
-
- if (c_index >= last_c_index)
- last_tx_cn = c_index - last_c_index;
- else
- last_tx_cn = num_tx_cbs - last_c_index + c_index;
+ txbds_ready = (c_index - ring->c_index) & RING_CONS_INDEX_MASK;

netif_dbg(priv, tx_done, ndev,
- "ring=%d c_index=%d last_tx_cn=%d last_c_index=%d\n",
- ring->index, c_index, last_tx_cn, last_c_index);
+ "ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
+ ring->index, ring->c_index, c_index, txbds_ready);

- while (last_tx_cn-- > 0) {
- cb = ring->cbs + last_c_index;
+ while (txbds_processed < txbds_ready) {
+ cb = &ring->cbs[ring->clean_index];
bcm_sysport_tx_reclaim_one(priv, cb, &bytes_compl, &pkts_compl);

ring->desc_count++;
- last_c_index++;
- last_c_index &= (num_tx_cbs - 1);
+ txbds_processed++;
+
+ if (likely(ring->clean_index < ring->size - 1))
+ ring->clean_index++;
+ else
+ ring->clean_index = 0;
}

ring->c_index = c_index;
@@ -1036,6 +1032,7 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
netif_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64);
ring->index = index;
ring->size = size;
+ ring->clean_index = 0;
ring->alloc_size = ring->size;
ring->desc_cpu = p;
ring->desc_count = ring->size;
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index 281c08246037..5e770dc44367 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -624,7 +624,7 @@ struct bcm_sysport_tx_ring {
unsigned int desc_count; /* Number of descriptors */
unsigned int curr_desc; /* Current descriptor */
unsigned int c_index; /* Last consumer index */
- unsigned int p_index; /* Current producer index */
+ unsigned int clean_index; /* Current clean index */
struct bcm_sysport_cb *cbs; /* Transmit control blocks */
struct dma_desc *desc_cpu; /* CPU view of the descriptor */
struct bcm_sysport_priv *priv; /* private context backpointer */
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index ed830be35d80..dd1d635c416d 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1442,7 +1442,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
* we have already determined whether we have link or not.
*/
if (!mac->autoneg)
- return -E1000_ERR_CONFIG;
+ return 1;

/* Auto-Neg is enabled. Auto Speed Detection takes care
* of MAC speed/duplex configuration. So we only need to
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index f131627ac7df..69bd6634e8f1 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -450,7 +450,7 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
* we have already determined whether we have link or not.
*/
if (!mac->autoneg)
- return -E1000_ERR_CONFIG;
+ return 1;

/* Auto-Neg is enabled. Auto Speed Detection takes care
* of MAC speed/duplex configuration. So we only need to
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
index c95ca252187c..babfb8fba2aa 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
@@ -162,6 +162,7 @@ static int mlx4_en_dcbnl_ieee_setpfc(struct net_device *dev,
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_port_profile *prof = priv->prof;
struct mlx4_en_dev *mdev = priv->mdev;
+ u32 tx_pause, tx_ppp, rx_pause, rx_ppp;
int err;

en_dbg(DRV, priv, "cap: 0x%x en: 0x%x mbc: 0x%x delay: %d\n",
@@ -170,19 +171,23 @@ static int mlx4_en_dcbnl_ieee_setpfc(struct net_device *dev,
pfc->mbc,
pfc->delay);

- prof->rx_pause = !pfc->pfc_en;
- prof->tx_pause = !pfc->pfc_en;
- prof->rx_ppp = pfc->pfc_en;
- prof->tx_ppp = pfc->pfc_en;
+ rx_pause = prof->rx_pause && !pfc->pfc_en;
+ tx_pause = prof->tx_pause && !pfc->pfc_en;
+ rx_ppp = pfc->pfc_en;
+ tx_ppp = pfc->pfc_en;

err = mlx4_SET_PORT_general(mdev->dev, priv->port,
priv->rx_skb_size + ETH_FCS_LEN,
- prof->tx_pause,
- prof->tx_ppp,
- prof->rx_pause,
- prof->rx_ppp);
- if (err)
+ tx_pause, tx_ppp, rx_pause, rx_ppp);
+ if (err) {
en_err(priv, "Failed setting pause params\n");
+ return err;
+ }
+
+ prof->tx_ppp = tx_ppp;
+ prof->rx_ppp = rx_ppp;
+ prof->rx_pause = rx_pause;
+ prof->tx_pause = tx_pause;

return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index f07b814d717c..d187819b1340 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -474,18 +474,29 @@ static int mlx4_en_set_pauseparam(struct net_device *dev,
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
+ u8 tx_pause, tx_ppp, rx_pause, rx_ppp;
int err;

- priv->prof->tx_pause = pause->tx_pause != 0;
- priv->prof->rx_pause = pause->rx_pause != 0;
+ if (pause->autoneg)
+ return -EINVAL;
+
+ tx_pause = !!(pause->tx_pause);
+ rx_pause = !!(pause->rx_pause);
+ rx_ppp = priv->prof->rx_ppp && !(tx_pause || rx_pause);
+ tx_ppp = priv->prof->tx_ppp && !(tx_pause || rx_pause);
+
err = mlx4_SET_PORT_general(mdev->dev, priv->port,
priv->rx_skb_size + ETH_FCS_LEN,
- priv->prof->tx_pause,
- priv->prof->tx_ppp,
- priv->prof->rx_pause,
- priv->prof->rx_ppp);
- if (err)
- en_err(priv, "Failed setting pause params\n");
+ tx_pause, tx_ppp, rx_pause, rx_ppp);
+ if (err) {
+ en_err(priv, "Failed setting pause params, err = %d\n", err);
+ return err;
+ }
+
+ priv->prof->tx_pause = tx_pause;
+ priv->prof->rx_pause = rx_pause;
+ priv->prof->tx_ppp = tx_ppp;
+ priv->prof->rx_ppp = rx_ppp;

return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index 80af090463eb..041213d9e899 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -137,9 +137,9 @@ static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
params->udp_rss = 0;
}
for (i = 1; i <= MLX4_MAX_PORTS; i++) {
- params->prof[i].rx_pause = 1;
+ params->prof[i].rx_pause = !(pfcrx || pfctx);
params->prof[i].rx_ppp = pfcrx;
- params->prof[i].tx_pause = 1;
+ params->prof[i].tx_pause = !(pfcrx || pfctx);
params->prof[i].tx_ppp = pfctx;
params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE;
params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index 2839abb878a6..79e163b252f8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -944,30 +944,16 @@ EXPORT_SYMBOL_GPL(mlx4_fmr_enable);
void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
u32 *lkey, u32 *rkey)
{
- struct mlx4_cmd_mailbox *mailbox;
- int err;
-
if (!fmr->maps)
return;

- fmr->maps = 0;
+ /* To unmap: it is sufficient to take back ownership from HW */
+ *(u8 *)fmr->mpt = MLX4_MPT_STATUS_SW;

- mailbox = mlx4_alloc_cmd_mailbox(dev);
- if (IS_ERR(mailbox)) {
- err = PTR_ERR(mailbox);
- pr_warn("mlx4_ib: mlx4_alloc_cmd_mailbox failed (%d)\n", err);
- return;
- }
+ /* Make sure MPT status is visible */
+ wmb();

- err = mlx4_HW2SW_MPT(dev, NULL,
- key_to_hw_index(fmr->mr.key) &
- (dev->caps.num_mpts - 1));
- mlx4_free_cmd_mailbox(dev, mailbox);
- if (err) {
- pr_warn("mlx4_ib: mlx4_HW2SW_MPT failed (%d)\n", err);
- return;
- }
- fmr->mr.enabled = MLX4_MPT_EN_SW;
+ fmr->maps = 0;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_unmap);

@@ -977,6 +963,22 @@ int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr)

if (fmr->maps)
return -EBUSY;
+ if (fmr->mr.enabled == MLX4_MPT_EN_HW) {
+ /* In case the FMR was enabled and then unmapped,
+ * make sure to give ownership of the MPT back to HW
+ * so the HW2SW_MPT command will succeed.
+ */
+ *(u8 *)fmr->mpt = MLX4_MPT_STATUS_SW;
+ /* Make sure MPT status is visible before changing MPT fields */
+ wmb();
+ fmr->mpt->length = 0;
+ fmr->mpt->start = 0;
+ /* Make sure MPT data is visible after changing MPT status */
+ wmb();
+ *(u8 *)fmr->mpt = MLX4_MPT_STATUS_HW;
+ /* Make sure MPT status is visible */
+ wmb();
+ }

ret = mlx4_mr_free(dev, &fmr->mr);
if (ret)
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index aeaf888e0e73..230832628116 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -257,6 +257,9 @@ void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
u64 in_param = 0;
int err;

+ if (!cnt)
+ return;
+
if (mlx4_is_mfunc(dev)) {
set_param_l(&in_param, base_qpn);
set_param_h(&in_param, cnt);
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
index 0641fccdc954..3780c4672bfa 100644
--- a/drivers/net/slip/slip.c
+++ b/drivers/net/slip/slip.c
@@ -732,7 +732,7 @@ static void sl_sync(void)


/* Find a free SLIP channel, and link in this `tty' line. */
-static struct slip *sl_alloc(dev_t line)
+static struct slip *sl_alloc(void)
{
int i;
char name[IFNAMSIZ];
@@ -814,7 +814,7 @@ static int slip_open(struct tty_struct *tty)

/* OK. Find a free SLIP channel to use. */
err = -ENFILE;
- sl = sl_alloc(tty_devnum(tty));
+ sl = sl_alloc();
if (sl == NULL)
goto err_exit;

diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index b78532e8c56e..6b38cbafef09 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -2356,7 +2356,7 @@ static int team_nl_send_options_get(struct team *team, u32 portid, u32 seq,
if (!nlh) {
err = __send_and_alloc_skb(&skb, team, portid, send_func);
if (err)
- goto errout;
+ return err;
goto send_done;
}

@@ -2636,7 +2636,7 @@ static int team_nl_send_port_list_get(struct team *team, u32 portid, u32 seq,
if (!nlh) {
err = __send_and_alloc_skb(&skb, team, portid, send_func);
if (err)
- goto errout;
+ return err;
goto send_done;
}

diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index a87b76341502..b37e06722388 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1648,7 +1648,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
if (!dev)
return -ENOMEM;
err = dev_get_valid_name(net, dev, name);
- if (err)
+ if (err < 0)
goto err_free_dev;

dev_net_set(dev, net);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 5627917c5ff7..8980f7c7e93f 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -1672,6 +1672,10 @@ static int ath9k_htc_ampdu_action(struct ieee80211_hw *hw,
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
case IEEE80211_AMPDU_TX_OPERATIONAL:
+ if (tid >= ATH9K_HTC_MAX_TID) {
+ ret = -EINVAL;
+ break;
+ }
ista = (struct ath9k_htc_sta *) sta->drv_priv;
spin_lock_bh(&priv->tx.tx_lock);
ista->tid_state[tid] = AGGR_OPERATIONAL;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
index b489476532df..82ef0844b41d 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
@@ -457,25 +457,23 @@ static int brcmf_p2p_set_firmware(struct brcmf_if *ifp, u8 *p2p_mac)
* @dev_addr: optional device address.
*
* P2P needs mac addresses for P2P device and interface. If no device
- * address it specified, these are derived from the primary net device, ie.
- * the permanent ethernet address of the device.
+ * address is specified, these are derived from a random ethernet
+ * address.
*/
static void brcmf_p2p_generate_bss_mac(struct brcmf_p2p_info *p2p, u8 *dev_addr)
{
- struct brcmf_if *pri_ifp = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp;
- bool local_admin = false;
+ bool random_addr = false;

- if (!dev_addr || is_zero_ether_addr(dev_addr)) {
- dev_addr = pri_ifp->mac_addr;
- local_admin = true;
- }
+ if (!dev_addr || is_zero_ether_addr(dev_addr))
+ random_addr = true;

- /* Generate the P2P Device Address. This consists of the device's
- * primary MAC address with the locally administered bit set.
+ /* Generate the P2P Device Address by obtaining a random ethernet
+ * address with the locally administered bit set.
*/
- memcpy(p2p->dev_addr, dev_addr, ETH_ALEN);
- if (local_admin)
- p2p->dev_addr[0] |= 0x02;
+ if (random_addr)
+ eth_random_addr(p2p->dev_addr);
+ else
+ memcpy(p2p->dev_addr, dev_addr, ETH_ALEN);

/* Generate the P2P Interface Address. If the discovery and connection
* BSSCFGs need to simultaneously co-exist, then this address must be
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index 293b4e3b6d02..a99b94641cf3 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -1199,8 +1199,7 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
WARN_ON(wl->bss_type != BSS_TYPE_STA_BSS);

enable = bss_conf->arp_addr_cnt == 1 && bss_conf->assoc;
- wl1251_acx_arp_ip_filter(wl, enable, addr);
-
+ ret = wl1251_acx_arp_ip_filter(wl, enable, addr);
if (ret < 0)
goto out_sleep;
}
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index bc4147bfe0ad..354026cf7ce9 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -3511,6 +3511,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9230,
quirk_dma_func1_alias);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TTI, 0x0642,
quirk_dma_func1_alias);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TTI, 0x0645,
+ quirk_dma_func1_alias);
/* https://bugs.gentoo.org/show_bug.cgi?id=497630 */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_JMICRON,
PCI_DEVICE_ID_JMICRON_JMB388_ESD,
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index 84f946a7a405..3d4b514367ac 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -977,19 +977,16 @@ struct pinctrl_state *pinctrl_lookup_state(struct pinctrl *p,
EXPORT_SYMBOL_GPL(pinctrl_lookup_state);

/**
- * pinctrl_select_state() - select/activate/program a pinctrl state to HW
+ * pinctrl_commit_state() - select/activate/program a pinctrl state to HW
* @p: the pinctrl handle for the device that requests configuration
* @state: the state handle to select/activate/program
*/
-int pinctrl_select_state(struct pinctrl *p, struct pinctrl_state *state)
+static int pinctrl_commit_state(struct pinctrl *p, struct pinctrl_state *state)
{
struct pinctrl_setting *setting, *setting2;
struct pinctrl_state *old_state = p->state;
int ret;

- if (p->state == state)
- return 0;
-
if (p->state) {
/*
* The set of groups with a mux configuration in the old state
@@ -1067,6 +1064,19 @@ int pinctrl_select_state(struct pinctrl *p, struct pinctrl_state *state)

return ret;
}
+
+/**
+ * pinctrl_select_state() - select/activate/program a pinctrl state to HW
+ * @p: the pinctrl handle for the device that requests configuration
+ * @state: the state handle to select/activate/program
+ */
+int pinctrl_select_state(struct pinctrl *p, struct pinctrl_state *state)
+{
+ if (p->state == state)
+ return 0;
+
+ return pinctrl_commit_state(p, state);
+}
EXPORT_SYMBOL_GPL(pinctrl_select_state);

static void devm_pinctrl_release(struct device *dev, void *res)
@@ -1235,7 +1245,7 @@ void pinctrl_unregister_map(struct pinctrl_map const *map)
int pinctrl_force_sleep(struct pinctrl_dev *pctldev)
{
if (!IS_ERR(pctldev->p) && !IS_ERR(pctldev->hog_sleep))
- return pinctrl_select_state(pctldev->p, pctldev->hog_sleep);
+ return pinctrl_commit_state(pctldev->p, pctldev->hog_sleep);
return 0;
}
EXPORT_SYMBOL_GPL(pinctrl_force_sleep);
@@ -1247,7 +1257,7 @@ EXPORT_SYMBOL_GPL(pinctrl_force_sleep);
int pinctrl_force_default(struct pinctrl_dev *pctldev)
{
if (!IS_ERR(pctldev->p) && !IS_ERR(pctldev->hog_default))
- return pinctrl_select_state(pctldev->p, pctldev->hog_default);
+ return pinctrl_commit_state(pctldev->p, pctldev->hog_default);
return 0;
}
EXPORT_SYMBOL_GPL(pinctrl_force_default);
diff --git a/drivers/platform/x86/apple-gmux.c b/drivers/platform/x86/apple-gmux.c
index e743b03f50da..b9429fbf1cd8 100644
--- a/drivers/platform/x86/apple-gmux.c
+++ b/drivers/platform/x86/apple-gmux.c
@@ -22,7 +22,6 @@
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/vga_switcheroo.h>
-#include <linux/vgaarb.h>
#include <acpi/video.h>
#include <asm/io.h>

@@ -32,7 +31,6 @@ struct apple_gmux_data {
bool indexed;
struct mutex index_lock;

- struct pci_dev *pdev;
struct backlight_device *bdev;

/* switcheroo data */
@@ -417,23 +415,6 @@ static int gmux_resume(struct device *dev)
return 0;
}

-static struct pci_dev *gmux_get_io_pdev(void)
-{
- struct pci_dev *pdev = NULL;
-
- while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev))) {
- u16 cmd;
-
- pci_read_config_word(pdev, PCI_COMMAND, &cmd);
- if (!(cmd & PCI_COMMAND_IO))
- continue;
-
- return pdev;
- }
-
- return NULL;
-}
-
static int gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id)
{
struct apple_gmux_data *gmux_data;
@@ -444,7 +425,6 @@ static int gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id)
int ret = -ENXIO;
acpi_status status;
unsigned long long gpe;
- struct pci_dev *pdev = NULL;

if (apple_gmux_data)
return -EBUSY;
@@ -495,7 +475,7 @@ static int gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id)
ver_minor = (version >> 16) & 0xff;
ver_release = (version >> 8) & 0xff;
} else {
- pr_info("gmux device not present or IO disabled\n");
+ pr_info("gmux device not present\n");
ret = -ENODEV;
goto err_release;
}
@@ -503,23 +483,6 @@ static int gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id)
pr_info("Found gmux version %d.%d.%d [%s]\n", ver_major, ver_minor,
ver_release, (gmux_data->indexed ? "indexed" : "classic"));

- /*
- * Apple systems with gmux are EFI based and normally don't use
- * VGA. In addition changing IO+MEM ownership between IGP and dGPU
- * disables IO/MEM used for backlight control on some systems.
- * Lock IO+MEM to GPU with active IO to prevent switch.
- */
- pdev = gmux_get_io_pdev();
- if (pdev && vga_tryget(pdev,
- VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM)) {
- pr_err("IO+MEM vgaarb-locking for PCI:%s failed\n",
- pci_name(pdev));
- ret = -EBUSY;
- goto err_release;
- } else if (pdev)
- pr_info("locked IO for PCI:%s\n", pci_name(pdev));
- gmux_data->pdev = pdev;
-
memset(&props, 0, sizeof(props));
props.type = BACKLIGHT_PLATFORM;
props.max_brightness = gmux_read32(gmux_data, GMUX_PORT_MAX_BRIGHTNESS);
@@ -611,10 +574,6 @@ static int gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id)
err_notify:
backlight_device_unregister(bdev);
err_release:
- if (gmux_data->pdev)
- vga_put(gmux_data->pdev,
- VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM);
- pci_dev_put(pdev);
release_region(gmux_data->iostart, gmux_data->iolen);
err_free:
kfree(gmux_data);
@@ -634,11 +593,6 @@ static void gmux_remove(struct pnp_dev *pnp)
&gmux_notify_handler);
}

- if (gmux_data->pdev) {
- vga_put(gmux_data->pdev,
- VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM);
- pci_dev_put(gmux_data->pdev);
- }
backlight_device_unregister(gmux_data->bdev);

release_region(gmux_data->iostart, gmux_data->iolen);
diff --git a/drivers/power/ab8500_charger.c b/drivers/power/ab8500_charger.c
index 19110aa613a1..bae3f31493cd 100644
--- a/drivers/power/ab8500_charger.c
+++ b/drivers/power/ab8500_charger.c
@@ -3224,11 +3224,13 @@ static int ab8500_charger_init_hw_registers(struct ab8500_charger *di)
}

/* Enable backup battery charging */
- abx500_mask_and_set_register_interruptible(di->dev,
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
AB8500_RTC, AB8500_RTC_CTRL_REG,
RTC_BUP_CH_ENA, RTC_BUP_CH_ENA);
- if (ret < 0)
+ if (ret < 0) {
dev_err(di->dev, "%s mask and set failed\n", __func__);
+ goto out;
+ }

if (is_ab8540(di->parent)) {
ret = abx500_mask_and_set_register_interruptible(di->dev,
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 8100396ba1b4..e15de80e8439 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -593,6 +593,11 @@ struct qeth_cmd_buffer {
void (*callback) (struct qeth_channel *, struct qeth_cmd_buffer *);
};

+static inline struct qeth_ipa_cmd *__ipa_cmd(struct qeth_cmd_buffer *iob)
+{
+ return (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE);
+}
+
/**
* definition of a qeth channel, used for read and write
*/
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 7fc3922dd54c..0aacf78d5b19 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -2025,7 +2025,7 @@ int qeth_send_control_data(struct qeth_card *card, int len,
unsigned long flags;
struct qeth_reply *reply = NULL;
unsigned long timeout, event_timeout;
- struct qeth_ipa_cmd *cmd;
+ struct qeth_ipa_cmd *cmd = NULL;

QETH_CARD_TEXT(card, 2, "sendctl");

@@ -2052,10 +2052,13 @@ int qeth_send_control_data(struct qeth_card *card, int len,
while (atomic_cmpxchg(&card->write.irq_pending, 0, 1)) ;
qeth_prepare_control_data(card, len, iob);

- if (IS_IPA(iob->data))
+ if (IS_IPA(iob->data)) {
+ cmd = __ipa_cmd(iob);
event_timeout = QETH_IPA_TIMEOUT;
- else
+ } else {
event_timeout = QETH_TIMEOUT;
+ }
+
timeout = jiffies + event_timeout;

QETH_CARD_TEXT(card, 6, "noirqpnd");
@@ -2080,9 +2083,8 @@ int qeth_send_control_data(struct qeth_card *card, int len,

/* we have only one long running ipassist, since we can ensure
process context of this command we can sleep */
- cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
- if ((cmd->hdr.command == IPA_CMD_SETIP) &&
- (cmd->hdr.prot_version == QETH_PROT_IPV4)) {
+ if (cmd && cmd->hdr.command == IPA_CMD_SETIP &&
+ cmd->hdr.prot_version == QETH_PROT_IPV4) {
if (!wait_event_timeout(reply->wait_q,
atomic_read(&reply->received), event_timeout))
goto time_err;
@@ -4855,8 +4857,6 @@ static void qeth_core_free_card(struct qeth_card *card)
QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
qeth_clean_channel(&card->read);
qeth_clean_channel(&card->write);
- if (card->dev)
- free_netdev(card->dev);
kfree(card->ip_tbd_list);
qeth_free_qdio_buffers(card);
unregister_service_level(&card->qeth_service_level);
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index ccc71ab92fe6..784ba56bb5d3 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -922,8 +922,8 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
qeth_l2_set_offline(cgdev);

if (card->dev) {
- netif_napi_del(&card->napi);
unregister_netdev(card->dev);
+ free_netdev(card->dev);
card->dev = NULL;
}
return;
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 3beb591b8ad8..4ba6cf34e522 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -3340,8 +3340,8 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
qeth_l3_set_offline(cgdev);

if (card->dev) {
- netif_napi_del(&card->napi);
unregister_netdev(card->dev);
+ free_netdev(card->dev);
card->dev = NULL;
}

diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 4598a1978a1d..0cae3ba00090 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -770,8 +770,16 @@ static void setinqstr(struct aac_dev *dev, void *data, int tindex)
memset(str, ' ', sizeof(*str));

if (dev->supplement_adapter_info.AdapterTypeText[0]) {
- char * cp = dev->supplement_adapter_info.AdapterTypeText;
int c;
+ char *cp;
+ char *cname = kmemdup(dev->supplement_adapter_info.AdapterTypeText,
+ sizeof(dev->supplement_adapter_info.AdapterTypeText),
+ GFP_ATOMIC);
+
+ if (!cname)
+ return;
+
+ cp = cname;
if ((cp[0] == 'A') && (cp[1] == 'O') && (cp[2] == 'C'))
inqstrcpy("SMC", str->vid);
else {
@@ -780,8 +788,7 @@ static void setinqstr(struct aac_dev *dev, void *data, int tindex)
++cp;
c = *cp;
*cp = '\0';
- inqstrcpy (dev->supplement_adapter_info.AdapterTypeText,
- str->vid);
+ inqstrcpy(cname, str->vid);
*cp = c;
while (*cp && *cp != ' ')
++cp;
@@ -789,14 +796,11 @@ static void setinqstr(struct aac_dev *dev, void *data, int tindex)
while (*cp == ' ')
++cp;
/* last six chars reserved for vol type */
- c = 0;
- if (strlen(cp) > sizeof(str->pid)) {
- c = cp[sizeof(str->pid)];
+ if (strlen(cp) > sizeof(str->pid))
cp[sizeof(str->pid)] = '\0';
- }
inqstrcpy (cp, str->pid);
- if (c)
- cp[sizeof(str->pid)] = c;
+
+ kfree(cname);
} else {
struct aac_driver_ident *mp = aac_get_driver_ident(dev->cardtype);

diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c
index b46a6f6c0eb3..6d11e2bce827 100644
--- a/drivers/scsi/arm/fas216.c
+++ b/drivers/scsi/arm/fas216.c
@@ -2009,7 +2009,7 @@ static void fas216_rq_sns_done(FAS216_Info *info, struct scsi_cmnd *SCpnt,
* have valid data in the sense buffer that could
* confuse the higher levels.
*/
- memset(SCpnt->sense_buffer, 0, sizeof(SCpnt->sense_buffer));
+ memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
//printk("scsi%d.%c: sense buffer: ", info->host->host_no, '0' + SCpnt->device->id);
//{ int i; for (i = 0; i < 32; i++) printk("%02x ", SCpnt->sense_buffer[i]); printk("\n"); }
/*
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index 017a5290e8c1..b5214ad3f0fd 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -366,7 +366,7 @@ enum ibmvfc_fcp_rsp_info_codes {
};

struct ibmvfc_fcp_rsp_info {
- u16 reserved;
+ u8 reserved[3];
u8 rsp_code;
u8 reserved2[4];
}__attribute__((packed, aligned (2)));
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index ee38ab64422b..9d9a7534ff2e 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -6862,7 +6862,8 @@ static struct ata_port_operations ipr_sata_ops = {
};

static struct ata_port_info sata_port_info = {
- .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
+ .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
+ ATA_FLAG_SAS_HOST,
.pio_mask = ATA_PIO4_ONLY,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 3f0c3e0b5838..672208993ec0 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -566,7 +566,8 @@ static struct ata_port_operations sas_sata_ops = {
};

static struct ata_port_info sata_port_info = {
- .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA | ATA_FLAG_NCQ,
+ .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA | ATA_FLAG_NCQ |
+ ATA_FLAG_SAS_HOST,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 0cac7d8fd0f7..9ee5a359248c 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -675,7 +675,7 @@ int sas_smp_get_phy_events(struct sas_phy *phy)
res = smp_execute_task(dev, req, RPEL_REQ_SIZE,
resp, RPEL_RESP_SIZE);

- if (!res)
+ if (res)
goto out;

phy->invalid_dword_count = scsi_to_u32(&resp[12]);
@@ -684,6 +684,7 @@ int sas_smp_get_phy_events(struct sas_phy *phy)
phy->phy_reset_problem_count = scsi_to_u32(&resp[24]);

out:
+ kfree(req);
kfree(resp);
return res;

diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 5daff2054ae4..5ff097c80dbc 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -943,12 +943,23 @@ static int spi_imx_remove(struct platform_device *pdev)
{
struct spi_master *master = platform_get_drvdata(pdev);
struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
+ int ret;

spi_bitbang_stop(&spi_imx->bitbang);

+ ret = clk_enable(spi_imx->clk_per);
+ if (ret)
+ return ret;
+
+ ret = clk_enable(spi_imx->clk_ipg);
+ if (ret) {
+ clk_disable(spi_imx->clk_per);
+ return ret;
+ }
+
writel(0, spi_imx->base + MXC_CSPICTRL);
- clk_unprepare(spi_imx->clk_ipg);
- clk_unprepare(spi_imx->clk_per);
+ clk_disable_unprepare(spi_imx->clk_ipg);
+ clk_disable_unprepare(spi_imx->clk_per);
spi_master_put(master);

return 0;
diff --git a/drivers/spi/spi-sun6i.c b/drivers/spi/spi-sun6i.c
index 97f2e9bb3c24..49bf07cfb04f 100644
--- a/drivers/spi/spi-sun6i.c
+++ b/drivers/spi/spi-sun6i.c
@@ -457,7 +457,7 @@ static int sun6i_spi_probe(struct platform_device *pdev)

static int sun6i_spi_remove(struct platform_device *pdev)
{
- pm_runtime_disable(&pdev->dev);
+ pm_runtime_force_suspend(&pdev->dev);

return 0;
}
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index be2958f0ce28..fbca97248f37 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -330,24 +330,23 @@ static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin)
mutex_lock(&ashmem_mutex);

if (asma->size == 0) {
- ret = -EINVAL;
- goto out;
+ mutex_unlock(&ashmem_mutex);
+ return -EINVAL;
}

if (!asma->file) {
- ret = -EBADF;
- goto out;
+ mutex_unlock(&ashmem_mutex);
+ return -EBADF;
}

+ mutex_unlock(&ashmem_mutex);
+
ret = asma->file->f_op->llseek(asma->file, offset, origin);
if (ret < 0)
- goto out;
+ return ret;

/** Copy f_pos from backing file, since f_ops->llseek() sets it */
file->f_pos = asma->file->f_pos;
-
-out:
- mutex_unlock(&ashmem_mutex);
return ret;
}

@@ -703,30 +702,30 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
size_t pgstart, pgend;
int ret = -EINVAL;

- if (unlikely(!asma->file))
- return -EINVAL;
-
if (unlikely(copy_from_user(&pin, p, sizeof(pin))))
return -EFAULT;

+ mutex_lock(&ashmem_mutex);
+
+ if (unlikely(!asma->file))
+ goto out_unlock;
+
/* per custom, you can pass zero for len to mean "everything onward" */
if (!pin.len)
pin.len = PAGE_ALIGN(asma->size) - pin.offset;

if (unlikely((pin.offset | pin.len) & ~PAGE_MASK))
- return -EINVAL;
+ goto out_unlock;

if (unlikely(((__u32) -1) - pin.offset < pin.len))
- return -EINVAL;
+ goto out_unlock;

if (unlikely(PAGE_ALIGN(asma->size) < pin.offset + pin.len))
- return -EINVAL;
+ goto out_unlock;

pgstart = pin.offset / PAGE_SIZE;
pgend = pgstart + (pin.len / PAGE_SIZE) - 1;

- mutex_lock(&ashmem_mutex);
-
switch (cmd) {
case ASHMEM_PIN:
ret = ashmem_pin(asma, pgstart, pgend);
@@ -739,6 +738,7 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
break;
}

+out_unlock:
mutex_unlock(&ashmem_mutex);

return ret;
diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
index 3b79624703a7..892b91054914 100644
--- a/drivers/staging/android/binder.c
+++ b/drivers/staging/android/binder.c
@@ -1242,7 +1242,7 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
int debug_id = buffer->debug_id;

binder_debug(BINDER_DEBUG_TRANSACTION,
- "%d buffer release %d, size %zd-%zd, failed at %p\n",
+ "%d buffer release %d, size %zd-%zd, failed at %pK\n",
proc->pid, buffer->debug_id,
buffer->data_size, buffer->offsets_size, failed_at);

@@ -2059,7 +2059,7 @@ static int binder_thread_write(struct binder_proc *proc,
}
}
binder_debug(BINDER_DEBUG_DEAD_BINDER,
- "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
+ "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
proc->pid, thread->pid, (u64)cookie,
death);
if (death == NULL) {
@@ -2572,6 +2572,10 @@ static unsigned int binder_poll(struct file *filp,
binder_lock(__func__);

thread = binder_get_thread(proc);
+ if (!thread) {
+ binder_unlock(__func__);
+ return POLLERR;
+ }

wait_for_proc_work = thread->transaction_stack == NULL &&
list_empty(&thread->todo) && thread->return_error == BR_OK;
@@ -2808,7 +2812,7 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
goto err_already_mapped;
}

- area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
+ area = get_vm_area(vma->vm_end - vma->vm_start, VM_ALLOC);
if (area == NULL) {
ret = -ENOMEM;
failure_string = "get_vm_area";
@@ -3158,7 +3162,7 @@ static void print_binder_transaction(struct seq_file *m, const char *prefix,
struct binder_transaction *t)
{
seq_printf(m,
- "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
+ "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d",
prefix, t->debug_id, t,
t->from ? t->from->proc->pid : 0,
t->from ? t->from->pid : 0,
@@ -3172,7 +3176,7 @@ static void print_binder_transaction(struct seq_file *m, const char *prefix,
if (t->buffer->target_node)
seq_printf(m, " node %d",
t->buffer->target_node->debug_id);
- seq_printf(m, " size %zd:%zd data %p\n",
+ seq_printf(m, " size %zd:%zd data %pK\n",
t->buffer->data_size, t->buffer->offsets_size,
t->buffer->data);
}
diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c
index c7aaf2bb1018..4b8bd2936c1d 100644
--- a/drivers/staging/iio/adc/ad7192.c
+++ b/drivers/staging/iio/adc/ad7192.c
@@ -125,7 +125,9 @@
#define AD7192_GPOCON_P1DAT (1 << 1) /* P1 state */
#define AD7192_GPOCON_P0DAT (1 << 0) /* P0 state */

-#define AD7192_INT_FREQ_MHz 4915200
+#define AD7192_EXT_FREQ_MHZ_MIN 2457600
+#define AD7192_EXT_FREQ_MHZ_MAX 5120000
+#define AD7192_INT_FREQ_MHZ 4915200

/* NOTE:
* The AD7190/2/5 features a dual use data out ready DOUT/RDY output.
@@ -200,6 +202,12 @@ static int ad7192_calibrate_all(struct ad7192_state *st)
ARRAY_SIZE(ad7192_calib_arr));
}

+static inline bool ad7192_valid_external_frequency(u32 freq)
+{
+ return (freq >= AD7192_EXT_FREQ_MHZ_MIN &&
+ freq <= AD7192_EXT_FREQ_MHZ_MAX);
+}
+
static int ad7192_setup(struct ad7192_state *st,
const struct ad7192_platform_data *pdata)
{
@@ -224,17 +232,20 @@ static int ad7192_setup(struct ad7192_state *st,
dev_warn(&st->sd.spi->dev, "device ID query failed (0x%X)\n", id);

switch (pdata->clock_source_sel) {
- case AD7192_CLK_EXT_MCLK1_2:
- case AD7192_CLK_EXT_MCLK2:
- st->mclk = AD7192_INT_FREQ_MHz;
- break;
case AD7192_CLK_INT:
case AD7192_CLK_INT_CO:
- if (pdata->ext_clk_Hz)
- st->mclk = pdata->ext_clk_Hz;
- else
- st->mclk = AD7192_INT_FREQ_MHz;
+ st->mclk = AD7192_INT_FREQ_MHZ;
break;
+ case AD7192_CLK_EXT_MCLK1_2:
+ case AD7192_CLK_EXT_MCLK2:
+ if (ad7192_valid_external_frequency(pdata->ext_clk_hz)) {
+ st->mclk = pdata->ext_clk_hz;
+ break;
+ }
+ dev_err(&st->sd.spi->dev, "Invalid frequency setting %u\n",
+ pdata->ext_clk_hz);
+ ret = -EINVAL;
+ goto out;
default:
ret = -EINVAL;
goto out;
diff --git a/drivers/staging/iio/adc/ad7192.h b/drivers/staging/iio/adc/ad7192.h
index a0a5b61a41f1..7433a43c2611 100644
--- a/drivers/staging/iio/adc/ad7192.h
+++ b/drivers/staging/iio/adc/ad7192.h
@@ -34,7 +34,7 @@
struct ad7192_platform_data {
u16 vref_mv;
u8 clock_source_sel;
- u32 ext_clk_Hz;
+ u32 ext_clk_hz;
bool refin2_en;
bool rej60_en;
bool sinc3_en;
diff --git a/drivers/staging/iio/adc/ad7280a.c b/drivers/staging/iio/adc/ad7280a.c
index d215edf66af2..e2e60756dc03 100644
--- a/drivers/staging/iio/adc/ad7280a.c
+++ b/drivers/staging/iio/adc/ad7280a.c
@@ -89,7 +89,7 @@

#define AD7280A_ALL_CELLS (0xAD << 16)

-#define AD7280A_MAX_SPI_CLK_Hz 700000 /* < 1MHz */
+#define AD7280A_MAX_SPI_CLK_HZ 700000 /* < 1MHz */
#define AD7280A_MAX_CHAIN 8
#define AD7280A_CELLS_PER_DEV 6
#define AD7280A_BITS 12
@@ -850,7 +850,7 @@ static int ad7280_probe(struct spi_device *spi)

ad7280_crc8_build_table(st->crc_tab);

- st->spi->max_speed_hz = AD7280A_MAX_SPI_CLK_Hz;
+ st->spi->max_speed_hz = AD7280A_MAX_SPI_CLK_HZ;
st->spi->mode = SPI_MODE_1;
spi_setup(st->spi);

diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-crypto-adler.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-crypto-adler.c
index 20b2d61d9ff2..61662a0e1bbf 100644
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-crypto-adler.c
+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-crypto-adler.c
@@ -123,6 +123,7 @@ static struct shash_alg alg = {
.cra_name = "adler32",
.cra_driver_name = "adler32-zlib",
.cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.cra_blocksize = CHKSUM_BLOCK_SIZE,
.cra_ctxsize = sizeof(u32),
.cra_module = THIS_MODULE,
diff --git a/drivers/staging/lustre/lustre/libcfs/tracefile.c b/drivers/staging/lustre/lustre/libcfs/tracefile.c
index 07845e844243..f883030ad972 100644
--- a/drivers/staging/lustre/lustre/libcfs/tracefile.c
+++ b/drivers/staging/lustre/lustre/libcfs/tracefile.c
@@ -767,7 +767,7 @@ int cfs_trace_copyin_string(char *knl_buffer, int knl_buffer_nob,
return -EFAULT;

nob = strnlen(knl_buffer, usr_buffer_nob);
- while (nob-- >= 0) /* strip trailing whitespace */
+ while (--nob >= 0) /* strip trailing whitespace */
if (!isspace(knl_buffer[nob]))
break;

diff --git a/drivers/staging/rts5208/ms.c b/drivers/staging/rts5208/ms.c
index d22916a4b9d8..574ac8ac320a 100644
--- a/drivers/staging/rts5208/ms.c
+++ b/drivers/staging/rts5208/ms.c
@@ -2397,6 +2397,7 @@ static int ms_build_l2p_tbl(struct rtsx_chip *chip, int seg_no)
int reset_ms_card(struct rtsx_chip *chip)
{
struct ms_info *ms_card = &(chip->ms_card);
+ int seg_no = ms_card->total_block / 512 - 1;
int retval;

memset(ms_card, 0, sizeof(struct ms_info));
@@ -2430,7 +2431,7 @@ int reset_ms_card(struct rtsx_chip *chip)
/* Build table for the last segment,
* to check if L2P table block exists, erasing it
*/
- retval = ms_build_l2p_tbl(chip, ms_card->total_block / 512 - 1);
+ retval = ms_build_l2p_tbl(chip, seg_no);
if (retval != STATUS_SUCCESS)
TRACE_RET(chip, STATUS_FAIL);
}
diff --git a/drivers/staging/usbip/stub_dev.c b/drivers/staging/usbip/stub_dev.c
index da5ff96bbf3a..3959633ad09c 100644
--- a/drivers/staging/usbip/stub_dev.c
+++ b/drivers/staging/usbip/stub_dev.c
@@ -114,6 +114,7 @@ static ssize_t store_sockfd(struct device *dev, struct device_attribute *attr,
goto err;

sdev->ud.tcp_socket = socket;
+ sdev->ud.sockfd = sockfd;

spin_unlock_irq(&sdev->ud.lock);

@@ -213,6 +214,7 @@ static void stub_shutdown_connection(struct usbip_device *ud)
if (ud->tcp_socket) {
sockfd_put(ud->tcp_socket);
ud->tcp_socket = NULL;
+ ud->sockfd = -1;
}

/* 3. free used data */
@@ -307,6 +309,7 @@ static struct stub_device *stub_device_alloc(struct usb_device *udev)
sdev->ud.status = SDEV_ST_AVAILABLE;
spin_lock_init(&sdev->ud.lock);
sdev->ud.tcp_socket = NULL;
+ sdev->ud.sockfd = -1;

INIT_LIST_HEAD(&sdev->priv_init);
INIT_LIST_HEAD(&sdev->priv_tx);
diff --git a/drivers/staging/usbip/userspace/src/usbip_bind.c b/drivers/staging/usbip/userspace/src/usbip_bind.c
index fa46141ae68b..e121cfb1746a 100644
--- a/drivers/staging/usbip/userspace/src/usbip_bind.c
+++ b/drivers/staging/usbip/userspace/src/usbip_bind.c
@@ -144,6 +144,7 @@ static int bind_device(char *busid)
int rc;
struct udev *udev;
struct udev_device *dev;
+ const char *devpath;

/* Check whether the device with this bus ID exists. */
udev = udev_new();
@@ -152,8 +153,16 @@ static int bind_device(char *busid)
err("device with the specified bus ID does not exist");
return -1;
}
+ devpath = udev_device_get_devpath(dev);
udev_unref(udev);

+ /* If the device is already attached to vhci_hcd - bail out */
+ if (strstr(devpath, USBIP_VHCI_DRV_NAME)) {
+ err("bind loop detected: device: %s is attached to %s\n",
+ devpath, USBIP_VHCI_DRV_NAME);
+ return -1;
+ }
+
rc = unbind_other(busid);
if (rc == UNBIND_ST_FAILED) {
err("could not unbind driver from device on busid %s", busid);
diff --git a/drivers/staging/usbip/userspace/src/usbip_list.c b/drivers/staging/usbip/userspace/src/usbip_list.c
index d5ce34a410e7..ac6081c3db82 100644
--- a/drivers/staging/usbip/userspace/src/usbip_list.c
+++ b/drivers/staging/usbip/userspace/src/usbip_list.c
@@ -180,6 +180,7 @@ static int list_devices(bool parsable)
const char *busid;
char product_name[128];
int ret = -1;
+ const char *devpath;

/* Create libudev context. */
udev = udev_new();
@@ -202,6 +203,14 @@ static int list_devices(bool parsable)
path = udev_list_entry_get_name(dev_list_entry);
dev = udev_device_new_from_syspath(udev, path);

+ /* Ignore devices attached to vhci_hcd */
+ devpath = udev_device_get_devpath(dev);
+ if (strstr(devpath, USBIP_VHCI_DRV_NAME)) {
+ dbg("Skip the device %s already attached to %s\n",
+ devpath, USBIP_VHCI_DRV_NAME);
+ continue;
+ }
+
/* Get device information. */
idVendor = udev_device_get_sysattr_value(dev, "idVendor");
idProduct = udev_device_get_sysattr_value(dev, "idProduct");
diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
index ada952424a7b..2ae3b63741bd 100644
--- a/drivers/staging/usbip/vhci_hcd.c
+++ b/drivers/staging/usbip/vhci_hcd.c
@@ -786,6 +786,7 @@ static void vhci_shutdown_connection(struct usbip_device *ud)
if (vdev->ud.tcp_socket) {
sockfd_put(vdev->ud.tcp_socket);
vdev->ud.tcp_socket = NULL;
+ vdev->ud.sockfd = -1;
}
pr_info("release socket\n");

@@ -833,6 +834,7 @@ static void vhci_device_reset(struct usbip_device *ud)
if (ud->tcp_socket) {
sockfd_put(ud->tcp_socket);
ud->tcp_socket = NULL;
+ ud->sockfd = -1;
}
ud->status = VDEV_ST_NULL;

diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 3fab0811ca11..0877dde6d722 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -2261,6 +2261,12 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
}
if (tty_hung_up_p(file))
break;
+ /*
+ * Abort readers for ttys which never actually
+ * get hung up. See __tty_hangup().
+ */
+ if (test_bit(TTY_HUPPING, &tty->flags))
+ break;
if (!timeout)
break;
if (file->f_flags & O_NONBLOCK) {
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index e3af4a758440..6ad273e68005 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -5066,6 +5066,17 @@ static struct pci_device_id serial_pci_tbl[] = {
{ PCI_VENDOR_ID_INTASHIELD, PCI_DEVICE_ID_INTASHIELD_IS400,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, /* 135a.0dc0 */
pbn_b2_4_115200 },
+ /*
+ * BrainBoxes UC-260
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x0D21,
+ PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00,
+ pbn_b2_4_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x0E34,
+ PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00,
+ pbn_b2_4_115200 },
/*
* Perle PCI-RAS cards
*/
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 56a3cc42a4b0..bc1c68c926bd 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -1644,6 +1644,7 @@ static void atmel_get_ip_name(struct uart_port *port)
switch (version) {
case 0x302:
case 0x10213:
+ case 0x10302:
dev_dbg(port->dev, "This version is usart\n");
atmel_port->is_usart = true;
break;
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index b1d761e022f0..e2224213111c 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -734,6 +734,8 @@ static void sci_receive_chars(struct uart_port *port)
/* Tell the rest of the system the news. New characters! */
tty_flip_buffer_push(tport);
} else {
+ /* TTY buffers full; read from RX reg to prevent lockup */
+ serial_port_in(port, SCxRDR);
serial_port_in(port, SCxSR); /* dummy read */
serial_port_out(port, SCxSR, SCxSR_RDxF_CLEAR(port));
}
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 322c4818dd0c..23f7f96c7508 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -1705,7 +1705,7 @@ static void reset_terminal(struct vc_data *vc, int do_clear)
default_attr(vc);
update_attr(vc);

- vc->vc_tab_stop[0] = 0x01010100;
+ vc->vc_tab_stop[0] =
vc->vc_tab_stop[1] =
vc->vc_tab_stop[2] =
vc->vc_tab_stop[3] =
@@ -1748,7 +1748,7 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
vc->vc_pos -= (vc->vc_x << 1);
while (vc->vc_x < vc->vc_cols - 1) {
vc->vc_x++;
- if (vc->vc_tab_stop[vc->vc_x >> 5] & (1 << (vc->vc_x & 31)))
+ if (vc->vc_tab_stop[7 & (vc->vc_x >> 5)] & (1 << (vc->vc_x & 31)))
break;
}
vc->vc_pos += (vc->vc_x << 1);
@@ -1808,7 +1808,7 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
lf(vc);
return;
case 'H':
- vc->vc_tab_stop[vc->vc_x >> 5] |= (1 << (vc->vc_x & 31));
+ vc->vc_tab_stop[7 & (vc->vc_x >> 5)] |= (1 << (vc->vc_x & 31));
return;
case 'Z':
respond_ID(tty);
@@ -2001,7 +2001,7 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
return;
case 'g':
if (!vc->vc_par[0])
- vc->vc_tab_stop[vc->vc_x >> 5] &= ~(1 << (vc->vc_x & 31));
+ vc->vc_tab_stop[7 & (vc->vc_x >> 5)] &= ~(1 << (vc->vc_x & 31));
else if (vc->vc_par[0] == 3) {
vc->vc_tab_stop[0] =
vc->vc_tab_stop[1] =
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 58034df6351b..5ea049bd1905 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -378,7 +378,7 @@ static int acm_submit_read_urb(struct acm *acm, int index, gfp_t mem_flags)

res = usb_submit_urb(acm->read_urbs[index], mem_flags);
if (res) {
- if (res != -EPERM) {
+ if (res != -EPERM && res != -ENODEV) {
dev_err(&acm->data->dev,
"%s - usb_submit_urb failed: %d\n",
__func__, res);
@@ -1708,6 +1708,9 @@ static const struct usb_device_id acm_ids[] = {
{ USB_DEVICE(0x0ace, 0x1611), /* ZyDAS 56K USB MODEM - new version */
.driver_info = SINGLE_RX_URB, /* firmware bug */
},
+ { USB_DEVICE(0x11ca, 0x0201), /* VeriFone Mx870 Gadget Serial */
+ .driver_info = SINGLE_RX_URB,
+ },
{ USB_DEVICE(0x22b8, 0x7000), /* Motorola Q Phone */
.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
},
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 0c8a7fc4dad8..66477fef2912 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -147,6 +147,10 @@ int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,

ret = usb_internal_control_msg(dev, pipe, dr, data, size, timeout);

+ /* Linger a bit, prior to the next control message. */
+ if (dev->quirks & USB_QUIRK_DELAY_CTRL_MSG)
+ msleep(200);
+
kfree(dr);

return ret;
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index e54da625bb36..25391bef0642 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -222,8 +222,12 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x1a0a, 0x0200), .driver_info =
USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },

+ /* Corsair K70 RGB */
+ { USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT },
+
/* Corsair Strafe RGB */
- { USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT },
+ { USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT |
+ USB_QUIRK_DELAY_CTRL_MSG },

/* Corsair K70 LUX */
{ USB_DEVICE(0x1b1c, 0x1b36), .driver_info = USB_QUIRK_DELAY_INIT },
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 52620eb41838..f8a20b88ccfd 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -2436,6 +2436,8 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
break;
}

+ dwc->eps[1]->endpoint.maxpacket = dwc->gadget.ep0->maxpacket;
+
/* Enable USB2 LPM Capability */

if ((dwc->revision > DWC3_REVISION_194A)
diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c
index 019d404dc547..5e3a3a2f1ff6 100644
--- a/drivers/usb/gadget/f_fs.c
+++ b/drivers/usb/gadget/f_fs.c
@@ -2998,6 +2998,7 @@ static int ffs_ready(struct ffs_data *ffs)
static void ffs_closed(struct ffs_data *ffs)
{
struct ffs_dev *ffs_obj;
+ struct config_item *ci;

ENTER();
ffs_dev_lock();
@@ -3015,8 +3016,12 @@ static void ffs_closed(struct ffs_data *ffs)
|| !ffs_obj->opts->func_inst.group.cg_item.ci_parent)
goto done;

- unregister_gadget_item(ffs_obj->opts->
- func_inst.group.cg_item.ci_parent->ci_parent);
+ ci = ffs_obj->opts->func_inst.group.cg_item.ci_parent->ci_parent;
+ ffs_dev_unlock();
+
+ if (test_bit(FFS_FL_BOUND, &ffs->flags))
+ unregister_gadget_item(ci);
+ return;
done:
ffs_dev_unlock();
}
diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c
index e9599f64547b..f89294c517d8 100644
--- a/drivers/usb/host/ohci-q.c
+++ b/drivers/usb/host/ohci-q.c
@@ -966,6 +966,8 @@ finish_unlinks (struct ohci_hcd *ohci, u16 tick)
* have modified this list. normally it's just prepending
* entries (which we'd ignore), but paranoia won't hurt.
*/
+ *last = ed->ed_next;
+ ed->ed_next = NULL;
modified = 0;

/* unlink urbs as requested, but rescan the list after
@@ -1024,20 +1026,21 @@ finish_unlinks (struct ohci_hcd *ohci, u16 tick)
goto rescan_this;

/*
- * If no TDs are queued, take ED off the ed_rm_list.
+ * If no TDs are queued, ED is now idle.
* Otherwise, if the HC is running, reschedule.
- * If not, leave it on the list for further dequeues.
+ * If the HC isn't running, add ED back to the
+ * start of the list for later processing.
*/
if (list_empty(&ed->td_list)) {
- *last = ed->ed_next;
- ed->ed_next = NULL;
ed->state = ED_IDLE;
} else if (ohci->rh_state == OHCI_RH_RUNNING) {
- *last = ed->ed_next;
- ed->ed_next = NULL;
ed_schedule(ohci, ed);
} else {
- last = &ed->ed_next;
+ ed->ed_next = ohci->ed_rm_list;
+ ohci->ed_rm_list = ed;
+ /* Don't loop on the same ED */
+ if (last == &ohci->ed_rm_list)
+ last = &ed->ed_next;
}

if (modified)
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 7df1edc2c199..33cf347e754b 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -124,6 +124,9 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_find_chipset_info())
xhci->quirks |= XHCI_AMD_PLL_FIX;

+ if (pdev->vendor == PCI_VENDOR_ID_AMD && pdev->device == 0x43bb)
+ xhci->quirks |= XHCI_SUSPEND_DELAY;
+
if (pdev->vendor == PCI_VENDOR_ID_AMD)
xhci->quirks |= XHCI_TRUST_TX_LENGTH;

diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 45654fffa7a5..a30b16b3c847 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -922,6 +922,9 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
del_timer_sync(&hcd->rh_timer);

+ if (xhci->quirks & XHCI_SUSPEND_DELAY)
+ usleep_range(1000, 1500);
+
spin_lock_irq(&xhci->lock);
clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 08057459c8e6..df47cb8e5ec8 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1571,6 +1571,7 @@ struct xhci_hcd {
#define XHCI_MISSING_CAS (1 << 24)
#define XHCI_U2_DISABLE_WAKE (1 << 27)
#define XHCI_ASMEDIA_MODIFY_FLOWCONTROL (1 << 28)
+#define XHCI_SUSPEND_DELAY (1 << 30)
unsigned int num_active_eps;
unsigned int limit_active_eps;
/* There are two roothubs to keep track of bus suspend info for */
diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c
index 82503a7ff6c8..2bbca7d674d6 100644
--- a/drivers/usb/misc/ldusb.c
+++ b/drivers/usb/misc/ldusb.c
@@ -46,6 +46,9 @@
#define USB_DEVICE_ID_LD_MICROCASSYTIME 0x1033 /* USB Product ID of Micro-CASSY Time (reserved) */
#define USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE 0x1035 /* USB Product ID of Micro-CASSY Temperature */
#define USB_DEVICE_ID_LD_MICROCASSYPH 0x1038 /* USB Product ID of Micro-CASSY pH */
+#define USB_DEVICE_ID_LD_POWERANALYSERCASSY 0x1040 /* USB Product ID of Power Analyser CASSY */
+#define USB_DEVICE_ID_LD_CONVERTERCONTROLLERCASSY 0x1042 /* USB Product ID of Converter Controller CASSY */
+#define USB_DEVICE_ID_LD_MACHINETESTCASSY 0x1043 /* USB Product ID of Machine Test CASSY */
#define USB_DEVICE_ID_LD_JWM 0x1080 /* USB Product ID of Joule and Wattmeter */
#define USB_DEVICE_ID_LD_DMMP 0x1081 /* USB Product ID of Digital Multimeter P (reserved) */
#define USB_DEVICE_ID_LD_UMIP 0x1090 /* USB Product ID of UMI P */
@@ -94,6 +97,9 @@ static const struct usb_device_id ld_usb_table[] = {
{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTIME) },
{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE) },
{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYPH) },
+ { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POWERANALYSERCASSY) },
+ { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_CONVERTERCONTROLLERCASSY) },
+ { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MACHINETESTCASSY) },
{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_JWM) },
{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_DMMP) },
{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIP) },
diff --git a/drivers/usb/mon/mon_text.c b/drivers/usb/mon/mon_text.c
index ad408251d955..108dcc5f5350 100644
--- a/drivers/usb/mon/mon_text.c
+++ b/drivers/usb/mon/mon_text.c
@@ -82,6 +82,8 @@ struct mon_reader_text {

wait_queue_head_t wait;
int printf_size;
+ size_t printf_offset;
+ size_t printf_togo;
char *printf_buf;
struct mutex printf_lock;

@@ -373,73 +375,103 @@ static int mon_text_open(struct inode *inode, struct file *file)
return rc;
}

-/*
- * For simplicity, we read one record in one system call and throw out
- * what does not fit. This means that the following does not work:
- * dd if=/dbg/usbmon/0t bs=10
- * Also, we do not allow seeks and do not bother advancing the offset.
- */
+static ssize_t mon_text_copy_to_user(struct mon_reader_text *rp,
+ char __user * const buf, const size_t nbytes)
+{
+ const size_t togo = min(nbytes, rp->printf_togo);
+
+ if (copy_to_user(buf, &rp->printf_buf[rp->printf_offset], togo))
+ return -EFAULT;
+ rp->printf_togo -= togo;
+ rp->printf_offset += togo;
+ return togo;
+}
+
+/* ppos is not advanced since the llseek operation is not permitted. */
static ssize_t mon_text_read_t(struct file *file, char __user *buf,
- size_t nbytes, loff_t *ppos)
+ size_t nbytes, loff_t *ppos)
{
struct mon_reader_text *rp = file->private_data;
struct mon_event_text *ep;
struct mon_text_ptr ptr;
+ ssize_t ret;

- if (IS_ERR(ep = mon_text_read_wait(rp, file)))
- return PTR_ERR(ep);
mutex_lock(&rp->printf_lock);
- ptr.cnt = 0;
- ptr.pbuf = rp->printf_buf;
- ptr.limit = rp->printf_size;
-
- mon_text_read_head_t(rp, &ptr, ep);
- mon_text_read_statset(rp, &ptr, ep);
- ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt,
- " %d", ep->length);
- mon_text_read_data(rp, &ptr, ep);
-
- if (copy_to_user(buf, rp->printf_buf, ptr.cnt))
- ptr.cnt = -EFAULT;
+
+ if (rp->printf_togo == 0) {
+
+ ep = mon_text_read_wait(rp, file);
+ if (IS_ERR(ep)) {
+ mutex_unlock(&rp->printf_lock);
+ return PTR_ERR(ep);
+ }
+ ptr.cnt = 0;
+ ptr.pbuf = rp->printf_buf;
+ ptr.limit = rp->printf_size;
+
+ mon_text_read_head_t(rp, &ptr, ep);
+ mon_text_read_statset(rp, &ptr, ep);
+ ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt,
+ " %d", ep->length);
+ mon_text_read_data(rp, &ptr, ep);
+
+ rp->printf_togo = ptr.cnt;
+ rp->printf_offset = 0;
+
+ kmem_cache_free(rp->e_slab, ep);
+ }
+
+ ret = mon_text_copy_to_user(rp, buf, nbytes);
mutex_unlock(&rp->printf_lock);
- kmem_cache_free(rp->e_slab, ep);
- return ptr.cnt;
+ return ret;
}

+/* ppos is not advanced since the llseek operation is not permitted. */
static ssize_t mon_text_read_u(struct file *file, char __user *buf,
- size_t nbytes, loff_t *ppos)
+ size_t nbytes, loff_t *ppos)
{
struct mon_reader_text *rp = file->private_data;
struct mon_event_text *ep;
struct mon_text_ptr ptr;
+ ssize_t ret;

- if (IS_ERR(ep = mon_text_read_wait(rp, file)))
- return PTR_ERR(ep);
mutex_lock(&rp->printf_lock);
- ptr.cnt = 0;
- ptr.pbuf = rp->printf_buf;
- ptr.limit = rp->printf_size;

- mon_text_read_head_u(rp, &ptr, ep);
- if (ep->type == 'E') {
- mon_text_read_statset(rp, &ptr, ep);
- } else if (ep->xfertype == USB_ENDPOINT_XFER_ISOC) {
- mon_text_read_isostat(rp, &ptr, ep);
- mon_text_read_isodesc(rp, &ptr, ep);
- } else if (ep->xfertype == USB_ENDPOINT_XFER_INT) {
- mon_text_read_intstat(rp, &ptr, ep);
- } else {
- mon_text_read_statset(rp, &ptr, ep);
+ if (rp->printf_togo == 0) {
+
+ ep = mon_text_read_wait(rp, file);
+ if (IS_ERR(ep)) {
+ mutex_unlock(&rp->printf_lock);
+ return PTR_ERR(ep);
+ }
+ ptr.cnt = 0;
+ ptr.pbuf = rp->printf_buf;
+ ptr.limit = rp->printf_size;
+
+ mon_text_read_head_u(rp, &ptr, ep);
+ if (ep->type == 'E') {
+ mon_text_read_statset(rp, &ptr, ep);
+ } else if (ep->xfertype == USB_ENDPOINT_XFER_ISOC) {
+ mon_text_read_isostat(rp, &ptr, ep);
+ mon_text_read_isodesc(rp, &ptr, ep);
+ } else if (ep->xfertype == USB_ENDPOINT_XFER_INT) {
+ mon_text_read_intstat(rp, &ptr, ep);
+ } else {
+ mon_text_read_statset(rp, &ptr, ep);
+ }
+ ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt,
+ " %d", ep->length);
+ mon_text_read_data(rp, &ptr, ep);
+
+ rp->printf_togo = ptr.cnt;
+ rp->printf_offset = 0;
+
+ kmem_cache_free(rp->e_slab, ep);
}
- ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt,
- " %d", ep->length);
- mon_text_read_data(rp, &ptr, ep);

- if (copy_to_user(buf, rp->printf_buf, ptr.cnt))
- ptr.cnt = -EFAULT;
+ ret = mon_text_copy_to_user(rp, buf, nbytes);
mutex_unlock(&rp->printf_lock);
- kmem_cache_free(rp->e_slab, ep);
- return ptr.cnt;
+ return ret;
}

static struct mon_event_text *mon_text_read_wait(struct mon_reader_text *rp,
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index 92ad61c0f133..f73009bc7268 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -58,10 +58,13 @@ config USB_SERIAL_SIMPLE
handles a wide range of very simple devices, all in one
driver. Specifically, it supports:
- Suunto ANT+ USB device.
+ - Medtronic CareLink USB device
- Fundamental Software dongle.
- Google USB serial devices
- HP4x calculators
- a number of Motorola phones
+ - Motorola Tetra devices
+ - Novatel Wireless GPS receivers
- Siemens USB/MPI adapter.
- ViVOtech ViVOpay USB device.
- Infineon Modem Flashloader USB interface
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index 749e1b674145..6947985ccfb0 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -2219,7 +2219,6 @@ static int write_cmd_usb(struct edgeport_port *edge_port,
/* something went wrong */
dev_err(dev, "%s - usb_submit_urb(write command) failed, status = %d\n",
__func__, status);
- usb_kill_urb(urb);
usb_free_urb(urb);
atomic_dec(&CmdUrbs);
return status;
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 3784bc166642..93e7fcf18088 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -385,6 +385,9 @@ static void option_instat_callback(struct urb *urb);
#define FOUR_G_SYSTEMS_PRODUCT_W14 0x9603
#define FOUR_G_SYSTEMS_PRODUCT_W100 0x9b01

+/* Fujisoft products */
+#define FUJISOFT_PRODUCT_FS040U 0x9b02
+
/* iBall 3.5G connect wireless modem */
#define IBALL_3_5G_CONNECT 0x9605

@@ -1908,6 +1911,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W100),
.driver_info = (kernel_ulong_t)&four_g_w100_blacklist
},
+ {USB_DEVICE(LONGCHEER_VENDOR_ID, FUJISOFT_PRODUCT_FS040U),
+ .driver_info = (kernel_ulong_t)&net_intf3_blacklist},
{ USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, SPEEDUP_PRODUCT_SU9800, 0xff) },
{ USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9801, 0xff),
.driver_info = (kernel_ulong_t)&net_intf3_blacklist },
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 4455b1fd8b70..9c3b02e2011e 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -39,6 +39,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_RSAQ2) },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_DCU11) },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_RSAQ3) },
+ { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_CHILITAG) },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_PHAROS) },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_ALDIGA) },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MMX) },
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index 09d9be88209e..f379e00920bd 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -17,6 +17,7 @@
#define PL2303_PRODUCT_ID_DCU11 0x1234
#define PL2303_PRODUCT_ID_PHAROS 0xaaa0
#define PL2303_PRODUCT_ID_RSAQ3 0xaaa2
+#define PL2303_PRODUCT_ID_CHILITAG 0xaaa8
#define PL2303_PRODUCT_ID_ALDIGA 0x0611
#define PL2303_PRODUCT_ID_MMX 0x0612
#define PL2303_PRODUCT_ID_GPRS 0x0609
diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
index 790452c7a9e4..a084d4578420 100644
--- a/drivers/usb/serial/usb-serial-simple.c
+++ b/drivers/usb/serial/usb-serial-simple.c
@@ -20,7 +20,7 @@
#include <linux/usb.h>
#include <linux/usb/serial.h>

-#define DEVICE(vendor, IDS) \
+#define DEVICE_N(vendor, IDS, nport) \
static const struct usb_device_id vendor##_id_table[] = { \
IDS(), \
{ }, \
@@ -31,9 +31,15 @@ static struct usb_serial_driver vendor##_device = { \
.name = #vendor, \
}, \
.id_table = vendor##_id_table, \
- .num_ports = 1, \
+ .num_ports = nport, \
};

+#define DEVICE(vendor, IDS) DEVICE_N(vendor, IDS, 1)
+
+/* Medtronic CareLink USB driver */
+#define CARELINK_IDS() \
+ { USB_DEVICE(0x0a21, 0x8001) } /* MMT-7305WW */
+DEVICE(carelink, CARELINK_IDS);

/* ZIO Motherboard USB driver */
#define ZIO_IDS() \
@@ -74,6 +80,16 @@ DEVICE(vivopay, VIVOPAY_IDS);
{ USB_DEVICE(0x22b8, 0x2c64) } /* Motorola V950 phone */
DEVICE(moto_modem, MOTO_IDS);

+/* Motorola Tetra driver */
+#define MOTOROLA_TETRA_IDS() \
+ { USB_DEVICE(0x0cad, 0x9011) } /* Motorola Solutions TETRA PEI */
+DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS);
+
+/* Novatel Wireless GPS driver */
+#define NOVATEL_IDS() \
+ { USB_DEVICE(0x09d7, 0x0100) } /* NovAtel FlexPack GPS */
+DEVICE_N(novatel_gps, NOVATEL_IDS, 3);
+
/* HP4x (48/49) Generic Serial driver */
#define HP4X_IDS() \
{ USB_DEVICE(0x03f0, 0x0121) }
@@ -92,12 +108,15 @@ DEVICE(siemens_mpi, SIEMENS_IDS);

/* All of the above structures mushed into two lists */
static struct usb_serial_driver * const serial_drivers[] = {
+ &carelink_device,
&zio_device,
&funsoft_device,
&flashloader_device,
&google_device,
&vivopay_device,
&moto_modem_device,
+ &motorola_tetra_device,
+ &novatel_gps_device,
&hp4x_device,
&suunto_device,
&siemens_mpi_device,
@@ -105,12 +124,15 @@ static struct usb_serial_driver * const serial_drivers[] = {
};

static const struct usb_device_id id_table[] = {
+ CARELINK_IDS(),
ZIO_IDS(),
FUNSOFT_IDS(),
FLASHLOADER_IDS(),
GOOGLE_IDS(),
VIVOPAY_IDS(),
MOTO_IDS(),
+ MOTOROLA_TETRA_IDS(),
+ NOVATEL_IDS(),
HP4X_IDS(),
SUUNTO_IDS(),
SIEMENS_IDS(),
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 87ecc65fe6e1..21d2d52a3b26 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -928,7 +928,8 @@ static int uas_eh_bus_reset_handler(struct scsi_cmnd *cmnd)
usb_unlock_device(udev);

if (err) {
- shost_printk(KERN_INFO, sdev->host, "%s FAILED\n", __func__);
+ shost_printk(KERN_INFO, sdev->host, "%s FAILED err %d\n",
+ __func__, err);
return FAILED;
}

@@ -1188,23 +1189,25 @@ static int uas_post_reset(struct usb_interface *intf)
struct Scsi_Host *shost = usb_get_intfdata(intf);
struct uas_dev_info *devinfo = (struct uas_dev_info *)shost->hostdata;
unsigned long flags;
+ int err;

if (devinfo->shutdown)
return 0;

- if (uas_configure_endpoints(devinfo) != 0) {
+ err = uas_configure_endpoints(devinfo);
+ if (err && err != -ENODEV)
shost_printk(KERN_ERR, shost,
- "%s: alloc streams error after reset", __func__);
- return 1;
- }
+ "%s: alloc streams error %d after reset",
+ __func__, err);

+ /* we must unblock the host in every case lest we deadlock */
spin_lock_irqsave(shost->host_lock, flags);
scsi_report_bus_reset(shost, 0);
spin_unlock_irqrestore(shost->host_lock, flags);

scsi_unblock_requests(shost);

- return 0;
+ return err ? 1 : 0;
}

static int uas_suspend(struct usb_interface *intf, pm_message_t message)
@@ -1232,10 +1235,13 @@ static int uas_reset_resume(struct usb_interface *intf)
struct Scsi_Host *shost = usb_get_intfdata(intf);
struct uas_dev_info *devinfo = (struct uas_dev_info *)shost->hostdata;
unsigned long flags;
+ int err;

- if (uas_configure_endpoints(devinfo) != 0) {
+ err = uas_configure_endpoints(devinfo);
+ if (err) {
shost_printk(KERN_ERR, shost,
- "%s: alloc streams error after reset", __func__);
+ "%s: alloc streams error %d after reset",
+ __func__, err);
return -EIO;
}

diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 2827ed2cd23f..01dcd204648f 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -2004,6 +2004,13 @@ UNUSUAL_DEV( 0x152d, 0x2566, 0x0114, 0x0114,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_BROKEN_FUA ),

+/* Reported by Teijo Kinnunen <teijo.kinnunen@xxxxxxxxx> */
+UNUSUAL_DEV( 0x152d, 0x2567, 0x0117, 0x0117,
+ "JMicron",
+ "USB to ATA/ATAPI Bridge",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_BROKEN_FUA ),
+
/* Reported-by George Cherian <george.cherian@xxxxxxxxxx> */
UNUSUAL_DEV(0x152d, 0x9561, 0x0000, 0x9999,
"JMicron",
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 8dae2f724a35..d812f9d71011 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -1009,6 +1009,7 @@ static long vhost_net_reset_owner(struct vhost_net *n)
}
vhost_net_stop(n, &tx_sock, &rx_sock);
vhost_net_flush(n);
+ vhost_dev_stop(&n->dev);
vhost_dev_reset_owner(&n->dev, memory);
vhost_net_vq_reset(n);
done:
diff --git a/drivers/video/console/dummycon.c b/drivers/video/console/dummycon.c
index 40bec8d64b0a..003500802168 100644
--- a/drivers/video/console/dummycon.c
+++ b/drivers/video/console/dummycon.c
@@ -71,7 +71,6 @@ const struct consw dummy_con = {
.con_switch = DUMMY,
.con_blank = DUMMY,
.con_font_set = DUMMY,
- .con_font_get = DUMMY,
.con_font_default = DUMMY,
.con_font_copy = DUMMY,
.con_set_palette = DUMMY,
diff --git a/drivers/video/fbdev/atmel_lcdfb.c b/drivers/video/fbdev/atmel_lcdfb.c
index d36e830d6fc6..2820ca3ec3cb 100644
--- a/drivers/video/fbdev/atmel_lcdfb.c
+++ b/drivers/video/fbdev/atmel_lcdfb.c
@@ -1101,12 +1101,14 @@ static int atmel_lcdfb_of_init(struct atmel_lcdfb_info *sinfo)
timings = of_get_display_timings(display_np);
if (!timings) {
dev_err(dev, "failed to get display timings\n");
+ ret = -EINVAL;
goto put_display_node;
}

- timings_np = of_find_node_by_name(display_np, "display-timings");
+ timings_np = of_get_child_by_name(display_np, "display-timings");
if (!timings_np) {
dev_err(dev, "failed to find display-timings node\n");
+ ret = -ENODEV;
goto put_display_node;
}

@@ -1124,6 +1126,12 @@ static int atmel_lcdfb_of_init(struct atmel_lcdfb_info *sinfo)
fb_add_videomode(&fb_vm, &info->modelist);
}

+ /*
+ * FIXME: Make sure we are not referencing any fields in display_np
+ * and timings_np and drop our references to them before returning to
+ * avoid leaking the nodes on probe deferral and driver unbind.
+ */
+
return 0;

put_timings_node:
diff --git a/drivers/video/fbdev/sbuslib.c b/drivers/video/fbdev/sbuslib.c
index a350209ffbd3..31c301d6be62 100644
--- a/drivers/video/fbdev/sbuslib.c
+++ b/drivers/video/fbdev/sbuslib.c
@@ -121,7 +121,7 @@ int sbusfb_ioctl_helper(unsigned long cmd, unsigned long arg,
unsigned char __user *ured;
unsigned char __user *ugreen;
unsigned char __user *ublue;
- int index, count, i;
+ unsigned int index, count, i;

if (get_user(index, &c->index) ||
__get_user(count, &c->count) ||
@@ -160,7 +160,7 @@ int sbusfb_ioctl_helper(unsigned long cmd, unsigned long arg,
unsigned char __user *ugreen;
unsigned char __user *ublue;
struct fb_cmap *cmap = &info->cmap;
- int index, count, i;
+ unsigned int index, count, i;
u8 red, green, blue;

if (get_user(index, &c->index) ||
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index b254b7d20dfa..7e93925ace03 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -763,8 +763,8 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
mutex_unlock(&irq_mapping_update_lock);
return irq;
error_irq:
- for (; i >= 0; i--)
- __unbind_from_irq(irq + i);
+ while (nvec--)
+ __unbind_from_irq(irq + nvec);
mutex_unlock(&irq_mapping_update_lock);
return ret;
}
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index bf1940706422..9e6a85104a20 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -131,6 +131,8 @@ static void do_suspend(void)
goto out_resume;
}

+ xen_arch_suspend();
+
si.cancelled = 1;

err = stop_machine(xen_suspend, &si, cpumask_of(0));
@@ -148,11 +150,12 @@ static void do_suspend(void)
si.cancelled = 1;
}

+ xen_arch_resume();
+
out_resume:
- if (!si.cancelled) {
- xen_arch_resume();
+ if (!si.cancelled)
xs_resume();
- } else
+ else
xs_suspend_cancel();

dpm_resume_end(si.cancelled ? PMSG_THAW : PMSG_RESTORE);
diff --git a/fs/aio.c b/fs/aio.c
index 3e6d6974434c..d01069c5e4b5 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -68,15 +68,20 @@ struct aio_ring {
#define AIO_RING_PAGES 8

struct kioctx_table {
- struct rcu_head rcu;
- unsigned nr;
- struct kioctx *table[];
+ struct rcu_head rcu;
+ unsigned nr;
+ struct kioctx __rcu *table[];
};

struct kioctx_cpu {
unsigned reqs_available;
};

+struct ctx_rq_wait {
+ struct completion comp;
+ atomic_t count;
+};
+
struct kioctx {
struct percpu_ref users;
atomic_t dead;
@@ -110,12 +115,13 @@ struct kioctx {
struct page **ring_pages;
long nr_pages;

- struct work_struct free_work;
+ struct rcu_head free_rcu;
+ struct work_struct free_work; /* see free_ioctx() */

/*
* signals when all in-flight requests are done
*/
- struct completion *requests_done;
+ struct ctx_rq_wait *rq_wait;

struct {
/*
@@ -507,6 +513,12 @@ static int kiocb_cancel(struct kiocb *kiocb)
return cancel(kiocb);
}

+/*
+ * free_ioctx() should be RCU delayed to synchronize against the RCU
+ * protected lookup_ioctx() and also needs process context to call
+ * aio_free_ring(), so the double bouncing through kioctx->free_rcu and
+ * ->free_work.
+ */
static void free_ioctx(struct work_struct *work)
{
struct kioctx *ctx = container_of(work, struct kioctx, free_work);
@@ -518,16 +530,24 @@ static void free_ioctx(struct work_struct *work)
kmem_cache_free(kioctx_cachep, ctx);
}

+static void free_ioctx_rcufn(struct rcu_head *head)
+{
+ struct kioctx *ctx = container_of(head, struct kioctx, free_rcu);
+
+ INIT_WORK(&ctx->free_work, free_ioctx);
+ schedule_work(&ctx->free_work);
+}
+
static void free_ioctx_reqs(struct percpu_ref *ref)
{
struct kioctx *ctx = container_of(ref, struct kioctx, reqs);

/* At this point we know that there are no any in-flight requests */
- if (ctx->requests_done)
- complete(ctx->requests_done);
+ if (ctx->rq_wait && atomic_dec_and_test(&ctx->rq_wait->count))
+ complete(&ctx->rq_wait->comp);

- INIT_WORK(&ctx->free_work, free_ioctx);
- schedule_work(&ctx->free_work);
+ /* Synchronize against RCU protected table->table[] dereferences */
+ call_rcu(&ctx->free_rcu, free_ioctx_rcufn);
}

/*
@@ -563,16 +583,14 @@ static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
struct aio_ring *ring;

spin_lock(&mm->ioctx_lock);
- rcu_read_lock();
- table = rcu_dereference(mm->ioctx_table);
+ table = rcu_dereference_raw(mm->ioctx_table);

while (1) {
if (table)
for (i = 0; i < table->nr; i++)
- if (!table->table[i]) {
+ if (!rcu_access_pointer(table->table[i])) {
ctx->id = i;
- table->table[i] = ctx;
- rcu_read_unlock();
+ rcu_assign_pointer(table->table[i], ctx);
spin_unlock(&mm->ioctx_lock);

/* While kioctx setup is in progress,
@@ -586,8 +604,6 @@ static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
}

new_nr = (table ? table->nr : 1) * 4;
-
- rcu_read_unlock();
spin_unlock(&mm->ioctx_lock);

table = kzalloc(sizeof(*table) + sizeof(struct kioctx *) *
@@ -598,8 +614,7 @@ static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
table->nr = new_nr;

spin_lock(&mm->ioctx_lock);
- rcu_read_lock();
- old = rcu_dereference(mm->ioctx_table);
+ old = rcu_dereference_raw(mm->ioctx_table);

if (!old) {
rcu_assign_pointer(mm->ioctx_table, table);
@@ -740,7 +755,7 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
* the rapid destruction of the kioctx.
*/
static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
- struct completion *requests_done)
+ struct ctx_rq_wait *wait)
{
struct kioctx_table *table;

@@ -749,15 +764,12 @@ static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,


spin_lock(&mm->ioctx_lock);
- rcu_read_lock();
- table = rcu_dereference(mm->ioctx_table);
-
- WARN_ON(ctx != table->table[ctx->id]);
- table->table[ctx->id] = NULL;
- rcu_read_unlock();
+ table = rcu_dereference_raw(mm->ioctx_table);
+ WARN_ON(ctx != rcu_access_pointer(table->table[ctx->id]));
+ RCU_INIT_POINTER(table->table[ctx->id], NULL);
spin_unlock(&mm->ioctx_lock);

- /* percpu_ref_kill() will do the necessary call_rcu() */
+ /* free_ioctx_reqs() will do the necessary RCU synchronization */
wake_up_all(&ctx->wait);

/*
@@ -772,7 +784,7 @@ static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
if (ctx->mmap_size)
vm_munmap(ctx->mmap_base, ctx->mmap_size);

- ctx->requests_done = requests_done;
+ ctx->rq_wait = wait;
percpu_ref_kill(&ctx->users);
return 0;
}
@@ -803,46 +815,44 @@ EXPORT_SYMBOL(wait_on_sync_kiocb);
*/
void exit_aio(struct mm_struct *mm)
{
- struct kioctx_table *table;
- struct kioctx *ctx;
- unsigned i = 0;
+ struct kioctx_table *table = rcu_dereference_raw(mm->ioctx_table);
+ struct ctx_rq_wait wait;
+ int i, skipped;

- while (1) {
- struct completion requests_done =
- COMPLETION_INITIALIZER_ONSTACK(requests_done);
+ if (!table)
+ return;

- rcu_read_lock();
- table = rcu_dereference(mm->ioctx_table);
+ atomic_set(&wait.count, table->nr);
+ init_completion(&wait.comp);

- do {
- if (!table || i >= table->nr) {
- rcu_read_unlock();
- rcu_assign_pointer(mm->ioctx_table, NULL);
- if (table)
- kfree(table);
- return;
- }
+ skipped = 0;
+ for (i = 0; i < table->nr; ++i) {
+ struct kioctx *ctx =
+ rcu_dereference_protected(table->table[i], true);

- ctx = table->table[i++];
- } while (!ctx);
-
- rcu_read_unlock();
+ if (!ctx) {
+ skipped++;
+ continue;
+ }

/*
- * We don't need to bother with munmap() here -
- * exit_mmap(mm) is coming and it'll unmap everything.
- * Since aio_free_ring() uses non-zero ->mmap_size
- * as indicator that it needs to unmap the area,
- * just set it to 0; aio_free_ring() is the only
- * place that uses ->mmap_size, so it's safe.
+ * We don't need to bother with munmap() here - exit_mmap(mm)
+ * is coming and it'll unmap everything. And we simply can't,
+ * this is not necessarily our ->mm.
+ * Since kill_ioctx() uses non-zero ->mmap_size as indicator
+ * that it needs to unmap the area, just set it to 0.
*/
ctx->mmap_size = 0;
+ kill_ioctx(mm, ctx, &wait);
+ }

- kill_ioctx(mm, ctx, &requests_done);
-
+ if (!atomic_sub_and_test(skipped, &wait.count)) {
/* Wait until all IO for the context are done. */
- wait_for_completion(&requests_done);
+ wait_for_completion(&wait.comp);
}
+
+ RCU_INIT_POINTER(mm->ioctx_table, NULL);
+ kfree(table);
}

static void put_reqs_available(struct kioctx *ctx, unsigned nr)
@@ -1013,7 +1023,7 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id)
if (!table || id >= table->nr)
goto out;

- ctx = table->table[id];
+ ctx = rcu_dereference(table->table[id]);
if (ctx && ctx->user_id == ctx_id) {
percpu_ref_get(&ctx->users);
ret = ctx;
@@ -1318,15 +1328,17 @@ SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
{
struct kioctx *ioctx = lookup_ioctx(ctx);
if (likely(NULL != ioctx)) {
- struct completion requests_done =
- COMPLETION_INITIALIZER_ONSTACK(requests_done);
+ struct ctx_rq_wait wait;
int ret;

+ init_completion(&wait.comp);
+ atomic_set(&wait.count, 1);
+
/* Pass requests_done to kill_ioctx() where it can be set
* in a thread-safe way. If we try to set it here then we have
* a race condition if two io_destroy() called simultaneously.
*/
- ret = kill_ioctx(current->mm, ioctx, &requests_done);
+ ret = kill_ioctx(current->mm, ioctx, &wait);
percpu_ref_put(&ioctx->users);

/* Wait until all IO for the context are done. Otherwise kernel
@@ -1334,7 +1346,7 @@ SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
* is destroyed.
*/
if (!ret)
- wait_for_completion(&requests_done);
+ wait_for_completion(&wait.comp);

return ret;
}
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 93afd58764e6..079280119a41 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -979,7 +979,16 @@ static int find_parent_nodes(struct btrfs_trans_handle *trans,

while (!list_empty(&prefs)) {
ref = list_first_entry(&prefs, struct __prelim_ref, list);
- WARN_ON(ref->count < 0);
+ /*
+ * ref->count < 0 can happen here if there are delayed
+ * refs with a node->action of BTRFS_DROP_DELAYED_REF.
+ * prelim_ref_insert() relies on this when merging
+ * identical refs to keep the overall count correct.
+ * prelim_ref_insert() will merge only those refs
+ * which compare identically. Any refs having
+ * e.g. different offsets would not be merged,
+ * and would retain their original ref->count < 0.
+ */
if (roots && ref->count && ref->root_id && ref->parent == 0) {
/* no parent == root of tree */
ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index e4c07b2cf9b0..65d2b3130188 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1235,8 +1235,11 @@ static noinline int run_delalloc_nocow(struct inode *inode,
leaf = path->nodes[0];
if (path->slots[0] >= btrfs_header_nritems(leaf)) {
ret = btrfs_next_leaf(root, path);
- if (ret < 0)
+ if (ret < 0) {
+ if (cow_start != (u64)-1)
+ cur_offset = cow_start;
goto error;
+ }
if (ret > 0)
break;
leaf = path->nodes[0];
@@ -1847,7 +1850,15 @@ static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
goto out;
}

- btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state);
+ ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
+ &cached_state);
+ if (ret) {
+ mapping_set_error(page->mapping, ret);
+ end_extent_writepage(page, ret, page_start, page_end);
+ ClearPageChecked(page);
+ goto out;
+ }
+
ClearPageChecked(page);
set_page_dirty(page);
out:
@@ -3071,12 +3082,17 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
if (insert >= 1) {
ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode));
if (ret) {
- atomic_dec(&root->orphan_inodes);
if (reserve) {
clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
&BTRFS_I(inode)->runtime_flags);
btrfs_orphan_release_metadata(inode);
}
+ /*
+ * btrfs_orphan_commit_root may race with us and set
+ * ->orphan_block_rsv to zero, in order to avoid that,
+ * decrease ->orphan_inodes after everything is done.
+ */
+ atomic_dec(&root->orphan_inodes);
if (ret != -EEXIST) {
clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
&BTRFS_I(inode)->runtime_flags);
@@ -3108,28 +3124,26 @@ static int btrfs_orphan_del(struct btrfs_trans_handle *trans,
{
struct btrfs_root *root = BTRFS_I(inode)->root;
int delete_item = 0;
- int release_rsv = 0;
int ret = 0;

- spin_lock(&root->orphan_lock);
if (test_and_clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
&BTRFS_I(inode)->runtime_flags))
delete_item = 1;

+ if (delete_item && trans)
+ ret = btrfs_del_orphan_item(trans, root, btrfs_ino(inode));
+
if (test_and_clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
&BTRFS_I(inode)->runtime_flags))
- release_rsv = 1;
- spin_unlock(&root->orphan_lock);
+ btrfs_orphan_release_metadata(inode);

- if (delete_item) {
+ /*
+ * btrfs_orphan_commit_root may race with us and set ->orphan_block_rsv
+ * to zero, in order to avoid that, decrease ->orphan_inodes after
+ * everything is done.
+ */
+ if (delete_item)
atomic_dec(&root->orphan_inodes);
- if (trans)
- ret = btrfs_del_orphan_item(trans, root,
- btrfs_ino(inode));
- }
-
- if (release_rsv)
- btrfs_orphan_release_metadata(inode);

return ret;
}
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index a0fa16033006..fb049cb4571d 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -2197,6 +2197,9 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
clean_tree_block(trans, root, next);
btrfs_wait_tree_block_writeback(next);
btrfs_tree_unlock(next);
+ } else {
+ if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
+ clear_extent_buffer_dirty(next);
}

WARN_ON(root_owner !=
@@ -2275,6 +2278,9 @@ static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
clean_tree_block(trans, root, next);
btrfs_wait_tree_block_writeback(next);
btrfs_tree_unlock(next);
+ } else {
+ if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
+ clear_extent_buffer_dirty(next);
}

WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID);
@@ -2351,6 +2357,9 @@ static int walk_log_tree(struct btrfs_trans_handle *trans,
clean_tree_block(trans, log, next);
btrfs_wait_tree_block_writeback(next);
btrfs_tree_unlock(next);
+ } else {
+ if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
+ clear_extent_buffer_dirty(next);
}

WARN_ON(log->root_key.objectid !=
@@ -2722,13 +2731,14 @@ static void free_log_tree(struct btrfs_trans_handle *trans,

while (1) {
ret = find_first_extent_bit(&log->dirty_log_pages,
- 0, &start, &end, EXTENT_DIRTY | EXTENT_NEW,
+ 0, &start, &end,
+ EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT,
NULL);
if (ret)
break;

clear_extent_bits(&log->dirty_log_pages, start, end,
- EXTENT_DIRTY | EXTENT_NEW, GFP_NOFS);
+ EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT, GFP_NOFS);
}

/*
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 81bec9fd8f19..52c30fd35a81 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -4241,10 +4241,13 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
if (devs_max && ndevs > devs_max)
ndevs = devs_max;
/*
- * the primary goal is to maximize the number of stripes, so use as many
- * devices as possible, even if the stripes are not maximum sized.
+ * The primary goal is to maximize the number of stripes, so use as
+ * many devices as possible, even if the stripes are not maximum sized.
+ *
+ * The DUP profile stores more than one stripe per device, the
+ * max_avail is the total size so we have to adjust.
*/
- stripe_size = devices_info[ndevs-1].max_avail;
+ stripe_size = div_u64(devices_info[ndevs - 1].max_avail, dev_stripes);
num_stripes = ndevs * dev_stripes;

/*
@@ -4284,8 +4287,6 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
stripe_size = devices_info[ndevs-1].max_avail;
}

- do_div(stripe_size, dev_stripes);
-
/* align to BTRFS_STRIPE_LEN */
do_div(stripe_size, raid_stripe_len);
stripe_size *= raid_stripe_len;
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index f1aa100758df..cbab714a3a24 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -303,9 +303,8 @@ int calc_lanman_hash(const char *password, const char *cryptkey, bool encrypt,
{
int i;
int rc;
- char password_with_pad[CIFS_ENCPWD_SIZE];
+ char password_with_pad[CIFS_ENCPWD_SIZE] = {0};

- memset(password_with_pad, 0, CIFS_ENCPWD_SIZE);
if (password)
strncpy(password_with_pad, password, CIFS_ENCPWD_SIZE);

diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 36344cb2f1ca..669d1a0a7358 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -6366,9 +6366,7 @@ CIFSSMBSetEA(const unsigned int xid, struct cifs_tcon *tcon,
pSMB->InformationLevel =
cpu_to_le16(SMB_SET_FILE_EA);

- parm_data =
- (struct fealist *) (((char *) &pSMB->hdr.Protocol) +
- offset);
+ parm_data = (void *)pSMB + offsetof(struct smb_hdr, Protocol) + offset;
pSMB->ParameterOffset = cpu_to_le16(param_offset);
pSMB->DataOffset = cpu_to_le16(offset);
pSMB->SetupCount = 1;
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index d7be06289ff8..9ba78c4ca138 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -1614,6 +1614,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
tmp_end++;
if (!(tmp_end < end && tmp_end[1] == delim)) {
/* No it is not. Set the password to NULL */
+ kzfree(vol->password);
vol->password = NULL;
break;
}
@@ -1651,6 +1652,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
options = end;
}

+ kzfree(vol->password);
/* Now build new password string */
temp_len = strlen(value);
vol->password = kzalloc(temp_len+1, GFP_KERNEL);
@@ -4037,7 +4039,7 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
reset_cifs_unix_caps(0, tcon, NULL, vol_info);
out:
kfree(vol_info->username);
- kfree(vol_info->password);
+ kzfree(vol_info->password);
kfree(vol_info);

return tcon;
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index a1feb13c7590..ebe39ef0616f 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -3116,20 +3116,18 @@ static struct vm_operations_struct cifs_file_vm_ops = {

int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
{
- int rc, xid;
+ int xid, rc = 0;
struct inode *inode = file_inode(file);

xid = get_xid();

- if (!CIFS_CACHE_READ(CIFS_I(inode))) {
+ if (!CIFS_CACHE_READ(CIFS_I(inode)))
rc = cifs_zap_mapping(inode);
- if (rc)
- return rc;
- }
-
- rc = generic_file_mmap(file, vma);
- if (rc == 0)
+ if (!rc)
+ rc = generic_file_mmap(file, vma);
+ if (!rc)
vma->vm_ops = &cifs_file_vm_ops;
+
free_xid(xid);
return rc;
}
@@ -3139,16 +3137,16 @@ int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
int rc, xid;

xid = get_xid();
+
rc = cifs_revalidate_file(file);
- if (rc) {
+ if (rc)
cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
rc);
- free_xid(xid);
- return rc;
- }
- rc = generic_file_mmap(file, vma);
- if (rc == 0)
+ if (!rc)
+ rc = generic_file_mmap(file, vma);
+ if (!rc)
vma->vm_ops = &cifs_file_vm_ops;
+
free_xid(xid);
return rc;
}
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 9f0808335536..f03fecafc5d5 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -99,14 +99,11 @@ sesInfoFree(struct cifs_ses *buf_to_free)
kfree(buf_to_free->serverOS);
kfree(buf_to_free->serverDomain);
kfree(buf_to_free->serverNOS);
- if (buf_to_free->password) {
- memset(buf_to_free->password, 0, strlen(buf_to_free->password));
- kfree(buf_to_free->password);
- }
+ kzfree(buf_to_free->password);
kfree(buf_to_free->user_name);
kfree(buf_to_free->domainName);
- kfree(buf_to_free->auth_key.response);
- kfree(buf_to_free);
+ kzfree(buf_to_free->auth_key.response);
+ kzfree(buf_to_free);
}

struct cifs_tcon *
@@ -137,10 +134,7 @@ tconInfoFree(struct cifs_tcon *buf_to_free)
}
atomic_dec(&tconInfoAllocCount);
kfree(buf_to_free->nativeFileSystem);
- if (buf_to_free->password) {
- memset(buf_to_free->password, 0, strlen(buf_to_free->password));
- kfree(buf_to_free->password);
- }
+ kzfree(buf_to_free->password);
kfree(buf_to_free);
}

diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 038e428b5173..422028a7baf5 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -541,8 +541,7 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
}

/* check validate negotiate info response matches what we got earlier */
- if (pneg_rsp->Dialect !=
- cpu_to_le16(tcon->ses->server->vals->protocol_id))
+ if (pneg_rsp->Dialect != cpu_to_le16(tcon->ses->server->dialect))
goto vneg_out;

if (pneg_rsp->SecurityMode != cpu_to_le16(tcon->ses->server->sec_mode))
@@ -596,6 +595,7 @@ SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
*/
kfree(ses->auth_key.response);
ses->auth_key.response = NULL;
+ ses->auth_key.len = 0;

/*
* If memory allocation is successful, caller of this function
@@ -756,6 +756,7 @@ SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
rc = server->ops->generate_signingkey(ses);
kfree(ses->auth_key.response);
ses->auth_key.response = NULL;
+ ses->auth_key.len = 0;
if (rc) {
cifs_dbg(FYI,
"SMB3 session key generation failed\n");
@@ -780,6 +781,7 @@ SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
if (!server->sign) {
kfree(ses->auth_key.response);
ses->auth_key.response = NULL;
+ ses->auth_key.len = 0;
}
kfree(ses->ntlmssp);

diff --git a/fs/dcache.c b/fs/dcache.c
index 8a84b99ea8d4..e1380da5c183 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -590,11 +590,16 @@ static inline struct dentry *lock_parent(struct dentry *dentry)
spin_unlock(&parent->d_lock);
goto again;
}
- rcu_read_unlock();
- if (parent != dentry)
+ if (parent != dentry) {
spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
- else
+ if (unlikely(dentry->d_lockref.count < 0)) {
+ spin_unlock(&parent->d_lock);
+ parent = NULL;
+ }
+ } else {
parent = NULL;
+ }
+ rcu_read_unlock();
return parent;
}

diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index 5fb1195f602f..0a0f8a89d425 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -323,6 +323,7 @@ static ext4_fsblk_t ext4_valid_block_bitmap(struct super_block *sb,
struct ext4_sb_info *sbi = EXT4_SB(sb);
ext4_grpblk_t offset;
ext4_grpblk_t next_zero_bit;
+ ext4_grpblk_t max_bit = EXT4_CLUSTERS_PER_GROUP(sb);
ext4_fsblk_t blk;
ext4_fsblk_t group_first_block;

@@ -340,20 +341,25 @@ static ext4_fsblk_t ext4_valid_block_bitmap(struct super_block *sb,
/* check whether block bitmap block number is set */
blk = ext4_block_bitmap(sb, desc);
offset = blk - group_first_block;
- if (!ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data))
+ if (offset < 0 || EXT4_B2C(sbi, offset) >= max_bit ||
+ !ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data))
/* bad block bitmap */
return blk;

/* check whether the inode bitmap block number is set */
blk = ext4_inode_bitmap(sb, desc);
offset = blk - group_first_block;
- if (!ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data))
+ if (offset < 0 || EXT4_B2C(sbi, offset) >= max_bit ||
+ !ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data))
/* bad block bitmap */
return blk;

/* check whether the inode table block number is set */
blk = ext4_inode_table(sb, desc);
offset = blk - group_first_block;
+ if (offset < 0 || EXT4_B2C(sbi, offset) >= max_bit ||
+ EXT4_B2C(sbi, offset + sbi->s_itb_per_group) >= max_bit)
+ return blk;
next_zero_bit = ext4_find_next_zero_bit(bh->b_data,
EXT4_B2C(sbi, offset + EXT4_SB(sb)->s_itb_per_group),
EXT4_B2C(sbi, offset));
@@ -416,6 +422,7 @@ struct buffer_head *
ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
{
struct ext4_group_desc *desc;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
struct buffer_head *bh;
ext4_fsblk_t bitmap_blk;

@@ -423,6 +430,12 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
if (!desc)
return NULL;
bitmap_blk = ext4_block_bitmap(sb, desc);
+ if ((bitmap_blk <= le32_to_cpu(sbi->s_es->s_first_data_block)) ||
+ (bitmap_blk >= ext4_blocks_count(sbi->s_es))) {
+ ext4_error(sb, "Invalid block bitmap block %llu in "
+ "block_group %u", bitmap_blk, block_group);
+ return NULL;
+ }
bh = sb_getblk(sb, bitmap_blk);
if (unlikely(!bh)) {
ext4_error(sb, "Cannot get buffer for block bitmap - "
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index a6620b1fe334..62fbd4f0ff51 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -133,6 +133,12 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
return NULL;

bitmap_blk = ext4_inode_bitmap(sb, desc);
+ if ((bitmap_blk <= le32_to_cpu(sbi->s_es->s_first_data_block)) ||
+ (bitmap_blk >= ext4_blocks_count(sbi->s_es))) {
+ ext4_error(sb, "Invalid inode bitmap blk %llu in "
+ "block_group %u", bitmap_blk, block_group);
+ return NULL;
+ }
bh = sb_getblk(sb, bitmap_blk);
if (unlikely(!bh)) {
ext4_error(sb, "Cannot read inode bitmap - "
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 65579972c4ba..e90fa513c14c 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -4198,6 +4198,12 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
goto bad_inode;
raw_inode = ext4_raw_inode(&iloc);

+ if ((ino == EXT4_ROOT_INO) && (raw_inode->i_links_count == 0)) {
+ EXT4_ERROR_INODE(inode, "root inode unallocated");
+ ret = -EIO;
+ goto bad_inode;
+ }
+
if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index af0267fbecf4..ca1036fb0971 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -678,6 +678,7 @@ __acquires(bitlock)
}

ext4_unlock_group(sb, grp);
+ ext4_commit_super(sb, 1);
ext4_handle_error(sb);
/*
* We only get here in the ERRORS_RO case; relocking the group
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index cd4d5dccc9fd..a927d9f08e70 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -273,6 +273,9 @@ int create_flush_cmd_control(struct f2fs_sb_info *sbi)
spin_lock_init(&fcc->issue_lock);
init_waitqueue_head(&fcc->flush_wait_queue);
sbi->sm_info->cmd_control_info = fcc;
+ if (!test_opt(sbi, FLUSH_MERGE))
+ return err;
+
fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi,
"f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev));
if (IS_ERR(fcc->f2fs_issue_flush)) {
@@ -1905,7 +1908,7 @@ int build_segment_manager(struct f2fs_sb_info *sbi)
sm_info->nr_discards = 0;
sm_info->max_discards = 0;

- if (test_opt(sbi, FLUSH_MERGE) && !f2fs_readonly(sbi->sb)) {
+ if (!f2fs_readonly(sbi->sb)) {
err = create_flush_cmd_control(sbi);
if (err)
return err;
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 3a4af9d401c3..69546f170ccf 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -97,6 +97,16 @@ static void huge_pagevec_release(struct pagevec *pvec)
pagevec_reinit(pvec);
}

+/*
+ * Mask used when checking the page offset value passed in via system
+ * calls. This value will be converted to a loff_t which is signed.
+ * Therefore, we want to check the upper PAGE_SHIFT + 1 bits of the
+ * value. The extra bit (- 1 in the shift value) is to take the sign
+ * bit into account.
+ */
+#define PGOFF_LOFFT_MAX \
+ (((1UL << (PAGE_SHIFT + 1)) - 1) << (BITS_PER_LONG - (PAGE_SHIFT + 1)))
+
static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
struct inode *inode = file_inode(file);
@@ -115,17 +125,27 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
vma->vm_ops = &hugetlb_vm_ops;

+ /*
+ * page based offset in vm_pgoff could be sufficiently large to
+ * overflow a (l)off_t when converted to byte offset.
+ */
+ if (vma->vm_pgoff & PGOFF_LOFFT_MAX)
+ return -EINVAL;
+
+ /* must be huge page aligned */
if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
return -EINVAL;

vma_len = (loff_t)(vma->vm_end - vma->vm_start);
+ len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
+ /* check for overflow */
+ if (len < vma_len)
+ return -EINVAL;

mutex_lock(&inode->i_mutex);
file_accessed(file);

ret = -ENOMEM;
- len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
-
if (hugetlb_reserve_pages(inode,
vma->vm_pgoff >> huge_page_order(h),
len >> huge_page_shift(h), vma,
@@ -135,7 +155,7 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
ret = 0;
hugetlb_prefault_arch_hook(vma->vm_mm);
if (vma->vm_flags & VM_WRITE && inode->i_size < len)
- inode->i_size = len;
+ i_size_write(inode, len);
out:
mutex_unlock(&inode->i_mutex);

diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
index bb9460ea36e8..ff77c6041e6f 100644
--- a/fs/jffs2/fs.c
+++ b/fs/jffs2/fs.c
@@ -363,7 +363,6 @@ struct inode *jffs2_iget(struct super_block *sb, unsigned long ino)
ret = -EIO;
error:
mutex_unlock(&f->sem);
- jffs2_do_clear_inode(c, f);
iget_failed(inode);
return ERR_PTR(ret);
}
diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
index b120e85aa7b9..2f442d9f0cf9 100644
--- a/fs/kernfs/file.c
+++ b/fs/kernfs/file.c
@@ -267,7 +267,7 @@ static ssize_t kernfs_fop_write(struct file *file, const char __user *user_buf,
{
struct kernfs_open_file *of = kernfs_of(file);
const struct kernfs_ops *ops;
- size_t len;
+ ssize_t len;
char *buf;

if (of->atomic_write_len) {
diff --git a/fs/namei.c b/fs/namei.c
index 134ca0e00d81..2ec89079ddbd 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -496,9 +496,10 @@ EXPORT_SYMBOL(path_put);
static bool path_connected(const struct path *path)
{
struct vfsmount *mnt = path->mnt;
+ struct super_block *sb = mnt->mnt_sb;

- /* Only bind mounts can have disconnected paths */
- if (mnt->mnt_root == mnt->mnt_sb->s_root)
+ /* Bind mounts and multi-root filesystems can have disconnected paths */
+ if (!(sb->s_iflags & SB_I_MULTIROOT) && (mnt->mnt_root == sb->s_root))
return true;

return is_subdir(path->dentry, mnt->mnt_root);
diff --git a/fs/ncpfs/ncplib_kernel.c b/fs/ncpfs/ncplib_kernel.c
index 482387532f54..9fb4fe875a75 100644
--- a/fs/ncpfs/ncplib_kernel.c
+++ b/fs/ncpfs/ncplib_kernel.c
@@ -980,6 +980,10 @@ ncp_read_kernel(struct ncp_server *server, const char *file_id,
goto out;
}
*bytes_read = ncp_reply_be16(server, 0);
+ if (*bytes_read > to_read) {
+ result = -EINVAL;
+ goto out;
+ }
source = ncp_reply_data(server, 2 + (offset & 1));

memcpy(target, source, *bytes_read);
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 1d36353ce13c..42fb879ccd3a 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -732,10 +732,8 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)

spin_lock(&dreq->lock);

- if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
- dreq->flags = 0;
+ if (test_bit(NFS_IOHDR_ERROR, &hdr->flags))
dreq->error = hdr->error;
- }
if (dreq->error != 0)
bit = NFS_IOHDR_ERROR;
else {
diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c
index 567983d2c0eb..11450097e4ed 100644
--- a/fs/nfs/idmap.c
+++ b/fs/nfs/idmap.c
@@ -577,9 +577,13 @@ static int nfs_idmap_legacy_upcall(struct key_construction *cons,
struct idmap_msg *im;
struct idmap *idmap = (struct idmap *)aux;
struct key *key = cons->key;
- int ret = -ENOMEM;
+ int ret = -ENOKEY;
+
+ if (!aux)
+ goto out1;

/* msg and im are freed in idmap_pipe_destroy_msg */
+ ret = -ENOMEM;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
goto out1;
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 609398d01da7..4336eb2ab047 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -249,7 +249,6 @@ int nfs_iocounter_wait(struct nfs_io_counter *c);
extern const struct nfs_pageio_ops nfs_pgio_rw_ops;
struct nfs_pgio_header *nfs_pgio_header_alloc(const struct nfs_rw_ops *);
void nfs_pgio_header_free(struct nfs_pgio_header *);
-void nfs_pgio_data_destroy(struct nfs_pgio_header *);
int nfs_generic_pgio(struct nfs_pageio_descriptor *, struct nfs_pgio_header *);
int nfs_initiate_pgio(struct rpc_clnt *, struct nfs_pgio_header *,
const struct rpc_call_ops *, int, int);
diff --git a/fs/nfs/nfs4sysctl.c b/fs/nfs/nfs4sysctl.c
index b6ebe7e445f6..b83056329370 100644
--- a/fs/nfs/nfs4sysctl.c
+++ b/fs/nfs/nfs4sysctl.c
@@ -31,7 +31,7 @@ static struct ctl_table nfs4_cb_sysctls[] = {
.data = &nfs_idmap_cache_timeout,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
+ .proc_handler = proc_dointvec,
},
{ }
};
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 9fc33210f68f..c042d2ff918e 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -508,16 +508,6 @@ struct nfs_pgio_header *nfs_pgio_header_alloc(const struct nfs_rw_ops *ops)
}
EXPORT_SYMBOL_GPL(nfs_pgio_header_alloc);

-/*
- * nfs_pgio_header_free - Free a read or write header
- * @hdr: The header to free
- */
-void nfs_pgio_header_free(struct nfs_pgio_header *hdr)
-{
- hdr->rw_ops->rw_free_header(hdr);
-}
-EXPORT_SYMBOL_GPL(nfs_pgio_header_free);
-
/**
* nfs_pgio_data_destroy - make @hdr suitable for reuse
*
@@ -526,14 +516,24 @@ EXPORT_SYMBOL_GPL(nfs_pgio_header_free);
*
* @hdr: A header that has had nfs_generic_pgio called
*/
-void nfs_pgio_data_destroy(struct nfs_pgio_header *hdr)
+static void nfs_pgio_data_destroy(struct nfs_pgio_header *hdr)
{
if (hdr->args.context)
put_nfs_open_context(hdr->args.context);
if (hdr->page_array.pagevec != hdr->page_array.page_array)
kfree(hdr->page_array.pagevec);
}
-EXPORT_SYMBOL_GPL(nfs_pgio_data_destroy);
+
+/*
+ * nfs_pgio_header_free - Free a read or write header
+ * @hdr: The header to free
+ */
+void nfs_pgio_header_free(struct nfs_pgio_header *hdr)
+{
+ nfs_pgio_data_destroy(hdr);
+ hdr->rw_ops->rw_free_header(hdr);
+}
+EXPORT_SYMBOL_GPL(nfs_pgio_header_free);

/**
* nfs_pgio_rpcsetup - Set up arguments for a pageio call
@@ -648,7 +648,6 @@ static int nfs_pgio_error(struct nfs_pageio_descriptor *desc,
struct nfs_pgio_header *hdr)
{
set_bit(NFS_IOHDR_REDO, &hdr->flags);
- nfs_pgio_data_destroy(hdr);
hdr->completion_ops->completion(hdr);
desc->pg_completion_ops->error_cleanup(&desc->pg_list);
return -ENOMEM;
@@ -663,7 +662,6 @@ static void nfs_pgio_release(void *calldata)
struct nfs_pgio_header *hdr = calldata;
if (hdr->rw_ops->rw_release)
hdr->rw_ops->rw_release(hdr);
- nfs_pgio_data_destroy(hdr);
hdr->completion_ops->completion(hdr);
}

diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 77498f9ba554..6c15ba561283 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -1557,8 +1557,7 @@ pnfs_write_through_mds(struct nfs_pageio_descriptor *desc,
nfs_pageio_reset_write_mds(desc);
desc->pg_recoalesce = 1;
}
- nfs_pgio_data_destroy(hdr);
- hdr->release(hdr);
+ hdr->completion_ops->completion(hdr);
}

static enum pnfs_try_status
@@ -1695,8 +1694,7 @@ pnfs_read_through_mds(struct nfs_pageio_descriptor *desc,
nfs_pageio_reset_read_mds(desc);
desc->pg_recoalesce = 1;
}
- nfs_pgio_data_destroy(hdr);
- hdr->release(hdr);
+ hdr->completion_ops->completion(hdr);
}

/*
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index b04563f7be69..29ec54a58361 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -2590,6 +2590,8 @@ struct dentry *nfs_fs_mount_common(struct nfs_server *server,
/* initial superblock/root creation */
mount_info->fill_super(s, mount_info);
nfs_get_cache_cookie(s, mount_info->parsed, mount_info->cloned);
+ if (!(server->flags & NFS_MOUNT_UNSHARED))
+ s->s_iflags |= SB_I_MULTIROOT;
}

mntroot = nfs_get_root(s, mount_info->mntfh, dev_name);
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 34f18294817a..c66d59f1bd06 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1661,6 +1661,8 @@ static void nfs_commit_release_pages(struct nfs_commit_data *data)
set_bit(NFS_CONTEXT_RESEND_WRITES, &req->wb_context->flags);
next:
nfs_unlock_and_release_request(req);
+ /* Latency breaker */
+ cond_resched();
}
nfs_init_cinfo(&cinfo, data->inode, data->dreq);
if (atomic_dec_and_test(&cinfo.mds->rpcs_out))
diff --git a/fs/ocfs2/cluster/nodemanager.c b/fs/ocfs2/cluster/nodemanager.c
index 441c84e169e6..a6420a94dea8 100644
--- a/fs/ocfs2/cluster/nodemanager.c
+++ b/fs/ocfs2/cluster/nodemanager.c
@@ -40,6 +40,9 @@ char *o2nm_fence_method_desc[O2NM_FENCE_METHODS] = {
"panic", /* O2NM_FENCE_PANIC */
};

+static inline void o2nm_lock_subsystem(void);
+static inline void o2nm_unlock_subsystem(void);
+
struct o2nm_node *o2nm_get_node_by_num(u8 node_num)
{
struct o2nm_node *node = NULL;
@@ -181,7 +184,10 @@ static struct o2nm_cluster *to_o2nm_cluster_from_node(struct o2nm_node *node)
{
/* through the first node_set .parent
* mycluster/nodes/mynode == o2nm_cluster->o2nm_node_group->o2nm_node */
- return to_o2nm_cluster(node->nd_item.ci_parent->ci_parent);
+ if (node->nd_item.ci_parent)
+ return to_o2nm_cluster(node->nd_item.ci_parent->ci_parent);
+ else
+ return NULL;
}

enum {
@@ -194,7 +200,7 @@ enum {
static ssize_t o2nm_node_num_write(struct o2nm_node *node, const char *page,
size_t count)
{
- struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node);
+ struct o2nm_cluster *cluster;
unsigned long tmp;
char *p = (char *)page;

@@ -213,6 +219,13 @@ static ssize_t o2nm_node_num_write(struct o2nm_node *node, const char *page,
!test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes))
return -EINVAL; /* XXX */

+ o2nm_lock_subsystem();
+ cluster = to_o2nm_cluster_from_node(node);
+ if (!cluster) {
+ o2nm_unlock_subsystem();
+ return -EINVAL;
+ }
+
write_lock(&cluster->cl_nodes_lock);
if (cluster->cl_nodes[tmp])
p = NULL;
@@ -222,6 +235,8 @@ static ssize_t o2nm_node_num_write(struct o2nm_node *node, const char *page,
set_bit(tmp, cluster->cl_nodes_bitmap);
}
write_unlock(&cluster->cl_nodes_lock);
+ o2nm_unlock_subsystem();
+
if (p == NULL)
return -EEXIST;

@@ -261,7 +276,7 @@ static ssize_t o2nm_node_ipv4_address_write(struct o2nm_node *node,
const char *page,
size_t count)
{
- struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node);
+ struct o2nm_cluster *cluster;
int ret, i;
struct rb_node **p, *parent;
unsigned int octets[4];
@@ -278,6 +293,13 @@ static ssize_t o2nm_node_ipv4_address_write(struct o2nm_node *node,
be32_add_cpu(&ipv4_addr, octets[i] << (i * 8));
}

+ o2nm_lock_subsystem();
+ cluster = to_o2nm_cluster_from_node(node);
+ if (!cluster) {
+ o2nm_unlock_subsystem();
+ return -EINVAL;
+ }
+
ret = 0;
write_lock(&cluster->cl_nodes_lock);
if (o2nm_node_ip_tree_lookup(cluster, ipv4_addr, &p, &parent))
@@ -287,6 +309,8 @@ static ssize_t o2nm_node_ipv4_address_write(struct o2nm_node *node,
rb_insert_color(&node->nd_ip_node, &cluster->cl_node_ip_tree);
}
write_unlock(&cluster->cl_nodes_lock);
+ o2nm_unlock_subsystem();
+
if (ret)
return ret;

@@ -303,7 +327,7 @@ static ssize_t o2nm_node_local_read(struct o2nm_node *node, char *page)
static ssize_t o2nm_node_local_write(struct o2nm_node *node, const char *page,
size_t count)
{
- struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node);
+ struct o2nm_cluster *cluster;
unsigned long tmp;
char *p = (char *)page;
ssize_t ret;
@@ -321,17 +345,26 @@ static ssize_t o2nm_node_local_write(struct o2nm_node *node, const char *page,
!test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes))
return -EINVAL; /* XXX */

+ o2nm_lock_subsystem();
+ cluster = to_o2nm_cluster_from_node(node);
+ if (!cluster) {
+ ret = -EINVAL;
+ goto out;
+ }
+
/* the only failure case is trying to set a new local node
* when a different one is already set */
if (tmp && tmp == cluster->cl_has_local &&
- cluster->cl_local_node != node->nd_num)
- return -EBUSY;
+ cluster->cl_local_node != node->nd_num) {
+ ret = -EBUSY;
+ goto out;
+ }

/* bring up the rx thread if we're setting the new local node. */
if (tmp && !cluster->cl_has_local) {
ret = o2net_start_listening(node);
if (ret)
- return ret;
+ goto out;
}

if (!tmp && cluster->cl_has_local &&
@@ -346,7 +379,11 @@ static ssize_t o2nm_node_local_write(struct o2nm_node *node, const char *page,
cluster->cl_local_node = node->nd_num;
}

- return count;
+ ret = count;
+
+out:
+ o2nm_unlock_subsystem();
+ return ret;
}

struct o2nm_node_attribute {
@@ -889,6 +926,16 @@ static struct o2nm_cluster_group o2nm_cluster_group = {
},
};

+static inline void o2nm_lock_subsystem(void)
+{
+ mutex_lock(&o2nm_cluster_group.cs_subsys.su_mutex);
+}
+
+static inline void o2nm_unlock_subsystem(void)
+{
+ mutex_unlock(&o2nm_cluster_group.cs_subsys.su_mutex);
+}
+
int o2nm_depend_item(struct config_item *item)
{
return configfs_depend_item(&o2nm_cluster_group.cs_subsys, item);
diff --git a/fs/pipe.c b/fs/pipe.c
index ca7d71a9ce69..4de213b5854f 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -34,11 +34,6 @@
*/
unsigned int pipe_max_size = 1048576;

-/*
- * Minimum pipe size, as required by POSIX
- */
-unsigned int pipe_min_size = PAGE_SIZE;
-
/* Maximum allocatable pages per user. Hard limit is unset by default, soft
* matches default values.
*/
@@ -590,52 +585,73 @@ pipe_fasync(int fd, struct file *filp, int on)
return retval;
}

-static void account_pipe_buffers(struct pipe_inode_info *pipe,
+static unsigned long account_pipe_buffers(struct user_struct *user,
unsigned long old, unsigned long new)
{
- atomic_long_add(new - old, &pipe->user->pipe_bufs);
+ return atomic_long_add_return(new - old, &user->pipe_bufs);
+}
+
+static bool too_many_pipe_buffers_soft(unsigned long user_bufs)
+{
+ unsigned long soft_limit = ACCESS_ONCE(pipe_user_pages_soft);
+
+ return soft_limit && user_bufs > soft_limit;
}

-static bool too_many_pipe_buffers_soft(struct user_struct *user)
+static bool too_many_pipe_buffers_hard(unsigned long user_bufs)
{
- return pipe_user_pages_soft &&
- atomic_long_read(&user->pipe_bufs) >= pipe_user_pages_soft;
+ unsigned long hard_limit = ACCESS_ONCE(pipe_user_pages_hard);
+
+ return hard_limit && user_bufs > hard_limit;
}

-static bool too_many_pipe_buffers_hard(struct user_struct *user)
+static bool is_unprivileged_user(void)
{
- return pipe_user_pages_hard &&
- atomic_long_read(&user->pipe_bufs) >= pipe_user_pages_hard;
+ return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
}

struct pipe_inode_info *alloc_pipe_info(void)
{
struct pipe_inode_info *pipe;
+ unsigned long pipe_bufs = PIPE_DEF_BUFFERS;
+ struct user_struct *user = get_current_user();
+ unsigned long user_bufs;
+ unsigned int max_size = ACCESS_ONCE(pipe_max_size);

pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL);
- if (pipe) {
- unsigned long pipe_bufs = PIPE_DEF_BUFFERS;
- struct user_struct *user = get_current_user();
-
- if (!too_many_pipe_buffers_hard(user)) {
- if (too_many_pipe_buffers_soft(user))
- pipe_bufs = 1;
- pipe->bufs = kzalloc(sizeof(struct pipe_buffer) * pipe_bufs, GFP_KERNEL);
- }
+ if (pipe == NULL)
+ goto out_free_uid;

- if (pipe->bufs) {
- init_waitqueue_head(&pipe->wait);
- pipe->r_counter = pipe->w_counter = 1;
- pipe->buffers = pipe_bufs;
- pipe->user = user;
- account_pipe_buffers(pipe, 0, pipe_bufs);
- mutex_init(&pipe->mutex);
- return pipe;
- }
- free_uid(user);
- kfree(pipe);
+ if (pipe_bufs * PAGE_SIZE > max_size && !capable(CAP_SYS_RESOURCE))
+ pipe_bufs = max_size >> PAGE_SHIFT;
+
+ user_bufs = account_pipe_buffers(user, 0, pipe_bufs);
+
+ if (too_many_pipe_buffers_soft(user_bufs) && is_unprivileged_user()) {
+ user_bufs = account_pipe_buffers(user, pipe_bufs, 1);
+ pipe_bufs = 1;
+ }
+
+ if (too_many_pipe_buffers_hard(user_bufs) && is_unprivileged_user())
+ goto out_revert_acct;
+
+ pipe->bufs = kcalloc(pipe_bufs, sizeof(struct pipe_buffer),
+ GFP_KERNEL);
+
+ if (pipe->bufs) {
+ init_waitqueue_head(&pipe->wait);
+ pipe->r_counter = pipe->w_counter = 1;
+ pipe->buffers = pipe_bufs;
+ pipe->user = user;
+ mutex_init(&pipe->mutex);
+ return pipe;
}

+out_revert_acct:
+ (void) account_pipe_buffers(user, pipe_bufs, 0);
+ kfree(pipe);
+out_free_uid:
+ free_uid(user);
return NULL;
}

@@ -643,7 +659,7 @@ void free_pipe_info(struct pipe_inode_info *pipe)
{
int i;

- account_pipe_buffers(pipe, pipe->buffers, 0);
+ (void) account_pipe_buffers(pipe->user, pipe->buffers, 0);
free_uid(pipe->user);
for (i = 0; i < pipe->buffers; i++) {
struct pipe_buffer *buf = pipe->bufs + i;
@@ -993,13 +1009,59 @@ const struct file_operations pipefifo_fops = {
.fasync = pipe_fasync,
};

+/*
+ * Currently we rely on the pipe array holding a power-of-2 number
+ * of pages. Returns 0 on error.
+ */
+unsigned int round_pipe_size(unsigned long size)
+{
+ if (size > (1U << 31))
+ return 0;
+
+ /* Minimum pipe size, as required by POSIX */
+ if (size < PAGE_SIZE)
+ return PAGE_SIZE;
+
+ return roundup_pow_of_two(size);
+}
+
/*
* Allocate a new array of pipe buffers and copy the info over. Returns the
* pipe size if successful, or return -ERROR on error.
*/
-static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long nr_pages)
+static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg)
{
struct pipe_buffer *bufs;
+ unsigned int size, nr_pages;
+ unsigned long user_bufs;
+ long ret = 0;
+
+ size = round_pipe_size(arg);
+ nr_pages = size >> PAGE_SHIFT;
+
+ if (!nr_pages)
+ return -EINVAL;
+
+ /*
+ * If trying to increase the pipe capacity, check that an
+ * unprivileged user is not trying to exceed various limits
+ * (soft limit check here, hard limit check just below).
+ * Decreasing the pipe capacity is always permitted, even
+ * if the user is currently over a limit.
+ */
+ if (nr_pages > pipe->buffers &&
+ size > pipe_max_size && !capable(CAP_SYS_RESOURCE))
+ return -EPERM;
+
+ user_bufs = account_pipe_buffers(pipe->user, pipe->buffers, nr_pages);
+
+ if (nr_pages > pipe->buffers &&
+ (too_many_pipe_buffers_hard(user_bufs) ||
+ too_many_pipe_buffers_soft(user_bufs)) &&
+ is_unprivileged_user()) {
+ ret = -EPERM;
+ goto out_revert_acct;
+ }

/*
* We can shrink the pipe, if arg >= pipe->nrbufs. Since we don't
@@ -1007,12 +1069,16 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long nr_pages)
* again like we would do for growing. If the pipe currently
* contains more buffers than arg, then return busy.
*/
- if (nr_pages < pipe->nrbufs)
- return -EBUSY;
+ if (nr_pages < pipe->nrbufs) {
+ ret = -EBUSY;
+ goto out_revert_acct;
+ }

bufs = kcalloc(nr_pages, sizeof(*bufs), GFP_KERNEL | __GFP_NOWARN);
- if (unlikely(!bufs))
- return -ENOMEM;
+ if (unlikely(!bufs)) {
+ ret = -ENOMEM;
+ goto out_revert_acct;
+ }

/*
* The pipe array wraps around, so just start the new one at zero
@@ -1035,40 +1101,14 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long nr_pages)
memcpy(bufs + head, pipe->bufs, tail * sizeof(struct pipe_buffer));
}

- account_pipe_buffers(pipe, pipe->buffers, nr_pages);
pipe->curbuf = 0;
kfree(pipe->bufs);
pipe->bufs = bufs;
pipe->buffers = nr_pages;
return nr_pages * PAGE_SIZE;
-}

-/*
- * Currently we rely on the pipe array holding a power-of-2 number
- * of pages.
- */
-static inline unsigned int round_pipe_size(unsigned int size)
-{
- unsigned long nr_pages;
-
- nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
- return roundup_pow_of_two(nr_pages) << PAGE_SHIFT;
-}
-
-/*
- * This should work even if CONFIG_PROC_FS isn't set, as proc_dointvec_minmax
- * will return an error.
- */
-int pipe_proc_fn(struct ctl_table *table, int write, void __user *buf,
- size_t *lenp, loff_t *ppos)
-{
- int ret;
-
- ret = proc_dointvec_minmax(table, write, buf, lenp, ppos);
- if (ret < 0 || !write)
- return ret;
-
- pipe_max_size = round_pipe_size(pipe_max_size);
+out_revert_acct:
+ (void) account_pipe_buffers(pipe->user, nr_pages, pipe->buffers);
return ret;
}

@@ -1094,28 +1134,9 @@ long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
__pipe_lock(pipe);

switch (cmd) {
- case F_SETPIPE_SZ: {
- unsigned int size, nr_pages;
-
- size = round_pipe_size(arg);
- nr_pages = size >> PAGE_SHIFT;
-
- ret = -EINVAL;
- if (!nr_pages)
- goto out;
-
- if (!capable(CAP_SYS_RESOURCE) && size > pipe_max_size) {
- ret = -EPERM;
- goto out;
- } else if ((too_many_pipe_buffers_hard(pipe->user) ||
- too_many_pipe_buffers_soft(pipe->user)) &&
- !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) {
- ret = -EPERM;
- goto out;
- }
- ret = pipe_set_size(pipe, nr_pages);
+ case F_SETPIPE_SZ:
+ ret = pipe_set_size(pipe, arg);
break;
- }
case F_GETPIPE_SZ:
ret = pipe->buffers * PAGE_SIZE;
break;
@@ -1124,7 +1145,6 @@ long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
break;
}

-out:
__pipe_unlock(pipe);
return ret;
}
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index 4bdd795dfaf9..7bb0ea17607b 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -94,7 +94,6 @@ struct crypto_ahash {
unsigned int keylen);

unsigned int reqsize;
- bool has_setkey;
struct crypto_tfm base;
};

@@ -183,11 +182,6 @@ static inline void *ahash_request_ctx(struct ahash_request *req)
int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen);

-static inline bool crypto_ahash_has_setkey(struct crypto_ahash *tfm)
-{
- return tfm->has_setkey;
-}
-
int crypto_ahash_finup(struct ahash_request *req);
int crypto_ahash_final(struct ahash_request *req);
int crypto_ahash_digest(struct ahash_request *req);
@@ -199,12 +193,22 @@ static inline int crypto_ahash_export(struct ahash_request *req, void *out)

static inline int crypto_ahash_import(struct ahash_request *req, const void *in)
{
- return crypto_ahash_reqtfm(req)->import(req, in);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+
+ if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+ return -ENOKEY;
+
+ return tfm->import(req, in);
}

static inline int crypto_ahash_init(struct ahash_request *req)
{
- return crypto_ahash_reqtfm(req)->init(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+
+ if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+ return -ENOKEY;
+
+ return tfm->init(req);
}

static inline int crypto_ahash_update(struct ahash_request *req)
@@ -343,12 +347,22 @@ static inline int crypto_shash_export(struct shash_desc *desc, void *out)

static inline int crypto_shash_import(struct shash_desc *desc, const void *in)
{
- return crypto_shash_alg(desc->tfm)->import(desc, in);
+ struct crypto_shash *tfm = desc->tfm;
+
+ if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+ return -ENOKEY;
+
+ return crypto_shash_alg(tfm)->import(desc, in);
}

static inline int crypto_shash_init(struct shash_desc *desc)
{
- return crypto_shash_alg(desc->tfm)->init(desc);
+ struct crypto_shash *tfm = desc->tfm;
+
+ if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+ return -ENOKEY;
+
+ return crypto_shash_alg(tfm)->init(desc);
}

int crypto_shash_update(struct shash_desc *desc, const u8 *data,
diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
index 27f5fa488ad0..5abbb5e473d2 100644
--- a/include/crypto/internal/hash.h
+++ b/include/crypto/internal/hash.h
@@ -91,6 +91,8 @@ static inline bool crypto_shash_alg_has_setkey(struct shash_alg *alg)
return alg->setkey != shash_no_setkey;
}

+bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg);
+
int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
struct hash_alg_common *alg,
struct crypto_instance *inst);
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index a3d75fefd010..9dbc9f7b77ca 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -176,5 +176,6 @@ extern void drm_kms_helper_hotplug_event(struct drm_device *dev);

extern void drm_kms_helper_poll_disable(struct drm_device *dev);
extern void drm_kms_helper_poll_enable(struct drm_device *dev);
+bool drm_kms_helper_is_poll_worker(void);

#endif
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 61dd0b15d21c..447b6950d381 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -94,9 +94,17 @@
*/
#define CRYPTO_ALG_KERN_DRIVER_ONLY 0x00001000

+/*
+ * Set if the algorithm has a ->setkey() method but can be used without
+ * calling it first, i.e. there is a default key.
+ */
+#define CRYPTO_ALG_OPTIONAL_KEY 0x00004000
+
/*
* Transform masks and values (for crt_flags).
*/
+#define CRYPTO_TFM_NEED_KEY 0x00000001
+
#define CRYPTO_TFM_REQ_MASK 0x000fff00
#define CRYPTO_TFM_RES_MASK 0xfff00000

diff --git a/include/linux/fs.h b/include/linux/fs.h
index d594f0fbc41a..b5118c1cdb98 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1150,6 +1150,9 @@ struct mm_struct;
#define UMOUNT_NOFOLLOW 0x00000008 /* Don't follow symlink on umount */
#define UMOUNT_UNUSED 0x80000000 /* Flag guaranteed to be unused */

+/* sb->s_iflags */
+#define SB_I_MULTIROOT 0x00000008 /* Multiple roots to the dentry tree */
+
extern struct list_head super_blocks;
extern spinlock_t sb_lock;

@@ -1190,6 +1193,7 @@ struct super_block {
const struct quotactl_ops *s_qcop;
const struct export_operations *s_export_op;
unsigned long s_flags;
+ unsigned long s_iflags; /* internal SB_I_* flags */
unsigned long s_magic;
struct dentry *s_root;
struct rw_semaphore s_umount;
diff --git a/include/linux/libata.h b/include/linux/libata.h
index bd83b8e83cfe..ab58855b9de5 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -232,6 +232,7 @@ enum {
* led */
ATA_FLAG_NO_DIPM = (1 << 23), /* host not happy with DIPM */
ATA_FLAG_LOWTAG = (1 << 24), /* host wants lowest available tag */
+ ATA_FLAG_SAS_HOST = (1 << 25), /* SAS host */

/* bits 24:31 of ap->flags are reserved for LLD specific flags */

diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 687dba5dceda..a6e05ad7ebf3 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -432,8 +432,8 @@ struct mlx5_core_mr {
struct mlx5_core_srq {
u32 srqn;
int max;
- int max_gs;
- int max_avail_gather;
+ size_t max_gs;
+ size_t max_avail_gather;
int wqe_shift;
void (*event) (struct mlx5_core_srq *, enum mlx5_event);

diff --git a/include/linux/mmc/sdhci.h b/include/linux/mmc/sdhci.h
index 08abe9941884..e0d11601d79c 100644
--- a/include/linux/mmc/sdhci.h
+++ b/include/linux/mmc/sdhci.h
@@ -109,6 +109,7 @@ struct sdhci_host {

/* Internal data */
struct mmc_host *mmc; /* MMC structure */
+ struct mmc_host_ops mmc_host_ops; /* MMC host ops */
u64 dma_mask; /* custom DMA mask */

#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
diff --git a/include/linux/nospec.h b/include/linux/nospec.h
index 3b1d69c52beb..e791ebc65c9c 100644
--- a/include/linux/nospec.h
+++ b/include/linux/nospec.h
@@ -53,7 +53,6 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
BUILD_BUG_ON(sizeof(_i) > sizeof(long)); \
BUILD_BUG_ON(sizeof(_s) > sizeof(long)); \
\
- _i &= _mask; \
- _i; \
+ (typeof(_i)) (_i & _mask); \
})
#endif /* _LINUX_NOSPEC_H */
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
index 24f5470d3944..eed87a0fdc5c 100644
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -124,10 +124,9 @@ void pipe_lock(struct pipe_inode_info *);
void pipe_unlock(struct pipe_inode_info *);
void pipe_double_lock(struct pipe_inode_info *, struct pipe_inode_info *);

-extern unsigned int pipe_max_size, pipe_min_size;
+extern unsigned int pipe_max_size;
extern unsigned long pipe_user_pages_hard;
extern unsigned long pipe_user_pages_soft;
-int pipe_proc_fn(struct ctl_table *, int, void __user *, size_t *, loff_t *);


/* Drop the inode semaphore and wait for a pipe event, atomically */
@@ -149,5 +148,6 @@ long pipe_fcntl(struct file *, unsigned int, unsigned long arg);
struct pipe_inode_info *get_pipe_info(struct file *file);

int create_pipe_files(struct file **, int);
+unsigned int round_pipe_size(unsigned long size);

#endif
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 93431a42f2f9..9b31b6d38da2 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -2477,6 +2477,23 @@ static inline void skb_postpull_rcsum(struct sk_buff *skb,

unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);

+static inline void skb_postpush_rcsum(struct sk_buff *skb,
+ const void *start, unsigned int len)
+{
+ /* For performing the reverse operation to skb_postpull_rcsum(),
+ * we can instead of ...
+ *
+ * skb->csum = csum_add(skb->csum, csum_partial(start, len, 0));
+ *
+ * ... just use this equivalent version here to save a few
+ * instructions. Feeding csum of 0 in csum_partial() and later
+ * on adding skb->csum is equivalent to feed skb->csum in the
+ * first place.
+ */
+ if (skb->ip_summed == CHECKSUM_COMPLETE)
+ skb->csum = csum_partial(start, len, skb->csum);
+}
+
/**
* pskb_trim_rcsum - trim received skb and update checksum
* @skb: buffer to trim
diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
index b54fdcb52b1c..70b965c25e3a 100644
--- a/include/linux/usb/quirks.h
+++ b/include/linux/usb/quirks.h
@@ -56,4 +56,7 @@
*/
#define USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL BIT(11)

+/* Device needs a pause after every control message. */
+#define USB_QUIRK_DELAY_CTRL_MSG BIT(13)
+
#endif /* __LINUX_USB_QUIRKS_H */
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index fa25f353f985..1e0745d75053 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -455,6 +455,7 @@ extern bool cancel_delayed_work_sync(struct delayed_work *dwork);

extern void workqueue_set_max_active(struct workqueue_struct *wq,
int max_active);
+extern struct work_struct *current_work(void);
extern bool current_is_workqueue_rescuer(void);
extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
extern unsigned int work_busy(struct work_struct *work);
diff --git a/include/net/ip.h b/include/net/ip.h
index 3d389a86a7e2..27dd9826e05d 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -263,12 +263,19 @@ int ip_decrease_ttl(struct iphdr *iph)
return --iph->ttl;
}

+static inline int ip_mtu_locked(const struct dst_entry *dst)
+{
+ const struct rtable *rt = (const struct rtable *)dst;
+
+ return rt->rt_mtu_locked || dst_metric_locked(dst, RTAX_MTU);
+}
+
static inline
int ip_dont_fragment(struct sock *sk, struct dst_entry *dst)
{
return inet_sk(sk)->pmtudisc == IP_PMTUDISC_DO ||
(inet_sk(sk)->pmtudisc == IP_PMTUDISC_WANT &&
- !(dst_metric_locked(dst, RTAX_MTU)));
+ !ip_mtu_locked(dst));
}

static inline bool ip_sk_accept_pmtu(const struct sock *sk)
@@ -294,7 +301,7 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
struct net *net = dev_net(dst->dev);

if (net->ipv4.sysctl_ip_fwd_use_pmtu ||
- dst_metric_locked(dst, RTAX_MTU) ||
+ ip_mtu_locked(dst) ||
!forwarding)
return dst_mtu(dst);

diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index fc7014fe8ee3..905cd10a9478 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -54,6 +54,7 @@ struct fib_nh_exception {
int fnhe_genid;
__be32 fnhe_daddr;
u32 fnhe_pmtu;
+ bool fnhe_mtu_locked;
__be32 fnhe_gw;
unsigned long fnhe_expires;
struct rtable __rcu *fnhe_rth_input;
diff --git a/include/net/regulatory.h b/include/net/regulatory.h
index dad7ab20a8cb..c95dcba0aa94 100644
--- a/include/net/regulatory.h
+++ b/include/net/regulatory.h
@@ -78,7 +78,7 @@ struct regulatory_request {
int wiphy_idx;
enum nl80211_reg_initiator initiator;
enum nl80211_user_reg_hint_type user_reg_hint_type;
- char alpha2[2];
+ char alpha2[3];
enum nl80211_dfs_regions dfs_region;
bool intersect;
bool processed;
diff --git a/include/net/route.h b/include/net/route.h
index 671d5b1c813b..6de6c4977d07 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -61,7 +61,8 @@ struct rtable {
__be32 rt_gateway;

/* Miscellaneous cached information */
- u32 rt_pmtu;
+ u32 rt_mtu_locked:1,
+ rt_pmtu:31;

struct list_head rt_uncached;
};
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 513aecb66565..2d5acae04344 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -656,6 +656,14 @@ static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
return NET_XMIT_DROP;
}

+static inline int qdisc_drop_all(struct sk_buff *skb, struct Qdisc *sch)
+{
+ kfree_skb_list(skb);
+ sch->qstats.drops++;
+
+ return NET_XMIT_DROP;
+}
+
static inline int qdisc_reshape_fail(struct sk_buff *skb, struct Qdisc *sch)
{
sch->qstats.drops++;
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 775724d054cd..9156dea15207 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -582,11 +582,14 @@ static inline void sctp_v6_map_v4(union sctp_addr *addr)
/* Map v4 address to v4-mapped v6 address */
static inline void sctp_v4_map_v6(union sctp_addr *addr)
{
+ __be16 port;
+
+ port = addr->v4.sin_port;
+ addr->v6.sin6_addr.s6_addr32[3] = addr->v4.sin_addr.s_addr;
+ addr->v6.sin6_port = port;
addr->v6.sin6_family = AF_INET6;
addr->v6.sin6_flowinfo = 0;
addr->v6.sin6_scope_id = 0;
- addr->v6.sin6_port = addr->v4.sin_port;
- addr->v6.sin6_addr.s6_addr32[3] = addr->v4.sin_addr.s_addr;
addr->v6.sin6_addr.s6_addr32[0] = 0;
addr->v6.sin6_addr.s6_addr32[1] = 0;
addr->v6.sin6_addr.s6_addr32[2] = htonl(0x0000ffff);
diff --git a/include/net/udplite.h b/include/net/udplite.h
index 2caadabcd07b..3c8985489992 100644
--- a/include/net/udplite.h
+++ b/include/net/udplite.h
@@ -61,6 +61,7 @@ static inline int udplite_checksum_init(struct sk_buff *skb, struct udphdr *uh)
UDP_SKB_CB(skb)->cscov = cscov;
if (skb->ip_summed == CHECKSUM_COMPLETE)
skb->ip_summed = CHECKSUM_NONE;
+ skb->csum_valid = 0;
}

return 0;
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
index ce999f30a1e5..54fb9a2d7c7c 100644
--- a/include/scsi/libsas.h
+++ b/include/scsi/libsas.h
@@ -67,31 +67,31 @@ enum ha_event {

enum port_event {
PORTE_BYTES_DMAED = 0U,
- PORTE_BROADCAST_RCVD = 1,
- PORTE_LINK_RESET_ERR = 2,
- PORTE_TIMER_EVENT = 3,
- PORTE_HARD_RESET = 4,
- PORT_NUM_EVENTS = 5,
+ PORTE_BROADCAST_RCVD,
+ PORTE_LINK_RESET_ERR,
+ PORTE_TIMER_EVENT,
+ PORTE_HARD_RESET,
+ PORT_NUM_EVENTS,
};

enum phy_event {
PHYE_LOSS_OF_SIGNAL = 0U,
- PHYE_OOB_DONE = 1,
- PHYE_OOB_ERROR = 2,
- PHYE_SPINUP_HOLD = 3, /* hot plug SATA, no COMWAKE sent */
- PHYE_RESUME_TIMEOUT = 4,
- PHY_NUM_EVENTS = 5,
+ PHYE_OOB_DONE,
+ PHYE_OOB_ERROR,
+ PHYE_SPINUP_HOLD, /* hot plug SATA, no COMWAKE sent */
+ PHYE_RESUME_TIMEOUT,
+ PHY_NUM_EVENTS,
};

enum discover_event {
DISCE_DISCOVER_DOMAIN = 0U,
- DISCE_REVALIDATE_DOMAIN = 1,
- DISCE_PORT_GONE = 2,
- DISCE_PROBE = 3,
- DISCE_SUSPEND = 4,
- DISCE_RESUME = 5,
- DISCE_DESTRUCT = 6,
- DISC_NUM_EVENTS = 7,
+ DISCE_REVALIDATE_DOMAIN,
+ DISCE_PORT_GONE,
+ DISCE_PROBE,
+ DISCE_SUSPEND,
+ DISCE_RESUME,
+ DISCE_DESTRUCT,
+ DISC_NUM_EVENTS,
};

/* ---------- Expander Devices ---------- */
diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h
index 0f8210b8e0bc..b2d0ffdd637a 100644
--- a/include/uapi/linux/if_ether.h
+++ b/include/uapi/linux/if_ether.h
@@ -29,12 +29,15 @@
*/

#define ETH_ALEN 6 /* Octets in one ethernet addr */
+#define ETH_TLEN 2 /* Octets in ethernet type field */
#define ETH_HLEN 14 /* Total octets in header. */
#define ETH_ZLEN 60 /* Min. octets in frame sans FCS */
#define ETH_DATA_LEN 1500 /* Max. octets in payload */
#define ETH_FRAME_LEN 1514 /* Max. octets in frame sans FCS */
#define ETH_FCS_LEN 4 /* Octets in the FCS */

+#define ETH_MIN_MTU 68 /* Min IPv4 MTU per RFC791 */
+
/*
* These are the defined Ethernet Protocol ID's.
*/
diff --git a/include/uapi/linux/usb/audio.h b/include/uapi/linux/usb/audio.h
index d2314be4f0c0..19f9dc2c06f6 100644
--- a/include/uapi/linux/usb/audio.h
+++ b/include/uapi/linux/usb/audio.h
@@ -369,7 +369,7 @@ static inline __u8 uac_processing_unit_bControlSize(struct uac_processing_unit_d
{
return (protocol == UAC_VERSION_1) ?
desc->baSourceID[desc->bNrInPins + 4] :
- desc->baSourceID[desc->bNrInPins + 6];
+ 2; /* in UAC2, this value is constant */
}

static inline __u8 *uac_processing_unit_bmControls(struct uac_processing_unit_descriptor *desc,
@@ -377,7 +377,7 @@ static inline __u8 *uac_processing_unit_bmControls(struct uac_processing_unit_de
{
return (protocol == UAC_VERSION_1) ?
&desc->baSourceID[desc->bNrInPins + 5] :
- &desc->baSourceID[desc->bNrInPins + 7];
+ &desc->baSourceID[desc->bNrInPins + 6];
}

static inline __u8 uac_processing_unit_iProcessing(struct uac_processing_unit_descriptor *desc,
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
index 0b3149ed7eaa..ff05022328a7 100644
--- a/include/xen/xen-ops.h
+++ b/include/xen/xen-ops.h
@@ -12,6 +12,7 @@ void xen_arch_post_suspend(int suspend_cancelled);

void xen_timer_resume(void);
void xen_arch_resume(void);
+void xen_arch_suspend(void);

void xen_resume_notifier_register(struct notifier_block *nb);
void xen_resume_notifier_unregister(struct notifier_block *nb);
diff --git a/kernel/async.c b/kernel/async.c
index 61f023ce0228..4ae59815ced4 100644
--- a/kernel/async.c
+++ b/kernel/async.c
@@ -84,20 +84,24 @@ static atomic_t entry_count;

static async_cookie_t lowest_in_progress(struct async_domain *domain)
{
- struct list_head *pending;
+ struct async_entry *first = NULL;
async_cookie_t ret = ASYNC_COOKIE_MAX;
unsigned long flags;

spin_lock_irqsave(&async_lock, flags);

- if (domain)
- pending = &domain->pending;
- else
- pending = &async_global_pending;
+ if (domain) {
+ if (!list_empty(&domain->pending))
+ first = list_first_entry(&domain->pending,
+ struct async_entry, domain_list);
+ } else {
+ if (!list_empty(&async_global_pending))
+ first = list_first_entry(&async_global_pending,
+ struct async_entry, global_list);
+ }

- if (!list_empty(pending))
- ret = list_first_entry(pending, struct async_entry,
- domain_list)->cookie;
+ if (first)
+ ret = first->cookie;

spin_unlock_irqrestore(&async_lock, flags);
return ret;
diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c
index 1559fb0b9296..0ff7951d22de 100644
--- a/kernel/events/hw_breakpoint.c
+++ b/kernel/events/hw_breakpoint.c
@@ -427,16 +427,9 @@ EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
* modify_user_hw_breakpoint - modify a user-space hardware breakpoint
* @bp: the breakpoint structure to modify
* @attr: new breakpoint attributes
- * @triggered: callback to trigger when we hit the breakpoint
- * @tsk: pointer to 'task_struct' of the process to which the address belongs
*/
int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr)
{
- u64 old_addr = bp->attr.bp_addr;
- u64 old_len = bp->attr.bp_len;
- int old_type = bp->attr.bp_type;
- int err = 0;
-
/*
* modify_user_hw_breakpoint can be invoked with IRQs disabled and hence it
* will not be possible to raise IPIs that invoke __perf_event_disable.
@@ -451,27 +444,18 @@ int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *att
bp->attr.bp_addr = attr->bp_addr;
bp->attr.bp_type = attr->bp_type;
bp->attr.bp_len = attr->bp_len;
+ bp->attr.disabled = 1;

- if (attr->disabled)
- goto end;
-
- err = validate_hw_breakpoint(bp);
- if (!err)
- perf_event_enable(bp);
+ if (!attr->disabled) {
+ int err = validate_hw_breakpoint(bp);

- if (err) {
- bp->attr.bp_addr = old_addr;
- bp->attr.bp_type = old_type;
- bp->attr.bp_len = old_len;
- if (!bp->attr.disabled)
- perf_event_enable(bp);
+ if (err)
+ return err;

- return err;
+ perf_event_enable(bp);
+ bp->attr.disabled = 0;
}

-end:
- bp->attr.disabled = attr->disabled;
-
return 0;
}
EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index a794eaebffe8..dbad6176fb67 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -1197,7 +1197,12 @@ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,

cpu_base = &__raw_get_cpu_var(hrtimer_bases);

- if (clock_id == CLOCK_REALTIME && mode != HRTIMER_MODE_ABS)
+ /*
+ * POSIX magic: Relative CLOCK_REALTIME timers are not affected by
+ * clock modifications, so they needs to become CLOCK_MONOTONIC to
+ * ensure POSIX compliance.
+ */
+ if (clock_id == CLOCK_REALTIME && mode & HRTIMER_MODE_REL)
clock_id = CLOCK_MONOTONIC;

base = hrtimer_clockid_to_base(clock_id);
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index 3e8afd2bb1dc..5a4cefc43f40 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -48,6 +48,7 @@
#include <linux/workqueue.h>
#include <linux/export.h>
#include <linux/hashtable.h>
+#include <linux/nospec.h>

/*
* Management arrays for POSIX timers. Timers are now kept in static hash table
@@ -578,13 +579,21 @@ static void release_posix_timer(struct k_itimer *tmr, int it_id_set)

static struct k_clock *clockid_to_kclock(const clockid_t id)
{
- if (id < 0)
+ clockid_t idx = id;
+ struct k_clock *kc;
+
+ if (id < 0) {
return (id & CLOCKFD_MASK) == CLOCKFD ?
&clock_posix_dynamic : &clock_posix_cpu;
+ }
+
+ if (id >= ARRAY_SIZE(posix_clocks))
+ return NULL;

- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
+ kc = &posix_clocks[array_index_nospec(idx, ARRAY_SIZE(posix_clocks))];
+ if (!kc->clock_getres)
return NULL;
- return &posix_clocks[id];
+ return kc;
}

static int common_timer_create(struct k_itimer *new_timer)
diff --git a/kernel/relay.c b/kernel/relay.c
index 5a56d3c8dc03..42d06d1d80f1 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -166,7 +166,7 @@ static struct rchan_buf *relay_create_buf(struct rchan *chan)
{
struct rchan_buf *buf;

- if (chan->n_subbufs > UINT_MAX / sizeof(size_t *))
+ if (chan->n_subbufs > KMALLOC_MAX_SIZE / sizeof(size_t *))
return NULL;

buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index e918a641b1a0..e714622b6fa9 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -64,6 +64,7 @@
#include <linux/sched/sysctl.h>
#include <linux/kexec.h>
#include <linux/mount.h>
+#include <linux/pipe_fs_i.h>

#include <asm/uaccess.h>
#include <asm/processor.h>
@@ -193,6 +194,8 @@ static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
static int proc_dostring_coredump(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos);
#endif
+static int proc_dopipe_max_size(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos);

#ifdef CONFIG_MAGIC_SYSRQ
/* Note: sysrq code uses it's own private copy */
@@ -1670,8 +1673,7 @@ static struct ctl_table fs_table[] = {
.data = &pipe_max_size,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &pipe_proc_fn,
- .extra1 = &pipe_min_size,
+ .proc_handler = proc_dopipe_max_size,
},
{
.procname = "pipe-user-pages-hard",
@@ -2222,6 +2224,33 @@ int proc_dointvec_minmax(struct ctl_table *table, int write,
do_proc_dointvec_minmax_conv, &param);
}

+static int do_proc_dopipe_max_size_conv(bool *negp, unsigned long *lvalp,
+ int *valp, int write, void *data)
+{
+ if (write) {
+ unsigned int val;
+
+ val = round_pipe_size(*lvalp);
+ if (*negp || val == 0)
+ return -EINVAL;
+
+ *valp = val;
+ } else {
+ unsigned int val = *valp;
+ *negp = false;
+ *lvalp = (unsigned long) val;
+ }
+
+ return 0;
+}
+
+static int proc_dopipe_max_size(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+ return do_proc_dointvec(table, write, buffer, lenp, ppos,
+ do_proc_dopipe_max_size_conv, NULL);
+}
+
static void validate_coredump_safety(void)
{
#ifdef CONFIG_COREDUMP
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 562cbf747622..e3bc78d97c7a 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -611,7 +611,7 @@ static int create_trace_kprobe(int argc, char **argv)
bool is_return = false, is_delete = false;
char *symbol = NULL, *event = NULL, *group = NULL;
char *arg;
- unsigned long offset = 0;
+ long offset = 0;
void *addr = NULL;
char buf[MAX_EVENT_NAME_LEN];

@@ -684,7 +684,7 @@ static int create_trace_kprobe(int argc, char **argv)
symbol = argv[1];
/* TODO: support .init module functions */
ret = traceprobe_split_symbol_offset(symbol, &offset);
- if (ret) {
+ if (ret || offset < 0 || offset > UINT_MAX) {
pr_info("Failed to parse either an address or a symbol.\n");
return ret;
}
diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
index d4b9fc22cd27..a9cfe6c71745 100644
--- a/kernel/trace/trace_probe.c
+++ b/kernel/trace/trace_probe.c
@@ -291,7 +291,7 @@ static fetch_func_t get_fetch_size_function(const struct fetch_type *type,
}

/* Split symbol and offset. */
-int traceprobe_split_symbol_offset(char *symbol, unsigned long *offset)
+int traceprobe_split_symbol_offset(char *symbol, long *offset)
{
char *tmp;
int ret;
@@ -299,13 +299,11 @@ int traceprobe_split_symbol_offset(char *symbol, unsigned long *offset)
if (!offset)
return -EINVAL;

- tmp = strchr(symbol, '+');
+ tmp = strpbrk(symbol, "+-");
if (tmp) {
- /* skip sign because kstrtoul doesn't accept '+' */
- ret = kstrtoul(tmp + 1, 0, offset);
+ ret = kstrtol(tmp, 0, offset);
if (ret)
return ret;
-
*tmp = '\0';
} else
*offset = 0;
diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h
index 4f815fbce16d..2c89ac33d7ad 100644
--- a/kernel/trace/trace_probe.h
+++ b/kernel/trace/trace_probe.h
@@ -341,7 +341,7 @@ extern int traceprobe_conflict_field_name(const char *name,
extern void traceprobe_update_arg(struct probe_arg *arg);
extern void traceprobe_free_probe_arg(struct probe_arg *arg);

-extern int traceprobe_split_symbol_offset(char *symbol, unsigned long *offset);
+extern int traceprobe_split_symbol_offset(char *symbol, long *offset);

extern ssize_t traceprobe_probes_write(struct file *file,
const char __user *buffer, size_t count, loff_t *ppos,
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 9bf0b4bd3cc7..0d09d4a4de7b 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -4368,6 +4368,22 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
}
EXPORT_SYMBOL_GPL(workqueue_set_max_active);

+/**
+ * current_work - retrieve %current task's work struct
+ *
+ * Determine if %current task is a workqueue worker and what it's working on.
+ * Useful to find out the context that the %current task is running in.
+ *
+ * Return: work struct if %current task is a workqueue worker, %NULL otherwise.
+ */
+struct work_struct *current_work(void)
+{
+ struct worker *worker = current_wq_worker();
+
+ return worker ? worker->current_work : NULL;
+}
+EXPORT_SYMBOL(current_work);
+
/**
* current_is_workqueue_rescuer - is %current workqueue rescuer?
*
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index c892a6b51e8e..18249651423b 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -19,6 +19,7 @@
#include <linux/bootmem.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
+#include <linux/mmdebug.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
@@ -3504,6 +3505,14 @@ int hugetlb_reserve_pages(struct inode *inode,
struct hugepage_subpool *spool = subpool_inode(inode);
struct resv_map *resv_map;

+ /* This should never happen */
+ if (from > to) {
+#ifdef CONFIG_DEBUG_VM
+ WARN(1, "%s called with a negative range\n", __func__);
+#endif
+ return -EINVAL;
+ }
+
/*
* Only apply hugepage reservation if asked. At fault time, an
* attempt will be made for VM_NORESERVE to allocate a page
diff --git a/mm/madvise.c b/mm/madvise.c
index a402f8fdc68e..b414e9af2191 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -221,9 +221,9 @@ static long madvise_willneed(struct vm_area_struct *vma,
{
struct file *file = vma->vm_file;

+ *prev = vma;
#ifdef CONFIG_SWAP
if (!file || mapping_cap_swap_backed(file->f_mapping)) {
- *prev = vma;
if (!file)
force_swapin_readahead(vma, start, end);
else
@@ -241,7 +241,6 @@ static long madvise_willneed(struct vm_area_struct *vma,
return 0;
}

- *prev = vma;
start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
if (end > vma->vm_end)
end = vma->vm_end;
diff --git a/mm/memory.c b/mm/memory.c
index 4ab5b0c55f4a..4dbf1173891d 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -71,7 +71,7 @@

#include "internal.h"

-#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
+#if defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) && !defined(CONFIG_COMPILE_TEST)
#warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid.
#endif

diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index fffff2a6677a..aaf4c2e002e2 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2160,6 +2160,9 @@ bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
case MPOL_INTERLEAVE:
return !!nodes_equal(a->v.nodes, b->v.nodes);
case MPOL_PREFERRED:
+ /* a's ->flags is the same as b's */
+ if (a->flags & MPOL_F_LOCAL)
+ return true;
return a->v.preferred_node == b->v.preferred_node;
default:
BUG();
diff --git a/mm/vmscan.c b/mm/vmscan.c
index c9091e10a18d..f1dc719e0fe8 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1206,6 +1206,7 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode)

if (PageDirty(page)) {
struct address_space *mapping;
+ bool migrate_dirty;

/* ISOLATE_CLEAN means only clean pages */
if (mode & ISOLATE_CLEAN)
@@ -1214,10 +1215,19 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode)
/*
* Only pages without mappings or that have a
* ->migratepage callback are possible to migrate
- * without blocking
+ * without blocking. However, we can be racing with
+ * truncation so it's necessary to lock the page
+ * to stabilise the mapping as truncation holds
+ * the page lock until after the page is removed
+ * from the page cache.
*/
+ if (!trylock_page(page))
+ return ret;
+
mapping = page_mapping(page);
- if (mapping && !mapping->a_ops->migratepage)
+ migrate_dirty = !mapping || mapping->a_ops->migratepage;
+ unlock_page(page);
+ if (!migrate_dirty)
return ret;
}
}
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index 2121d9e5f801..bbf55b440da5 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -164,7 +164,8 @@ static void req_done(struct virtqueue *vq)
p9_debug(P9_DEBUG_TRANS, ": rc %p\n", rc);
p9_debug(P9_DEBUG_TRANS, ": lookup tag %d\n", rc->tag);
req = p9_tag_lookup(chan->client, rc->tag);
- p9_client_cb(chan->client, req, REQ_STATUS_RCVD);
+ if (len)
+ p9_client_cb(chan->client, req, REQ_STATUS_RCVD);
}
}

diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index 5e58b1858c39..12090b307e9c 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -105,7 +105,7 @@ static void batadv_iv_ogm_orig_free(struct batadv_orig_node *orig_node)
* Returns 0 on success, a negative error code otherwise.
*/
static int batadv_iv_ogm_orig_add_if(struct batadv_orig_node *orig_node,
- int max_if_num)
+ unsigned int max_if_num)
{
void *data_ptr;
size_t data_size, old_size;
@@ -150,7 +150,8 @@ static int batadv_iv_ogm_orig_add_if(struct batadv_orig_node *orig_node,
* Returns 0 on success, a negative error code otherwise.
*/
static int batadv_iv_ogm_orig_del_if(struct batadv_orig_node *orig_node,
- int max_if_num, int del_if_num)
+ unsigned int max_if_num,
+ unsigned int del_if_num)
{
int chunk_size, ret = -ENOMEM, if_offset;
void *data_ptr = NULL;
@@ -867,7 +868,7 @@ batadv_iv_ogm_slide_own_bcast_window(struct batadv_hard_iface *hard_iface)
uint32_t i;
size_t word_index;
uint8_t *w;
- int if_num;
+ unsigned int if_num;

for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
@@ -977,7 +978,7 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
struct batadv_neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
struct batadv_neigh_node *router = NULL;
struct batadv_orig_node *orig_node_tmp;
- int if_num;
+ unsigned int if_num;
uint8_t sum_orig, sum_neigh;
uint8_t *neigh_addr;
uint8_t tq_avg;
@@ -1134,7 +1135,8 @@ static int batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node,
uint8_t total_count;
uint8_t orig_eq_count, neigh_rq_count, neigh_rq_inv, tq_own;
unsigned int neigh_rq_inv_cube, neigh_rq_max_cube;
- int if_num, ret = 0;
+ unsigned int if_num;
+ int ret = 0;
unsigned int tq_asym_penalty, inv_asym_penalty;
unsigned int combined_tq;
unsigned int tq_iface_penalty;
@@ -1641,9 +1643,9 @@ static void batadv_iv_ogm_process(const struct sk_buff *skb, int ogm_offset,

if (is_my_orig) {
unsigned long *word;
- int offset;
+ size_t offset;
int32_t bit_pos;
- int16_t if_num;
+ unsigned int if_num;
uint8_t *weight;

orig_neigh_node = batadv_iv_ogm_orig_get(bat_priv,
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index 7353487b787f..ab83d8b16c45 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -343,7 +343,7 @@ static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb,
batadv_arp_hw_src(skb, hdr_size), &ip_src,
batadv_arp_hw_dst(skb, hdr_size), &ip_dst);

- if (hdr_size == 0)
+ if (hdr_size < sizeof(struct batadv_unicast_packet))
return;

unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
index af97752b52aa..50de6a07c65e 100644
--- a/net/batman-adv/fragmentation.c
+++ b/net/batman-adv/fragmentation.c
@@ -262,7 +262,8 @@ batadv_frag_merge_packets(struct hlist_head *chain, struct sk_buff *skb)
/* Move the existing MAC header to just before the payload. (Override
* the fragment header.)
*/
- skb_pull_rcsum(skb_out, hdr_size);
+ skb_pull(skb_out, hdr_size);
+ skb_out->ip_summed = CHECKSUM_NONE;
memmove(skb_out->data - ETH_HLEN, skb_mac_header(skb_out), ETH_HLEN);
skb_set_mac_header(skb_out, -ETH_HLEN);
skb_reset_network_header(skb_out);
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index e0bcf9e84273..b22ba87b6b22 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -804,6 +804,9 @@ bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,

vid = batadv_get_vid(skb, 0);

+ if (is_multicast_ether_addr(ethhdr->h_dest))
+ goto out;
+
orig_dst_node = batadv_transtable_search(bat_priv, ethhdr->h_source,
ethhdr->h_dest, vid);
if (!orig_dst_node)
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index fbda6b54baff..fed6259ed5a5 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -411,6 +411,11 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
hard_iface->soft_iface = soft_iface;
bat_priv = netdev_priv(hard_iface->soft_iface);

+ if (bat_priv->num_ifaces >= UINT_MAX) {
+ ret = -ENOSPC;
+ goto err_dev;
+ }
+
ret = netdev_master_upper_dev_link(hard_iface->net_dev, soft_iface);
if (ret)
goto err_dev;
@@ -514,7 +519,7 @@ void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface,
dev_put(hard_iface->soft_iface);

/* nobody uses this interface anymore */
- if (!bat_priv->num_ifaces) {
+ if (bat_priv->num_ifaces == 0) {
batadv_gw_check_client_stop(bat_priv);

if (autodel == BATADV_IF_CLEANUP_AUTO)
@@ -571,7 +576,7 @@ batadv_hardif_add_interface(struct net_device *net_dev)
if (ret)
goto free_if;

- hard_iface->if_num = -1;
+ hard_iface->if_num = 0;
hard_iface->net_dev = net_dev;
hard_iface->soft_iface = NULL;
hard_iface->if_status = BATADV_IF_NOT_IN_USE;
diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c
index a6a908f1ae87..607bbd0ec343 100644
--- a/net/batman-adv/multicast.c
+++ b/net/batman-adv/multicast.c
@@ -398,8 +398,8 @@ static struct batadv_orig_node *
batadv_mcast_forw_tt_node_get(struct batadv_priv *bat_priv,
struct ethhdr *ethhdr)
{
- return batadv_transtable_search(bat_priv, ethhdr->h_source,
- ethhdr->h_dest, BATADV_NO_FLAGS);
+ return batadv_transtable_search(bat_priv, NULL, ethhdr->h_dest,
+ BATADV_NO_FLAGS);
}

/**
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 9e6967a1f23e..5ed180ce423e 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -1069,7 +1069,7 @@ int batadv_orig_hardif_seq_print_text(struct seq_file *seq, void *offset)
}

int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
- int max_if_num)
+ unsigned int max_if_num)
{
struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
@@ -1105,7 +1105,7 @@ int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
}

int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
- int max_if_num)
+ unsigned int max_if_num)
{
struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
struct batadv_hashtable *hash = bat_priv->orig_hash;
diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h
index ce75339563b8..9dfc35f6a3d8 100644
--- a/net/batman-adv/originator.h
+++ b/net/batman-adv/originator.h
@@ -58,9 +58,9 @@ void batadv_orig_ifinfo_free_ref(struct batadv_orig_ifinfo *orig_ifinfo);
int batadv_orig_seq_print_text(struct seq_file *seq, void *offset);
int batadv_orig_hardif_seq_print_text(struct seq_file *seq, void *offset);
int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
- int max_if_num);
+ unsigned int max_if_num);
int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
- int max_if_num);
+ unsigned int max_if_num);
struct batadv_orig_node_vlan *
batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node,
unsigned short vid);
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 7a1032348ea9..9990d4040f6c 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -687,6 +687,7 @@ static int batadv_route_unicast_packet(struct sk_buff *skb,
/**
* batadv_reroute_unicast_packet - update the unicast header for re-routing
* @bat_priv: the bat priv with all the soft interface information
+ * @skb: unicast packet to process
* @unicast_packet: the unicast header to be updated
* @dst_addr: the payload destination
* @vid: VLAN identifier
@@ -698,7 +699,7 @@ static int batadv_route_unicast_packet(struct sk_buff *skb,
* Returns true if the packet header has been updated, false otherwise
*/
static bool
-batadv_reroute_unicast_packet(struct batadv_priv *bat_priv,
+batadv_reroute_unicast_packet(struct batadv_priv *bat_priv, struct sk_buff *skb,
struct batadv_unicast_packet *unicast_packet,
uint8_t *dst_addr, unsigned short vid)
{
@@ -727,8 +728,10 @@ batadv_reroute_unicast_packet(struct batadv_priv *bat_priv,
}

/* update the packet header */
+ skb_postpull_rcsum(skb, unicast_packet, sizeof(*unicast_packet));
ether_addr_copy(unicast_packet->dest, orig_addr);
unicast_packet->ttvn = orig_ttvn;
+ skb_postpush_rcsum(skb, unicast_packet, sizeof(*unicast_packet));

ret = true;
out:
@@ -768,7 +771,7 @@ static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
* the packet to
*/
if (batadv_tt_local_client_is_roaming(bat_priv, ethhdr->h_dest, vid)) {
- if (batadv_reroute_unicast_packet(bat_priv, unicast_packet,
+ if (batadv_reroute_unicast_packet(bat_priv, skb, unicast_packet,
ethhdr->h_dest, vid))
net_ratelimited_function(batadv_dbg, BATADV_DBG_TT,
bat_priv,
@@ -814,7 +817,7 @@ static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
* destination can possibly be updated and forwarded towards the new
* target host
*/
- if (batadv_reroute_unicast_packet(bat_priv, unicast_packet,
+ if (batadv_reroute_unicast_packet(bat_priv, skb, unicast_packet,
ethhdr->h_dest, vid)) {
net_ratelimited_function(batadv_dbg, BATADV_DBG_TT, bat_priv,
"Rerouting unicast packet to %pM (dst=%pM): TTVN mismatch old_ttvn=%u new_ttvn=%u\n",
@@ -837,12 +840,14 @@ static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
if (!primary_if)
return 0;

+ /* update the packet header */
+ skb_postpull_rcsum(skb, unicast_packet, sizeof(*unicast_packet));
ether_addr_copy(unicast_packet->dest, primary_if->net_dev->dev_addr);
+ unicast_packet->ttvn = curr_ttvn;
+ skb_postpush_rcsum(skb, unicast_packet, sizeof(*unicast_packet));

batadv_hardif_free_ref(primary_if);

- unicast_packet->ttvn = curr_ttvn;
-
return 1;
}

@@ -886,8 +891,6 @@ int batadv_recv_unicast_packet(struct sk_buff *skb,
bool is4addr;

unicast_packet = (struct batadv_unicast_packet *)skb->data;
- unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
-
is4addr = unicast_packet->packet_type == BATADV_UNICAST_4ADDR;
/* the caller function should have already pulled 2 bytes */
if (is4addr)
@@ -907,9 +910,13 @@ int batadv_recv_unicast_packet(struct sk_buff *skb,
if (!batadv_check_unicast_ttvn(bat_priv, skb, hdr_size))
return NET_RX_DROP;

+ unicast_packet = (struct batadv_unicast_packet *)skb->data;
+
/* packet for me */
if (batadv_is_my_mac(bat_priv, unicast_packet->dest)) {
if (is4addr) {
+ unicast_4addr_packet =
+ (struct batadv_unicast_4addr_packet *)skb->data;
batadv_dat_inc_counter(bat_priv,
unicast_4addr_packet->subtype);
orig_addr = unicast_4addr_packet->src;
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 765c89ccc3fb..c8642161b1fc 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -401,13 +401,7 @@ void batadv_interface_rx(struct net_device *soft_iface,

/* skb->dev & skb->pkt_type are set here */
skb->protocol = eth_type_trans(skb, soft_iface);
-
- /* should not be necessary anymore as we use skb_pull_rcsum()
- * TODO: please verify this and remove this TODO
- * -- Dec 21st 2009, Simon Wunderlich
- */
-
- /* skb->ip_summed = CHECKSUM_UNNECESSARY; */
+ skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);

batadv_inc_counter(bat_priv, BATADV_CNT_RX);
batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 9161e3e62a55..c6388ce62f82 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -89,7 +89,7 @@ struct batadv_hard_iface_bat_iv {
*/
struct batadv_hard_iface {
struct list_head list;
- int16_t if_num;
+ unsigned int if_num;
char if_status;
struct net_device *net_dev;
uint8_t num_bcasts;
@@ -795,7 +795,7 @@ struct batadv_priv {
atomic_t bcast_seqno;
atomic_t bcast_queue_left;
atomic_t batman_queue_left;
- char num_ifaces;
+ unsigned int num_ifaces;
struct kobject *mesh_obj;
struct dentry *debug_dir;
struct hlist_head forw_bat_list;
@@ -1166,9 +1166,10 @@ struct batadv_algo_ops {
struct batadv_hard_iface *hard_iface);
void (*bat_orig_free)(struct batadv_orig_node *orig_node);
int (*bat_orig_add_if)(struct batadv_orig_node *orig_node,
- int max_if_num);
+ unsigned int max_if_num);
int (*bat_orig_del_if)(struct batadv_orig_node *orig_node,
- int max_if_num, int del_if_num);
+ unsigned int max_if_num,
+ unsigned int del_if_num);
};

/**
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 3b45cfee3743..32400034fed4 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -1332,13 +1332,14 @@ int hidp_connection_add(struct hidp_connadd_req *req,
{
struct hidp_session *session;
struct l2cap_conn *conn;
- struct l2cap_chan *chan = l2cap_pi(ctrl_sock->sk)->chan;
+ struct l2cap_chan *chan;
int ret;

ret = hidp_verify_sockets(ctrl_sock, intr_sock);
if (ret)
return ret;

+ chan = l2cap_pi(ctrl_sock->sk)->chan;
conn = NULL;
l2cap_chan_lock(chan);
if (chan->conn) {
diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
index e561cd59b8a6..65a092c5e0f5 100644
--- a/net/bridge/br_sysfs_if.c
+++ b/net/bridge/br_sysfs_if.c
@@ -225,6 +225,9 @@ static ssize_t brport_show(struct kobject *kobj,
struct brport_attribute *brport_attr = to_brport_attr(attr);
struct net_bridge_port *p = to_brport(kobj);

+ if (!brport_attr->show)
+ return -EINVAL;
+
return brport_attr->show(p, buf);
}

diff --git a/net/bridge/netfilter/ebt_among.c b/net/bridge/netfilter/ebt_among.c
index 9024283d2bca..9adf16258cab 100644
--- a/net/bridge/netfilter/ebt_among.c
+++ b/net/bridge/netfilter/ebt_among.c
@@ -172,18 +172,69 @@ ebt_among_mt(const struct sk_buff *skb, struct xt_action_param *par)
return true;
}

+static bool poolsize_invalid(const struct ebt_mac_wormhash *w)
+{
+ return w && w->poolsize >= (INT_MAX / sizeof(struct ebt_mac_wormhash_tuple));
+}
+
+static bool wormhash_offset_invalid(int off, unsigned int len)
+{
+ if (off == 0) /* not present */
+ return false;
+
+ if (off < (int)sizeof(struct ebt_among_info) ||
+ off % __alignof__(struct ebt_mac_wormhash))
+ return true;
+
+ off += sizeof(struct ebt_mac_wormhash);
+
+ return off > len;
+}
+
+static bool wormhash_sizes_valid(const struct ebt_mac_wormhash *wh, int a, int b)
+{
+ if (a == 0)
+ a = sizeof(struct ebt_among_info);
+
+ return ebt_mac_wormhash_size(wh) + a == b;
+}
+
static int ebt_among_mt_check(const struct xt_mtchk_param *par)
{
const struct ebt_among_info *info = par->matchinfo;
const struct ebt_entry_match *em =
container_of(par->matchinfo, const struct ebt_entry_match, data);
- int expected_length = sizeof(struct ebt_among_info);
+ unsigned int expected_length = sizeof(struct ebt_among_info);
const struct ebt_mac_wormhash *wh_dst, *wh_src;
int err;

+ if (expected_length > em->match_size)
+ return -EINVAL;
+
+ if (wormhash_offset_invalid(info->wh_dst_ofs, em->match_size) ||
+ wormhash_offset_invalid(info->wh_src_ofs, em->match_size))
+ return -EINVAL;
+
wh_dst = ebt_among_wh_dst(info);
- wh_src = ebt_among_wh_src(info);
+ if (poolsize_invalid(wh_dst))
+ return -EINVAL;
+
expected_length += ebt_mac_wormhash_size(wh_dst);
+ if (expected_length > em->match_size)
+ return -EINVAL;
+
+ wh_src = ebt_among_wh_src(info);
+ if (poolsize_invalid(wh_src))
+ return -EINVAL;
+
+ if (info->wh_src_ofs < info->wh_dst_ofs) {
+ if (!wormhash_sizes_valid(wh_src, info->wh_src_ofs, info->wh_dst_ofs))
+ return -EINVAL;
+ } else {
+ if (!wormhash_sizes_valid(wh_dst, info->wh_dst_ofs, info->wh_src_ofs))
+ return -EINVAL;
+ }
+
expected_length += ebt_mac_wormhash_size(wh_src);

if (em->match_size != EBT_ALIGN(expected_length)) {
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 1059ed3bc255..72142370bd98 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -2010,7 +2010,9 @@ static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
if (match_kern)
match_kern->match_size = ret;

- WARN_ON(type == EBT_COMPAT_TARGET && size_left);
+ if (WARN_ON(type == EBT_COMPAT_TARGET && size_left))
+ return -EINVAL;
+
match32 = (struct compat_ebt_entry_mwt *) buf;
}

@@ -2067,6 +2069,19 @@ static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
*
* offsets are relative to beginning of struct ebt_entry (i.e., 0).
*/
+ for (i = 0; i < 4 ; ++i) {
+ if (offsets[i] > *total)
+ return -EINVAL;
+
+ if (i < 3 && offsets[i] == *total)
+ return -EINVAL;
+
+ if (i == 0)
+ continue;
+ if (offsets[i-1] > offsets[i])
+ return -EINVAL;
+ }
+
for (i = 0, j = 1 ; j < 4 ; j++, i++) {
struct compat_ebt_entry_mwt *match32;
unsigned int size;
diff --git a/net/core/dev.c b/net/core/dev.c
index 37e2e5423af0..f8f03ebd0507 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2084,8 +2084,11 @@ EXPORT_SYMBOL(netif_set_xps_queue);
*/
int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
{
+ bool disabling;
int rc;

+ disabling = txq < dev->real_num_tx_queues;
+
if (txq < 1 || txq > dev->num_tx_queues)
return -EINVAL;

@@ -2101,15 +2104,19 @@ int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
if (dev->num_tc)
netif_setup_tc(dev, txq);

- if (txq < dev->real_num_tx_queues) {
+ dev->real_num_tx_queues = txq;
+
+ if (disabling) {
+ synchronize_net();
qdisc_reset_all_tx_gt(dev, txq);
#ifdef CONFIG_XPS
netif_reset_xps_queues_gt(dev, txq);
#endif
}
+ } else {
+ dev->real_num_tx_queues = txq;
}

- dev->real_num_tx_queues = txq;
return 0;
}
EXPORT_SYMBOL(netif_set_real_num_tx_queues);
@@ -2329,7 +2336,7 @@ __be16 skb_network_protocol(struct sk_buff *skb, int *depth)
if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
return 0;

- eth = (struct ethhdr *)skb_mac_header(skb);
+ eth = (struct ethhdr *)skb->data;
type = eth->h_proto;
}

diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 235c639d370b..9bb9a01a1478 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3512,7 +3512,7 @@ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)

skb_queue_tail(&sk->sk_error_queue, skb);
if (!sock_flag(sk, SOCK_DEAD))
- sk->sk_data_ready(sk);
+ sk->sk_error_report(sk);
return 0;
}
EXPORT_SYMBOL(sock_queue_err_skb);
@@ -3994,13 +3994,18 @@ EXPORT_SYMBOL_GPL(skb_gso_transport_seglen);

static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
{
+ int mac_len;
+
if (skb_cow(skb, skb_headroom(skb)) < 0) {
kfree_skb(skb);
return NULL;
}

- memmove(skb->data - ETH_HLEN, skb->data - skb->mac_len - VLAN_HLEN,
- 2 * ETH_ALEN);
+ mac_len = skb->data - skb_mac_header(skb);
+ if (likely(mac_len > VLAN_HLEN + ETH_TLEN)) {
+ memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb),
+ mac_len - VLAN_HLEN - ETH_TLEN);
+ }
skb->mac_header += VLAN_HLEN;
return skb;
}
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 82068e0d9891..b9d4f2c55dec 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -785,6 +785,11 @@ int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
if (skb == NULL)
goto out_release;

+ if (sk->sk_state == DCCP_CLOSED) {
+ rc = -ENOTCONN;
+ goto out_discard;
+ }
+
skb_reserve(skb, sk->sk_prot->max_header);
rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
if (rc != 0)
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 0d3cc9566f0f..e6c3234338f9 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -1336,6 +1336,12 @@ static int dn_setsockopt(struct socket *sock, int level, int optname, char __use
lock_sock(sk);
err = __dn_setsockopt(sock, level, optname, optval, optlen, 0);
release_sock(sk);
+#ifdef CONFIG_NETFILTER
+ /* we need to exclude all possible ENOPROTOOPTs except default case */
+ if (err == -ENOPROTOOPT && optname != DSO_LINKINFO &&
+ optname != DSO_STREAM && optname != DSO_SEQPACKET)
+ err = nf_setsockopt(sk, PF_DECnet, optname, optval, optlen);
+#endif

return err;
}
@@ -1443,15 +1449,6 @@ static int __dn_setsockopt(struct socket *sock, int level,int optname, char __us
dn_nsp_send_disc(sk, 0x38, 0, sk->sk_allocation);
break;

- default:
-#ifdef CONFIG_NETFILTER
- return nf_setsockopt(sk, PF_DECnet, optname, optval, optlen);
-#endif
- case DSO_LINKINFO:
- case DSO_STREAM:
- case DSO_SEQPACKET:
- return -ENOPROTOOPT;
-
case DSO_MAXWINDOW:
if (optlen != sizeof(unsigned long))
return -EINVAL;
@@ -1499,6 +1496,12 @@ static int __dn_setsockopt(struct socket *sock, int level,int optname, char __us
return -EINVAL;
scp->info_loc = u.info;
break;
+
+ case DSO_LINKINFO:
+ case DSO_STREAM:
+ case DSO_SEQPACKET:
+ default:
+ return -ENOPROTOOPT;
}

return 0;
@@ -1512,6 +1515,20 @@ static int dn_getsockopt(struct socket *sock, int level, int optname, char __use
lock_sock(sk);
err = __dn_getsockopt(sock, level, optname, optval, optlen, 0);
release_sock(sk);
+#ifdef CONFIG_NETFILTER
+ if (err == -ENOPROTOOPT && optname != DSO_STREAM &&
+ optname != DSO_SEQPACKET && optname != DSO_CONACCEPT &&
+ optname != DSO_CONREJECT) {
+ int len;
+
+ if (get_user(len, optlen))
+ return -EFAULT;
+
+ err = nf_getsockopt(sk, PF_DECnet, optname, optval, &len);
+ if (err >= 0)
+ err = put_user(len, optlen);
+ }
+#endif

return err;
}
@@ -1577,26 +1594,6 @@ static int __dn_getsockopt(struct socket *sock, int level,int optname, char __us
r_data = &link;
break;

- default:
-#ifdef CONFIG_NETFILTER
- {
- int ret, len;
-
- if (get_user(len, optlen))
- return -EFAULT;
-
- ret = nf_getsockopt(sk, PF_DECnet, optname, optval, &len);
- if (ret >= 0)
- ret = put_user(len, optlen);
- return ret;
- }
-#endif
- case DSO_STREAM:
- case DSO_SEQPACKET:
- case DSO_CONACCEPT:
- case DSO_CONREJECT:
- return -ENOPROTOOPT;
-
case DSO_MAXWINDOW:
if (r_len > sizeof(unsigned long))
r_len = sizeof(unsigned long);
@@ -1628,6 +1625,13 @@ static int __dn_getsockopt(struct socket *sock, int level,int optname, char __us
r_len = sizeof(unsigned char);
r_data = &scp->info_rem;
break;
+
+ case DSO_STREAM:
+ case DSO_SEQPACKET:
+ case DSO_CONACCEPT:
+ case DSO_CONREJECT:
+ default:
+ return -ENOPROTOOPT;
}

if (r_data) {
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 387c5e404650..337c333f64ae 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -388,7 +388,11 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, unsigned int mtu)
pip->frag_off = htons(IP_DF);
pip->ttl = 1;
pip->daddr = fl4.daddr;
+
+ rcu_read_lock();
pip->saddr = igmpv3_get_srcaddr(dev, &fl4);
+ rcu_read_unlock();
+
pip->protocol = IPPROTO_IGMP;
pip->tot_len = 0; /* filled in later */
ip_select_ident(skb, NULL);
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index ecb2b00b6ad3..4b03c579c747 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -1106,11 +1106,8 @@ int ip_setsockopt(struct sock *sk, int level,
if (err == -ENOPROTOOPT && optname != IP_HDRINCL &&
optname != IP_IPSEC_POLICY &&
optname != IP_XFRM_POLICY &&
- !ip_mroute_opt(optname)) {
- lock_sock(sk);
+ !ip_mroute_opt(optname))
err = nf_setsockopt(sk, PF_INET, optname, optval, optlen);
- release_sock(sk);
- }
#endif
return err;
}
@@ -1135,12 +1132,9 @@ int compat_ip_setsockopt(struct sock *sk, int level, int optname,
if (err == -ENOPROTOOPT && optname != IP_HDRINCL &&
optname != IP_IPSEC_POLICY &&
optname != IP_XFRM_POLICY &&
- !ip_mroute_opt(optname)) {
- lock_sock(sk);
- err = compat_nf_setsockopt(sk, PF_INET, optname,
- optval, optlen);
- release_sock(sk);
- }
+ !ip_mroute_opt(optname))
+ err = compat_nf_setsockopt(sk, PF_INET, optname, optval,
+ optlen);
#endif
return err;
}
@@ -1397,10 +1391,7 @@ int ip_getsockopt(struct sock *sk, int level,
if (get_user(len, optlen))
return -EFAULT;

- lock_sock(sk);
- err = nf_getsockopt(sk, PF_INET, optname, optval,
- &len);
- release_sock(sk);
+ err = nf_getsockopt(sk, PF_INET, optname, optval, &len);
if (err >= 0)
err = put_user(len, optlen);
return err;
@@ -1432,9 +1423,7 @@ int compat_ip_getsockopt(struct sock *sk, int level, int optname,
if (get_user(len, optlen))
return -EFAULT;

- lock_sock(sk);
err = compat_nf_getsockopt(sk, PF_INET, optname, optval, &len);
- release_sock(sk);
if (err >= 0)
err = put_user(len, optlen);
return err;
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 51dd1605f944..34fce09c5fd1 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -405,17 +405,26 @@ static struct ip_tunnel *ip_tunnel_create(struct net *net,
{
struct ip_tunnel *nt;
struct net_device *dev;
+ int mtu;
+ int err;

BUG_ON(!itn->fb_tunnel_dev);
dev = __ip_tunnel_create(net, itn->fb_tunnel_dev->rtnl_link_ops, parms);
if (IS_ERR(dev))
return ERR_CAST(dev);

- dev->mtu = ip_tunnel_bind_dev(dev);
+ mtu = ip_tunnel_bind_dev(dev);
+ err = dev_set_mtu(dev, mtu);
+ if (err)
+ goto err_dev_set_mtu;

nt = netdev_priv(dev);
ip_tunnel_add(itn, nt);
return nt;
+
+err_dev_set_mtu:
+ unregister_netdevice(dev);
+ return ERR_PTR(err);
}

int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
@@ -962,18 +971,29 @@ int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
nt->parms = *p;
err = register_netdevice(dev);
if (err)
- goto out;
+ goto err_register_netdevice;

if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
eth_hw_addr_random(dev);

mtu = ip_tunnel_bind_dev(dev);
- if (!tb[IFLA_MTU])
- dev->mtu = mtu;
+ if (tb[IFLA_MTU]) {
+ unsigned int max = 0xfff8 - dev->hard_header_len - nt->hlen;
+
+ mtu = clamp(dev->mtu, (unsigned int)ETH_MIN_MTU,
+ (unsigned int)(max - sizeof(struct iphdr)));
+ }
+
+ err = dev_set_mtu(dev, mtu);
+ if (err)
+ goto err_dev_set_mtu;

ip_tunnel_add(itn, nt);
+ return 0;

-out:
+err_dev_set_mtu:
+ unregister_netdevice(dev);
+err_register_netdevice:
return err;
}
EXPORT_SYMBOL_GPL(ip_tunnel_newlink);
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index ca0b7408614c..b4fc9e710308 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -359,8 +359,6 @@ static int vti_tunnel_init(struct net_device *dev)
memcpy(dev->dev_addr, &iph->saddr, 4);
memcpy(dev->broadcast, &iph->daddr, 4);

- dev->hard_header_len = LL_MAX_HEADER + sizeof(struct iphdr);
- dev->mtu = ETH_DATA_LEN;
dev->flags = IFF_NOARP;
dev->iflink = 0;
dev->addr_len = 4;
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 0b6a54b9bdeb..36abfce2c2d9 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -147,8 +147,12 @@ clusterip_config_find_get(struct net *net, __be32 clusterip, int entry)
if (c) {
if (unlikely(!atomic_inc_not_zero(&c->refcount)))
c = NULL;
- else if (entry)
- atomic_inc(&c->entries);
+ else if (entry) {
+ if (unlikely(!atomic_inc_not_zero(&c->entries))) {
+ clusterip_config_put(c);
+ c = NULL;
+ }
+ }
}
rcu_read_unlock_bh();

@@ -366,7 +370,7 @@ static int clusterip_tg_check(const struct xt_tgchk_param *par)
struct ipt_clusterip_tgt_info *cipinfo = par->targinfo;
const struct ipt_entry *e = par->entryinfo;
struct clusterip_config *config;
- int ret;
+ int ret, i;

if (cipinfo->hash_mode != CLUSTERIP_HASHMODE_SIP &&
cipinfo->hash_mode != CLUSTERIP_HASHMODE_SIP_SPT &&
@@ -380,8 +384,18 @@ static int clusterip_tg_check(const struct xt_tgchk_param *par)
pr_info("Please specify destination IP\n");
return -EINVAL;
}
-
- /* FIXME: further sanity checks */
+ if (cipinfo->num_local_nodes > ARRAY_SIZE(cipinfo->local_nodes)) {
+ pr_info("bad num_local_nodes %u\n", cipinfo->num_local_nodes);
+ return -EINVAL;
+ }
+ for (i = 0; i < cipinfo->num_local_nodes; i++) {
+ if (cipinfo->local_nodes[i] - 1 >=
+ sizeof(config->local_nodes) * 8) {
+ pr_info("bad local_nodes[%d] %u\n",
+ i, cipinfo->local_nodes[i]);
+ return -EINVAL;
+ }
+ }

config = clusterip_config_find_get(par->net, e->ip.dst.s_addr, 1);
if (!config) {
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index 8127dc802865..5343d72a5a34 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -269,15 +269,19 @@ getorigdst(struct sock *sk, int optval, void __user *user, int *len)
struct nf_conntrack_tuple tuple;

memset(&tuple, 0, sizeof(tuple));
+
+ lock_sock(sk);
tuple.src.u3.ip = inet->inet_rcv_saddr;
tuple.src.u.tcp.port = inet->inet_sport;
tuple.dst.u3.ip = inet->inet_daddr;
tuple.dst.u.tcp.port = inet->inet_dport;
tuple.src.l3num = PF_INET;
tuple.dst.protonum = sk->sk_protocol;
+ release_sock(sk);

/* We only do TCP and SCTP at the moment: is there a better way? */
- if (sk->sk_protocol != IPPROTO_TCP && sk->sk_protocol != IPPROTO_SCTP) {
+ if (tuple.dst.protonum != IPPROTO_TCP &&
+ tuple.dst.protonum != IPPROTO_SCTP) {
pr_debug("SO_ORIGINAL_DST: Not a TCP/SCTP socket\n");
return -ENOPROTOOPT;
}
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index a7c1d6f798ea..c75c5e89dfc9 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -608,6 +608,7 @@ static inline u32 fnhe_hashfun(__be32 daddr)
static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe)
{
rt->rt_pmtu = fnhe->fnhe_pmtu;
+ rt->rt_mtu_locked = fnhe->fnhe_mtu_locked;
rt->dst.expires = fnhe->fnhe_expires;

if (fnhe->fnhe_gw) {
@@ -618,7 +619,7 @@ static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnh
}

static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
- u32 pmtu, unsigned long expires)
+ u32 pmtu, bool lock, unsigned long expires)
{
struct fnhe_hash_bucket *hash;
struct fib_nh_exception *fnhe;
@@ -655,8 +656,10 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
fnhe->fnhe_genid = genid;
if (gw)
fnhe->fnhe_gw = gw;
- if (pmtu)
+ if (pmtu) {
fnhe->fnhe_pmtu = pmtu;
+ fnhe->fnhe_mtu_locked = lock;
+ }
fnhe->fnhe_expires = max(1UL, expires);
/* Update all cached dsts too */
rt = rcu_dereference(fnhe->fnhe_rth_input);
@@ -680,6 +683,7 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
fnhe->fnhe_daddr = daddr;
fnhe->fnhe_gw = gw;
fnhe->fnhe_pmtu = pmtu;
+ fnhe->fnhe_mtu_locked = lock;
fnhe->fnhe_expires = expires;

/* Exception created; mark the cached routes for the nexthop
@@ -761,7 +765,8 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow
struct fib_nh *nh = &FIB_RES_NH(res);

update_or_create_fnhe(nh, fl4->daddr, new_gw,
- 0, jiffies + ip_rt_gc_timeout);
+ 0, false,
+ jiffies + ip_rt_gc_timeout);
}
if (kill_route)
rt->dst.obsolete = DST_OBSOLETE_KILL;
@@ -970,15 +975,18 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
{
struct dst_entry *dst = &rt->dst;
struct fib_result res;
+ bool lock = false;

- if (dst_metric_locked(dst, RTAX_MTU))
+ if (ip_mtu_locked(dst))
return;

if (dst->dev->mtu < mtu)
return;

- if (mtu < ip_rt_min_pmtu)
+ if (mtu < ip_rt_min_pmtu) {
+ lock = true;
mtu = ip_rt_min_pmtu;
+ }

if (rt->rt_pmtu == mtu &&
time_before(jiffies, dst->expires - ip_rt_mtu_expires / 2))
@@ -988,7 +996,7 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
if (fib_lookup(dev_net(dst->dev), fl4, &res) == 0) {
struct fib_nh *nh = &FIB_RES_NH(res);

- update_or_create_fnhe(nh, fl4->daddr, 0, mtu,
+ update_or_create_fnhe(nh, fl4->daddr, 0, mtu, lock,
jiffies + ip_rt_mtu_expires);
}
rcu_read_unlock();
@@ -1243,7 +1251,7 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst)

mtu = dst->dev->mtu;

- if (unlikely(dst_metric_locked(dst, RTAX_MTU))) {
+ if (unlikely(ip_mtu_locked(dst))) {
if (rt->rt_uses_gateway && mtu > 576)
mtu = 576;
}
@@ -1435,12 +1443,34 @@ static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
}

static struct rtable *rt_dst_alloc(struct net_device *dev,
+ unsigned int flags, u16 type,
bool nopolicy, bool noxfrm, bool will_cache)
{
- return dst_alloc(&ipv4_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK,
- (will_cache ? 0 : (DST_HOST | DST_NOCACHE)) |
- (nopolicy ? DST_NOPOLICY : 0) |
- (noxfrm ? DST_NOXFRM : 0));
+ struct rtable *rt;
+
+ rt = dst_alloc(&ipv4_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK,
+ (will_cache ? 0 : (DST_HOST | DST_NOCACHE)) |
+ (nopolicy ? DST_NOPOLICY : 0) |
+ (noxfrm ? DST_NOXFRM : 0));
+
+ if (rt) {
+ rt->rt_genid = rt_genid_ipv4(dev_net(dev));
+ rt->rt_flags = flags;
+ rt->rt_type = type;
+ rt->rt_is_input = 0;
+ rt->rt_iif = 0;
+ rt->rt_pmtu = 0;
+ rt->rt_mtu_locked = 0;
+ rt->rt_gateway = 0;
+ rt->rt_uses_gateway = 0;
+ INIT_LIST_HEAD(&rt->rt_uncached);
+
+ rt->dst.output = ip_output;
+ if (flags & RTCF_LOCAL)
+ rt->dst.input = ip_local_deliver;
+ }
+
+ return rt;
}

/* called in rcu_read_lock() section */
@@ -1480,6 +1510,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
{
struct in_device *in_dev = __in_dev_get_rcu(dev);
struct rtable *rth;
+ unsigned int flags = RTCF_MULTICAST;
u32 itag = 0;
int err;

@@ -1487,7 +1518,10 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
if (err)
return err;

- rth = rt_dst_alloc(dev_net(dev)->loopback_dev,
+ if (our)
+ flags |= RTCF_LOCAL;
+
+ rth = rt_dst_alloc(dev_net(dev)->loopback_dev, flags, RTN_MULTICAST,
IN_DEV_CONF_GET(in_dev, NOPOLICY), false, false);
if (!rth)
return -ENOBUFS;
@@ -1496,20 +1530,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
rth->dst.tclassid = itag;
#endif
rth->dst.output = ip_rt_bug;
-
- rth->rt_genid = rt_genid_ipv4(dev_net(dev));
- rth->rt_flags = RTCF_MULTICAST;
- rth->rt_type = RTN_MULTICAST;
rth->rt_is_input= 1;
- rth->rt_iif = 0;
- rth->rt_pmtu = 0;
- rth->rt_gateway = 0;
- rth->rt_uses_gateway = 0;
- INIT_LIST_HEAD(&rth->rt_uncached);
- if (our) {
- rth->dst.input= ip_local_deliver;
- rth->rt_flags |= RTCF_LOCAL;
- }

#ifdef CONFIG_IP_MROUTE
if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
@@ -1587,7 +1608,6 @@ static int __mkroute_input(struct sk_buff *skb,
struct rtable *rth;
int err;
struct in_device *out_dev;
- unsigned int flags = 0;
bool do_cache;
u32 itag = 0;

@@ -1651,7 +1671,7 @@ static int __mkroute_input(struct sk_buff *skb,
}
}

- rth = rt_dst_alloc(out_dev->dev,
+ rth = rt_dst_alloc(out_dev->dev, 0, res->type,
IN_DEV_CONF_GET(in_dev, NOPOLICY),
IN_DEV_CONF_GET(out_dev, NOXFRM), do_cache);
if (!rth) {
@@ -1659,19 +1679,10 @@ static int __mkroute_input(struct sk_buff *skb,
goto cleanup;
}

- rth->rt_genid = rt_genid_ipv4(dev_net(rth->dst.dev));
- rth->rt_flags = flags;
- rth->rt_type = res->type;
rth->rt_is_input = 1;
- rth->rt_iif = 0;
- rth->rt_pmtu = 0;
- rth->rt_gateway = 0;
- rth->rt_uses_gateway = 0;
- INIT_LIST_HEAD(&rth->rt_uncached);
RT_CACHE_STAT_INC(in_slow_tot);

rth->dst.input = ip_forward;
- rth->dst.output = ip_output;

rt_set_nexthop(rth, daddr, res, fnhe, res->fi, res->type, itag);
skb_dst_set(skb, &rth->dst);
@@ -1822,26 +1833,17 @@ out: return err;
}
}

- rth = rt_dst_alloc(net->loopback_dev,
+ rth = rt_dst_alloc(net->loopback_dev, flags | RTCF_LOCAL, res.type,
IN_DEV_CONF_GET(in_dev, NOPOLICY), false, do_cache);
if (!rth)
goto e_nobufs;

- rth->dst.input= ip_local_deliver;
rth->dst.output= ip_rt_bug;
#ifdef CONFIG_IP_ROUTE_CLASSID
rth->dst.tclassid = itag;
#endif
-
- rth->rt_genid = rt_genid_ipv4(net);
- rth->rt_flags = flags|RTCF_LOCAL;
- rth->rt_type = res.type;
rth->rt_is_input = 1;
- rth->rt_iif = 0;
- rth->rt_pmtu = 0;
- rth->rt_gateway = 0;
- rth->rt_uses_gateway = 0;
- INIT_LIST_HEAD(&rth->rt_uncached);
+
RT_CACHE_STAT_INC(in_slow_tot);
if (res.type == RTN_UNREACHABLE) {
rth->dst.input= ip_error;
@@ -2038,29 +2040,16 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
}

add:
- rth = rt_dst_alloc(dev_out,
+ rth = rt_dst_alloc(dev_out, flags, type,
IN_DEV_CONF_GET(in_dev, NOPOLICY),
IN_DEV_CONF_GET(in_dev, NOXFRM),
do_cache);
if (!rth)
return ERR_PTR(-ENOBUFS);

- rth->dst.output = ip_output;
-
- rth->rt_genid = rt_genid_ipv4(dev_net(dev_out));
- rth->rt_flags = flags;
- rth->rt_type = type;
- rth->rt_is_input = 0;
rth->rt_iif = orig_oif ? : 0;
- rth->rt_pmtu = 0;
- rth->rt_gateway = 0;
- rth->rt_uses_gateway = 0;
- INIT_LIST_HEAD(&rth->rt_uncached);
-
RT_CACHE_STAT_INC(out_slow_tot);

- if (flags & RTCF_LOCAL)
- rth->dst.input = ip_local_deliver;
if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
if (flags & RTCF_LOCAL &&
!(dev_out->flags & IFF_LOOPBACK)) {
@@ -2328,6 +2317,7 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or
rt->rt_is_input = ort->rt_is_input;
rt->rt_iif = ort->rt_iif;
rt->rt_pmtu = ort->rt_pmtu;
+ rt->rt_mtu_locked = ort->rt_mtu_locked;

rt->rt_genid = rt_genid_ipv4(net);
rt->rt_flags = ort->rt_flags;
@@ -2431,6 +2421,8 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
if (rt->rt_pmtu && expires)
metrics[RTAX_MTU - 1] = rt->rt_pmtu;
+ if (rt->rt_mtu_locked && expires)
+ metrics[RTAX_LOCK - 1] |= BIT(RTAX_MTU);
if (rtnetlink_put_metrics(skb, metrics) < 0)
goto nla_put_failure;

diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 5877d782bdc1..41c96cacdbaa 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1729,6 +1729,11 @@ static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
err = udplite_checksum_init(skb, uh);
if (err)
return err;
+
+ if (UDP_SKB_CB(skb)->partial_cov) {
+ skb->csum = inet_compute_pseudo(skb, proto);
+ return 0;
+ }
}

return skb_checksum_init_zero_check(skb, proto, uh->check,
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 94fc16dad6c6..3a4c1f93023a 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -93,6 +93,7 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
xdst->u.rt.rt_gateway = rt->rt_gateway;
xdst->u.rt.rt_uses_gateway = rt->rt_uses_gateway;
xdst->u.rt.rt_pmtu = rt->rt_pmtu;
+ xdst->u.rt.rt_mtu_locked = rt->rt_mtu_locked;
INIT_LIST_HEAD(&xdst->u.rt.rt_uncached);

return 0;
diff --git a/net/ipv6/ip6_checksum.c b/net/ipv6/ip6_checksum.c
index 9a4d7322fb22..391a8fedb27e 100644
--- a/net/ipv6/ip6_checksum.c
+++ b/net/ipv6/ip6_checksum.c
@@ -73,6 +73,11 @@ int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh, int proto)
err = udplite_checksum_init(skb, uh);
if (err)
return err;
+
+ if (UDP_SKB_CB(skb)->partial_cov) {
+ skb->csum = ip6_compute_pseudo(skb, proto);
+ return 0;
+ }
}

/* To support RFC 6936 (allow zero checksum in UDP/IPV6 for tunnels)
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 1fd80d8abd1d..0d30c9192da3 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1146,7 +1146,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
struct ipv6_pinfo *np = inet6_sk(sk);
struct inet_cork *cork;
struct sk_buff *skb, *skb_prev = NULL;
- unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu, headersize;
+ unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu, headersize, pmtu;
int exthdrlen;
int dst_exthdrlen;
int hh_len;
@@ -1242,6 +1242,12 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
sizeof(struct frag_hdr) : 0) +
rt->rt6i_nfheader_len;

+ /* as per RFC 7112 section 5, the entire IPv6 Header Chain must fit
+ * the first fragment
+ */
+ if (headersize + transhdrlen > mtu)
+ goto emsgsize;
+
if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) {
unsigned int maxnonfragsize;

@@ -1261,9 +1267,8 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,

if (cork->length + length > maxnonfragsize - headersize) {
emsgsize:
- ipv6_local_error(sk, EMSGSIZE, fl6,
- mtu - headersize +
- sizeof(struct ipv6hdr));
+ pmtu = max_t(int, mtu - headersize + sizeof(struct ipv6hdr), 0);
+ ipv6_local_error(sk, EMSGSIZE, fl6, pmtu);
return -EMSGSIZE;
}
}
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index acda3ed4ba7e..eee453393cf7 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -871,12 +871,8 @@ int ipv6_setsockopt(struct sock *sk, int level, int optname,
#ifdef CONFIG_NETFILTER
/* we need to exclude all possible ENOPROTOOPTs except default case */
if (err == -ENOPROTOOPT && optname != IPV6_IPSEC_POLICY &&
- optname != IPV6_XFRM_POLICY) {
- lock_sock(sk);
- err = nf_setsockopt(sk, PF_INET6, optname, optval,
- optlen);
- release_sock(sk);
- }
+ optname != IPV6_XFRM_POLICY)
+ err = nf_setsockopt(sk, PF_INET6, optname, optval, optlen);
#endif
return err;
}
@@ -907,12 +903,9 @@ int compat_ipv6_setsockopt(struct sock *sk, int level, int optname,
#ifdef CONFIG_NETFILTER
/* we need to exclude all possible ENOPROTOOPTs except default case */
if (err == -ENOPROTOOPT && optname != IPV6_IPSEC_POLICY &&
- optname != IPV6_XFRM_POLICY) {
- lock_sock(sk);
- err = compat_nf_setsockopt(sk, PF_INET6, optname,
- optval, optlen);
- release_sock(sk);
- }
+ optname != IPV6_XFRM_POLICY)
+ err = compat_nf_setsockopt(sk, PF_INET6, optname, optval,
+ optlen);
#endif
return err;
}
@@ -1316,10 +1309,7 @@ int ipv6_getsockopt(struct sock *sk, int level, int optname,
if (get_user(len, optlen))
return -EFAULT;

- lock_sock(sk);
- err = nf_getsockopt(sk, PF_INET6, optname, optval,
- &len);
- release_sock(sk);
+ err = nf_getsockopt(sk, PF_INET6, optname, optval, &len);
if (err >= 0)
err = put_user(len, optlen);
}
@@ -1359,10 +1349,7 @@ int compat_ipv6_getsockopt(struct sock *sk, int level, int optname,
if (get_user(len, optlen))
return -EFAULT;

- lock_sock(sk);
- err = compat_nf_getsockopt(sk, PF_INET6,
- optname, optval, &len);
- release_sock(sk);
+ err = compat_nf_getsockopt(sk, PF_INET6, optname, optval, &len);
if (err >= 0)
err = put_user(len, optlen);
}
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
index 4cbc6b290dd5..e178fe026379 100644
--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
@@ -240,20 +240,27 @@ static struct nf_hook_ops ipv6_conntrack_ops[] __read_mostly = {
static int
ipv6_getorigdst(struct sock *sk, int optval, void __user *user, int *len)
{
- const struct inet_sock *inet = inet_sk(sk);
+ struct nf_conntrack_tuple tuple = { .src.l3num = NFPROTO_IPV6 };
const struct ipv6_pinfo *inet6 = inet6_sk(sk);
+ const struct inet_sock *inet = inet_sk(sk);
const struct nf_conntrack_tuple_hash *h;
struct sockaddr_in6 sin6;
- struct nf_conntrack_tuple tuple = { .src.l3num = NFPROTO_IPV6 };
struct nf_conn *ct;
+ __be32 flow_label;
+ int bound_dev_if;

+ lock_sock(sk);
tuple.src.u3.in6 = sk->sk_v6_rcv_saddr;
tuple.src.u.tcp.port = inet->inet_sport;
tuple.dst.u3.in6 = sk->sk_v6_daddr;
tuple.dst.u.tcp.port = inet->inet_dport;
tuple.dst.protonum = sk->sk_protocol;
+ bound_dev_if = sk->sk_bound_dev_if;
+ flow_label = inet6->flow_label;
+ release_sock(sk);

- if (sk->sk_protocol != IPPROTO_TCP && sk->sk_protocol != IPPROTO_SCTP)
+ if (tuple.dst.protonum != IPPROTO_TCP &&
+ tuple.dst.protonum != IPPROTO_SCTP)
return -ENOPROTOOPT;

if (*len < 0 || (unsigned int) *len < sizeof(sin6))
@@ -271,14 +278,13 @@ ipv6_getorigdst(struct sock *sk, int optval, void __user *user, int *len)

sin6.sin6_family = AF_INET6;
sin6.sin6_port = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.tcp.port;
- sin6.sin6_flowinfo = inet6->flow_label & IPV6_FLOWINFO_MASK;
+ sin6.sin6_flowinfo = flow_label & IPV6_FLOWINFO_MASK;
memcpy(&sin6.sin6_addr,
&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.in6,
sizeof(sin6.sin6_addr));

nf_ct_put(ct);
- sin6.sin6_scope_id = ipv6_iface_scope_id(&sin6.sin6_addr,
- sk->sk_bound_dev_if);
+ sin6.sin6_scope_id = ipv6_iface_scope_id(&sin6.sin6_addr, bound_dev_if);
return copy_to_user(user, &sin6, sizeof(sin6)) ? -EFAULT : 0;
}

diff --git a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
index abfe75a2e316..fada816bd0f2 100644
--- a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
@@ -99,6 +99,10 @@ static bool nf_nat_ipv6_manip_pkt(struct sk_buff *skb,
!l4proto->manip_pkt(skb, &nf_nat_l3proto_ipv6, iphdroff, hdroff,
target, maniptype))
return false;
+
+ /* must reload, offset might have changed */
+ ipv6h = (void *)skb->data + iphdroff;
+
manip_addr:
if (maniptype == NF_NAT_MANIP_SRC)
ipv6h->saddr = target->src.u3.in6;
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index c79d70b77402..0ea33a9f7dfd 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -99,8 +99,6 @@ struct l2tp_skb_cb {

#define L2TP_SKB_CB(skb) ((struct l2tp_skb_cb *) &skb->cb[sizeof(struct inet_skb_parm)])

-static atomic_t l2tp_tunnel_count;
-static atomic_t l2tp_session_count;
static struct workqueue_struct *l2tp_wq;

/* per-net private data for this module */
@@ -112,6 +110,13 @@ struct l2tp_net {
spinlock_t l2tp_session_hlist_lock;
};

+#if IS_ENABLED(CONFIG_IPV6)
+static bool l2tp_sk_is_v6(struct sock *sk)
+{
+ return sk->sk_family == PF_INET6 &&
+ !ipv6_addr_v4mapped(&sk->sk_v6_daddr);
+}
+#endif

static inline struct l2tp_tunnel *l2tp_tunnel(struct sock *sk)
{
@@ -137,51 +142,6 @@ l2tp_session_id_hash_2(struct l2tp_net *pn, u32 session_id)

}

-/* Lookup the tunnel socket, possibly involving the fs code if the socket is
- * owned by userspace. A struct sock returned from this function must be
- * released using l2tp_tunnel_sock_put once you're done with it.
- */
-static struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel)
-{
- int err = 0;
- struct socket *sock = NULL;
- struct sock *sk = NULL;
-
- if (!tunnel)
- goto out;
-
- if (tunnel->fd >= 0) {
- /* Socket is owned by userspace, who might be in the process
- * of closing it. Look the socket up using the fd to ensure
- * consistency.
- */
- sock = sockfd_lookup(tunnel->fd, &err);
- if (sock)
- sk = sock->sk;
- } else {
- /* Socket is owned by kernelspace */
- sk = tunnel->sock;
- sock_hold(sk);
- }
-
-out:
- return sk;
-}
-
-/* Drop a reference to a tunnel socket obtained via. l2tp_tunnel_sock_put */
-static void l2tp_tunnel_sock_put(struct sock *sk)
-{
- struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);
- if (tunnel) {
- if (tunnel->fd >= 0) {
- /* Socket is owned by userspace */
- sockfd_put(sk->sk_socket);
- }
- sock_put(sk);
- }
- sock_put(sk);
-}
-
/* Lookup a session by id in the global session list
*/
static struct l2tp_session *l2tp_session_find_2(struct net *net, u32 session_id)
@@ -243,6 +203,13 @@ struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunn
}
EXPORT_SYMBOL_GPL(l2tp_session_find);

+void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
+{
+ sock_put(tunnel->sock);
+ /* the tunnel is freed in the socket destructor */
+}
+EXPORT_SYMBOL(l2tp_tunnel_free);
+
/* Lookup a tunnel. A new reference is held on the returned tunnel. */
struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id)
{
@@ -407,22 +374,16 @@ int l2tp_session_register(struct l2tp_session *session,
}

l2tp_tunnel_inc_refcount(tunnel);
- sock_hold(tunnel->sock);
hlist_add_head_rcu(&session->global_hlist, g_head);

spin_unlock_bh(&pn->l2tp_session_hlist_lock);
} else {
l2tp_tunnel_inc_refcount(tunnel);
- sock_hold(tunnel->sock);
}

hlist_add_head(&session->hlist, head);
write_unlock_bh(&tunnel->hlist_lock);

- /* Ignore management session in session count value */
- if (session->session_id != 0)
- atomic_inc(&l2tp_session_count);
-
return 0;

err_tlock_pnlock:
@@ -1057,7 +1018,7 @@ int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
struct l2tp_tunnel *tunnel;

- tunnel = l2tp_sock_to_tunnel(sk);
+ tunnel = l2tp_tunnel(sk);
if (tunnel == NULL)
goto pass_up;

@@ -1065,13 +1026,10 @@ int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
tunnel->name, skb->len);

if (l2tp_udp_recv_core(tunnel, skb, tunnel->recv_payload_hook))
- goto pass_up_put;
+ goto pass_up;

- sock_put(sk);
return 0;

-pass_up_put:
- sock_put(sk);
pass_up:
return 1;
}
@@ -1183,7 +1141,7 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
/* Queue the packet to IP for output */
skb->ignore_df = 1;
#if IS_ENABLED(CONFIG_IPV6)
- if (tunnel->sock->sk_family == PF_INET6 && !tunnel->v4mapped)
+ if (l2tp_sk_is_v6(tunnel->sock))
error = inet6_csk_xmit(tunnel->sock, skb, NULL);
else
#endif
@@ -1246,6 +1204,15 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
goto out_unlock;
}

+ /* The user-space may change the connection status for the user-space
+ * provided socket at run time: we must check it under the socket lock
+ */
+ if (tunnel->fd >= 0 && sk->sk_state != TCP_ESTABLISHED) {
+ kfree_skb(skb);
+ ret = NET_XMIT_DROP;
+ goto out_unlock;
+ }
+
/* Get routing info from the tunnel socket */
skb_dst_drop(skb);
skb_dst_set(skb, dst_clone(__sk_dst_check(sk, 0)));
@@ -1265,7 +1232,7 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len

/* Calculate UDP checksum if configured to do so */
#if IS_ENABLED(CONFIG_IPV6)
- if (sk->sk_family == PF_INET6 && !tunnel->v4mapped)
+ if (l2tp_sk_is_v6(sk))
udp6_set_csum(udp_get_no_check6_tx(sk),
skb, &inet6_sk(sk)->saddr,
&sk->sk_v6_daddr, udp_len);
@@ -1298,14 +1265,12 @@ EXPORT_SYMBOL_GPL(l2tp_xmit_skb);
static void l2tp_tunnel_destruct(struct sock *sk)
{
struct l2tp_tunnel *tunnel = l2tp_tunnel(sk);
- struct l2tp_net *pn;

if (tunnel == NULL)
goto end;

l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: closing...\n", tunnel->name);

-
/* Disable udp encapsulation */
switch (tunnel->encap) {
case L2TP_ENCAPTYPE_UDP:
@@ -1322,21 +1287,11 @@ static void l2tp_tunnel_destruct(struct sock *sk)
sk->sk_destruct = tunnel->old_sk_destruct;
sk->sk_user_data = NULL;

- /* Remove the tunnel struct from the tunnel list */
- pn = l2tp_pernet(tunnel->l2tp_net);
- spin_lock_bh(&pn->l2tp_tunnel_list_lock);
- list_del_rcu(&tunnel->list);
- spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
- atomic_dec(&l2tp_tunnel_count);
-
- l2tp_tunnel_closeall(tunnel);
-
- tunnel->sock = NULL;
- l2tp_tunnel_dec_refcount(tunnel);
-
/* Call the original destructor */
if (sk->sk_destruct)
(*sk->sk_destruct)(sk);
+
+ kfree_rcu(tunnel, rcu);
end:
return;
}
@@ -1400,48 +1355,42 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_closeall);
/* Tunnel socket destroy hook for UDP encapsulation */
static void l2tp_udp_encap_destroy(struct sock *sk)
{
- struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);
- if (tunnel) {
- l2tp_tunnel_closeall(tunnel);
- sock_put(sk);
- }
+ struct l2tp_tunnel *tunnel = l2tp_tunnel(sk);
+
+ if (tunnel)
+ l2tp_tunnel_delete(tunnel);
}

/* Workqueue tunnel deletion function */
static void l2tp_tunnel_del_work(struct work_struct *work)
{
- struct l2tp_tunnel *tunnel = NULL;
- struct socket *sock = NULL;
- struct sock *sk = NULL;
-
- tunnel = container_of(work, struct l2tp_tunnel, del_work);
+ struct l2tp_tunnel *tunnel = container_of(work, struct l2tp_tunnel,
+ del_work);
+ struct sock *sk = tunnel->sock;
+ struct socket *sock = sk->sk_socket;
+ struct l2tp_net *pn;

l2tp_tunnel_closeall(tunnel);

- sk = l2tp_tunnel_sock_lookup(tunnel);
- if (!sk)
- goto out;
-
- sock = sk->sk_socket;
-
- /* If the tunnel socket was created by userspace, then go through the
- * inet layer to shut the socket down, and let userspace close it.
- * Otherwise, if we created the socket directly within the kernel, use
+ /* If the tunnel socket was created within the kernel, use
* the sk API to release it here.
- * In either case the tunnel resources are freed in the socket
- * destructor when the tunnel socket goes away.
*/
- if (tunnel->fd >= 0) {
- if (sock)
- inet_shutdown(sock, 2);
- } else {
+ if (tunnel->fd < 0) {
if (sock)
kernel_sock_shutdown(sock, SHUT_RDWR);
sk_release_kernel(sk);
}

- l2tp_tunnel_sock_put(sk);
-out:
+ /* Remove the tunnel struct from the tunnel list */
+ pn = l2tp_pernet(tunnel->l2tp_net);
+ spin_lock_bh(&pn->l2tp_tunnel_list_lock);
+ list_del_rcu(&tunnel->list);
+ spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
+
+ /* drop initial ref */
+ l2tp_tunnel_dec_refcount(tunnel);
+
+ /* drop workqueue ref */
l2tp_tunnel_dec_refcount(tunnel);
}

@@ -1648,9 +1597,14 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
encap = cfg->encap;

/* Quick sanity checks */
+ err = -EPROTONOSUPPORT;
+ if (sk->sk_type != SOCK_DGRAM) {
+ pr_debug("tunl %hu: fd %d wrong socket type\n",
+ tunnel_id, fd);
+ goto err;
+ }
switch (encap) {
case L2TP_ENCAPTYPE_UDP:
- err = -EPROTONOSUPPORT;
if (sk->sk_protocol != IPPROTO_UDP) {
pr_err("tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
tunnel_id, fd, sk->sk_protocol, IPPROTO_UDP);
@@ -1658,7 +1612,6 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
}
break;
case L2TP_ENCAPTYPE_IP:
- err = -EPROTONOSUPPORT;
if (sk->sk_protocol != IPPROTO_L2TP) {
pr_err("tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
tunnel_id, fd, sk->sk_protocol, IPPROTO_L2TP);
@@ -1698,24 +1651,6 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
if (cfg != NULL)
tunnel->debug = cfg->debug;

-#if IS_ENABLED(CONFIG_IPV6)
- if (sk->sk_family == PF_INET6) {
- struct ipv6_pinfo *np = inet6_sk(sk);
-
- if (ipv6_addr_v4mapped(&np->saddr) &&
- ipv6_addr_v4mapped(&sk->sk_v6_daddr)) {
- struct inet_sock *inet = inet_sk(sk);
-
- tunnel->v4mapped = true;
- inet->inet_saddr = np->saddr.s6_addr32[3];
- inet->inet_rcv_saddr = sk->sk_v6_rcv_saddr.s6_addr32[3];
- inet->inet_daddr = sk->sk_v6_daddr.s6_addr32[3];
- } else {
- tunnel->v4mapped = false;
- }
- }
-#endif
-
/* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
tunnel->encap = encap;
if (encap == L2TP_ENCAPTYPE_UDP) {
@@ -1724,7 +1659,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
udp_sk(sk)->encap_rcv = l2tp_udp_encap_recv;
udp_sk(sk)->encap_destroy = l2tp_udp_encap_destroy;
#if IS_ENABLED(CONFIG_IPV6)
- if (sk->sk_family == PF_INET6 && !tunnel->v4mapped)
+ if (l2tp_sk_is_v6(sk))
udpv6_encap_enable();
else
#endif
@@ -1733,13 +1668,22 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32

sk->sk_user_data = tunnel;

+ /* Bump the reference count. The tunnel context is deleted
+ * only when this drops to zero. A reference is also held on
+ * the tunnel socket to ensure that it is not released while
+ * the tunnel is extant. Must be done before sk_destruct is
+ * set.
+ */
+ atomic_set(&tunnel->ref_count, 1);
+ sock_hold(sk);
+ tunnel->sock = sk;
+ tunnel->fd = fd;
+
/* Hook on the tunnel socket destructor so that we can cleanup
* if the tunnel socket goes away.
*/
tunnel->old_sk_destruct = sk->sk_destruct;
sk->sk_destruct = &l2tp_tunnel_destruct;
- tunnel->sock = sk;
- tunnel->fd = fd;
lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class, "l2tp_sock");

sk->sk_allocation = GFP_ATOMIC;
@@ -1749,12 +1693,6 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32

/* Add tunnel to our list */
INIT_LIST_HEAD(&tunnel->list);
- atomic_inc(&l2tp_tunnel_count);
-
- /* Bump the reference count. The tunnel context is deleted
- * only when this drops to zero. Must be done before list insertion
- */
- l2tp_tunnel_inc_refcount(tunnel);
spin_lock_bh(&pn->l2tp_tunnel_list_lock);
list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list);
spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
@@ -1795,10 +1733,6 @@ void l2tp_session_free(struct l2tp_session *session)

if (tunnel) {
BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);
- if (session->session_id != 0)
- atomic_dec(&l2tp_session_count);
- sock_put(tunnel->sock);
- session->tunnel = NULL;
l2tp_tunnel_dec_refcount(tunnel);
}

diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index faa2d3e2f9bd..48e19b58e334 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -203,9 +203,6 @@ struct l2tp_tunnel {
struct sock *sock; /* Parent socket */
int fd; /* Parent fd, if tunnel socket
* was created by userspace */
-#if IS_ENABLED(CONFIG_IPV6)
- bool v4mapped;
-#endif

struct work_struct del_work;

@@ -229,27 +226,8 @@ static inline void *l2tp_session_priv(struct l2tp_session *session)
return &session->priv[0];
}

-static inline struct l2tp_tunnel *l2tp_sock_to_tunnel(struct sock *sk)
-{
- struct l2tp_tunnel *tunnel;
-
- if (sk == NULL)
- return NULL;
-
- sock_hold(sk);
- tunnel = (struct l2tp_tunnel *)(sk->sk_user_data);
- if (tunnel == NULL) {
- sock_put(sk);
- goto out;
- }
-
- BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);
-
-out:
- return tunnel;
-}
-
struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id);
+void l2tp_tunnel_free(struct l2tp_tunnel *tunnel);

struct l2tp_session *l2tp_session_get(const struct net *net,
struct l2tp_tunnel *tunnel,
@@ -303,7 +281,7 @@ static inline void l2tp_tunnel_inc_refcount(struct l2tp_tunnel *tunnel)
static inline void l2tp_tunnel_dec_refcount(struct l2tp_tunnel *tunnel)
{
if (atomic_dec_and_test(&tunnel->ref_count))
- kfree_rcu(tunnel, rcu);
+ l2tp_tunnel_free(tunnel);
}

/* Session reference counts. Incremented when code obtains a reference
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 67c59119d01d..1ce4c1b4e809 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -234,17 +234,13 @@ static void l2tp_ip_close(struct sock *sk, long timeout)
static void l2tp_ip_destroy_sock(struct sock *sk)
{
struct sk_buff *skb;
- struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);
+ struct l2tp_tunnel *tunnel = sk->sk_user_data;

while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
kfree_skb(skb);

- if (tunnel) {
- l2tp_tunnel_closeall(tunnel);
- sock_put(sk);
- }
-
- sk_refcnt_debug_dec(sk);
+ if (tunnel)
+ l2tp_tunnel_delete(tunnel);
}

static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index 1615cb38828d..c966305cf46c 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -246,16 +246,14 @@ static void l2tp_ip6_close(struct sock *sk, long timeout)

static void l2tp_ip6_destroy_sock(struct sock *sk)
{
- struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);
+ struct l2tp_tunnel *tunnel = sk->sk_user_data;

lock_sock(sk);
ip6_flush_pending_frames(sk);
release_sock(sk);

- if (tunnel) {
- l2tp_tunnel_closeall(tunnel);
- sock_put(sk);
- }
+ if (tunnel)
+ l2tp_tunnel_delete(tunnel);

inet6_destroy_sock(sk);
}
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 8d6e261704df..6ac543b1330b 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -312,7 +312,6 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh
int error;
struct l2tp_session *session;
struct l2tp_tunnel *tunnel;
- struct pppol2tp_session *ps;
int uhlen;

error = -ENOTCONN;
@@ -325,10 +324,7 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh
if (session == NULL)
goto error;

- ps = l2tp_session_priv(session);
- tunnel = l2tp_sock_to_tunnel(ps->tunnel_sock);
- if (tunnel == NULL)
- goto error_put_sess;
+ tunnel = session->tunnel;

uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;

@@ -339,7 +335,7 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh
sizeof(ppph) + total_len,
0, GFP_KERNEL);
if (!skb)
- goto error_put_sess_tun;
+ goto error_put_sess;

/* Reserve space for headers. */
skb_reserve(skb, NET_SKB_PAD);
@@ -358,20 +354,17 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh
total_len);
if (error < 0) {
kfree_skb(skb);
- goto error_put_sess_tun;
+ goto error_put_sess;
}

local_bh_disable();
l2tp_xmit_skb(session, skb, session->hdr_len);
local_bh_enable();

- sock_put(ps->tunnel_sock);
sock_put(sk);

return total_len;

-error_put_sess_tun:
- sock_put(ps->tunnel_sock);
error_put_sess:
sock_put(sk);
error:
@@ -396,10 +389,8 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
{
static const u8 ppph[2] = { 0xff, 0x03 };
struct sock *sk = (struct sock *) chan->private;
- struct sock *sk_tun;
struct l2tp_session *session;
struct l2tp_tunnel *tunnel;
- struct pppol2tp_session *ps;
int uhlen, headroom;

if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
@@ -410,13 +401,7 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
if (session == NULL)
goto abort;

- ps = l2tp_session_priv(session);
- sk_tun = ps->tunnel_sock;
- if (sk_tun == NULL)
- goto abort_put_sess;
- tunnel = l2tp_sock_to_tunnel(sk_tun);
- if (tunnel == NULL)
- goto abort_put_sess;
+ tunnel = session->tunnel;

uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
headroom = NET_SKB_PAD +
@@ -425,7 +410,7 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
session->hdr_len + /* L2TP header */
sizeof(ppph); /* PPP header */
if (skb_cow_head(skb, headroom))
- goto abort_put_sess_tun;
+ goto abort_put_sess;

/* Setup PPP header */
__skb_push(skb, sizeof(ppph));
@@ -436,12 +421,10 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
l2tp_xmit_skb(session, skb, session->hdr_len);
local_bh_enable();

- sock_put(sk_tun);
sock_put(sk);
+
return 1;

-abort_put_sess_tun:
- sock_put(sk_tun);
abort_put_sess:
sock_put(sk);
abort:
@@ -454,20 +437,28 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
* Session (and tunnel control) socket create/destroy.
*****************************************************************************/

+static void pppol2tp_put_sk(struct rcu_head *head)
+{
+ struct pppol2tp_session *ps;
+
+ ps = container_of(head, typeof(*ps), rcu);
+ sock_put(ps->__sk);
+}
+
/* Called by l2tp_core when a session socket is being closed.
*/
static void pppol2tp_session_close(struct l2tp_session *session)
{
- struct sock *sk;
-
- BUG_ON(session->magic != L2TP_SESSION_MAGIC);
+ struct pppol2tp_session *ps;

- sk = pppol2tp_session_get_sock(session);
- if (sk) {
- if (sk->sk_socket)
- inet_shutdown(sk->sk_socket, SEND_SHUTDOWN);
- sock_put(sk);
- }
+ ps = l2tp_session_priv(session);
+ mutex_lock(&ps->sk_lock);
+ ps->__sk = rcu_dereference_protected(ps->sk,
+ lockdep_is_held(&ps->sk_lock));
+ RCU_INIT_POINTER(ps->sk, NULL);
+ if (ps->__sk)
+ call_rcu(&ps->rcu, pppol2tp_put_sk);
+ mutex_unlock(&ps->sk_lock);
}

/* Really kill the session socket. (Called from sock_put() if
@@ -487,14 +478,6 @@ static void pppol2tp_session_destruct(struct sock *sk)
}
}

-static void pppol2tp_put_sk(struct rcu_head *head)
-{
- struct pppol2tp_session *ps;
-
- ps = container_of(head, typeof(*ps), rcu);
- sock_put(ps->__sk);
-}
-
/* Called when the PPPoX socket (session) is closed.
*/
static int pppol2tp_release(struct socket *sock)
@@ -518,26 +501,17 @@ static int pppol2tp_release(struct socket *sock)
sock_orphan(sk);
sock->sk = NULL;

+ /* If the socket is associated with a session,
+ * l2tp_session_delete will call pppol2tp_session_close which
+ * will drop the session's ref on the socket.
+ */
session = pppol2tp_sock_to_session(sk);
-
- if (session != NULL) {
- struct pppol2tp_session *ps;
-
+ if (session) {
l2tp_session_delete(session);
-
- ps = l2tp_session_priv(session);
- mutex_lock(&ps->sk_lock);
- ps->__sk = rcu_dereference_protected(ps->sk,
- lockdep_is_held(&ps->sk_lock));
- RCU_INIT_POINTER(ps->sk, NULL);
- mutex_unlock(&ps->sk_lock);
- call_rcu(&ps->rcu, pppol2tp_put_sk);
-
- /* Rely on the sock_put() call at the end of the function for
- * dropping the reference held by pppol2tp_sock_to_session().
- * The last reference will be dropped by pppol2tp_put_sk().
- */
+ /* drop the ref obtained by pppol2tp_sock_to_session */
+ sock_put(sk);
}
+
release_sock(sk);

/* This will delete the session context via
@@ -842,6 +816,7 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,

out_no_ppp:
/* This is how we get the session context from the socket. */
+ sock_hold(sk);
sk->sk_user_data = session;
rcu_assign_pointer(ps->sk, sk);
mutex_unlock(&ps->sk_lock);
@@ -938,9 +913,7 @@ static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr,
goto end;

pls = l2tp_session_priv(session);
- tunnel = l2tp_sock_to_tunnel(pls->tunnel_sock);
- if (tunnel == NULL)
- goto end_put_sess;
+ tunnel = session->tunnel;

inet = inet_sk(tunnel->sock);
if ((tunnel->version == 2) && (tunnel->sock->sk_family == AF_INET)) {
@@ -1020,8 +993,6 @@ static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr,
*usockaddr_len = len;
error = 0;

- sock_put(pls->tunnel_sock);
-end_put_sess:
sock_put(sk);
end:
return error;
@@ -1262,7 +1233,6 @@ static int pppol2tp_ioctl(struct socket *sock, unsigned int cmd,
struct sock *sk = sock->sk;
struct l2tp_session *session;
struct l2tp_tunnel *tunnel;
- struct pppol2tp_session *ps;
int err;

if (!sk)
@@ -1286,16 +1256,10 @@ static int pppol2tp_ioctl(struct socket *sock, unsigned int cmd,
/* Special case: if session's session_id is zero, treat ioctl as a
* tunnel ioctl
*/
- ps = l2tp_session_priv(session);
if ((session->session_id == 0) &&
(session->peer_session_id == 0)) {
- err = -EBADF;
- tunnel = l2tp_sock_to_tunnel(ps->tunnel_sock);
- if (tunnel == NULL)
- goto end_put_sess;
-
+ tunnel = session->tunnel;
err = pppol2tp_tunnel_ioctl(tunnel, cmd, arg);
- sock_put(ps->tunnel_sock);
goto end_put_sess;
}

@@ -1421,7 +1385,6 @@ static int pppol2tp_setsockopt(struct socket *sock, int level, int optname,
struct sock *sk = sock->sk;
struct l2tp_session *session;
struct l2tp_tunnel *tunnel;
- struct pppol2tp_session *ps;
int val;
int err;

@@ -1446,20 +1409,14 @@ static int pppol2tp_setsockopt(struct socket *sock, int level, int optname,

/* Special case: if session_id == 0x0000, treat as operation on tunnel
*/
- ps = l2tp_session_priv(session);
if ((session->session_id == 0) &&
(session->peer_session_id == 0)) {
- err = -EBADF;
- tunnel = l2tp_sock_to_tunnel(ps->tunnel_sock);
- if (tunnel == NULL)
- goto end_put_sess;
-
+ tunnel = session->tunnel;
err = pppol2tp_tunnel_setsockopt(sk, tunnel, optname, val);
- sock_put(ps->tunnel_sock);
- } else
+ } else {
err = pppol2tp_session_setsockopt(sk, session, optname, val);
+ }

-end_put_sess:
sock_put(sk);
end:
return err;
@@ -1547,7 +1504,6 @@ static int pppol2tp_getsockopt(struct socket *sock, int level, int optname,
struct l2tp_tunnel *tunnel;
int val, len;
int err;
- struct pppol2tp_session *ps;

if (level != SOL_PPPOL2TP)
return -EINVAL;
@@ -1571,16 +1527,10 @@ static int pppol2tp_getsockopt(struct socket *sock, int level, int optname,
goto end;

/* Special case: if session_id == 0x0000, treat as operation on tunnel */
- ps = l2tp_session_priv(session);
if ((session->session_id == 0) &&
(session->peer_session_id == 0)) {
- err = -EBADF;
- tunnel = l2tp_sock_to_tunnel(ps->tunnel_sock);
- if (tunnel == NULL)
- goto end_put_sess;
-
+ tunnel = session->tunnel;
err = pppol2tp_tunnel_getsockopt(sk, tunnel, optname, &val);
- sock_put(ps->tunnel_sock);
if (err)
goto end_put_sess;
} else {
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index d8d65fed4e32..4b1a0e5dd701 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -3054,7 +3054,7 @@ cfg80211_beacon_dup(struct cfg80211_beacon_data *beacon)
}
if (beacon->probe_resp_len) {
new_beacon->probe_resp_len = beacon->probe_resp_len;
- beacon->probe_resp = pos;
+ new_beacon->probe_resp = pos;
memcpy(pos, beacon->probe_resp, beacon->probe_resp_len);
pos += beacon->probe_resp_len;
}
diff --git a/net/netfilter/nf_nat_proto_common.c b/net/netfilter/nf_nat_proto_common.c
index 83a72a235cae..932d8a1a91d1 100644
--- a/net/netfilter/nf_nat_proto_common.c
+++ b/net/netfilter/nf_nat_proto_common.c
@@ -41,7 +41,7 @@ void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto,
const struct nf_conn *ct,
u16 *rover)
{
- unsigned int range_size, min, i;
+ unsigned int range_size, min, max, i;
__be16 *portptr;
u_int16_t off;

@@ -71,7 +71,10 @@ void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto,
}
} else {
min = ntohs(range->min_proto.all);
- range_size = ntohs(range->max_proto.all) - min + 1;
+ max = ntohs(range->max_proto.all);
+ if (unlikely(max < min))
+ swap(max, min);
+ range_size = max - min + 1;
}

if (range->flags & NF_NAT_RANGE_PROTO_RANDOM) {
diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c
index f407ebc13481..95b6dedc5ac7 100644
--- a/net/netfilter/xt_IDLETIMER.c
+++ b/net/netfilter/xt_IDLETIMER.c
@@ -146,11 +146,11 @@ static int idletimer_tg_create(struct idletimer_tg_info *info)
(unsigned long) info->timer);
info->timer->refcnt = 1;

+ INIT_WORK(&info->timer->work, idletimer_tg_work);
+
mod_timer(&info->timer->timer,
msecs_to_jiffies(info->timeout * 1000) + jiffies);

- INIT_WORK(&info->timer->work, idletimer_tg_work);
-
return 0;

out_free_attr:
@@ -191,7 +191,10 @@ static int idletimer_tg_checkentry(const struct xt_tgchk_param *par)
pr_debug("timeout value is zero\n");
return -EINVAL;
}
-
+ if (info->timeout >= INT_MAX / 1000) {
+ pr_debug("timeout value is too big\n");
+ return -EINVAL;
+ }
if (info->label[0] == '\0' ||
strnlen(info->label,
MAX_IDLETIMER_LABEL_SIZE) == MAX_IDLETIMER_LABEL_SIZE) {
diff --git a/net/netfilter/xt_LED.c b/net/netfilter/xt_LED.c
index 993de2ba89d3..d944bc17c10c 100644
--- a/net/netfilter/xt_LED.c
+++ b/net/netfilter/xt_LED.c
@@ -139,10 +139,11 @@ static int led_tg_check(const struct xt_tgchk_param *par)
goto exit_alloc;
}

- /* See if we need to set up a timer */
- if (ledinfo->delay > 0)
- setup_timer(&ledinternal->timer, led_timeout_callback,
- (unsigned long)ledinternal);
+ /* Since the ledinternal timer can be shared between multiple targets,
+ * always set it up, even if the current target does not need it
+ */
+ setup_timer(&ledinternal->timer, led_timeout_callback,
+ (unsigned long)ledinternal);

list_add_tail(&ledinternal->list, &xt_led_triggers);

@@ -179,8 +180,7 @@ static void led_tg_destroy(const struct xt_tgdtor_param *par)

list_del(&ledinternal->list);

- if (ledinfo->delay > 0)
- del_timer_sync(&ledinternal->timer);
+ del_timer_sync(&ledinternal->timer);

led_trigger_unregister(&ledinternal->netfilter_led_trigger);

diff --git a/net/netfilter/xt_RATEEST.c b/net/netfilter/xt_RATEEST.c
index 370adf622cef..a5f6bd1a663e 100644
--- a/net/netfilter/xt_RATEEST.c
+++ b/net/netfilter/xt_RATEEST.c
@@ -40,23 +40,31 @@ static void xt_rateest_hash_insert(struct xt_rateest *est)
hlist_add_head(&est->list, &rateest_hash[h]);
}

-struct xt_rateest *xt_rateest_lookup(const char *name)
+static struct xt_rateest *__xt_rateest_lookup(const char *name)
{
struct xt_rateest *est;
unsigned int h;

h = xt_rateest_hash(name);
- mutex_lock(&xt_rateest_mutex);
hlist_for_each_entry(est, &rateest_hash[h], list) {
if (strcmp(est->name, name) == 0) {
est->refcnt++;
- mutex_unlock(&xt_rateest_mutex);
return est;
}
}
- mutex_unlock(&xt_rateest_mutex);
+
return NULL;
}
+
+struct xt_rateest *xt_rateest_lookup(const char *name)
+{
+ struct xt_rateest *est;
+
+ mutex_lock(&xt_rateest_mutex);
+ est = __xt_rateest_lookup(name);
+ mutex_unlock(&xt_rateest_mutex);
+ return est;
+}
EXPORT_SYMBOL_GPL(xt_rateest_lookup);

void xt_rateest_put(struct xt_rateest *est)
@@ -104,8 +112,10 @@ static int xt_rateest_tg_checkentry(const struct xt_tgchk_param *par)
rnd_inited = true;
}

- est = xt_rateest_lookup(info->name);
+ mutex_lock(&xt_rateest_mutex);
+ est = __xt_rateest_lookup(info->name);
if (est) {
+ mutex_unlock(&xt_rateest_mutex);
/*
* If estimator parameters are specified, they must match the
* existing estimator.
@@ -143,11 +153,13 @@ static int xt_rateest_tg_checkentry(const struct xt_tgchk_param *par)

info->est = est;
xt_rateest_hash_insert(est);
+ mutex_unlock(&xt_rateest_mutex);
return 0;

err2:
kfree(est);
err1:
+ mutex_unlock(&xt_rateest_mutex);
return ret;
}

diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 2eafcffb5f8e..95700e349463 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -990,6 +990,9 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr,
if (addr->sa_family != AF_NETLINK)
return -EINVAL;

+ if (alen < sizeof(struct sockaddr_nl))
+ return -EINVAL;
+
if ((nladdr->nl_groups || nladdr->nl_pid) &&
!netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
return -EPERM;
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 76393f2f4b22..e2da52eeafd2 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -1042,6 +1042,7 @@ static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group,
{
struct sk_buff *tmp;
struct net *net, *prev = NULL;
+ bool delivered = false;
int err;

for_each_net_rcu(net) {
@@ -1053,14 +1054,21 @@ static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group,
}
err = nlmsg_multicast(prev->genl_sock, tmp,
portid, group, flags);
- if (err)
+ if (!err)
+ delivered = true;
+ else if (err != -ESRCH)
goto error;
}

prev = net;
}

- return nlmsg_multicast(prev->genl_sock, skb, portid, group, flags);
+ err = nlmsg_multicast(prev->genl_sock, skb, portid, group, flags);
+ if (!err)
+ delivered = true;
+ else if (err != -ESRCH)
+ return err;
+ return delivered ? 0 : -ESRCH;
error:
kfree_skb(skb);
return err;
diff --git a/net/nfc/llcp_commands.c b/net/nfc/llcp_commands.c
index a3ad69a4c648..1e7245792fc0 100644
--- a/net/nfc/llcp_commands.c
+++ b/net/nfc/llcp_commands.c
@@ -149,6 +149,10 @@ struct nfc_llcp_sdp_tlv *nfc_llcp_build_sdreq_tlv(u8 tid, char *uri,

pr_debug("uri: %s, len: %zu\n", uri, uri_len);

+ /* sdreq->tlv_len is u8, takes uri_len, + 3 for header, + 1 for NULL */
+ if (WARN_ON_ONCE(uri_len > U8_MAX - 4))
+ return NULL;
+
sdreq = kzalloc(sizeof(struct nfc_llcp_sdp_tlv), GFP_KERNEL);
if (sdreq == NULL)
return NULL;
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index 43cb1c17e267..d44369ed1436 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -60,7 +60,8 @@ static const struct nla_policy nfc_genl_policy[NFC_ATTR_MAX + 1] = {
};

static const struct nla_policy nfc_sdp_genl_policy[NFC_SDP_ATTR_MAX + 1] = {
- [NFC_SDP_ATTR_URI] = { .type = NLA_STRING },
+ [NFC_SDP_ATTR_URI] = { .type = NLA_STRING,
+ .len = U8_MAX - 4 },
[NFC_SDP_ATTR_SAP] = { .type = NLA_U8 },
};

diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index cbf402451c20..afdd343ad826 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -511,8 +511,12 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
1<<(prandom_u32() % 8);
}

- if (unlikely(skb_queue_len(&sch->q) >= sch->limit))
+ if (unlikely(skb_queue_len(&sch->q) >= sch->limit)) {
+ /* qdisc_reshape_fail() can't handle segmented skb */
+ if (segs)
+ return qdisc_drop_all(skb, sch);
return qdisc_reshape_fail(skb, sch);
+ }

sch->qstats.backlog += qdisc_pkt_len(skb);

diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 642c11570285..bbf6abb6ae3c 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -1367,10 +1367,14 @@ static struct sctp_chunk *_sctp_make_chunk(const struct sctp_association *asoc,
sctp_chunkhdr_t *chunk_hdr;
struct sk_buff *skb;
struct sock *sk;
+ int chunklen;
+
+ chunklen = WORD_ROUND(sizeof(*chunk_hdr) + paylen);
+ if (chunklen > SCTP_MAX_CHUNK_LEN)
+ goto nodata;

/* No need to allocate LL here, as this is only a chunk. */
- skb = alloc_skb(WORD_ROUND(sizeof(sctp_chunkhdr_t) + paylen),
- GFP_ATOMIC);
+ skb = alloc_skb(chunklen, GFP_ATOMIC);
if (!skb)
goto nodata;

diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 316c27f70560..677ec308d713 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -120,22 +120,17 @@ static inline int verify_replay(struct xfrm_usersa_info *p,
struct nlattr *rt = attrs[XFRMA_REPLAY_ESN_VAL];
struct xfrm_replay_state_esn *rs;

- if (p->flags & XFRM_STATE_ESN) {
- if (!rt)
- return -EINVAL;
+ if (!rt)
+ return (p->flags & XFRM_STATE_ESN) ? -EINVAL : 0;

- rs = nla_data(rt);
+ rs = nla_data(rt);

- if (rs->bmp_len > XFRMA_REPLAY_ESN_MAX / sizeof(rs->bmp[0]) / 8)
- return -EINVAL;
-
- if (nla_len(rt) < xfrm_replay_state_esn_len(rs) &&
- nla_len(rt) != sizeof(*rs))
- return -EINVAL;
- }
+ if (rs->bmp_len > XFRMA_REPLAY_ESN_MAX / sizeof(rs->bmp[0]) / 8)
+ return -EINVAL;

- if (!rt)
- return 0;
+ if (nla_len(rt) < xfrm_replay_state_esn_len(rs) &&
+ nla_len(rt) != sizeof(*rs))
+ return -EINVAL;

/* As only ESP and AH support ESN feature. */
if ((p->id.proto != IPPROTO_ESP) && (p->id.proto != IPPROTO_AH))
diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c
index 28aee9eb8b32..141b8a723d1b 100644
--- a/security/integrity/ima/ima_appraise.c
+++ b/security/integrity/ima/ima_appraise.c
@@ -198,7 +198,8 @@ int ima_appraise_measurement(int func, struct integrity_iint_cache *iint,
if (opened & FILE_CREATED)
iint->flags |= IMA_NEW_FILE;
if ((iint->flags & IMA_NEW_FILE) &&
- !(iint->flags & IMA_DIGSIG_REQUIRED))
+ (!(iint->flags & IMA_DIGSIG_REQUIRED) ||
+ (inode->i_size == 0)))
status = INTEGRITY_PASS;
goto out;
}
diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
index 96612762d623..c64b13761aca 100644
--- a/sound/core/oss/pcm_oss.c
+++ b/sound/core/oss/pcm_oss.c
@@ -1362,7 +1362,7 @@ static ssize_t snd_pcm_oss_write2(struct snd_pcm_substream *substream, const cha
static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const char __user *buf, size_t bytes)
{
size_t xfer = 0;
- ssize_t tmp;
+ ssize_t tmp = 0;
struct snd_pcm_runtime *runtime = substream->runtime;

if (atomic_read(&substream->mmap_count))
@@ -1469,7 +1469,7 @@ static ssize_t snd_pcm_oss_read2(struct snd_pcm_substream *substream, char *buf,
static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __user *buf, size_t bytes)
{
size_t xfer = 0;
- ssize_t tmp;
+ ssize_t tmp = 0;
struct snd_pcm_runtime *runtime = substream->runtime;

if (atomic_read(&substream->mmap_count))
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 35148fa49ccc..3835b818efa8 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -3219,7 +3219,7 @@ int snd_pcm_lib_default_mmap(struct snd_pcm_substream *substream,
area,
substream->runtime->dma_area,
substream->runtime->dma_addr,
- area->vm_end - area->vm_start);
+ substream->runtime->dma_bytes);
#elif defined(CONFIG_MIPS) && defined(CONFIG_DMA_NONCOHERENT)
if (substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV &&
!plat_device_is_coherent(substream->dma_buffer.dev.dev))
diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
index 8923f7e69efc..b813ecc6322d 100644
--- a/sound/core/seq/seq_clientmgr.c
+++ b/sound/core/seq/seq_clientmgr.c
@@ -270,12 +270,12 @@ static int seq_free_client1(struct snd_seq_client *client)

if (!client)
return 0;
- snd_seq_delete_all_ports(client);
- snd_seq_queue_client_leave(client->number);
spin_lock_irqsave(&clients_lock, flags);
clienttablock[client->number] = 1;
clienttab[client->number] = NULL;
spin_unlock_irqrestore(&clients_lock, flags);
+ snd_seq_delete_all_ports(client);
+ snd_seq_queue_client_leave(client->number);
snd_use_lock_sync(&client->use_lock);
snd_seq_queue_client_termination(client->number);
if (client->pool)
@@ -919,7 +919,8 @@ int snd_seq_dispatch_event(struct snd_seq_event_cell *cell, int atomic, int hop)
static int snd_seq_client_enqueue_event(struct snd_seq_client *client,
struct snd_seq_event *event,
struct file *file, int blocking,
- int atomic, int hop)
+ int atomic, int hop,
+ struct mutex *mutexp)
{
struct snd_seq_event_cell *cell;
int err;
@@ -957,7 +958,8 @@ static int snd_seq_client_enqueue_event(struct snd_seq_client *client,
return -ENXIO; /* queue is not allocated */

/* allocate an event cell */
- err = snd_seq_event_dup(client->pool, event, &cell, !blocking || atomic, file);
+ err = snd_seq_event_dup(client->pool, event, &cell, !blocking || atomic,
+ file, mutexp);
if (err < 0)
return err;

@@ -1012,7 +1014,7 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
{
struct snd_seq_client *client = file->private_data;
int written = 0, len;
- int err = -EINVAL;
+ int err;
struct snd_seq_event event;

if (!(snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_OUTPUT))
@@ -1026,12 +1028,15 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
return -ENXIO;

/* allocate the pool now if the pool is not allocated yet */
+ mutex_lock(&client->ioctl_mutex);
if (client->pool->size > 0 && !snd_seq_write_pool_allocated(client)) {
- if (snd_seq_pool_init(client->pool) < 0)
- return -ENOMEM;
+ err = snd_seq_pool_init(client->pool);
+ if (err < 0)
+ goto out;
}

/* only process whole events */
+ err = -EINVAL;
while (count >= sizeof(struct snd_seq_event)) {
/* Read in the event header from the user */
len = sizeof(event);
@@ -1078,7 +1083,7 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
/* ok, enqueue it */
err = snd_seq_client_enqueue_event(client, &event, file,
!(file->f_flags & O_NONBLOCK),
- 0, 0);
+ 0, 0, &client->ioctl_mutex);
if (err < 0)
break;

@@ -1089,6 +1094,8 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
written += len;
}

+ out:
+ mutex_unlock(&client->ioctl_mutex);
return written ? written : err;
}

@@ -1925,6 +1932,9 @@ static int snd_seq_ioctl_set_client_pool(struct snd_seq_client *client,
(! snd_seq_write_pool_allocated(client) ||
info.output_pool != client->pool->size)) {
if (snd_seq_write_pool_allocated(client)) {
+ /* is the pool in use? */
+ if (atomic_read(&client->pool->counter))
+ return -EBUSY;
/* remove all existing cells */
snd_seq_pool_mark_closing(client->pool);
snd_seq_queue_client_leave_cells(client->number);
@@ -2348,7 +2358,8 @@ static int kernel_client_enqueue(int client, struct snd_seq_event *ev,
if (! cptr->accept_output)
result = -EPERM;
else /* send it */
- result = snd_seq_client_enqueue_event(cptr, ev, file, blocking, atomic, hop);
+ result = snd_seq_client_enqueue_event(cptr, ev, file, blocking,
+ atomic, hop, NULL);

snd_seq_client_unlock(cptr);
return result;
diff --git a/sound/core/seq/seq_fifo.c b/sound/core/seq/seq_fifo.c
index 3490d21ab9e7..9acbed1ac982 100644
--- a/sound/core/seq/seq_fifo.c
+++ b/sound/core/seq/seq_fifo.c
@@ -123,7 +123,7 @@ int snd_seq_fifo_event_in(struct snd_seq_fifo *f,
return -EINVAL;

snd_use_lock_use(&f->use_lock);
- err = snd_seq_event_dup(f->pool, event, &cell, 1, NULL); /* always non-blocking */
+ err = snd_seq_event_dup(f->pool, event, &cell, 1, NULL, NULL); /* always non-blocking */
if (err < 0) {
if ((err == -ENOMEM) || (err == -EAGAIN))
atomic_inc(&f->overflow);
diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c
index b20d5b5368cd..f8420b660459 100644
--- a/sound/core/seq/seq_memory.c
+++ b/sound/core/seq/seq_memory.c
@@ -221,7 +221,8 @@ void snd_seq_cell_free(struct snd_seq_event_cell * cell)
*/
static int snd_seq_cell_alloc(struct snd_seq_pool *pool,
struct snd_seq_event_cell **cellp,
- int nonblock, struct file *file)
+ int nonblock, struct file *file,
+ struct mutex *mutexp)
{
struct snd_seq_event_cell *cell;
unsigned long flags;
@@ -245,7 +246,11 @@ static int snd_seq_cell_alloc(struct snd_seq_pool *pool,
set_current_state(TASK_INTERRUPTIBLE);
add_wait_queue(&pool->output_sleep, &wait);
spin_unlock_irq(&pool->lock);
+ if (mutexp)
+ mutex_unlock(mutexp);
schedule();
+ if (mutexp)
+ mutex_lock(mutexp);
spin_lock_irq(&pool->lock);
remove_wait_queue(&pool->output_sleep, &wait);
/* interrupted? */
@@ -288,7 +293,7 @@ static int snd_seq_cell_alloc(struct snd_seq_pool *pool,
*/
int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
struct snd_seq_event_cell **cellp, int nonblock,
- struct file *file)
+ struct file *file, struct mutex *mutexp)
{
int ncells, err;
unsigned int extlen;
@@ -305,7 +310,7 @@ int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
if (ncells >= pool->total_elements)
return -ENOMEM;

- err = snd_seq_cell_alloc(pool, &cell, nonblock, file);
+ err = snd_seq_cell_alloc(pool, &cell, nonblock, file, mutexp);
if (err < 0)
return err;

@@ -331,7 +336,8 @@ int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
int size = sizeof(struct snd_seq_event);
if (len < size)
size = len;
- err = snd_seq_cell_alloc(pool, &tmp, nonblock, file);
+ err = snd_seq_cell_alloc(pool, &tmp, nonblock, file,
+ mutexp);
if (err < 0)
goto __error;
if (cell->event.data.ext.ptr == NULL)
diff --git a/sound/core/seq/seq_memory.h b/sound/core/seq/seq_memory.h
index 32f959c17786..3abe306c394a 100644
--- a/sound/core/seq/seq_memory.h
+++ b/sound/core/seq/seq_memory.h
@@ -66,7 +66,8 @@ struct snd_seq_pool {
void snd_seq_cell_free(struct snd_seq_event_cell *cell);

int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
- struct snd_seq_event_cell **cellp, int nonblock, struct file *file);
+ struct snd_seq_event_cell **cellp, int nonblock,
+ struct file *file, struct mutex *mutexp);

/* return number of unused (free) cells */
static inline int snd_seq_unused_cells(struct snd_seq_pool *pool)
diff --git a/sound/core/seq/seq_prioq.c b/sound/core/seq/seq_prioq.c
index bc1c8488fc2a..2bc6759e4adc 100644
--- a/sound/core/seq/seq_prioq.c
+++ b/sound/core/seq/seq_prioq.c
@@ -87,7 +87,7 @@ void snd_seq_prioq_delete(struct snd_seq_prioq **fifo)
if (f->cells > 0) {
/* drain prioQ */
while (f->cells > 0)
- snd_seq_cell_free(snd_seq_prioq_cell_out(f));
+ snd_seq_cell_free(snd_seq_prioq_cell_out(f, NULL));
}

kfree(f);
@@ -214,8 +214,18 @@ int snd_seq_prioq_cell_in(struct snd_seq_prioq * f,
return 0;
}

+/* return 1 if the current time >= event timestamp */
+static int event_is_ready(struct snd_seq_event *ev, void *current_time)
+{
+ if ((ev->flags & SNDRV_SEQ_TIME_STAMP_MASK) == SNDRV_SEQ_TIME_STAMP_TICK)
+ return snd_seq_compare_tick_time(current_time, &ev->time.tick);
+ else
+ return snd_seq_compare_real_time(current_time, &ev->time.time);
+}
+
/* dequeue cell from prioq */
-struct snd_seq_event_cell *snd_seq_prioq_cell_out(struct snd_seq_prioq *f)
+struct snd_seq_event_cell *snd_seq_prioq_cell_out(struct snd_seq_prioq *f,
+ void *current_time)
{
struct snd_seq_event_cell *cell;
unsigned long flags;
@@ -227,6 +237,8 @@ struct snd_seq_event_cell *snd_seq_prioq_cell_out(struct snd_seq_prioq *f)
spin_lock_irqsave(&f->lock, flags);

cell = f->head;
+ if (cell && current_time && !event_is_ready(&cell->event, current_time))
+ cell = NULL;
if (cell) {
f->head = cell->next;

@@ -252,18 +264,6 @@ int snd_seq_prioq_avail(struct snd_seq_prioq * f)
return f->cells;
}

-
-/* peek at cell at the head of the prioq */
-struct snd_seq_event_cell *snd_seq_prioq_cell_peek(struct snd_seq_prioq * f)
-{
- if (f == NULL) {
- pr_debug("ALSA: seq: snd_seq_prioq_cell_in() called with NULL prioq\n");
- return NULL;
- }
- return f->head;
-}
-
-
static inline int prioq_match(struct snd_seq_event_cell *cell,
int client, int timestamp)
{
diff --git a/sound/core/seq/seq_prioq.h b/sound/core/seq/seq_prioq.h
index d38bb78d9345..2c315ca10fc4 100644
--- a/sound/core/seq/seq_prioq.h
+++ b/sound/core/seq/seq_prioq.h
@@ -44,14 +44,12 @@ void snd_seq_prioq_delete(struct snd_seq_prioq **fifo);
int snd_seq_prioq_cell_in(struct snd_seq_prioq *f, struct snd_seq_event_cell *cell);

/* dequeue cell from prioq */
-struct snd_seq_event_cell *snd_seq_prioq_cell_out(struct snd_seq_prioq *f);
+struct snd_seq_event_cell *snd_seq_prioq_cell_out(struct snd_seq_prioq *f,
+ void *current_time);

/* return number of events available in prioq */
int snd_seq_prioq_avail(struct snd_seq_prioq *f);

-/* peek at cell at the head of the prioq */
-struct snd_seq_event_cell *snd_seq_prioq_cell_peek(struct snd_seq_prioq *f);
-
/* client left queue */
void snd_seq_prioq_leave(struct snd_seq_prioq *f, int client, int timestamp);

diff --git a/sound/core/seq/seq_queue.c b/sound/core/seq/seq_queue.c
index f676ae53c477..e120e6780e3b 100644
--- a/sound/core/seq/seq_queue.c
+++ b/sound/core/seq/seq_queue.c
@@ -273,30 +273,20 @@ void snd_seq_check_queue(struct snd_seq_queue *q, int atomic, int hop)

__again:
/* Process tick queue... */
- while ((cell = snd_seq_prioq_cell_peek(q->tickq)) != NULL) {
- if (snd_seq_compare_tick_time(&q->timer->tick.cur_tick,
- &cell->event.time.tick)) {
- cell = snd_seq_prioq_cell_out(q->tickq);
- if (cell)
- snd_seq_dispatch_event(cell, atomic, hop);
- } else {
- /* event remains in the queue */
+ for (;;) {
+ cell = snd_seq_prioq_cell_out(q->tickq,
+ &q->timer->tick.cur_tick);
+ if (!cell)
break;
- }
+ snd_seq_dispatch_event(cell, atomic, hop);
}

-
/* Process time queue... */
- while ((cell = snd_seq_prioq_cell_peek(q->timeq)) != NULL) {
- if (snd_seq_compare_real_time(&q->timer->cur_time,
- &cell->event.time.time)) {
- cell = snd_seq_prioq_cell_out(q->timeq);
- if (cell)
- snd_seq_dispatch_event(cell, atomic, hop);
- } else {
- /* event remains in the queue */
+ for (;;) {
+ cell = snd_seq_prioq_cell_out(q->timeq, &q->timer->cur_time);
+ if (!cell)
break;
- }
+ snd_seq_dispatch_event(cell, atomic, hop);
}

/* free lock */
diff --git a/sound/drivers/aloop.c b/sound/drivers/aloop.c
index 61a3160af532..6c5b4e057521 100644
--- a/sound/drivers/aloop.c
+++ b/sound/drivers/aloop.c
@@ -193,6 +193,11 @@ static inline void loopback_timer_stop(struct loopback_pcm *dpcm)
dpcm->timer.expires = 0;
}

+static inline void loopback_timer_stop_sync(struct loopback_pcm *dpcm)
+{
+ del_timer_sync(&dpcm->timer);
+}
+
#define CABLE_VALID_PLAYBACK (1 << SNDRV_PCM_STREAM_PLAYBACK)
#define CABLE_VALID_CAPTURE (1 << SNDRV_PCM_STREAM_CAPTURE)
#define CABLE_VALID_BOTH (CABLE_VALID_PLAYBACK|CABLE_VALID_CAPTURE)
@@ -327,6 +332,8 @@ static int loopback_prepare(struct snd_pcm_substream *substream)
struct loopback_cable *cable = dpcm->cable;
int bps, salign;

+ loopback_timer_stop_sync(dpcm);
+
salign = (snd_pcm_format_width(runtime->format) *
runtime->channels) / 8;
bps = salign * runtime->rate;
@@ -660,7 +667,9 @@ static void free_cable(struct snd_pcm_substream *substream)
return;
if (cable->streams[!substream->stream]) {
/* other stream is still alive */
+ spin_lock_irq(&cable->lock);
cable->streams[substream->stream] = NULL;
+ spin_unlock_irq(&cable->lock);
} else {
/* free the cable */
loopback->cables[substream->number][dev] = NULL;
@@ -700,7 +709,6 @@ static int loopback_open(struct snd_pcm_substream *substream)
loopback->cables[substream->number][dev] = cable;
}
dpcm->cable = cable;
- cable->streams[substream->stream] = dpcm;

snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);

@@ -732,6 +740,11 @@ static int loopback_open(struct snd_pcm_substream *substream)
runtime->hw = loopback_pcm_hardware;
else
runtime->hw = cable->hw;
+
+ spin_lock_irq(&cable->lock);
+ cable->streams[substream->stream] = dpcm;
+ spin_unlock_irq(&cable->lock);
+
unlock:
if (err < 0) {
free_cable(substream);
@@ -746,7 +759,7 @@ static int loopback_close(struct snd_pcm_substream *substream)
struct loopback *loopback = substream->private_data;
struct loopback_pcm *dpcm = substream->runtime->private_data;

- loopback_timer_stop(dpcm);
+ loopback_timer_stop_sync(dpcm);
mutex_lock(&loopback->cable_lock);
free_cable(substream);
mutex_unlock(&loopback->cable_lock);
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 5c2b5c949c78..75ffb0bc1bbf 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -3289,6 +3289,19 @@ static void alc269_fixup_pincfg_no_hp_to_lineout(struct hda_codec *codec,
spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
}

+static void alc269_fixup_pincfg_U7x7_headset_mic(struct hda_codec *codec,
+ const struct hda_fixup *fix,
+ int action)
+{
+ unsigned int cfg_headphone = snd_hda_codec_get_pincfg(codec, 0x21);
+ unsigned int cfg_headset_mic = snd_hda_codec_get_pincfg(codec, 0x19);
+
+ if (cfg_headphone && cfg_headset_mic == 0x411111f0)
+ snd_hda_codec_set_pincfg(codec, 0x19,
+ (cfg_headphone & ~AC_DEFCFG_DEVICE) |
+ (AC_JACK_MIC_IN << AC_DEFCFG_DEVICE_SHIFT));
+}
+
static void alc269_fixup_hweq(struct hda_codec *codec,
const struct hda_fixup *fix, int action)
{
@@ -3422,8 +3435,12 @@ static void alc269_fixup_mic_mute_hook(void *private_data, int enabled)
pinval = snd_hda_codec_get_pin_target(codec, spec->mute_led_nid);
pinval &= ~AC_PINCTL_VREFEN;
pinval |= enabled ? AC_PINCTL_VREF_HIZ : AC_PINCTL_VREF_80;
- if (spec->mute_led_nid)
+ if (spec->mute_led_nid) {
+ /* temporarily power up/down for setting VREF */
+ snd_hda_power_up(codec);
snd_hda_set_pin_ctl_cache(codec, spec->mute_led_nid, pinval);
+ snd_hda_power_down(codec);
+ }
}

/* Make sure the led works even in runtime suspend */
@@ -4292,6 +4309,7 @@ enum {
ALC269_FIXUP_LIFEBOOK_EXTMIC,
ALC269_FIXUP_LIFEBOOK_HP_PIN,
ALC269_FIXUP_LIFEBOOK_NO_HP_TO_LINEOUT,
+ ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC,
ALC269_FIXUP_AMIC,
ALC269_FIXUP_DMIC,
ALC269VB_FIXUP_AMIC,
@@ -4456,6 +4474,10 @@ static const struct hda_fixup alc269_fixups[] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc269_fixup_pincfg_no_hp_to_lineout,
},
+ [ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc269_fixup_pincfg_U7x7_headset_mic,
+ },
[ALC269_FIXUP_AMIC] = {
.type = HDA_FIXUP_PINS,
.v.pins = (const struct hda_pintbl[]) {
@@ -4996,6 +5018,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x10cf, 0x159f, "Lifebook E780", ALC269_FIXUP_LIFEBOOK_NO_HP_TO_LINEOUT),
SND_PCI_QUIRK(0x10cf, 0x15dc, "Lifebook T731", ALC269_FIXUP_LIFEBOOK_HP_PIN),
SND_PCI_QUIRK(0x10cf, 0x1757, "Lifebook E752", ALC269_FIXUP_LIFEBOOK_HP_PIN),
+ SND_PCI_QUIRK(0x10cf, 0x1629, "Lifebook U7x7", ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC),
SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC),
SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE),
SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE),
diff --git a/sound/soc/au1x/ac97c.c b/sound/soc/au1x/ac97c.c
index c8a2de103c5f..7591e48662bf 100644
--- a/sound/soc/au1x/ac97c.c
+++ b/sound/soc/au1x/ac97c.c
@@ -91,8 +91,8 @@ static unsigned short au1xac97c_ac97_read(struct snd_ac97 *ac97,
do {
mutex_lock(&ctx->lock);

- tmo = 5;
- while ((RD(ctx, AC97_STATUS) & STAT_CP) && tmo--)
+ tmo = 6;
+ while ((RD(ctx, AC97_STATUS) & STAT_CP) && --tmo)
udelay(21); /* wait an ac97 frame time */
if (!tmo) {
pr_debug("ac97rd timeout #1\n");
@@ -105,7 +105,7 @@ static unsigned short au1xac97c_ac97_read(struct snd_ac97 *ac97,
* poll, Forrest, poll...
*/
tmo = 0x10000;
- while ((RD(ctx, AC97_STATUS) & STAT_CP) && tmo--)
+ while ((RD(ctx, AC97_STATUS) & STAT_CP) && --tmo)
asm volatile ("nop");
data = RD(ctx, AC97_CMDRESP);

diff --git a/sound/soc/codecs/rt5651.c b/sound/soc/codecs/rt5651.c
index ea4b1c652a26..44e2ecb9bf5a 100644
--- a/sound/soc/codecs/rt5651.c
+++ b/sound/soc/codecs/rt5651.c
@@ -1732,6 +1732,7 @@ static const struct regmap_config rt5651_regmap = {
.num_reg_defaults = ARRAY_SIZE(rt5651_reg),
.ranges = rt5651_ranges,
.num_ranges = ARRAY_SIZE(rt5651_ranges),
+ .use_single_rw = true,
};

static const struct i2c_device_id rt5651_i2c_id[] = {
diff --git a/sound/soc/nuc900/nuc900-ac97.c b/sound/soc/nuc900/nuc900-ac97.c
index f2f67942b229..84a4fc84d688 100644
--- a/sound/soc/nuc900/nuc900-ac97.c
+++ b/sound/soc/nuc900/nuc900-ac97.c
@@ -67,7 +67,7 @@ static unsigned short nuc900_ac97_read(struct snd_ac97 *ac97,

/* polling the AC_R_FINISH */
while (!(AUDIO_READ(nuc900_audio->mmio + ACTL_ACCON) & AC_R_FINISH)
- && timeout--)
+ && --timeout)
mdelay(1);

if (!timeout) {
@@ -121,7 +121,7 @@ static void nuc900_ac97_write(struct snd_ac97 *ac97, unsigned short reg,

/* polling the AC_W_FINISH */
while ((AUDIO_READ(nuc900_audio->mmio + ACTL_ACCON) & AC_W_FINISH)
- && timeout--)
+ && --timeout)
mdelay(1);

if (!timeout)
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index 0bf61d4a1080..17e611064334 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -343,6 +343,15 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
ep = 0x81;
iface = usb_ifnum_to_if(dev, 2);

+ if (!iface || iface->num_altsetting == 0)
+ return -EINVAL;
+
+ alts = &iface->altsetting[1];
+ goto add_sync_ep;
+ case USB_ID(0x1397, 0x0002):
+ ep = 0x81;
+ iface = usb_ifnum_to_if(dev, 1);
+
if (!iface || iface->num_altsetting == 0)
return -EINVAL;

diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index a1f08d8c7bd2..d74a9927c3d9 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -3266,4 +3266,51 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
}
},

+{
+ /*
+ * Bowers & Wilkins PX headphones only support the 48 kHz sample rate
+ * even though they advertise more. The capture interface doesn't work
+ * even on Windows.
+ */
+ USB_DEVICE(0x19b5, 0x0021),
+ .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+ .ifnum = QUIRK_ANY_INTERFACE,
+ .type = QUIRK_COMPOSITE,
+ .data = (const struct snd_usb_audio_quirk[]) {
+ {
+ .ifnum = 0,
+ .type = QUIRK_AUDIO_STANDARD_MIXER,
+ },
+ /* Capture */
+ {
+ .ifnum = 1,
+ .type = QUIRK_IGNORE_INTERFACE,
+ },
+ /* Playback */
+ {
+ .ifnum = 2,
+ .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+ .data = &(const struct audioformat) {
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .channels = 2,
+ .iface = 2,
+ .altsetting = 1,
+ .altset_idx = 1,
+ .attributes = UAC_EP_CS_ATTR_FILL_MAX |
+ UAC_EP_CS_ATTR_SAMPLE_RATE,
+ .endpoint = 0x03,
+ .ep_attr = USB_ENDPOINT_XFER_ISOC,
+ .rates = SNDRV_PCM_RATE_48000,
+ .rate_min = 48000,
+ .rate_max = 48000,
+ .nr_rates = 1,
+ .rate_table = (unsigned int[]) {
+ 48000
+ }
+ }
+ },
+ }
+ }
+},
+
#undef USB_DEVICE_VENDOR_SPEC
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 378b85b731a7..87cebac99533 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -140,6 +140,19 @@ static int record__open(struct record *rec)
struct record_opts *opts = &rec->opts;
int rc = 0;

+ /*
+ * For initial_delay we need to add a dummy event so that we can track
+ * PERF_RECORD_MMAP while we wait for the initial delay to enable the
+ * real events, the ones asked by the user.
+ */
+ if (opts->initial_delay) {
+ if (perf_evlist__add_dummy(evlist))
+ return -ENOMEM;
+
+ pos = perf_evlist__last(evlist);
+ pos->attr.enable_on_exec = 1;
+ }
+
perf_evlist__config(evlist, opts);

evlist__for_each(evlist, pos) {
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 809b4c50beae..7813a3827aa8 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -146,6 +146,8 @@ static int comment__symbol(char *raw, char *comment, u64 *addrp, char **namep)
return 0;

*addrp = strtoull(comment, &endptr, 16);
+ if (endptr == comment)
+ return 0;
name = strchr(endptr, '<');
if (name == NULL)
return -1;
@@ -251,8 +253,8 @@ static int mov__parse(struct ins_operands *ops)
while (comment[0] != '\0' && isspace(comment[0]))
++comment;

- comment__symbol(ops->source.raw, comment, &ops->source.addr, &ops->source.name);
- comment__symbol(ops->target.raw, comment, &ops->target.addr, &ops->target.name);
+ comment__symbol(ops->source.raw, comment + 1, &ops->source.addr, &ops->source.name);
+ comment__symbol(ops->target.raw, comment + 1, &ops->target.addr, &ops->target.name);

return 0;

@@ -298,7 +300,7 @@ static int dec__parse(struct ins_operands *ops)
while (comment[0] != '\0' && isspace(comment[0]))
++comment;

- comment__symbol(ops->target.raw, comment, &ops->target.addr, &ops->target.name);
+ comment__symbol(ops->target.raw, comment + 1, &ops->target.addr, &ops->target.name);

return 0;
}
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 886c88551c2f..173638d52e46 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -62,6 +62,18 @@ struct perf_evlist *perf_evlist__new_default(void)
return evlist;
}

+struct perf_evlist *perf_evlist__new_dummy(void)
+{
+ struct perf_evlist *evlist = perf_evlist__new();
+
+ if (evlist && perf_evlist__add_dummy(evlist)) {
+ perf_evlist__delete(evlist);
+ evlist = NULL;
+ }
+
+ return evlist;
+}
+
/**
* perf_evlist__set_id_pos - set the positions of event ids.
* @evlist: selected event list
@@ -188,6 +200,22 @@ int perf_evlist__add_default(struct perf_evlist *evlist)
return -ENOMEM;
}

+int perf_evlist__add_dummy(struct perf_evlist *evlist)
+{
+ struct perf_event_attr attr = {
+ .type = PERF_TYPE_SOFTWARE,
+ .config = PERF_COUNT_SW_DUMMY,
+ .size = sizeof(attr), /* to capture ABI version */
+ };
+ struct perf_evsel *evsel = perf_evsel__new(&attr);
+
+ if (evsel == NULL)
+ return -ENOMEM;
+
+ perf_evlist__add(evlist, evsel);
+ return 0;
+}
+
static int perf_evlist__add_attrs(struct perf_evlist *evlist,
struct perf_event_attr *attrs, size_t nr_attrs)
{
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index f5173cd63693..ac57c8c351a5 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -54,6 +54,7 @@ struct perf_evsel_str_handler {

struct perf_evlist *perf_evlist__new(void);
struct perf_evlist *perf_evlist__new_default(void);
+struct perf_evlist *perf_evlist__new_dummy(void);
void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
struct thread_map *threads);
void perf_evlist__exit(struct perf_evlist *evlist);
@@ -67,6 +68,8 @@ int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
#define perf_evlist__add_default_attrs(evlist, array) \
__perf_evlist__add_default_attrs(evlist, array, ARRAY_SIZE(array))

+int perf_evlist__add_dummy(struct perf_evlist *evlist);
+
int perf_evlist__add_newtp(struct perf_evlist *evlist,
const char *sys, const char *name, void *handler);

diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 7a4517c4510d..51977b0633a6 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -996,10 +996,11 @@ static int perf_session_deliver_event(struct perf_session *session,
static int perf_session__process_user_event(struct perf_session *session, union perf_event *event,
struct perf_tool *tool, u64 file_offset)
{
+ struct perf_sample sample = { .time = 0, };
int fd = perf_data_file__fd(session->file);
int err;

- dump_event(session, event, file_offset, NULL);
+ dump_event(session, event, file_offset, &sample);

/* These events are processed right away */
switch (event->header.type) {
diff --git a/tools/testing/selftests/rcutorture/bin/configinit.sh b/tools/testing/selftests/rcutorture/bin/configinit.sh
index 9c3f3d39b934..bbe35e27f992 100755
--- a/tools/testing/selftests/rcutorture/bin/configinit.sh
+++ b/tools/testing/selftests/rcutorture/bin/configinit.sh
@@ -51,7 +51,7 @@ then
mkdir $builddir
fi
else
- echo Bad build directory: \"$builddir\"
+ echo Bad build directory: \"$buildloc\"
exit 2
fi
fi
diff --git a/tools/testing/selftests/rcutorture/bin/kvm.sh b/tools/testing/selftests/rcutorture/bin/kvm.sh
index 40285c58653e..b627e3cb75a9 100644
--- a/tools/testing/selftests/rcutorture/bin/kvm.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm.sh
@@ -67,7 +67,7 @@ usage () {
echo " --kversion vN.NN"
echo " --mac nn:nn:nn:nn:nn:nn"
echo " --no-initrd"
- echo " --qemu-args qemu-system-..."
+ echo " --qemu-args qemu-arguments"
echo " --qemu-cmd qemu-system-..."
echo " --results absolute-pathname"
echo " --torture rcu"
@@ -142,7 +142,7 @@ do
TORTURE_INITRD=""; export TORTURE_INITRD
;;
--qemu-args)
- checkarg --qemu-args "-qemu args" $# "$2" '^-' '^error'
+ checkarg --qemu-args "(qemu arguments)" $# "$2" '^-' '^error'
TORTURE_QEMU_ARG="$2"
shift
;;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 52ecd3a15941..1cfe0c62b128 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -837,8 +837,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
/* Check for overlaps */
r = -EEXIST;
kvm_for_each_memslot(slot, kvm->memslots) {
- if ((slot->id >= KVM_USER_MEM_SLOTS) ||
- (slot->id == mem->slot))
+ if (slot->id == mem->slot)
continue;
if (!((base_gfn + npages <= slot->base_gfn) ||
(base_gfn >= slot->base_gfn + slot->npages)))
