Re: Linux 4.19.205

From: Sasha Levin
Date: Thu Aug 26 2021 - 09:26:55 EST


diff --git a/Documentation/filesystems/mandatory-locking.txt b/Documentation/filesystems/mandatory-locking.txt
index 0979d1d2ca8b..a251ca33164a 100644
--- a/Documentation/filesystems/mandatory-locking.txt
+++ b/Documentation/filesystems/mandatory-locking.txt
@@ -169,3 +169,13 @@ havoc if they lock crucial files. The way around it is to change the file
permissions (remove the setgid bit) before trying to read or write to it.
Of course, that might be a bit tricky if the system is hung :-(

+7. The "mand" mount option
+--------------------------
+Mandatory locking is disabled on all filesystems by default, and must be
+administratively enabled by mounting with "-o mand". That mount option
+is only allowed if the mounting task has the CAP_SYS_ADMIN capability.
+
+Since kernel v4.5, it is possible to disable mandatory locking
+altogether by setting CONFIG_MANDATORY_FILE_LOCKING to "n". A kernel
+with this disabled will reject attempts to mount filesystems with the
+"mand" mount option with the error status EPERM.
diff --git a/Makefile b/Makefile
index d4ffcafb8efa..abc35829f47b 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 19
-SUBLEVEL = 204
+SUBLEVEL = 205
EXTRAVERSION =
NAME = "People's Front"

diff --git a/arch/arm/boot/dts/am43x-epos-evm.dts b/arch/arm/boot/dts/am43x-epos-evm.dts
index 02bbdfb3f258..0cc3ac6566c6 100644
--- a/arch/arm/boot/dts/am43x-epos-evm.dts
+++ b/arch/arm/boot/dts/am43x-epos-evm.dts
@@ -590,7 +590,7 @@
status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&i2c0_pins>;
- clock-frequency = <400000>;
+ clock-frequency = <100000>;

tps65218: tps65218@24 {
reg = <0x24>;
diff --git a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
index fca76a696d9d..9ba4d1630ca3 100644
--- a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
+++ b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
@@ -755,14 +755,14 @@
status = "disabled";
};

- vica: intc@10140000 {
+ vica: interrupt-controller@10140000 {
compatible = "arm,versatile-vic";
interrupt-controller;
#interrupt-cells = <1>;
reg = <0x10140000 0x20>;
};

- vicb: intc@10140020 {
+ vicb: interrupt-controller@10140020 {
compatible = "arm,versatile-vic";
interrupt-controller;
#interrupt-cells = <1>;
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index 53a39661eb13..ccf16bccc2bc 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -277,7 +277,8 @@ int kprobe_handler(struct pt_regs *regs)
if (user_mode(regs))
return 0;

- if (!(regs->msr & MSR_IR) || !(regs->msr & MSR_DR))
+ if (!IS_ENABLED(CONFIG_BOOKE) &&
+ (!(regs->msr & MSR_IR) || !(regs->msr & MSR_DR)))
return 0;

/*
diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index b8c935033d21..4f274d851986 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -215,6 +215,14 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu)
}
}

+static inline void fxsave(struct fxregs_state *fx)
+{
+ if (IS_ENABLED(CONFIG_X86_32))
+ asm volatile( "fxsave %[fx]" : [fx] "=m" (*fx));
+ else
+ asm volatile("fxsaveq %[fx]" : [fx] "=m" (*fx));
+}
+
/* These macros all use (%edi)/(%rdi) as the single memory argument. */
#define XSAVE ".byte " REX_PREFIX "0x0f,0xae,0x27"
#define XSAVEOPT ".byte " REX_PREFIX "0x0f,0xae,0x37"
@@ -283,28 +291,6 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu)
: "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \
: "memory")

-/*
- * This function is called only during boot time when x86 caps are not set
- * up and alternative can not be used yet.
- */
-static inline void copy_xregs_to_kernel_booting(struct xregs_state *xstate)
-{
- u64 mask = -1;
- u32 lmask = mask;
- u32 hmask = mask >> 32;
- int err;
-
- WARN_ON(system_state != SYSTEM_BOOTING);
-
- if (static_cpu_has(X86_FEATURE_XSAVES))
- XSTATE_OP(XSAVES, xstate, lmask, hmask, err);
- else
- XSTATE_OP(XSAVE, xstate, lmask, hmask, err);
-
- /* We should never fault when copying to a kernel buffer: */
- WARN_ON_FPU(err);
-}
-
/*
* This function is called only during boot time when x86 caps are not set
* up and alternative can not be used yet.
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 93b462e48067..b6dedf6c835c 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -118,6 +118,8 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
#define V_IGN_TPR_SHIFT 20
#define V_IGN_TPR_MASK (1 << V_IGN_TPR_SHIFT)

+#define V_IRQ_INJECTION_BITS_MASK (V_IRQ_MASK | V_INTR_PRIO_MASK | V_IGN_TPR_MASK)
+
#define V_INTR_MASKING_SHIFT 24
#define V_INTR_MASKING_MASK (1 << V_INTR_MASKING_SHIFT)

diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index a89dac380243..677508baf95a 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1958,7 +1958,8 @@ static struct irq_chip ioapic_chip __read_mostly = {
.irq_set_affinity = ioapic_set_affinity,
.irq_retrigger = irq_chip_retrigger_hierarchy,
.irq_get_irqchip_state = ioapic_irq_get_chip_state,
- .flags = IRQCHIP_SKIP_SET_WAKE,
+ .flags = IRQCHIP_SKIP_SET_WAKE |
+ IRQCHIP_AFFINITY_PRE_STARTUP,
};

static struct irq_chip ioapic_ir_chip __read_mostly = {
@@ -1971,7 +1972,8 @@ static struct irq_chip ioapic_ir_chip __read_mostly = {
.irq_set_affinity = ioapic_set_affinity,
.irq_retrigger = irq_chip_retrigger_hierarchy,
.irq_get_irqchip_state = ioapic_irq_get_chip_state,
- .flags = IRQCHIP_SKIP_SET_WAKE,
+ .flags = IRQCHIP_SKIP_SET_WAKE |
+ IRQCHIP_AFFINITY_PRE_STARTUP,
};

static inline void init_IO_APIC_traps(void)
diff --git a/arch/x86/kernel/apic/msi.c b/arch/x86/kernel/apic/msi.c
index fb26c956c442..ca17a3848834 100644
--- a/arch/x86/kernel/apic/msi.c
+++ b/arch/x86/kernel/apic/msi.c
@@ -89,11 +89,13 @@ msi_set_affinity(struct irq_data *irqd, const struct cpumask *mask, bool force)
* The quirk bit is not set in this case.
* - The new vector is the same as the old vector
* - The old vector is MANAGED_IRQ_SHUTDOWN_VECTOR (interrupt starts up)
+ * - The interrupt is not yet started up
* - The new destination CPU is the same as the old destination CPU
*/
if (!irqd_msi_nomask_quirk(irqd) ||
cfg->vector == old_cfg.vector ||
old_cfg.vector == MANAGED_IRQ_SHUTDOWN_VECTOR ||
+ !irqd_is_started(irqd) ||
cfg->dest_apicid == old_cfg.dest_apicid) {
irq_msi_update_msg(irqd, cfg);
return ret;
@@ -181,7 +183,8 @@ static struct irq_chip pci_msi_controller = {
.irq_retrigger = irq_chip_retrigger_hierarchy,
.irq_compose_msi_msg = irq_msi_compose_msg,
.irq_set_affinity = msi_set_affinity,
- .flags = IRQCHIP_SKIP_SET_WAKE,
+ .flags = IRQCHIP_SKIP_SET_WAKE |
+ IRQCHIP_AFFINITY_PRE_STARTUP,
};

int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
@@ -282,7 +285,8 @@ static struct irq_chip pci_msi_ir_controller = {
.irq_ack = irq_chip_ack_parent,
.irq_retrigger = irq_chip_retrigger_hierarchy,
.irq_set_vcpu_affinity = irq_chip_set_vcpu_affinity_parent,
- .flags = IRQCHIP_SKIP_SET_WAKE,
+ .flags = IRQCHIP_SKIP_SET_WAKE |
+ IRQCHIP_AFFINITY_PRE_STARTUP,
};

static struct msi_domain_info pci_msi_ir_domain_info = {
@@ -325,7 +329,8 @@ static struct irq_chip dmar_msi_controller = {
.irq_retrigger = irq_chip_retrigger_hierarchy,
.irq_compose_msi_msg = irq_msi_compose_msg,
.irq_write_msi_msg = dmar_msi_write_msg,
- .flags = IRQCHIP_SKIP_SET_WAKE,
+ .flags = IRQCHIP_SKIP_SET_WAKE |
+ IRQCHIP_AFFINITY_PRE_STARTUP,
};

static irq_hw_number_t dmar_msi_get_hwirq(struct msi_domain_info *info,
@@ -423,7 +428,7 @@ static struct irq_chip hpet_msi_controller __ro_after_init = {
.irq_retrigger = irq_chip_retrigger_hierarchy,
.irq_compose_msi_msg = irq_msi_compose_msg,
.irq_write_msi_msg = hpet_msi_write_msg,
- .flags = IRQCHIP_SKIP_SET_WAKE,
+ .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_AFFINITY_PRE_STARTUP,
};

static irq_hw_number_t hpet_msi_get_hwirq(struct msi_domain_info *info,
diff --git a/arch/x86/kernel/cpu/intel_rdt_monitor.c b/arch/x86/kernel/cpu/intel_rdt_monitor.c
index 5dfa5ab9a5ae..6eeb17dfde48 100644
--- a/arch/x86/kernel/cpu/intel_rdt_monitor.c
+++ b/arch/x86/kernel/cpu/intel_rdt_monitor.c
@@ -233,15 +233,14 @@ static u64 mbm_overflow_count(u64 prev_msr, u64 cur_msr)
return chunks >>= shift;
}

-static int __mon_event_count(u32 rmid, struct rmid_read *rr)
+static u64 __mon_event_count(u32 rmid, struct rmid_read *rr)
{
struct mbm_state *m;
u64 chunks, tval;

tval = __rmid_read(rmid, rr->evtid);
if (tval & (RMID_VAL_ERROR | RMID_VAL_UNAVAIL)) {
- rr->val = tval;
- return -EINVAL;
+ return tval;
}
switch (rr->evtid) {
case QOS_L3_OCCUP_EVENT_ID:
@@ -253,12 +252,6 @@ static int __mon_event_count(u32 rmid, struct rmid_read *rr)
case QOS_L3_MBM_LOCAL_EVENT_ID:
m = &rr->d->mbm_local[rmid];
break;
- default:
- /*
- * Code would never reach here because
- * an invalid event id would fail the __rmid_read.
- */
- return -EINVAL;
}

if (rr->first) {
@@ -308,23 +301,29 @@ void mon_event_count(void *info)
struct rdtgroup *rdtgrp, *entry;
struct rmid_read *rr = info;
struct list_head *head;
+ u64 ret_val;

rdtgrp = rr->rgrp;

- if (__mon_event_count(rdtgrp->mon.rmid, rr))
- return;
+ ret_val = __mon_event_count(rdtgrp->mon.rmid, rr);

/*
- * For Ctrl groups read data from child monitor groups.
+ * For Ctrl groups read data from child monitor groups and
+ * add them together. Count events which are read successfully.
+ * Discard the rmid_read's reporting errors.
*/
head = &rdtgrp->mon.crdtgrp_list;

if (rdtgrp->type == RDTCTRL_GROUP) {
list_for_each_entry(entry, head, mon.crdtgrp_list) {
- if (__mon_event_count(entry->mon.rmid, rr))
- return;
+ if (__mon_event_count(entry->mon.rmid, rr) == 0)
+ ret_val = 0;
}
}
+
+ /* Report error if none of rmid_reads are successful */
+ if (ret_val)
+ rr->val = ret_val;
}

/*
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index 601a5da1d196..7d372db8bee1 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -404,6 +404,24 @@ static void __init print_xstate_offset_size(void)
}
}

+/*
+ * All supported features have either init state all zeros or are
+ * handled in setup_init_fpu() individually. This is an explicit
+ * feature list and does not use XFEATURE_MASK*SUPPORTED to catch
+ * newly added supported features at build time and make people
+ * actually look at the init state for the new feature.
+ */
+#define XFEATURES_INIT_FPSTATE_HANDLED \
+ (XFEATURE_MASK_FP | \
+ XFEATURE_MASK_SSE | \
+ XFEATURE_MASK_YMM | \
+ XFEATURE_MASK_OPMASK | \
+ XFEATURE_MASK_ZMM_Hi256 | \
+ XFEATURE_MASK_Hi16_ZMM | \
+ XFEATURE_MASK_PKRU | \
+ XFEATURE_MASK_BNDREGS | \
+ XFEATURE_MASK_BNDCSR)
+
/*
* setup the xstate image representing the init state
*/
@@ -411,6 +429,8 @@ static void __init setup_init_fpu_buf(void)
{
static int on_boot_cpu __initdata = 1;

+ BUILD_BUG_ON(XCNTXT_MASK != XFEATURES_INIT_FPSTATE_HANDLED);
+
WARN_ON_FPU(!on_boot_cpu);
on_boot_cpu = 0;

@@ -429,10 +449,22 @@ static void __init setup_init_fpu_buf(void)
copy_kernel_to_xregs_booting(&init_fpstate.xsave);

/*
- * Dump the init state again. This is to identify the init state
- * of any feature which is not represented by all zero's.
+ * All components are now in init state. Read the state back so
+ * that init_fpstate contains all non-zero init state. This only
+ * works with XSAVE, but not with XSAVEOPT and XSAVES because
+ * those use the init optimization which skips writing data for
+ * components in init state.
+ *
+ * XSAVE could be used, but that would require to reshuffle the
+ * data when XSAVES is available because XSAVES uses xstate
+ * compaction. But doing so is a pointless exercise because most
+ * components have an all zeros init state except for the legacy
+ * ones (FP and SSE). Those can be saved with FXSAVE into the
+ * legacy area. Adding new features requires to ensure that init
+ * state is all zeroes or if not to add the necessary handling
+ * here.
*/
- copy_xregs_to_kernel_booting(&init_fpstate.xsave);
+ fxsave(&init_fpstate.fxsave);
}

static int xfeature_uncompacted_offset(int xfeature_nr)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 72d729f34437..85181457413e 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -513,6 +513,9 @@ static void recalc_intercepts(struct vcpu_svm *svm)
c->intercept_dr = h->intercept_dr | g->intercept_dr;
c->intercept_exceptions = h->intercept_exceptions | g->intercept_exceptions;
c->intercept = h->intercept | g->intercept;
+
+ c->intercept |= (1ULL << INTERCEPT_VMLOAD);
+ c->intercept |= (1ULL << INTERCEPT_VMSAVE);
}

static inline struct vmcb *get_host_vmcb(struct vcpu_svm *svm)
@@ -1441,12 +1444,7 @@ static __init int svm_hardware_setup(void)
}
}

- if (vgif) {
- if (!boot_cpu_has(X86_FEATURE_VGIF))
- vgif = false;
- else
- pr_info("Virtual GIF supported\n");
- }
+ vgif = false; /* Disabled for CVE-2021-3653 */

return 0;

@@ -3590,7 +3588,13 @@ static void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
svm->nested.intercept = nested_vmcb->control.intercept;

svm_flush_tlb(&svm->vcpu, true);
- svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK;
+
+ svm->vmcb->control.int_ctl &=
+ V_INTR_MASKING_MASK | V_GIF_ENABLE_MASK | V_GIF_MASK;
+
+ svm->vmcb->control.int_ctl |= nested_vmcb->control.int_ctl &
+ (V_TPR_MASK | V_IRQ_INJECTION_BITS_MASK);
+
if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK)
svm->vcpu.arch.hflags |= HF_VINTR_MASK;
else
diff --git a/arch/x86/tools/chkobjdump.awk b/arch/x86/tools/chkobjdump.awk
index fd1ab80be0de..a4cf678cf5c8 100644
--- a/arch/x86/tools/chkobjdump.awk
+++ b/arch/x86/tools/chkobjdump.awk
@@ -10,6 +10,7 @@ BEGIN {

/^GNU objdump/ {
verstr = ""
+ gsub(/\(.*\)/, "");
for (i = 3; i <= NF; i++)
if (match($(i), "^[0-9]")) {
verstr = $(i);
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index cb88f3b43a94..58a756ca14d8 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -2834,6 +2834,9 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
struct nd_mapping_desc *mapping;

+ /* range index 0 == unmapped in SPA or invalid-SPA */
+ if (memdev->range_index == 0 || spa->range_index == 0)
+ continue;
if (memdev->range_index != spa->range_index)
continue;
if (count >= ND_MAX_MAPPINGS) {
diff --git a/drivers/base/core.c b/drivers/base/core.c
index f7f601858f10..6e380ad9d08a 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -1682,6 +1682,7 @@ void device_initialize(struct device *dev)
device_pm_init(dev);
set_dev_node(dev, -1);
#ifdef CONFIG_GENERIC_MSI_IRQ
+ raw_spin_lock_init(&dev->msi_lock);
INIT_LIST_HEAD(&dev->msi_list);
#endif
INIT_LIST_HEAD(&dev->links.consumers);
diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c
index a36452bd9612..31b5655419b4 100644
--- a/drivers/cpufreq/armada-37xx-cpufreq.c
+++ b/drivers/cpufreq/armada-37xx-cpufreq.c
@@ -102,7 +102,11 @@ struct armada_37xx_dvfs {
};

static struct armada_37xx_dvfs armada_37xx_dvfs[] = {
- {.cpu_freq_max = 1200*1000*1000, .divider = {1, 2, 4, 6} },
+ /*
+ * The cpufreq scaling for 1.2 GHz variant of the SOC is currently
+ * unstable because we do not know how to configure it properly.
+ */
+ /* {.cpu_freq_max = 1200*1000*1000, .divider = {1, 2, 4, 6} }, */
{.cpu_freq_max = 1000*1000*1000, .divider = {1, 2, 4, 5} },
{.cpu_freq_max = 800*1000*1000, .divider = {1, 2, 3, 4} },
{.cpu_freq_max = 600*1000*1000, .divider = {2, 4, 5, 6} },
diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c
index 8344a60c2131..a9d3ab94749b 100644
--- a/drivers/dma/of-dma.c
+++ b/drivers/dma/of-dma.c
@@ -68,8 +68,12 @@ static struct dma_chan *of_dma_router_xlate(struct of_phandle_args *dma_spec,
return NULL;

ofdma_target = of_dma_find_controller(&dma_spec_target);
- if (!ofdma_target)
- return NULL;
+ if (!ofdma_target) {
+ ofdma->dma_router->route_free(ofdma->dma_router->dev,
+ route_data);
+ chan = ERR_PTR(-EPROBE_DEFER);
+ goto err;
+ }

chan = ofdma_target->of_dma_xlate(&dma_spec_target, ofdma_target);
if (IS_ERR_OR_NULL(chan)) {
@@ -80,6 +84,7 @@ static struct dma_chan *of_dma_router_xlate(struct of_phandle_args *dma_spec,
chan->route_data = route_data;
}

+err:
/*
* Need to put the node back since the ofdma->of_dma_route_allocate
* has taken it for generating the new, translated dma_spec
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c
index 6c94ed750049..d77bf325f038 100644
--- a/drivers/dma/sh/usb-dmac.c
+++ b/drivers/dma/sh/usb-dmac.c
@@ -860,8 +860,8 @@ static int usb_dmac_probe(struct platform_device *pdev)

error:
of_dma_controller_free(pdev->dev.of_node);
- pm_runtime_put(&pdev->dev);
error_pm:
+ pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
return ret;
}
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 0c5668e897fe..d891ec05bc48 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -332,6 +332,7 @@ struct xilinx_dma_tx_descriptor {
* @genlock: Support genlock mode
* @err: Channel has errors
* @idle: Check for channel idle
+ * @terminating: Check for channel being synchronized by user
* @tasklet: Cleanup work after irq
* @config: Device configuration info
* @flush_on_fsync: Flush on Frame sync
@@ -369,6 +370,7 @@ struct xilinx_dma_chan {
bool genlock;
bool err;
bool idle;
+ bool terminating;
struct tasklet_struct tasklet;
struct xilinx_vdma_config config;
bool flush_on_fsync;
@@ -843,6 +845,13 @@ static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
/* Run any dependencies, then free the descriptor */
dma_run_dependencies(&desc->async_tx);
xilinx_dma_free_tx_descriptor(chan, desc);
+
+ /*
+ * While we ran a callback the user called a terminate function,
+ * which takes care of cleaning up any remaining descriptors
+ */
+ if (chan->terminating)
+ break;
}

spin_unlock_irqrestore(&chan->lock, flags);
@@ -1612,6 +1621,8 @@ static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
if (desc->cyclic)
chan->cyclic = true;

+ chan->terminating = false;
+
spin_unlock_irqrestore(&chan->lock, flags);

return cookie;
@@ -2068,6 +2079,7 @@ static int xilinx_dma_terminate_all(struct dma_chan *dchan)
}

/* Remove and free all of the descriptors in the lists */
+ chan->terminating = true;
xilinx_dma_free_descriptors(chan);
chan->idle = true;

diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index 1d10ee86299d..57aece809841 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -149,7 +149,7 @@ static ssize_t i2cdev_read(struct file *file, char __user *buf, size_t count,
if (count > 8192)
count = 8192;

- tmp = kmalloc(count, GFP_KERNEL);
+ tmp = kzalloc(count, GFP_KERNEL);
if (tmp == NULL)
return -ENOMEM;

@@ -158,7 +158,8 @@ static ssize_t i2cdev_read(struct file *file, char __user *buf, size_t count,

ret = i2c_master_recv(client, tmp, count);
if (ret >= 0)
- ret = copy_to_user(buf, tmp, count) ? -EFAULT : ret;
+ if (copy_to_user(buf, tmp, ret))
+ ret = -EFAULT;
kfree(tmp);
return ret;
}
diff --git a/drivers/iio/adc/palmas_gpadc.c b/drivers/iio/adc/palmas_gpadc.c
index 69b9affeef1e..7dcd4213d38a 100644
--- a/drivers/iio/adc/palmas_gpadc.c
+++ b/drivers/iio/adc/palmas_gpadc.c
@@ -659,8 +659,8 @@ static int palmas_adc_wakeup_configure(struct palmas_gpadc *adc)

adc_period = adc->auto_conversion_period;
for (i = 0; i < 16; ++i) {
- if (((1000 * (1 << i)) / 32) < adc_period)
- continue;
+ if (((1000 * (1 << i)) / 32) >= adc_period)
+ break;
}
if (i > 0)
i--;
diff --git a/drivers/iio/humidity/hdc100x.c b/drivers/iio/humidity/hdc100x.c
index 0fcaa2c0b2f4..51ad5a9ed085 100644
--- a/drivers/iio/humidity/hdc100x.c
+++ b/drivers/iio/humidity/hdc100x.c
@@ -24,6 +24,8 @@
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>

+#include <linux/time.h>
+
#define HDC100X_REG_TEMP 0x00
#define HDC100X_REG_HUMIDITY 0x01

@@ -165,7 +167,7 @@ static int hdc100x_get_measurement(struct hdc100x_data *data,
struct iio_chan_spec const *chan)
{
struct i2c_client *client = data->client;
- int delay = data->adc_int_us[chan->address];
+ int delay = data->adc_int_us[chan->address] + 1*USEC_PER_MSEC;
int ret;
__be16 val;

@@ -322,7 +324,7 @@ static irqreturn_t hdc100x_trigger_handler(int irq, void *p)
struct iio_dev *indio_dev = pf->indio_dev;
struct hdc100x_data *data = iio_priv(indio_dev);
struct i2c_client *client = data->client;
- int delay = data->adc_int_us[0] + data->adc_int_us[1];
+ int delay = data->adc_int_us[0] + data->adc_int_us[1] + 2*USEC_PER_MSEC;
int ret;

/* dual read starts at temp register */
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index d2166dfc8b3f..dcb865d19309 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1928,7 +1928,7 @@ static inline int guestwidth_to_adjustwidth(int gaw)
static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu,
int guest_width)
{
- int adjust_width, agaw;
+ int adjust_width, agaw, cap_width;
unsigned long sagaw;
int err;

@@ -1942,8 +1942,9 @@ static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu,
domain_reserve_special_ranges(domain);

/* calculate AGAW */
- if (guest_width > cap_mgaw(iommu->cap))
- guest_width = cap_mgaw(iommu->cap);
+ cap_width = min_t(int, cap_mgaw(iommu->cap), agaw_to_width(iommu->agaw));
+ if (guest_width > cap_width)
+ guest_width = cap_width;
domain->gaw = guest_width;
adjust_width = guestwidth_to_adjustwidth(guest_width);
agaw = width_to_agaw(adjust_width);
diff --git a/drivers/ipack/carriers/tpci200.c b/drivers/ipack/carriers/tpci200.c
index 7895320e50c1..4c8da6af2516 100644
--- a/drivers/ipack/carriers/tpci200.c
+++ b/drivers/ipack/carriers/tpci200.c
@@ -94,16 +94,13 @@ static void tpci200_unregister(struct tpci200_board *tpci200)
free_irq(tpci200->info->pdev->irq, (void *) tpci200);

pci_iounmap(tpci200->info->pdev, tpci200->info->interface_regs);
- pci_iounmap(tpci200->info->pdev, tpci200->info->cfg_regs);

pci_release_region(tpci200->info->pdev, TPCI200_IP_INTERFACE_BAR);
pci_release_region(tpci200->info->pdev, TPCI200_IO_ID_INT_SPACES_BAR);
pci_release_region(tpci200->info->pdev, TPCI200_MEM16_SPACE_BAR);
pci_release_region(tpci200->info->pdev, TPCI200_MEM8_SPACE_BAR);
- pci_release_region(tpci200->info->pdev, TPCI200_CFG_MEM_BAR);

pci_disable_device(tpci200->info->pdev);
- pci_dev_put(tpci200->info->pdev);
}

static void tpci200_enable_irq(struct tpci200_board *tpci200,
@@ -262,7 +259,7 @@ static int tpci200_register(struct tpci200_board *tpci200)
"(bn 0x%X, sn 0x%X) failed to allocate PCI resource for BAR 2 !",
tpci200->info->pdev->bus->number,
tpci200->info->pdev->devfn);
- goto out_disable_pci;
+ goto err_disable_device;
}

/* Request IO ID INT space (Bar 3) */
@@ -274,7 +271,7 @@ static int tpci200_register(struct tpci200_board *tpci200)
"(bn 0x%X, sn 0x%X) failed to allocate PCI resource for BAR 3 !",
tpci200->info->pdev->bus->number,
tpci200->info->pdev->devfn);
- goto out_release_ip_space;
+ goto err_ip_interface_bar;
}

/* Request MEM8 space (Bar 5) */
@@ -285,7 +282,7 @@ static int tpci200_register(struct tpci200_board *tpci200)
"(bn 0x%X, sn 0x%X) failed to allocate PCI resource for BAR 5!",
tpci200->info->pdev->bus->number,
tpci200->info->pdev->devfn);
- goto out_release_ioid_int_space;
+ goto err_io_id_int_spaces_bar;
}

/* Request MEM16 space (Bar 4) */
@@ -296,7 +293,7 @@ static int tpci200_register(struct tpci200_board *tpci200)
"(bn 0x%X, sn 0x%X) failed to allocate PCI resource for BAR 4!",
tpci200->info->pdev->bus->number,
tpci200->info->pdev->devfn);
- goto out_release_mem8_space;
+ goto err_mem8_space_bar;
}

/* Map internal tpci200 driver user space */
@@ -310,7 +307,7 @@ static int tpci200_register(struct tpci200_board *tpci200)
tpci200->info->pdev->bus->number,
tpci200->info->pdev->devfn);
res = -ENOMEM;
- goto out_release_mem8_space;
+ goto err_mem16_space_bar;
}

/* Initialize lock that protects interface_regs */
@@ -349,18 +346,22 @@ static int tpci200_register(struct tpci200_board *tpci200)
"(bn 0x%X, sn 0x%X) unable to register IRQ !",
tpci200->info->pdev->bus->number,
tpci200->info->pdev->devfn);
- goto out_release_ioid_int_space;
+ goto err_interface_regs;
}

return 0;

-out_release_mem8_space:
+err_interface_regs:
+ pci_iounmap(tpci200->info->pdev, tpci200->info->interface_regs);
+err_mem16_space_bar:
+ pci_release_region(tpci200->info->pdev, TPCI200_MEM16_SPACE_BAR);
+err_mem8_space_bar:
pci_release_region(tpci200->info->pdev, TPCI200_MEM8_SPACE_BAR);
-out_release_ioid_int_space:
+err_io_id_int_spaces_bar:
pci_release_region(tpci200->info->pdev, TPCI200_IO_ID_INT_SPACES_BAR);
-out_release_ip_space:
+err_ip_interface_bar:
pci_release_region(tpci200->info->pdev, TPCI200_IP_INTERFACE_BAR);
-out_disable_pci:
+err_disable_device:
pci_disable_device(tpci200->info->pdev);
return res;
}
@@ -532,7 +533,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev,
tpci200->info = kzalloc(sizeof(struct tpci200_infos), GFP_KERNEL);
if (!tpci200->info) {
ret = -ENOMEM;
- goto out_err_info;
+ goto err_tpci200;
}

pci_dev_get(pdev);
@@ -543,7 +544,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev,
if (ret) {
dev_err(&pdev->dev, "Failed to allocate PCI Configuration Memory");
ret = -EBUSY;
- goto out_err_pci_request;
+ goto err_tpci200_info;
}
tpci200->info->cfg_regs = ioremap_nocache(
pci_resource_start(pdev, TPCI200_CFG_MEM_BAR),
@@ -551,7 +552,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev,
if (!tpci200->info->cfg_regs) {
dev_err(&pdev->dev, "Failed to map PCI Configuration Memory");
ret = -EFAULT;
- goto out_err_ioremap;
+ goto err_request_region;
}

/* Disable byte swapping for 16 bit IP module access. This will ensure
@@ -574,7 +575,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev,
if (ret) {
dev_err(&pdev->dev, "error during tpci200 install\n");
ret = -ENODEV;
- goto out_err_install;
+ goto err_cfg_regs;
}

/* Register the carrier in the industry pack bus driver */
@@ -586,7 +587,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev,
dev_err(&pdev->dev,
"error registering the carrier on ipack driver\n");
ret = -EFAULT;
- goto out_err_bus_register;
+ goto err_tpci200_install;
}

/* save the bus number given by ipack to logging purpose */
@@ -597,19 +598,16 @@ static int tpci200_pci_probe(struct pci_dev *pdev,
tpci200_create_device(tpci200, i);
return 0;

-out_err_bus_register:
+err_tpci200_install:
tpci200_uninstall(tpci200);
- /* tpci200->info->cfg_regs is unmapped in tpci200_uninstall */
- tpci200->info->cfg_regs = NULL;
-out_err_install:
- if (tpci200->info->cfg_regs)
- iounmap(tpci200->info->cfg_regs);
-out_err_ioremap:
+err_cfg_regs:
+ pci_iounmap(tpci200->info->pdev, tpci200->info->cfg_regs);
+err_request_region:
pci_release_region(pdev, TPCI200_CFG_MEM_BAR);
-out_err_pci_request:
- pci_dev_put(pdev);
+err_tpci200_info:
kfree(tpci200->info);
-out_err_info:
+ pci_dev_put(pdev);
+err_tpci200:
kfree(tpci200);
return ret;
}
@@ -619,6 +617,12 @@ static void __tpci200_pci_remove(struct tpci200_board *tpci200)
ipack_bus_unregister(tpci200->info->ipack_bus);
tpci200_uninstall(tpci200);

+ pci_iounmap(tpci200->info->pdev, tpci200->info->cfg_regs);
+
+ pci_release_region(tpci200->info->pdev, TPCI200_CFG_MEM_BAR);
+
+ pci_dev_put(tpci200->info->pdev);
+
kfree(tpci200->info);
kfree(tpci200);
}
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 22c454c7aaca..8e09586f880f 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -2043,8 +2043,8 @@ static void dw_mci_tasklet_func(unsigned long priv)
continue;
}

- dw_mci_stop_dma(host);
send_stop_abort(host, data);
+ dw_mci_stop_dma(host);
state = STATE_SENDING_STOP;
break;
}
@@ -2068,10 +2068,10 @@ static void dw_mci_tasklet_func(unsigned long priv)
*/
if (test_and_clear_bit(EVENT_DATA_ERROR,
&host->pending_events)) {
- dw_mci_stop_dma(host);
if (!(host->data_status & (SDMMC_INT_DRTO |
SDMMC_INT_EBE)))
send_stop_abort(host, data);
+ dw_mci_stop_dma(host);
state = STATE_DATA_ERROR;
break;
}
@@ -2104,10 +2104,10 @@ static void dw_mci_tasklet_func(unsigned long priv)
*/
if (test_and_clear_bit(EVENT_DATA_ERROR,
&host->pending_events)) {
- dw_mci_stop_dma(host);
if (!(host->data_status & (SDMMC_INT_DRTO |
SDMMC_INT_EBE)))
send_stop_abort(host, data);
+ dw_mci_stop_dma(host);
state = STATE_DATA_ERROR;
break;
}
diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c
index b4f6e1a67dd9..b89c474e6b6b 100644
--- a/drivers/net/dsa/lan9303-core.c
+++ b/drivers/net/dsa/lan9303-core.c
@@ -566,12 +566,12 @@ static int lan9303_alr_make_entry_raw(struct lan9303 *chip, u32 dat0, u32 dat1)
return 0;
}

-typedef void alr_loop_cb_t(struct lan9303 *chip, u32 dat0, u32 dat1,
- int portmap, void *ctx);
+typedef int alr_loop_cb_t(struct lan9303 *chip, u32 dat0, u32 dat1,
+ int portmap, void *ctx);

-static void lan9303_alr_loop(struct lan9303 *chip, alr_loop_cb_t *cb, void *ctx)
+static int lan9303_alr_loop(struct lan9303 *chip, alr_loop_cb_t *cb, void *ctx)
{
- int i;
+ int ret = 0, i;

mutex_lock(&chip->alr_mutex);
lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD,
@@ -591,13 +591,17 @@ static void lan9303_alr_loop(struct lan9303 *chip, alr_loop_cb_t *cb, void *ctx)
LAN9303_ALR_DAT1_PORT_BITOFFS;
portmap = alrport_2_portmap[alrport];

- cb(chip, dat0, dat1, portmap, ctx);
+ ret = cb(chip, dat0, dat1, portmap, ctx);
+ if (ret)
+ break;

lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD,
LAN9303_ALR_CMD_GET_NEXT);
lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD, 0);
}
mutex_unlock(&chip->alr_mutex);
+
+ return ret;
}

static void alr_reg_to_mac(u32 dat0, u32 dat1, u8 mac[6])
@@ -615,18 +619,20 @@ struct del_port_learned_ctx {
};

/* Clear learned (non-static) entry on given port */
-static void alr_loop_cb_del_port_learned(struct lan9303 *chip, u32 dat0,
- u32 dat1, int portmap, void *ctx)
+static int alr_loop_cb_del_port_learned(struct lan9303 *chip, u32 dat0,
+ u32 dat1, int portmap, void *ctx)
{
struct del_port_learned_ctx *del_ctx = ctx;
int port = del_ctx->port;

if (((BIT(port) & portmap) == 0) || (dat1 & LAN9303_ALR_DAT1_STATIC))
- return;
+ return 0;

/* learned entries has only one port, we can just delete */
dat1 &= ~LAN9303_ALR_DAT1_VALID; /* delete entry */
lan9303_alr_make_entry_raw(chip, dat0, dat1);
+
+ return 0;
}

struct port_fdb_dump_ctx {
@@ -635,19 +641,19 @@ struct port_fdb_dump_ctx {
dsa_fdb_dump_cb_t *cb;
};

-static void alr_loop_cb_fdb_port_dump(struct lan9303 *chip, u32 dat0,
- u32 dat1, int portmap, void *ctx)
+static int alr_loop_cb_fdb_port_dump(struct lan9303 *chip, u32 dat0,
+ u32 dat1, int portmap, void *ctx)
{
struct port_fdb_dump_ctx *dump_ctx = ctx;
u8 mac[ETH_ALEN];
bool is_static;

if ((BIT(dump_ctx->port) & portmap) == 0)
- return;
+ return 0;

alr_reg_to_mac(dat0, dat1, mac);
is_static = !!(dat1 & LAN9303_ALR_DAT1_STATIC);
- dump_ctx->cb(mac, 0, is_static, dump_ctx->data);
+ return dump_ctx->cb(mac, 0, is_static, dump_ctx->data);
}

/* Set a static ALR entry. Delete entry if port_map is zero */
@@ -1214,9 +1220,7 @@ static int lan9303_port_fdb_dump(struct dsa_switch *ds, int port,
};

dev_dbg(chip->dev, "%s(%d)\n", __func__, port);
- lan9303_alr_loop(chip, alr_loop_cb_fdb_port_dump, &dump_ctx);
-
- return 0;
+ return lan9303_alr_loop(chip, alr_loop_cb_fdb_port_dump, &dump_ctx);
}

static int lan9303_port_mdb_prepare(struct dsa_switch *ds, int port,
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 6335c4ea0957..2ff6a0be97de 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -54,6 +54,7 @@ static const struct mt7530_mib_desc mt7530_mib[] = {
MIB_DESC(2, 0x48, "TxBytes"),
MIB_DESC(1, 0x60, "RxDrop"),
MIB_DESC(1, 0x64, "RxFiltering"),
+ MIB_DESC(1, 0x68, "RxUnicast"),
MIB_DESC(1, 0x6c, "RxMulticast"),
MIB_DESC(1, 0x70, "RxBroadcast"),
MIB_DESC(1, 0x74, "RxAlignErr"),
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index ebcf4ea66385..55827ac65a15 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -282,6 +282,26 @@ static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
return md_dst->u.port_info.port_id;
}

+static bool bnxt_txr_netif_try_stop_queue(struct bnxt *bp,
+ struct bnxt_tx_ring_info *txr,
+ struct netdev_queue *txq)
+{
+ netif_tx_stop_queue(txq);
+
+ /* netif_tx_stop_queue() must be done before checking
+ * tx index in bnxt_tx_avail() below, because in
+ * bnxt_tx_int(), we update tx index before checking for
+ * netif_tx_queue_stopped().
+ */
+ smp_mb();
+ if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh) {
+ netif_tx_wake_queue(txq);
+ return false;
+ }
+
+ return true;
+}
+
static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct bnxt *bp = netdev_priv(dev);
@@ -309,8 +329,8 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)

free_size = bnxt_tx_avail(bp, txr);
if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
- netif_tx_stop_queue(txq);
- return NETDEV_TX_BUSY;
+ if (bnxt_txr_netif_try_stop_queue(bp, txr, txq))
+ return NETDEV_TX_BUSY;
}

length = skb->len;
@@ -521,16 +541,7 @@ tx_done:
if (skb->xmit_more && !tx_buf->is_push)
bnxt_db_write(bp, txr->tx_doorbell, DB_KEY_TX | prod);

- netif_tx_stop_queue(txq);
-
- /* netif_tx_stop_queue() must be done before checking
- * tx index in bnxt_tx_avail() below, because in
- * bnxt_tx_int(), we update tx index before checking for
- * netif_tx_queue_stopped().
- */
- smp_mb();
- if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
- netif_tx_wake_queue(txq);
+ bnxt_txr_netif_try_stop_queue(bp, txr, txq);
}
return NETDEV_TX_OK;

@@ -614,14 +625,9 @@ next_tx_int:
smp_mb();

if (unlikely(netif_tx_queue_stopped(txq)) &&
- (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
- __netif_tx_lock(txq, smp_processor_id());
- if (netif_tx_queue_stopped(txq) &&
- bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
- txr->dev_state != BNXT_DEV_STATE_CLOSING)
- netif_tx_wake_queue(txq);
- __netif_tx_unlock(txq);
- }
+ bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
+ READ_ONCE(txr->dev_state) != BNXT_DEV_STATE_CLOSING)
+ netif_tx_wake_queue(txq);
}

static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
@@ -6263,10 +6269,9 @@ static void bnxt_disable_napi(struct bnxt *bp)
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;

+ napi_disable(&bp->bnapi[i]->napi);
if (bp->bnapi[i]->rx_ring)
cancel_work_sync(&cpr->dim.work);
-
- napi_disable(&bp->bnapi[i]->napi);
}
}

@@ -6294,9 +6299,11 @@ void bnxt_tx_disable(struct bnxt *bp)
if (bp->tx_ring) {
for (i = 0; i < bp->tx_nr_rings; i++) {
txr = &bp->tx_ring[i];
- txr->dev_state = BNXT_DEV_STATE_CLOSING;
+ WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
}
}
+ /* Make sure napi polls see @dev_state change */
+ synchronize_net();
/* Drop carrier first to prevent TX timeout */
netif_carrier_off(bp->dev);
/* Stop all TX queues */
@@ -6310,8 +6317,10 @@ void bnxt_tx_enable(struct bnxt *bp)

for (i = 0; i < bp->tx_nr_rings; i++) {
txr = &bp->tx_ring[i];
- txr->dev_state = 0;
+ WRITE_ONCE(txr->dev_state, 0);
}
+ /* Make sure napi polls see @dev_state change */
+ synchronize_net();
netif_tx_wake_all_queues(bp->dev);
if (bp->link_info.link_up)
netif_carrier_on(bp->dev);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index 6ed8294f7df8..a15845e511b2 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -3158,8 +3158,10 @@ int qlcnic_83xx_flash_read32(struct qlcnic_adapter *adapter, u32 flash_addr,

indirect_addr = QLC_83XX_FLASH_DIRECT_DATA(addr);
ret = QLCRD32(adapter, indirect_addr, &err);
- if (err == -EIO)
+ if (err == -EIO) {
+ qlcnic_83xx_unlock_flash(adapter);
return err;
+ }

word = ret;
*(u32 *)p_data = word;
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 8c636c493227..1001e9a2edd4 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -859,6 +859,12 @@ static void decode_data(struct sixpack *sp, unsigned char inbyte)
return;
}

+ if (sp->rx_count_cooked + 2 >= sizeof(sp->cooked_buf)) {
+ pr_err("6pack: cooked buffer overrun, data loss\n");
+ sp->rx_count = 0;
+ return;
+ }
+
buf = sp->raw_buf;
sp->cooked_buf[sp->rx_count_cooked++] =
buf[0] | ((buf[1] << 2) & 0xc0);
diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c
index 06aadebc2d5b..ed60e691cc2b 100644
--- a/drivers/net/ieee802154/mac802154_hwsim.c
+++ b/drivers/net/ieee802154/mac802154_hwsim.c
@@ -432,7 +432,7 @@ static int hwsim_new_edge_nl(struct sk_buff *msg, struct genl_info *info)
struct hwsim_edge *e;
u32 v0, v1;

- if (!info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID] &&
+ if (!info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID] ||
!info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE])
return -EINVAL;

@@ -546,7 +546,7 @@ static int hwsim_set_edge_lqi(struct sk_buff *msg, struct genl_info *info)
u32 v0, v1;
u8 lqi;

- if (!info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID] &&
+ if (!info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID] ||
!info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE])
return -EINVAL;

@@ -555,7 +555,7 @@ static int hwsim_set_edge_lqi(struct sk_buff *msg, struct genl_info *info)
hwsim_edge_policy, NULL))
return -EINVAL;

- if (!edge_attrs[MAC802154_HWSIM_EDGE_ATTR_ENDPOINT_ID] &&
+ if (!edge_attrs[MAC802154_HWSIM_EDGE_ATTR_ENDPOINT_ID] ||
!edge_attrs[MAC802154_HWSIM_EDGE_ATTR_LQI])
return -EINVAL;

diff --git a/drivers/net/phy/mdio-mux.c b/drivers/net/phy/mdio-mux.c
index 0a86f1e4c02f..c16f875ed9ea 100644
--- a/drivers/net/phy/mdio-mux.c
+++ b/drivers/net/phy/mdio-mux.c
@@ -85,6 +85,17 @@ out:

static int parent_count;

+static void mdio_mux_uninit_children(struct mdio_mux_parent_bus *pb)
+{
+ struct mdio_mux_child_bus *cb = pb->children;
+
+ while (cb) {
+ mdiobus_unregister(cb->mii_bus);
+ mdiobus_free(cb->mii_bus);
+ cb = cb->next;
+ }
+}
+
int mdio_mux_init(struct device *dev,
struct device_node *mux_node,
int (*switch_fn)(int cur, int desired, void *data),
@@ -147,7 +158,7 @@ int mdio_mux_init(struct device *dev,
cb = devm_kzalloc(dev, sizeof(*cb), GFP_KERNEL);
if (!cb) {
ret_val = -ENOMEM;
- continue;
+ goto err_loop;
}
cb->bus_number = v;
cb->parent = pb;
@@ -155,8 +166,7 @@ int mdio_mux_init(struct device *dev,
cb->mii_bus = mdiobus_alloc();
if (!cb->mii_bus) {
ret_val = -ENOMEM;
- devm_kfree(dev, cb);
- continue;
+ goto err_loop;
}
cb->mii_bus->priv = cb;

@@ -168,11 +178,15 @@ int mdio_mux_init(struct device *dev,
cb->mii_bus->write = mdio_mux_write;
r = of_mdiobus_register(cb->mii_bus, child_bus_node);
if (r) {
+ mdiobus_free(cb->mii_bus);
+ if (r == -EPROBE_DEFER) {
+ ret_val = r;
+ goto err_loop;
+ }
+ devm_kfree(dev, cb);
dev_err(dev,
"Error: Failed to register MDIO bus for child %pOF\n",
child_bus_node);
- mdiobus_free(cb->mii_bus);
- devm_kfree(dev, cb);
} else {
cb->next = pb->children;
pb->children = cb;
@@ -185,6 +199,10 @@ int mdio_mux_init(struct device *dev,

dev_err(dev, "Error: No acceptable child buses found\n");
devm_kfree(dev, pb);
+
+err_loop:
+ mdio_mux_uninit_children(pb);
+ of_node_put(child_bus_node);
err_pb_kz:
put_device(&parent_bus->dev);
err_parent_bus:
@@ -196,14 +214,8 @@ EXPORT_SYMBOL_GPL(mdio_mux_init);
void mdio_mux_uninit(void *mux_handle)
{
struct mdio_mux_parent_bus *pb = mux_handle;
- struct mdio_mux_child_bus *cb = pb->children;
-
- while (cb) {
- mdiobus_unregister(cb->mii_bus);
- mdiobus_free(cb->mii_bus);
- cb = cb->next;
- }

+ mdio_mux_uninit_children(pb);
put_device(&pb->mii_bus->dev);
}
EXPORT_SYMBOL_GPL(mdio_mux_uninit);
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 1af47aaa7ba5..dc9de8731c56 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -1125,7 +1125,7 @@ static int ppp_nl_newlink(struct net *src_net, struct net_device *dev,
* the PPP unit identifer as suffix (i.e. ppp<unit_id>). This allows
* userspace to infer the device name using to the PPPIOCGUNIT ioctl.
*/
- if (!tb[IFLA_IFNAME])
+ if (!tb[IFLA_IFNAME] || !nla_len(tb[IFLA_IFNAME]) || !*(char *)nla_data(tb[IFLA_IFNAME]))
conf.ifname_is_set = false;

err = ppp_dev_configure(src_net, dev, &conf);
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 5bd07cdb3e6e..ac5f72077b26 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -1172,7 +1172,7 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
{
struct phy_device *phydev = dev->net->phydev;
struct ethtool_link_ksettings ecmd;
- int ladv, radv, ret;
+ int ladv, radv, ret, link;
u32 buf;

/* clear LAN78xx interrupt status */
@@ -1180,9 +1180,12 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
if (unlikely(ret < 0))
return -EIO;

+ mutex_lock(&phydev->lock);
phy_read_status(phydev);
+ link = phydev->link;
+ mutex_unlock(&phydev->lock);

- if (!phydev->link && dev->link_on) {
+ if (!link && dev->link_on) {
dev->link_on = false;

/* reset MAC */
@@ -1195,7 +1198,7 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
return -EIO;

del_timer(&dev->stat_monitor);
- } else if (phydev->link && !dev->link_on) {
+ } else if (link && !dev->link_on) {
dev->link_on = true;

phy_ethtool_ksettings_get(phydev, &ecmd);
@@ -1485,9 +1488,14 @@ static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)

static u32 lan78xx_get_link(struct net_device *net)
{
+ u32 link;
+
+ mutex_lock(&net->phydev->lock);
phy_read_status(net->phydev);
+ link = net->phydev->link;
+ mutex_unlock(&net->phydev->lock);

- return net->phydev->link;
+ return link;
}

static void lan78xx_get_drvinfo(struct net_device *net,
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index 7a364eca46d6..f083fb9038c3 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -197,12 +197,13 @@ struct sk_buff *ath_rxbuf_alloc(struct ath_common *common,
bool ath_is_mybeacon(struct ath_common *common, struct ieee80211_hdr *hdr);

void ath_hw_setbssidmask(struct ath_common *common);
-void ath_key_delete(struct ath_common *common, struct ieee80211_key_conf *key);
+void ath_key_delete(struct ath_common *common, u8 hw_key_idx);
int ath_key_config(struct ath_common *common,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct ieee80211_key_conf *key);
bool ath_hw_keyreset(struct ath_common *common, u16 entry);
+bool ath_hw_keysetmac(struct ath_common *common, u16 entry, const u8 *mac);
void ath_hw_cycle_counters_update(struct ath_common *common);
int32_t ath_hw_get_listen_time(struct ath_common *common);

diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
index 16e052d02c94..0f4836fc3b7c 100644
--- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c
+++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
@@ -522,7 +522,7 @@ ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
}
break;
case DISABLE_KEY:
- ath_key_delete(common, key);
+ ath_key_delete(common, key->hw_key_idx);
break;
default:
ret = -EINVAL;
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index a82ad739ab80..16a7bae62b7d 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -1460,7 +1460,7 @@ static int ath9k_htc_set_key(struct ieee80211_hw *hw,
}
break;
case DISABLE_KEY:
- ath_key_delete(common, key);
+ ath_key_delete(common, key->hw_key_idx);
break;
default:
ret = -EINVAL;
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 68956cdc8c9a..4b5687b6c0c9 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -818,6 +818,7 @@ struct ath_hw {
struct ath9k_pacal_info pacal_info;
struct ar5416Stats stats;
struct ath9k_tx_queue_info txq[ATH9K_NUM_TX_QUEUES];
+ DECLARE_BITMAP(pending_del_keymap, ATH_KEYMAX);

enum ath9k_int imask;
u32 imrs2_reg;
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index e929020d7c9c..a0097bebcba3 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -823,12 +823,80 @@ exit:
ieee80211_free_txskb(hw, skb);
}

+static bool ath9k_txq_list_has_key(struct list_head *txq_list, u32 keyix)
+{
+ struct ath_buf *bf;
+ struct ieee80211_tx_info *txinfo;
+ struct ath_frame_info *fi;
+
+ list_for_each_entry(bf, txq_list, list) {
+ if (bf->bf_state.stale || !bf->bf_mpdu)
+ continue;
+
+ txinfo = IEEE80211_SKB_CB(bf->bf_mpdu);
+ fi = (struct ath_frame_info *)&txinfo->rate_driver_data[0];
+ if (fi->keyix == keyix)
+ return true;
+ }
+
+ return false;
+}
+
+static bool ath9k_txq_has_key(struct ath_softc *sc, u32 keyix)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ int i;
+ struct ath_txq *txq;
+ bool key_in_use = false;
+
+ for (i = 0; !key_in_use && i < ATH9K_NUM_TX_QUEUES; i++) {
+ if (!ATH_TXQ_SETUP(sc, i))
+ continue;
+ txq = &sc->tx.txq[i];
+ if (!txq->axq_depth)
+ continue;
+ if (!ath9k_hw_numtxpending(ah, txq->axq_qnum))
+ continue;
+
+ ath_txq_lock(sc, txq);
+ key_in_use = ath9k_txq_list_has_key(&txq->axq_q, keyix);
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+ int idx = txq->txq_tailidx;
+
+ while (!key_in_use &&
+ !list_empty(&txq->txq_fifo[idx])) {
+ key_in_use = ath9k_txq_list_has_key(
+ &txq->txq_fifo[idx], keyix);
+ INCR(idx, ATH_TXFIFO_DEPTH);
+ }
+ }
+ ath_txq_unlock(sc, txq);
+ }
+
+ return key_in_use;
+}
+
+static void ath9k_pending_key_del(struct ath_softc *sc, u8 keyix)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (!test_bit(keyix, ah->pending_del_keymap) ||
+ ath9k_txq_has_key(sc, keyix))
+ return;
+
+ /* No more TXQ frames point to this key cache entry, so delete it. */
+ clear_bit(keyix, ah->pending_del_keymap);
+ ath_key_delete(common, keyix);
+}
+
static void ath9k_stop(struct ieee80211_hw *hw)
{
struct ath_softc *sc = hw->priv;
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
bool prev_idle;
+ int i;

ath9k_deinit_channel_context(sc);

@@ -896,6 +964,14 @@ static void ath9k_stop(struct ieee80211_hw *hw)

spin_unlock_bh(&sc->sc_pcu_lock);

+ for (i = 0; i < ATH_KEYMAX; i++)
+ ath9k_pending_key_del(sc, i);
+
+ /* Clear key cache entries explicitly to get rid of any potentially
+ * remaining keys.
+ */
+ ath9k_cmn_init_crypto(sc->sc_ah);
+
ath9k_ps_restore(sc);

sc->ps_idle = prev_idle;
@@ -1541,12 +1617,11 @@ static void ath9k_del_ps_key(struct ath_softc *sc,
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_node *an = (struct ath_node *) sta->drv_priv;
- struct ieee80211_key_conf ps_key = { .hw_key_idx = an->ps_key };

if (!an->ps_key)
return;

- ath_key_delete(common, &ps_key);
+ ath_key_delete(common, an->ps_key);
an->ps_key = 0;
an->key_idx[0] = 0;
}
@@ -1708,6 +1783,12 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
if (sta)
an = (struct ath_node *)sta->drv_priv;

+ /* Delete pending key cache entries if no more frames are pointing to
+ * them in TXQs.
+ */
+ for (i = 0; i < ATH_KEYMAX; i++)
+ ath9k_pending_key_del(sc, i);
+
switch (cmd) {
case SET_KEY:
if (sta)
@@ -1737,7 +1818,15 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
}
break;
case DISABLE_KEY:
- ath_key_delete(common, key);
+ if (ath9k_txq_has_key(sc, key->hw_key_idx)) {
+ /* Delay key cache entry deletion until there are no
+ * remaining TXQ frames pointing to this entry.
+ */
+ set_bit(key->hw_key_idx, sc->sc_ah->pending_del_keymap);
+ ath_hw_keysetmac(common, key->hw_key_idx, NULL);
+ } else {
+ ath_key_delete(common, key->hw_key_idx);
+ }
if (an) {
for (i = 0; i < ARRAY_SIZE(an->key_idx); i++) {
if (an->key_idx[i] != key->hw_key_idx)
diff --git a/drivers/net/wireless/ath/key.c b/drivers/net/wireless/ath/key.c
index 1816b4e7dc26..61b59a804e30 100644
--- a/drivers/net/wireless/ath/key.c
+++ b/drivers/net/wireless/ath/key.c
@@ -84,8 +84,7 @@ bool ath_hw_keyreset(struct ath_common *common, u16 entry)
}
EXPORT_SYMBOL(ath_hw_keyreset);

-static bool ath_hw_keysetmac(struct ath_common *common,
- u16 entry, const u8 *mac)
+bool ath_hw_keysetmac(struct ath_common *common, u16 entry, const u8 *mac)
{
u32 macHi, macLo;
u32 unicast_flag = AR_KEYTABLE_VALID;
@@ -125,6 +124,7 @@ static bool ath_hw_keysetmac(struct ath_common *common,

return true;
}
+EXPORT_SYMBOL(ath_hw_keysetmac);

static bool ath_hw_set_keycache_entry(struct ath_common *common, u16 entry,
const struct ath_keyval *k,
@@ -581,29 +581,38 @@ EXPORT_SYMBOL(ath_key_config);
/*
* Delete Key.
*/
-void ath_key_delete(struct ath_common *common, struct ieee80211_key_conf *key)
+void ath_key_delete(struct ath_common *common, u8 hw_key_idx)
{
- ath_hw_keyreset(common, key->hw_key_idx);
- if (key->hw_key_idx < IEEE80211_WEP_NKID)
+ /* Leave CCMP and TKIP (main key) configured to avoid disabling
+ * encryption for potentially pending frames already in a TXQ with the
+ * keyix pointing to this key entry. Instead, only clear the MAC address
+ * to prevent RX processing from using this key cache entry.
+ */
+ if (test_bit(hw_key_idx, common->ccmp_keymap) ||
+ test_bit(hw_key_idx, common->tkip_keymap))
+ ath_hw_keysetmac(common, hw_key_idx, NULL);
+ else
+ ath_hw_keyreset(common, hw_key_idx);
+ if (hw_key_idx < IEEE80211_WEP_NKID)
return;

- clear_bit(key->hw_key_idx, common->keymap);
- clear_bit(key->hw_key_idx, common->ccmp_keymap);
- if (key->cipher != WLAN_CIPHER_SUITE_TKIP)
+ clear_bit(hw_key_idx, common->keymap);
+ clear_bit(hw_key_idx, common->ccmp_keymap);
+ if (!test_bit(hw_key_idx, common->tkip_keymap))
return;

- clear_bit(key->hw_key_idx + 64, common->keymap);
+ clear_bit(hw_key_idx + 64, common->keymap);

- clear_bit(key->hw_key_idx, common->tkip_keymap);
- clear_bit(key->hw_key_idx + 64, common->tkip_keymap);
+ clear_bit(hw_key_idx, common->tkip_keymap);
+ clear_bit(hw_key_idx + 64, common->tkip_keymap);

if (!(common->crypt_caps & ATH_CRYPT_CAP_MIC_COMBINED)) {
- ath_hw_keyreset(common, key->hw_key_idx + 32);
- clear_bit(key->hw_key_idx + 32, common->keymap);
- clear_bit(key->hw_key_idx + 64 + 32, common->keymap);
+ ath_hw_keyreset(common, hw_key_idx + 32);
+ clear_bit(hw_key_idx + 32, common->keymap);
+ clear_bit(hw_key_idx + 64 + 32, common->keymap);

- clear_bit(key->hw_key_idx + 32, common->tkip_keymap);
- clear_bit(key->hw_key_idx + 64 + 32, common->tkip_keymap);
+ clear_bit(hw_key_idx + 32, common->tkip_keymap);
+ clear_bit(hw_key_idx + 64 + 32, common->tkip_keymap);
}
}
EXPORT_SYMBOL(ath_key_delete);
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 23a363fd4c59..bc80b0f0ea1b 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -170,24 +170,25 @@ static inline __attribute_const__ u32 msi_mask(unsigned x)
* reliably as devices without an INTx disable bit will then generate a
* level IRQ which will never be cleared.
*/
-u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
+void __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
{
- u32 mask_bits = desc->masked;
+ raw_spinlock_t *lock = &desc->dev->msi_lock;
+ unsigned long flags;

if (pci_msi_ignore_mask || !desc->msi_attrib.maskbit)
- return 0;
+ return;

- mask_bits &= ~mask;
- mask_bits |= flag;
+ raw_spin_lock_irqsave(lock, flags);
+ desc->masked &= ~mask;
+ desc->masked |= flag;
pci_write_config_dword(msi_desc_to_pci_dev(desc), desc->mask_pos,
- mask_bits);
-
- return mask_bits;
+ desc->masked);
+ raw_spin_unlock_irqrestore(lock, flags);
}

static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
{
- desc->masked = __pci_msi_desc_mask_irq(desc, mask, flag);
+ __pci_msi_desc_mask_irq(desc, mask, flag);
}

static void __iomem *pci_msix_desc_addr(struct msi_desc *desc)
@@ -302,10 +303,28 @@ void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
/* Don't touch the hardware now */
} else if (entry->msi_attrib.is_msix) {
void __iomem *base = pci_msix_desc_addr(entry);
+ bool unmasked = !(entry->masked & PCI_MSIX_ENTRY_CTRL_MASKBIT);
+
+ /*
+ * The specification mandates that the entry is masked
+ * when the message is modified:
+ *
+ * "If software changes the Address or Data value of an
+ * entry while the entry is unmasked, the result is
+ * undefined."
+ */
+ if (unmasked)
+ __pci_msix_desc_mask_irq(entry, PCI_MSIX_ENTRY_CTRL_MASKBIT);

writel(msg->address_lo, base + PCI_MSIX_ENTRY_LOWER_ADDR);
writel(msg->address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR);
writel(msg->data, base + PCI_MSIX_ENTRY_DATA);
+
+ if (unmasked)
+ __pci_msix_desc_mask_irq(entry, 0);
+
+ /* Ensure that the writes are visible in the device */
+ readl(base + PCI_MSIX_ENTRY_DATA);
} else {
int pos = dev->msi_cap;
u16 msgctl;
@@ -326,6 +345,8 @@ void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
pci_write_config_word(dev, pos + PCI_MSI_DATA_32,
msg->data);
}
+ /* Ensure that the writes are visible in the device */
+ pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &msgctl);
}
entry->msg = *msg;
}
@@ -619,21 +640,21 @@ static int msi_capability_init(struct pci_dev *dev, int nvec,
/* Configure MSI capability structure */
ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
if (ret) {
- msi_mask_irq(entry, mask, ~mask);
+ msi_mask_irq(entry, mask, 0);
free_msi_irqs(dev);
return ret;
}

ret = msi_verify_entries(dev);
if (ret) {
- msi_mask_irq(entry, mask, ~mask);
+ msi_mask_irq(entry, mask, 0);
free_msi_irqs(dev);
return ret;
}

ret = populate_msi_sysfs(dev);
if (ret) {
- msi_mask_irq(entry, mask, ~mask);
+ msi_mask_irq(entry, mask, 0);
free_msi_irqs(dev);
return ret;
}
@@ -674,6 +695,7 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
{
struct cpumask *curmsk, *masks = NULL;
struct msi_desc *entry;
+ void __iomem *addr;
int ret, i;

if (affd)
@@ -693,6 +715,7 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,

entry->msi_attrib.is_msix = 1;
entry->msi_attrib.is_64 = 1;
+
if (entries)
entry->msi_attrib.entry_nr = entries[i].entry;
else
@@ -700,6 +723,10 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
entry->msi_attrib.default_irq = dev->irq;
entry->mask_base = base;

+ addr = pci_msix_desc_addr(entry);
+ if (addr)
+ entry->masked = readl(addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
+
list_add_tail(&entry->list, dev_to_msi_list(&dev->dev));
if (masks)
curmsk++;
@@ -710,21 +737,27 @@ out:
return ret;
}

-static void msix_program_entries(struct pci_dev *dev,
- struct msix_entry *entries)
+static void msix_update_entries(struct pci_dev *dev, struct msix_entry *entries)
{
struct msi_desc *entry;
- int i = 0;

for_each_pci_msi_entry(entry, dev) {
- if (entries)
- entries[i++].vector = entry->irq;
- entry->masked = readl(pci_msix_desc_addr(entry) +
- PCI_MSIX_ENTRY_VECTOR_CTRL);
- msix_mask_irq(entry, 1);
+ if (entries) {
+ entries->vector = entry->irq;
+ entries++;
+ }
}
}

+static void msix_mask_all(void __iomem *base, int tsize)
+{
+ u32 ctrl = PCI_MSIX_ENTRY_CTRL_MASKBIT;
+ int i;
+
+ for (i = 0; i < tsize; i++, base += PCI_MSIX_ENTRY_SIZE)
+ writel(ctrl, base + PCI_MSIX_ENTRY_VECTOR_CTRL);
+}
+
/**
* msix_capability_init - configure device's MSI-X capability
* @dev: pointer to the pci_dev data structure of MSI-X device function
@@ -739,22 +772,33 @@ static void msix_program_entries(struct pci_dev *dev,
static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
int nvec, const struct irq_affinity *affd)
{
- int ret;
- u16 control;
void __iomem *base;
+ int ret, tsize;
+ u16 control;

- /* Ensure MSI-X is disabled while it is set up */
- pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
+ /*
+ * Some devices require MSI-X to be enabled before the MSI-X
+ * registers can be accessed. Mask all the vectors to prevent
+ * interrupts coming in before they're fully set up.
+ */
+ pci_msix_clear_and_set_ctrl(dev, 0, PCI_MSIX_FLAGS_MASKALL |
+ PCI_MSIX_FLAGS_ENABLE);

pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);
/* Request & Map MSI-X table region */
- base = msix_map_region(dev, msix_table_size(control));
- if (!base)
- return -ENOMEM;
+ tsize = msix_table_size(control);
+ base = msix_map_region(dev, tsize);
+ if (!base) {
+ ret = -ENOMEM;
+ goto out_disable;
+ }
+
+ /* Ensure that all table entries are masked. */
+ msix_mask_all(base, tsize);

ret = msix_setup_entries(dev, base, entries, nvec, affd);
if (ret)
- return ret;
+ goto out_disable;

ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
if (ret)
@@ -765,15 +809,7 @@ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
if (ret)
goto out_free;

- /*
- * Some devices require MSI-X to be enabled before we can touch the
- * MSI-X registers. We need to mask all the vectors to prevent
- * interrupts coming in before they're fully set up.
- */
- pci_msix_clear_and_set_ctrl(dev, 0,
- PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE);
-
- msix_program_entries(dev, entries);
+ msix_update_entries(dev, entries);

ret = populate_msi_sysfs(dev);
if (ret)
@@ -807,6 +843,9 @@ out_avail:
out_free:
free_msi_irqs(dev);

+out_disable:
+ pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
+
return ret;
}

@@ -894,8 +933,7 @@ static void pci_msi_shutdown(struct pci_dev *dev)

/* Return the device with MSI unmasked as initial states */
mask = msi_mask(desc->msi_attrib.multi_cap);
- /* Keep cached state to be restored */
- __pci_msi_desc_mask_irq(desc, mask, ~mask);
+ msi_mask_irq(desc, mask, 0);

/* Restore dev->irq to its default pin-assertion irq */
dev->irq = desc->msi_attrib.default_irq;
@@ -980,10 +1018,8 @@ static void pci_msix_shutdown(struct pci_dev *dev)
}

/* Return the device with MSI-X masked as initial states */
- for_each_pci_msi_entry(entry, dev) {
- /* Keep cached states to be restored */
+ for_each_pci_msi_entry(entry, dev)
__pci_msix_desc_mask_irq(entry, 1);
- }

pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
pci_intx_for_msi(dev, 1);
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index f287a9f919da..7e873b6b7d55 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1882,6 +1882,7 @@ static void quirk_ryzen_xhci_d3hot(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15e0, quirk_ryzen_xhci_d3hot);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15e1, quirk_ryzen_xhci_d3hot);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1639, quirk_ryzen_xhci_d3hot);

#ifdef CONFIG_X86_IO_APIC
static int dmi_disable_ioapicreroute(const struct dmi_system_id *d)
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
index d137c480db46..dd04aedd76e0 100644
--- a/drivers/ptp/Kconfig
+++ b/drivers/ptp/Kconfig
@@ -91,7 +91,8 @@ config DP83640_PHY
config PTP_1588_CLOCK_PCH
tristate "Intel PCH EG20T as PTP clock"
depends on X86_32 || COMPILE_TEST
- depends on HAS_IOMEM && NET
+ depends on HAS_IOMEM && PCI
+ depends on NET
imply PTP_1588_CLOCK
help
This driver adds support for using the PCH EG20T as a PTP
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 6c629ef1bc4e..b3c23edd4b6c 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -453,8 +453,8 @@ static int initialize_controller(struct scsi_device *sdev,
if (!h->ctlr)
err = SCSI_DH_RES_TEMP_UNAVAIL;
else {
- list_add_rcu(&h->node, &h->ctlr->dh_list);
h->sdev = sdev;
+ list_add_rcu(&h->node, &h->ctlr->dh_list);
}
spin_unlock(&list_lock);
err = SCSI_DH_OK;
@@ -779,11 +779,11 @@ static void rdac_bus_detach( struct scsi_device *sdev )
spin_lock(&list_lock);
if (h->ctlr) {
list_del_rcu(&h->node);
- h->sdev = NULL;
kref_put(&h->ctlr->kref, release_controller);
}
spin_unlock(&list_lock);
sdev->handler_data = NULL;
+ synchronize_rcu();
kfree(h);
}

diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c
index 8428247015db..81df2c94b747 100644
--- a/drivers/scsi/megaraid/megaraid_mm.c
+++ b/drivers/scsi/megaraid/megaraid_mm.c
@@ -250,7 +250,7 @@ mraid_mm_get_adapter(mimd_t __user *umimd, int *rval)
mimd_t mimd;
uint32_t adapno;
int iterator;
-
+ bool is_found;

if (copy_from_user(&mimd, umimd, sizeof(mimd_t))) {
*rval = -EFAULT;
@@ -266,12 +266,16 @@ mraid_mm_get_adapter(mimd_t __user *umimd, int *rval)

adapter = NULL;
iterator = 0;
+ is_found = false;

list_for_each_entry(adapter, &adapters_list_g, list) {
- if (iterator++ == adapno) break;
+ if (iterator++ == adapno) {
+ is_found = true;
+ break;
+ }
}

- if (!adapter) {
+ if (!is_found) {
*rval = -ENODEV;
return NULL;
}
@@ -737,6 +741,7 @@ ioctl_done(uioc_t *kioc)
uint32_t adapno;
int iterator;
mraid_mmadp_t* adapter;
+ bool is_found;

/*
* When the kioc returns from driver, make sure it still doesn't
@@ -759,19 +764,23 @@ ioctl_done(uioc_t *kioc)
iterator = 0;
adapter = NULL;
adapno = kioc->adapno;
+ is_found = false;

con_log(CL_ANN, ( KERN_WARNING "megaraid cmm: completed "
"ioctl that was timedout before\n"));

list_for_each_entry(adapter, &adapters_list_g, list) {
- if (iterator++ == adapno) break;
+ if (iterator++ == adapno) {
+ is_found = true;
+ break;
+ }
}

kioc->timedout = 0;

- if (adapter) {
+ if (is_found)
mraid_mm_dealloc_kioc( adapter, kioc );
- }
+
}
else {
wake_up(&wait_q);
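
As background for the megaraid_mm change above: a list_for_each_entry() cursor is never NULL after the loop, even when nothing matched, because it ends up pointing at the list head cast to the entry type. A minimal kernel-C sketch of the found-flag idiom, using hypothetical struct and function names rather than anything from this patch:

#include <linux/list.h>
#include <linux/types.h>

struct foo {
        int id;
        struct list_head node;
};

static struct foo *foo_lookup(struct list_head *head, int id)
{
        struct foo *f;
        bool found = false;

        list_for_each_entry(f, head, node) {
                if (f->id == id) {
                        found = true;
                        break;
                }
        }
        /* If the loop ran to completion, f points at the list head cast
         * to struct foo, not at NULL, so test the flag instead. */
        return found ? f : NULL;
}
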
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 009a5b2aa3d0..149465de35b2 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -462,7 +462,8 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
error = shost->hostt->target_alloc(starget);

if(error) {
- dev_printk(KERN_ERR, dev, "target allocation failed, error %d\n", error);
+ if (error != -ENXIO)
+ dev_err(dev, "target allocation failed, error %d\n", error);
/* don't want scsi_target_reap to do the final
* put because it will be under the host lock */
scsi_target_destroy(starget);
diff --git a/drivers/slimbus/messaging.c b/drivers/slimbus/messaging.c
index d5879142dbef..ddf0371ad52b 100644
--- a/drivers/slimbus/messaging.c
+++ b/drivers/slimbus/messaging.c
@@ -66,7 +66,7 @@ int slim_alloc_txn_tid(struct slim_controller *ctrl, struct slim_msg_txn *txn)
int ret = 0;

spin_lock_irqsave(&ctrl->txn_lock, flags);
- ret = idr_alloc_cyclic(&ctrl->tid_idr, txn, 0,
+ ret = idr_alloc_cyclic(&ctrl->tid_idr, txn, 1,
SLIM_MAX_TIDS, GFP_ATOMIC);
if (ret < 0) {
spin_unlock_irqrestore(&ctrl->txn_lock, flags);
@@ -131,7 +131,8 @@ int slim_do_transfer(struct slim_controller *ctrl, struct slim_msg_txn *txn)
goto slim_xfer_err;
}
}
-
+ /* Initialize tid to invalid value */
+ txn->tid = 0;
need_tid = slim_tid_txn(txn->mt, txn->mc);

if (need_tid) {
@@ -163,7 +164,7 @@ int slim_do_transfer(struct slim_controller *ctrl, struct slim_msg_txn *txn)
txn->mt, txn->mc, txn->la, ret);

slim_xfer_err:
- if (!clk_pause_msg && (!need_tid || ret == -ETIMEDOUT)) {
+ if (!clk_pause_msg && (txn->tid == 0 || ret == -ETIMEDOUT)) {
/*
* remove runtime-pm vote if this was TX only, or
* if there was error during this transaction
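
The slim_do_transfer() change above treats tid == 0 as "no TID was allocated", which only works because slim_alloc_txn_tid() now asks the IDR for IDs starting at 1. A small sketch of that convention, with hypothetical names and an assumed TID limit of 256:

#include <linux/gfp.h>
#include <linux/idr.h>
#include <linux/types.h>

struct txn {
        u8 tid;         /* 0 means "no TID assigned yet" */
};

static int txn_assign_tid(struct idr *tid_idr, struct txn *txn)
{
        /* Start at 1 so a zero-initialized tid can serve as the sentinel. */
        int id = idr_alloc_cyclic(tid_idr, txn, 1, 256, GFP_ATOMIC);

        if (id < 0)
                return id;
        txn->tid = id;
        return 0;
}
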
diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c
index 44021620d101..1a5311fb45a5 100644
--- a/drivers/slimbus/qcom-ngd-ctrl.c
+++ b/drivers/slimbus/qcom-ngd-ctrl.c
@@ -1060,7 +1060,8 @@ static void qcom_slim_ngd_setup(struct qcom_slim_ngd_ctrl *ctrl)
{
u32 cfg = readl_relaxed(ctrl->ngd->base);

- if (ctrl->state == QCOM_SLIM_NGD_CTRL_DOWN)
+ if (ctrl->state == QCOM_SLIM_NGD_CTRL_DOWN ||
+ ctrl->state == QCOM_SLIM_NGD_CTRL_ASLEEP)
qcom_slim_ngd_init_dma(ctrl);

/* By default enable message queues */
@@ -1111,6 +1112,7 @@ static int qcom_slim_ngd_power_up(struct qcom_slim_ngd_ctrl *ctrl)
dev_info(ctrl->dev, "Subsys restart: ADSP active framer\n");
return 0;
}
+ qcom_slim_ngd_setup(ctrl);
return 0;
}

@@ -1496,6 +1498,7 @@ static int __maybe_unused qcom_slim_ngd_runtime_suspend(struct device *dev)
struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(dev);
int ret = 0;

+ qcom_slim_ngd_exit_dma(ctrl);
if (!ctrl->qmi.handle)
return 0;

diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 732327756ee1..7a58f629155d 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -678,10 +678,16 @@ static bool log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
(sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);
}

+/* Make sure 64 bit math will not overflow. */
static bool vhost_overflow(u64 uaddr, u64 size)
{
- /* Make sure 64 bit math will not overflow. */
- return uaddr > ULONG_MAX || size > ULONG_MAX || uaddr > ULONG_MAX - size;
+ if (uaddr > ULONG_MAX || size > ULONG_MAX)
+ return true;
+
+ if (!size)
+ return false;
+
+ return uaddr > ULONG_MAX - size + 1;
}

/* Caller should have vq mutex and device mutex. */
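
For the vhost_overflow() rewrite above, the boundary case is a range whose last byte lands exactly on ULONG_MAX: with size = 16, uaddr = ULONG_MAX - 15 is now accepted while uaddr = ULONG_MAX - 14 is still rejected. A standalone restatement of the same arithmetic, under a hypothetical helper name:

#include <linux/kernel.h>
#include <linux/types.h>

/* A range overflows unless its last byte, uaddr + size - 1, still fits
 * in an unsigned long; written without intermediate overflow. */
static bool range_overflows(u64 uaddr, u64 size)
{
        if (uaddr > ULONG_MAX || size > ULONG_MAX)
                return true;
        if (!size)              /* an empty range cannot overflow */
                return false;
        return uaddr > ULONG_MAX - size + 1;
}
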
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index a2f8130e18fe..d138027034fd 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -133,12 +133,12 @@ static void disable_dynirq(struct irq_data *data);

static DEFINE_PER_CPU(unsigned int, irq_epoch);

-static void clear_evtchn_to_irq_row(unsigned row)
+static void clear_evtchn_to_irq_row(int *evtchn_row)
{
unsigned col;

for (col = 0; col < EVTCHN_PER_ROW; col++)
- WRITE_ONCE(evtchn_to_irq[row][col], -1);
+ WRITE_ONCE(evtchn_row[col], -1);
}

static void clear_evtchn_to_irq_all(void)
@@ -148,7 +148,7 @@ static void clear_evtchn_to_irq_all(void)
for (row = 0; row < EVTCHN_ROW(xen_evtchn_max_channels()); row++) {
if (evtchn_to_irq[row] == NULL)
continue;
- clear_evtchn_to_irq_row(row);
+ clear_evtchn_to_irq_row(evtchn_to_irq[row]);
}
}

@@ -156,6 +156,7 @@ static int set_evtchn_to_irq(unsigned evtchn, unsigned irq)
{
unsigned row;
unsigned col;
+ int *evtchn_row;

if (evtchn >= xen_evtchn_max_channels())
return -EINVAL;
@@ -168,11 +169,18 @@ static int set_evtchn_to_irq(unsigned evtchn, unsigned irq)
if (irq == -1)
return 0;

- evtchn_to_irq[row] = (int *)get_zeroed_page(GFP_KERNEL);
- if (evtchn_to_irq[row] == NULL)
+ evtchn_row = (int *) __get_free_pages(GFP_KERNEL, 0);
+ if (evtchn_row == NULL)
return -ENOMEM;

- clear_evtchn_to_irq_row(row);
+ clear_evtchn_to_irq_row(evtchn_row);
+
+ /*
+ * We've prepared an empty row for the mapping. If a different
+ * thread was faster inserting it, we can drop ours.
+ */
+ if (cmpxchg(&evtchn_to_irq[row], NULL, evtchn_row) != NULL)
+ free_page((unsigned long) evtchn_row);
}

WRITE_ONCE(evtchn_to_irq[row][col], irq);
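
The cmpxchg() added above is the usual publish-or-free pattern for lazily populated tables: build the object locally, try to install it, and discard the local copy if another CPU installed one first. A generic sketch with hypothetical names (the real code installs a page-sized row rather than a small allocation):

#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct foo {
        int dummy;
};

static int publish_entry(struct foo **slot)
{
        struct foo *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

        if (!obj)
                return -ENOMEM;

        /* Install our object only if the slot is still empty. */
        if (cmpxchg(slot, NULL, obj) != NULL)
                kfree(obj);     /* another CPU won the race; drop ours */

        return 0;
}
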
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index d29f4cf125d2..6f02a3f77fa8 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -9556,8 +9556,14 @@ static int btrfs_rename_exchange(struct inode *old_dir,
bool sync_log_dest = false;
bool commit_transaction = false;

- /* we only allow rename subvolume link between subvolumes */
- if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
+ /*
+ * For non-subvolumes allow exchange only within one subvolume, in the
+ * same inode namespace. Two subvolumes (represented as directory) can
+ * be exchanged as they're a logical link and have a fixed inode number.
+ */
+ if (root != dest &&
+ (old_ino != BTRFS_FIRST_FREE_OBJECTID ||
+ new_ino != BTRFS_FIRST_FREE_OBJECTID))
return -EXDEV;

btrfs_init_log_ctx(&ctx_root, old_inode);
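
Read positively, the new -EXDEV condition above allows an exchange when both inodes live in the same subvolume, or when both are subvolume roots (which always carry inode number BTRFS_FIRST_FREE_OBJECTID). A sketch of that predicate with a hypothetical helper name:

#include <linux/types.h>

#define BTRFS_FIRST_FREE_OBJECTID 256ULL        /* value from the btrfs headers */

static bool exchange_allowed(u64 old_ino, u64 new_ino, bool same_root)
{
        if (same_root)
                return true;
        /* Across subvolumes, only two subvolume roots may be exchanged. */
        return old_ino == BTRFS_FIRST_FREE_OBJECTID &&
               new_ino == BTRFS_FIRST_FREE_OBJECTID;
}
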
diff --git a/fs/namespace.c b/fs/namespace.c
index edd397fa2991..2f3c6a0350a8 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1610,13 +1610,22 @@ static inline bool may_mount(void)
return ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN);
}

+#ifdef CONFIG_MANDATORY_FILE_LOCKING
+static bool may_mandlock(void)
+{
+ pr_warn_once("======================================================\n"
+ "WARNING: the mand mount option is being deprecated and\n"
+ " will be removed in v5.15!\n"
+ "======================================================\n");
+ return capable(CAP_SYS_ADMIN);
+}
+#else
static inline bool may_mandlock(void)
{
-#ifndef CONFIG_MANDATORY_FILE_LOCKING
+ pr_warn("VFS: \"mand\" mount option not supported");
return false;
-#endif
- return capable(CAP_SYS_ADMIN);
}
+#endif

/*
* Now umount can handle mount points as well as block devices.
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index ad8766e1635e..a26e6f5034a6 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -508,6 +508,7 @@
NOINSTR_TEXT \
*(.text..refcount) \
*(.ref.text) \
+ *(.text.asan.* .text.tsan.*) \
MEM_KEEP(init.text*) \
MEM_KEEP(exit.text*) \

diff --git a/include/linux/device.h b/include/linux/device.h
index b1c8150e9ea5..37e359d81a86 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -998,6 +998,7 @@ struct device {
struct dev_pin_info *pins;
#endif
#ifdef CONFIG_GENERIC_MSI_IRQ
+ raw_spinlock_t msi_lock;
struct list_head msi_list;
#endif

diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index a64f21a97369..131f93f8d587 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -41,7 +41,7 @@ struct in_device {
unsigned long mr_qri; /* Query Response Interval */
unsigned char mr_qrv; /* Query Robustness Variable */
unsigned char mr_gq_running;
- unsigned char mr_ifc_count;
+ u32 mr_ifc_count;
struct timer_list mr_gq_timer; /* general query timer */
struct timer_list mr_ifc_timer; /* interface change timer */

diff --git a/include/linux/irq.h b/include/linux/irq.h
index a042faefb9b7..9504267414a4 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -535,6 +535,7 @@ struct irq_chip {
* IRQCHIP_ONESHOT_SAFE: One shot does not require mask/unmask
* IRQCHIP_EOI_THREADED: Chip requires eoi() on unmask in threaded mode
* IRQCHIP_SUPPORTS_LEVEL_MSI Chip can provide two doorbells for Level MSIs
+ * IRQCHIP_AFFINITY_PRE_STARTUP: Default affinity update before startup
*/
enum {
IRQCHIP_SET_TYPE_MASKED = (1 << 0),
@@ -545,6 +546,7 @@ enum {
IRQCHIP_ONESHOT_SAFE = (1 << 5),
IRQCHIP_EOI_THREADED = (1 << 6),
IRQCHIP_SUPPORTS_LEVEL_MSI = (1 << 7),
+ IRQCHIP_AFFINITY_PRE_STARTUP = (1 << 10),
};

#include <linux/irqdesc.h>
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 5dd171849a27..62982e6afddf 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -150,7 +150,7 @@ void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);

u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag);
-u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag);
+void __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag);
void pci_msi_mask_irq(struct irq_data *data);
void pci_msi_unmask_irq(struct irq_data *data);

diff --git a/include/net/psample.h b/include/net/psample.h
index 94cb37a7bf75..796f01e5635d 100644
--- a/include/net/psample.h
+++ b/include/net/psample.h
@@ -18,6 +18,8 @@ struct psample_group {
struct psample_group *psample_group_get(struct net *net, u32 group_num);
void psample_group_put(struct psample_group *group);

+struct sk_buff;
+
#if IS_ENABLED(CONFIG_PSAMPLE)

void psample_sample_packet(struct psample_group *group, struct sk_buff *skb,
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 09d914e486a2..9afbd89b6096 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -265,8 +265,11 @@ int irq_startup(struct irq_desc *desc, bool resend, bool force)
} else {
switch (__irq_startup_managed(desc, aff, force)) {
case IRQ_STARTUP_NORMAL:
+ if (d->chip->flags & IRQCHIP_AFFINITY_PRE_STARTUP)
+ irq_setup_affinity(desc);
ret = __irq_startup(desc);
- irq_setup_affinity(desc);
+ if (!(d->chip->flags & IRQCHIP_AFFINITY_PRE_STARTUP))
+ irq_setup_affinity(desc);
break;
case IRQ_STARTUP_MANAGED:
irq_do_set_affinity(d, aff, false);
diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c
index 604974f2afb1..88269dd5a8ca 100644
--- a/kernel/irq/msi.c
+++ b/kernel/irq/msi.c
@@ -477,11 +477,6 @@ skip_activate:
return 0;

cleanup:
- for_each_msi_vector(desc, i, dev) {
- irq_data = irq_domain_get_irq_data(domain, i);
- if (irqd_is_activated(irq_data))
- irq_domain_deactivate_irq(irq_data);
- }
msi_domain_free_irqs(domain, dev);
return ret;
}
@@ -494,7 +489,15 @@ cleanup:
*/
void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev)
{
+ struct irq_data *irq_data;
struct msi_desc *desc;
+ int i;
+
+ for_each_msi_vector(desc, i, dev) {
+ irq_data = irq_domain_get_irq_data(domain, i);
+ if (irqd_is_activated(irq_data))
+ irq_domain_deactivate_irq(irq_data);
+ }

for_each_msi_entry(desc, dev) {
/*
diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
index bbde8d3d6c8a..44d1340634f6 100644
--- a/kernel/trace/trace_events_hist.c
+++ b/kernel/trace/trace_events_hist.c
@@ -3786,6 +3786,8 @@ onmatch_create_field_var(struct hist_trigger_data *hist_data,
event = data->onmatch.match_event;
}

+ if (!event)
+ goto free;
/*
* At this point, we're looking at a field on another
* event. Because we can't modify a hist trigger on
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 253975cce943..0cbd0bca971f 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -1282,7 +1282,7 @@ static int hidp_session_thread(void *arg)

/* cleanup runtime environment */
remove_wait_queue(sk_sleep(session->intr_sock->sk), &intr_wait);
- remove_wait_queue(sk_sleep(session->intr_sock->sk), &ctrl_wait);
+ remove_wait_queue(sk_sleep(session->ctrl_sock->sk), &ctrl_wait);
wake_up_interruptible(&session->report_queue);
hidp_del_timer(session);

diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 5aa508a08a69..b5fb2b682e19 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -604,6 +604,7 @@ int br_add_if(struct net_bridge *br, struct net_device *dev,

err = dev_set_allmulti(dev, 1);
if (err) {
+ br_multicast_del_port(p);
kfree(p); /* kobject not yet init'd, manually free */
goto err1;
}
@@ -708,6 +709,7 @@ err4:
err3:
sysfs_remove_link(br->ifobj, p->dev->name);
err2:
+ br_multicast_del_port(p);
kobject_put(&p->kobj);
dev_set_allmulti(dev, -1);
err1:
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index f91e3816806b..aec3c724665f 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -44,9 +44,9 @@ extern bool dccp_debug;
#define dccp_pr_debug_cat(format, a...) DCCP_PRINTK(dccp_debug, format, ##a)
#define dccp_debug(fmt, a...) dccp_pr_debug_cat(KERN_DEBUG fmt, ##a)
#else
-#define dccp_pr_debug(format, a...)
-#define dccp_pr_debug_cat(format, a...)
-#define dccp_debug(format, a...)
+#define dccp_pr_debug(format, a...) do {} while (0)
+#define dccp_pr_debug_cat(format, a...) do {} while (0)
+#define dccp_debug(format, a...) do {} while (0)
#endif

extern struct inet_hashinfo dccp_hashinfo;
diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c
index 89819745e482..14c6fac039f9 100644
--- a/net/ieee802154/socket.c
+++ b/net/ieee802154/socket.c
@@ -1002,6 +1002,11 @@ static const struct proto_ops ieee802154_dgram_ops = {
#endif
};

+static void ieee802154_sock_destruct(struct sock *sk)
+{
+ skb_queue_purge(&sk->sk_receive_queue);
+}
+
/* Create a socket. Initialise the socket, blank the addresses
* set the state.
*/
@@ -1042,7 +1047,7 @@ static int ieee802154_create(struct net *net, struct socket *sock,
sock->ops = ops;

sock_init_data(sock, sk);
- /* FIXME: sk->sk_destruct */
+ sk->sk_destruct = ieee802154_sock_destruct;
sk->sk_family = PF_IEEE802154;

/* Checksums on by default */
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index ffa847fc9619..dca7fe0ae24a 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -807,10 +807,17 @@ static void igmp_gq_timer_expire(struct timer_list *t)
static void igmp_ifc_timer_expire(struct timer_list *t)
{
struct in_device *in_dev = from_timer(in_dev, t, mr_ifc_timer);
+ u32 mr_ifc_count;

igmpv3_send_cr(in_dev);
- if (in_dev->mr_ifc_count) {
- in_dev->mr_ifc_count--;
+restart:
+ mr_ifc_count = READ_ONCE(in_dev->mr_ifc_count);
+
+ if (mr_ifc_count) {
+ if (cmpxchg(&in_dev->mr_ifc_count,
+ mr_ifc_count,
+ mr_ifc_count - 1) != mr_ifc_count)
+ goto restart;
igmp_ifc_start_timer(in_dev,
unsolicited_report_interval(in_dev));
}
@@ -822,7 +829,7 @@ static void igmp_ifc_event(struct in_device *in_dev)
struct net *net = dev_net(in_dev->dev);
if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev))
return;
- in_dev->mr_ifc_count = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
+ WRITE_ONCE(in_dev->mr_ifc_count, in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv);
igmp_ifc_start_timer(in_dev, 1);
}

@@ -961,7 +968,7 @@ static bool igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
in_dev->mr_qri;
}
/* cancel the interface change timer */
- in_dev->mr_ifc_count = 0;
+ WRITE_ONCE(in_dev->mr_ifc_count, 0);
if (del_timer(&in_dev->mr_ifc_timer))
__in_dev_put(in_dev);
/* clear deleted report items */
@@ -1739,7 +1746,7 @@ void ip_mc_down(struct in_device *in_dev)
igmp_group_dropped(pmc);

#ifdef CONFIG_IP_MULTICAST
- in_dev->mr_ifc_count = 0;
+ WRITE_ONCE(in_dev->mr_ifc_count, 0);
if (del_timer(&in_dev->mr_ifc_timer))
__in_dev_put(in_dev);
in_dev->mr_gq_running = 0;
@@ -1956,7 +1963,7 @@ static int ip_mc_del_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
pmc->sfmode = MCAST_INCLUDE;
#ifdef CONFIG_IP_MULTICAST
pmc->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
- in_dev->mr_ifc_count = pmc->crcount;
+ WRITE_ONCE(in_dev->mr_ifc_count, pmc->crcount);
for (psf = pmc->sources; psf; psf = psf->sf_next)
psf->sf_crcount = 0;
igmp_ifc_event(pmc->interface);
@@ -2135,7 +2142,7 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
/* else no filters; keep old mode for reports */

pmc->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
- in_dev->mr_ifc_count = pmc->crcount;
+ WRITE_ONCE(in_dev->mr_ifc_count, pmc->crcount);
for (psf = pmc->sources; psf; psf = psf->sf_next)
psf->sf_crcount = 0;
igmp_ifc_event(in_dev);
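
The mr_ifc_count handling above amounts to a lock-free decrement: re-read the counter, try to install old - 1 with cmpxchg(), and retry if another CPU changed the value in between. A minimal sketch of that loop under a hypothetical helper name:

#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/types.h>

static bool dec_if_nonzero(u32 *counter)
{
        u32 old;

        do {
                old = READ_ONCE(*counter);
                if (!old)
                        return false;   /* nothing to decrement */
        } while (cmpxchg(counter, old, old - 1) != old);

        return true;
}
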
diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
index b70c9365e131..1740de053072 100644
--- a/net/ipv4/tcp_bbr.c
+++ b/net/ipv4/tcp_bbr.c
@@ -985,7 +985,7 @@ static void bbr_init(struct sock *sk)
bbr->prior_cwnd = 0;
tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
bbr->rtt_cnt = 0;
- bbr->next_rtt_delivered = 0;
+ bbr->next_rtt_delivered = tp->delivered;
bbr->prev_ca_state = TCP_CA_Open;
bbr->packet_conservation = 0;

diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index 4105081dc1df..6f390c2e4c8e 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -80,6 +80,7 @@ static const char * const sta_flag_names[] = {
FLAG(MPSP_OWNER),
FLAG(MPSP_RECIPIENT),
FLAG(PS_DELIVER),
+ FLAG(USES_ENCRYPTION),
#undef FLAG
};

diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 6775d6cb7d3d..7fc55177db84 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -341,6 +341,7 @@ static void ieee80211_key_replace(struct ieee80211_sub_if_data *sdata,
if (sta) {
if (pairwise) {
rcu_assign_pointer(sta->ptk[idx], new);
+ set_sta_flag(sta, WLAN_STA_USES_ENCRYPTION);
sta->ptk_idx = idx;
ieee80211_check_fast_xmit(sta);
} else {
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index c33bc5fc0f2d..75d982ff7f3d 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -102,6 +102,7 @@ enum ieee80211_sta_info_flags {
WLAN_STA_MPSP_OWNER,
WLAN_STA_MPSP_RECIPIENT,
WLAN_STA_PS_DELIVER,
+ WLAN_STA_USES_ENCRYPTION,

NUM_WLAN_STA_FLAGS,
};
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 98d048630ad2..3530d1a5fc98 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -593,10 +593,13 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;

- if (unlikely(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT))
+ if (unlikely(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT)) {
tx->key = NULL;
- else if (tx->sta &&
- (key = rcu_dereference(tx->sta->ptk[tx->sta->ptk_idx])))
+ return TX_CONTINUE;
+ }
+
+ if (tx->sta &&
+ (key = rcu_dereference(tx->sta->ptk[tx->sta->ptk_idx])))
tx->key = key;
else if (ieee80211_is_group_privacy_action(tx->skb) &&
(key = rcu_dereference(tx->sdata->default_multicast_key)))
@@ -657,6 +660,9 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
if (!skip_hw && tx->key &&
tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)
info->control.hw_key = &tx->key->conf;
+ } else if (!ieee80211_is_mgmt(hdr->frame_control) && tx->sta &&
+ test_sta_flag(tx->sta, WLAN_STA_USES_ENCRYPTION)) {
+ return TX_DROP;
}

return TX_CONTINUE;
diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c
index 64e69d6683ca..93fee4106019 100644
--- a/net/netfilter/nft_exthdr.c
+++ b/net/netfilter/nft_exthdr.c
@@ -137,7 +137,6 @@ static void nft_exthdr_tcp_set_eval(const struct nft_expr *expr,
unsigned int i, optl, tcphdr_len, offset;
struct tcphdr *tcph;
u8 *opt;
- u32 src;

tcph = nft_tcp_header_pointer(pkt, sizeof(buff), buff, &tcphdr_len);
if (!tcph)
@@ -146,7 +145,6 @@ static void nft_exthdr_tcp_set_eval(const struct nft_expr *expr,
opt = (u8 *)tcph;
for (i = sizeof(*tcph); i < tcphdr_len - 1; i += optl) {
union {
- u8 octet;
__be16 v16;
__be32 v32;
} old, new;
@@ -167,13 +165,13 @@ static void nft_exthdr_tcp_set_eval(const struct nft_expr *expr,
if (!tcph)
return;

- src = regs->data[priv->sreg];
offset = i + priv->offset;

switch (priv->len) {
case 2:
old.v16 = get_unaligned((u16 *)(opt + offset));
- new.v16 = src;
+ new.v16 = (__force __be16)nft_reg_load16(
+ &regs->data[priv->sreg]);

switch (priv->type) {
case TCPOPT_MSS:
@@ -191,7 +189,7 @@ static void nft_exthdr_tcp_set_eval(const struct nft_expr *expr,
old.v16, new.v16, false);
break;
case 4:
- new.v32 = src;
+ new.v32 = regs->data[priv->sreg];
old.v32 = get_unaligned((u32 *)(opt + offset));

if (old.v32 == new.v32)
diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
index cc70d651d13e..e34979fcefd2 100644
--- a/net/vmw_vsock/virtio_transport.c
+++ b/net/vmw_vsock/virtio_transport.c
@@ -373,11 +373,14 @@ static void virtio_vsock_event_fill(struct virtio_vsock *vsock)

static void virtio_vsock_reset_sock(struct sock *sk)
{
- lock_sock(sk);
+ /* vmci_transport.c doesn't take sk_lock here either. At least we're
+ * under vsock_table_lock so the sock cannot disappear while we're
+ * executing.
+ */
+
sk->sk_state = TCP_CLOSE;
sk->sk_err = ECONNRESET;
sk->sk_error_report(sk);
- release_sock(sk);
}

static void virtio_vsock_update_guest_cid(struct virtio_vsock *vsock)
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index 6099a9f1cb3d..ff263ad19230 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -3470,7 +3470,7 @@ static int cap_put_caller(struct snd_kcontrol *kcontrol,
struct hda_gen_spec *spec = codec->spec;
const struct hda_input_mux *imux;
struct nid_path *path;
- int i, adc_idx, err = 0;
+ int i, adc_idx, ret, err = 0;

imux = &spec->input_mux;
adc_idx = kcontrol->id.index;
@@ -3480,9 +3480,13 @@ static int cap_put_caller(struct snd_kcontrol *kcontrol,
if (!path || !path->ctls[type])
continue;
kcontrol->private_value = path->ctls[type];
- err = func(kcontrol, ucontrol);
- if (err < 0)
+ ret = func(kcontrol, ucontrol);
+ if (ret < 0) {
+ err = ret;
break;
+ }
+ if (ret > 0)
+ err = 1;
}
mutex_unlock(&codec->control_mutex);
if (err >= 0 && spec->cap_sync_hook)
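
The cap_put_caller() change above separates "error" from "value changed": a negative return from any call aborts the loop and is reported, while a positive return must not be overwritten by later calls that return 0. A compact sketch of that aggregation, with hypothetical names:

static int apply_all(int (*op)(int idx), int n)
{
        int i, ret, err = 0;

        for (i = 0; i < n; i++) {
                ret = op(i);
                if (ret < 0) {
                        err = ret;      /* report the first error */
                        break;
                }
                if (ret > 0)
                        err = 1;        /* remember that something changed */
        }
        return err;
}
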
diff --git a/sound/soc/codecs/cs42l42.c b/sound/soc/codecs/cs42l42.c
index fddfd227a9c0..4cb3e11c66af 100644
--- a/sound/soc/codecs/cs42l42.c
+++ b/sound/soc/codecs/cs42l42.c
@@ -404,7 +404,7 @@ static const struct regmap_config cs42l42_regmap = {
.cache_type = REGCACHE_RBTREE,
};

-static DECLARE_TLV_DB_SCALE(adc_tlv, -9600, 100, false);
+static DECLARE_TLV_DB_SCALE(adc_tlv, -9700, 100, true);
static DECLARE_TLV_DB_SCALE(mixer_tlv, -6300, 100, true);

static const char * const cs42l42_hpf_freq_text[] = {
@@ -424,34 +424,23 @@ static SOC_ENUM_SINGLE_DECL(cs42l42_wnf3_freq_enum, CS42L42_ADC_WNF_HPF_CTL,
CS42L42_ADC_WNF_CF_SHIFT,
cs42l42_wnf3_freq_text);

-static const char * const cs42l42_wnf05_freq_text[] = {
- "280Hz", "315Hz", "350Hz", "385Hz",
- "420Hz", "455Hz", "490Hz", "525Hz"
-};
-
-static SOC_ENUM_SINGLE_DECL(cs42l42_wnf05_freq_enum, CS42L42_ADC_WNF_HPF_CTL,
- CS42L42_ADC_WNF_CF_SHIFT,
- cs42l42_wnf05_freq_text);
-
static const struct snd_kcontrol_new cs42l42_snd_controls[] = {
/* ADC Volume and Filter Controls */
SOC_SINGLE("ADC Notch Switch", CS42L42_ADC_CTL,
- CS42L42_ADC_NOTCH_DIS_SHIFT, true, false),
+ CS42L42_ADC_NOTCH_DIS_SHIFT, true, true),
SOC_SINGLE("ADC Weak Force Switch", CS42L42_ADC_CTL,
CS42L42_ADC_FORCE_WEAK_VCM_SHIFT, true, false),
SOC_SINGLE("ADC Invert Switch", CS42L42_ADC_CTL,
CS42L42_ADC_INV_SHIFT, true, false),
SOC_SINGLE("ADC Boost Switch", CS42L42_ADC_CTL,
CS42L42_ADC_DIG_BOOST_SHIFT, true, false),
- SOC_SINGLE_SX_TLV("ADC Volume", CS42L42_ADC_VOLUME,
- CS42L42_ADC_VOL_SHIFT, 0xA0, 0x6C, adc_tlv),
+ SOC_SINGLE_S8_TLV("ADC Volume", CS42L42_ADC_VOLUME, -97, 12, adc_tlv),
SOC_SINGLE("ADC WNF Switch", CS42L42_ADC_WNF_HPF_CTL,
CS42L42_ADC_WNF_EN_SHIFT, true, false),
SOC_SINGLE("ADC HPF Switch", CS42L42_ADC_WNF_HPF_CTL,
CS42L42_ADC_HPF_EN_SHIFT, true, false),
SOC_ENUM("HPF Corner Freq", cs42l42_hpf_freq_enum),
SOC_ENUM("WNF 3dB Freq", cs42l42_wnf3_freq_enum),
- SOC_ENUM("WNF 05dB Freq", cs42l42_wnf05_freq_enum),

/* DAC Volume and Filter Controls */
SOC_SINGLE("DACA Invert Switch", CS42L42_DAC_CTL1,
@@ -670,15 +659,6 @@ static int cs42l42_pll_config(struct snd_soc_component *component)
CS42L42_FSYNC_PULSE_WIDTH_MASK,
CS42L42_FRAC1_VAL(fsync - 1) <<
CS42L42_FSYNC_PULSE_WIDTH_SHIFT);
- snd_soc_component_update_bits(component,
- CS42L42_ASP_FRM_CFG,
- CS42L42_ASP_5050_MASK,
- CS42L42_ASP_5050_MASK);
- /* Set the frame delay to 1.0 SCLK clocks */
- snd_soc_component_update_bits(component, CS42L42_ASP_FRM_CFG,
- CS42L42_ASP_FSD_MASK,
- CS42L42_ASP_FSD_1_0 <<
- CS42L42_ASP_FSD_SHIFT);
/* Set the sample rates (96k or lower) */
snd_soc_component_update_bits(component, CS42L42_FS_RATE_EN,
CS42L42_FS_EN_MASK,
@@ -774,7 +754,18 @@ static int cs42l42_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
/* interface format */
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_I2S:
- case SND_SOC_DAIFMT_LEFT_J:
+ /*
+ * 5050 mode, frame starts on falling edge of LRCLK,
+ * frame delayed by 1.0 SCLKs
+ */
+ snd_soc_component_update_bits(component,
+ CS42L42_ASP_FRM_CFG,
+ CS42L42_ASP_STP_MASK |
+ CS42L42_ASP_5050_MASK |
+ CS42L42_ASP_FSD_MASK,
+ CS42L42_ASP_5050_MASK |
+ (CS42L42_ASP_FSD_1_0 <<
+ CS42L42_ASP_FSD_SHIFT));
break;
default:
return -EINVAL;
diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
index be773101d876..682ee41ec75c 100644
--- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c
+++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
@@ -135,7 +135,7 @@ static void sst_fill_alloc_params(struct snd_pcm_substream *substream,
snd_pcm_uframes_t period_size;
ssize_t periodbytes;
ssize_t buffer_bytes = snd_pcm_lib_buffer_bytes(substream);
- u32 buffer_addr = virt_to_phys(substream->dma_buffer.area);
+ u32 buffer_addr = virt_to_phys(substream->runtime->dma_area);

channels = substream->runtime->channels;
period_size = substream->runtime->period_size;
@@ -241,7 +241,6 @@ static int sst_platform_alloc_stream(struct snd_pcm_substream *substream,
/* set codec params and inform SST driver the same */
sst_fill_pcm_params(substream, &param);
sst_fill_alloc_params(substream, &alloc_params);
- substream->runtime->dma_area = substream->dma_buffer.area;
str_params.sparams = param;
str_params.aparams = alloc_params;
str_params.codec = SST_CODEC_TYPE_PCM;