Re: Linux 5.1.12

From: Greg KH
Date: Wed Jun 19 2019 - 08:41:28 EST


diff --git a/Makefile b/Makefile
index 5171900e5c93..6d7bfe9fcd7d 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 1
-SUBLEVEL = 11
+SUBLEVEL = 12
EXTRAVERSION =
NAME = Shy Crocodile

diff --git a/arch/arm/kvm/hyp/Makefile b/arch/arm/kvm/hyp/Makefile
index d2b5ec9c4b92..ba88b1eca93c 100644
--- a/arch/arm/kvm/hyp/Makefile
+++ b/arch/arm/kvm/hyp/Makefile
@@ -11,6 +11,7 @@ CFLAGS_ARMV7VE :=$(call cc-option, -march=armv7ve)

obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v3-sr.o
obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/timer-sr.o
+obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/aarch32.o

obj-$(CONFIG_KVM_ARM_HOST) += tlb.o
obj-$(CONFIG_KVM_ARM_HOST) += cp15-sr.o
diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile
index 82d1904328ad..ea710f674cb6 100644
--- a/arch/arm64/kvm/hyp/Makefile
+++ b/arch/arm64/kvm/hyp/Makefile
@@ -10,6 +10,7 @@ KVM=../../../../virt/kvm

obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v3-sr.o
obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/timer-sr.o
+obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/aarch32.o

obj-$(CONFIG_KVM_ARM_HOST) += vgic-v2-cpuif-proxy.o
obj-$(CONFIG_KVM_ARM_HOST) += sysreg-sr.o
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 9a6099a2c633..f637447e96b0 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -171,9 +171,10 @@ void show_pte(unsigned long addr)
return;
}

- pr_alert("%s pgtable: %luk pages, %u-bit VAs, pgdp = %p\n",
+ pr_alert("%s pgtable: %luk pages, %u-bit VAs, pgdp=%016lx\n",
mm == &init_mm ? "swapper" : "user", PAGE_SIZE / SZ_1K,
- mm == &init_mm ? VA_BITS : (int) vabits_user, mm->pgd);
+ mm == &init_mm ? VA_BITS : (int)vabits_user,
+ (unsigned long)virt_to_phys(mm->pgd));
pgdp = pgd_offset(mm, addr);
pgd = READ_ONCE(*pgdp);
pr_alert("[%016lx] pgd=%016llx", addr, pgd_val(pgd));
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index e97f018ff740..ece9490e3018 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -936,13 +936,18 @@ void *__init fixmap_remap_fdt(phys_addr_t dt_phys)

int __init arch_ioremap_pud_supported(void)
{
- /* only 4k granule supports level 1 block mappings */
- return IS_ENABLED(CONFIG_ARM64_4K_PAGES);
+ /*
+ * Only 4k granule supports level 1 block mappings.
+ * SW table walks can't handle removal of intermediate entries.
+ */
+ return IS_ENABLED(CONFIG_ARM64_4K_PAGES) &&
+ !IS_ENABLED(CONFIG_ARM64_PTDUMP_DEBUGFS);
}

int __init arch_ioremap_pmd_supported(void)
{
- return 1;
+ /* See arch_ioremap_pud_supported() */
+ return !IS_ENABLED(CONFIG_ARM64_PTDUMP_DEBUGFS);
}

int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot)
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 581f91be9dd4..ebc6c03d4afe 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -875,6 +875,23 @@ static inline int pmd_present(pmd_t pmd)
return false;
}

+static inline int pmd_is_serializing(pmd_t pmd)
+{
+ /*
+ * If the pmd is undergoing a split, the _PAGE_PRESENT bit is clear
+ * and _PAGE_INVALID is set (see pmd_present, pmdp_invalidate).
+ *
+ * This condition may also occur when flushing a pmd while flushing
+ * it (see ptep_modify_prot_start), so callers must ensure this
+ * case is fine as well.
+ */
+ if ((pmd_raw(pmd) & cpu_to_be64(_PAGE_PRESENT | _PAGE_INVALID)) ==
+ cpu_to_be64(_PAGE_INVALID))
+ return true;
+
+ return false;
+}
+
static inline int pmd_bad(pmd_t pmd)
{
if (radix_enabled())
@@ -1090,6 +1107,19 @@ static inline int pmd_protnone(pmd_t pmd)
#define pmd_access_permitted pmd_access_permitted
static inline bool pmd_access_permitted(pmd_t pmd, bool write)
{
+ /*
+ * pmdp_invalidate sets this combination (which is not caught by
+ * !pte_present() check in pte_access_permitted), to prevent
+ * lock-free lookups, as part of the serialize_against_pte_lookup()
+ * synchronisation.
+ *
+ * This also catches the case where the PTE's hardware PRESENT bit is
+ * cleared while TLB is flushed, which is suboptimal but should not
+ * be frequent.
+ */
+ if (pmd_is_serializing(pmd))
+ return false;
+
return pte_access_permitted(pmd_pte(pmd), write);
}

diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h
index 4a585cba1787..c68476818753 100644
--- a/arch/powerpc/include/asm/kexec.h
+++ b/arch/powerpc/include/asm/kexec.h
@@ -94,6 +94,9 @@ static inline bool kdump_in_progress(void)
return crashing_cpu >= 0;
}

+void relocate_new_kernel(unsigned long indirection_page, unsigned long reboot_code_buffer,
+ unsigned long start_address) __noreturn;
+
#ifdef CONFIG_KEXEC_FILE
extern const struct kexec_file_ops kexec_elf64_ops;

diff --git a/arch/powerpc/kernel/machine_kexec_32.c b/arch/powerpc/kernel/machine_kexec_32.c
index affe5dcce7f4..2b160d68db49 100644
--- a/arch/powerpc/kernel/machine_kexec_32.c
+++ b/arch/powerpc/kernel/machine_kexec_32.c
@@ -30,7 +30,6 @@ typedef void (*relocate_new_kernel_t)(
*/
void default_machine_kexec(struct kimage *image)
{
- extern const unsigned char relocate_new_kernel[];
extern const unsigned int relocate_new_kernel_size;
unsigned long page_list;
unsigned long reboot_code_buffer, reboot_code_buffer_phys;
@@ -58,6 +57,9 @@ void default_machine_kexec(struct kimage *image)
reboot_code_buffer + KEXEC_CONTROL_PAGE_SIZE);
printk(KERN_INFO "Bye!\n");

+ if (!IS_ENABLED(CONFIG_FSL_BOOKE) && !IS_ENABLED(CONFIG_44x))
+ relocate_new_kernel(page_list, reboot_code_buffer_phys, image->start);
+
/* now call it */
rnk = (relocate_new_kernel_t) reboot_code_buffer;
(*rnk)(page_list, reboot_code_buffer_phys, image->start);
diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c
index a4341aba0af4..e538804d3f4a 100644
--- a/arch/powerpc/mm/pgtable-book3s64.c
+++ b/arch/powerpc/mm/pgtable-book3s64.c
@@ -116,6 +116,9 @@ pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
/*
* This ensures that generic code that rely on IRQ disabling
* to prevent a parallel THP split work as expected.
+ *
+ * Marking the entry with _PAGE_INVALID && ~_PAGE_PRESENT requires
+ * a special case check in pmd_access_permitted.
*/
serialize_against_pte_lookup(vma->vm_mm);
return __pmd(old_pmd);
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index c4180ecfbb2a..ee35f1112db9 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -4413,21 +4413,28 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
const struct kvm_memory_slot *new,
enum kvm_mr_change change)
{
- int rc;
-
- /* If the basics of the memslot do not change, we do not want
- * to update the gmap. Every update causes several unnecessary
- * segment translation exceptions. This is usually handled just
- * fine by the normal fault handler + gmap, but it will also
- * cause faults on the prefix page of running guest CPUs.
- */
- if (old->userspace_addr == mem->userspace_addr &&
- old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
- old->npages * PAGE_SIZE == mem->memory_size)
- return;
+ int rc = 0;

- rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
- mem->guest_phys_addr, mem->memory_size);
+ switch (change) {
+ case KVM_MR_DELETE:
+ rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
+ old->npages * PAGE_SIZE);
+ break;
+ case KVM_MR_MOVE:
+ rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
+ old->npages * PAGE_SIZE);
+ if (rc)
+ break;
+ /* FALLTHROUGH */
+ case KVM_MR_CREATE:
+ rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
+ mem->guest_phys_addr, mem->memory_size);
+ break;
+ case KVM_MR_FLAGS_ONLY:
+ break;
+ default:
+ WARN(1, "Unknown KVM MR CHANGE: %d\n", change);
+ }
if (rc)
pr_warn("failed to commit memory region\n");
return;
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index 8a4a7823451a..f53658dde639 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -876,7 +876,7 @@ int __init microcode_init(void)
goto out_ucode_group;

register_syscore_ops(&mc_syscore_ops);
- cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/microcode:online",
+ cpuhp_setup_state_nocalls(CPUHP_AP_MICROCODE_LOADER, "x86/microcode:online",
mc_cpu_online, mc_cpu_down_prep);

pr_info("Microcode Update Driver: v%s.", DRIVER_VERSION);
diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c
index 1573a0a6b525..ff6e8e561405 100644
--- a/arch/x86/kernel/cpu/resctrl/monitor.c
+++ b/arch/x86/kernel/cpu/resctrl/monitor.c
@@ -368,6 +368,9 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
struct list_head *head;
struct rdtgroup *entry;

+ if (!is_mbm_local_enabled())
+ return;
+
r_mba = &rdt_resources_all[RDT_RESOURCE_MBA];
closid = rgrp->closid;
rmid = rgrp->mon.rmid;
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index e39741997893..dd745b58ffd8 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -283,7 +283,7 @@ int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
bool fast_mode = idx & (1u << 31);
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
struct kvm_pmc *pmc;
- u64 ctr_val;
+ u64 mask = fast_mode ? ~0u : ~0ull;

if (!pmu->version)
return 1;
@@ -291,15 +291,11 @@ int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
if (is_vmware_backdoor_pmc(idx))
return kvm_pmu_rdpmc_vmware(vcpu, idx, data);

- pmc = kvm_x86_ops->pmu_ops->msr_idx_to_pmc(vcpu, idx);
+ pmc = kvm_x86_ops->pmu_ops->msr_idx_to_pmc(vcpu, idx, &mask);
if (!pmc)
return 1;

- ctr_val = pmc_read_counter(pmc);
- if (fast_mode)
- ctr_val = (u32)ctr_val;
-
- *data = ctr_val;
+ *data = pmc_read_counter(pmc) & mask;
return 0;
}

diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
index ba8898e1a854..22dff661145a 100644
--- a/arch/x86/kvm/pmu.h
+++ b/arch/x86/kvm/pmu.h
@@ -25,7 +25,8 @@ struct kvm_pmu_ops {
unsigned (*find_fixed_event)(int idx);
bool (*pmc_is_enabled)(struct kvm_pmc *pmc);
struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx);
- struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, unsigned idx);
+ struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, unsigned idx,
+ u64 *mask);
int (*is_valid_msr_idx)(struct kvm_vcpu *vcpu, unsigned idx);
bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr);
int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
diff --git a/arch/x86/kvm/pmu_amd.c b/arch/x86/kvm/pmu_amd.c
index 50fa9450fcf1..d3118088f1cd 100644
--- a/arch/x86/kvm/pmu_amd.c
+++ b/arch/x86/kvm/pmu_amd.c
@@ -186,7 +186,7 @@ static int amd_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
}

/* idx is the ECX register of RDPMC instruction */
-static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, unsigned idx)
+static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *mask)
{
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
struct kvm_pmc *counters;
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index ae6e51828a54..aa3b77acfbf9 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -379,6 +379,9 @@ module_param(vgif, int, 0444);
static int sev = IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT);
module_param(sev, int, 0444);

+static bool __read_mostly dump_invalid_vmcb = 0;
+module_param(dump_invalid_vmcb, bool, 0644);
+
static u8 rsm_ins_bytes[] = "\x0f\xaa";

static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
@@ -4834,6 +4837,11 @@ static void dump_vmcb(struct kvm_vcpu *vcpu)
struct vmcb_control_area *control = &svm->vmcb->control;
struct vmcb_save_area *save = &svm->vmcb->save;

+ if (!dump_invalid_vmcb) {
+ pr_warn_ratelimited("set kvm_amd.dump_invalid_vmcb=1 to dump internal KVM state.\n");
+ return;
+ }
+
pr_err("VMCB Control Area:\n");
pr_err("%-20s%04x\n", "cr_read:", control->intercept_cr & 0xffff);
pr_err("%-20s%04x\n", "cr_write:", control->intercept_cr >> 16);
@@ -4992,7 +5000,6 @@ static int handle_exit(struct kvm_vcpu *vcpu)
kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
kvm_run->fail_entry.hardware_entry_failure_reason
= svm->vmcb->control.exit_code;
- pr_err("KVM: FAILED VMRUN WITH VMCB:\n");
dump_vmcb(vcpu);
return 0;
}
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 8f6f69c26c35..5fa0c17d0b41 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -5467,7 +5467,7 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
vmcs12->vmcs_link_pointer != -1ull) {
struct vmcs12 *shadow_vmcs12 = get_shadow_vmcs12(vcpu);

- if (kvm_state->size < sizeof(*kvm_state) + 2 * sizeof(*vmcs12))
+ if (kvm_state->size < sizeof(*kvm_state) + VMCS12_SIZE + sizeof(*vmcs12))
return -EINVAL;

if (copy_from_user(shadow_vmcs12,
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index 5ab4a364348e..c3f103e2b08e 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -126,7 +126,7 @@ static int intel_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
}

static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu,
- unsigned idx)
+ unsigned idx, u64 *mask)
{
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
bool fixed = idx & (1u << 30);
@@ -138,6 +138,7 @@ static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu,
if (fixed && idx >= pmu->nr_arch_fixed_counters)
return NULL;
counters = fixed ? pmu->fixed_counters : pmu->gp_counters;
+ *mask &= pmu->counter_bitmask[fixed ? KVM_PMC_FIXED : KVM_PMC_GP];

return &counters[idx];
}
@@ -183,9 +184,13 @@ static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
*data = pmu->global_ovf_ctrl;
return 0;
default:
- if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
- (pmc = get_fixed_pmc(pmu, msr))) {
- *data = pmc_read_counter(pmc);
+ if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0))) {
+ u64 val = pmc_read_counter(pmc);
+ *data = val & pmu->counter_bitmask[KVM_PMC_GP];
+ return 0;
+ } else if ((pmc = get_fixed_pmc(pmu, msr))) {
+ u64 val = pmc_read_counter(pmc);
+ *data = val & pmu->counter_bitmask[KVM_PMC_FIXED];
return 0;
} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
*data = pmc->eventsel;
@@ -235,11 +240,14 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
}
break;
default:
- if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
- (pmc = get_fixed_pmc(pmu, msr))) {
- if (!msr_info->host_initiated)
- data = (s64)(s32)data;
- pmc->counter += data - pmc_read_counter(pmc);
+ if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0))) {
+ if (msr_info->host_initiated)
+ pmc->counter = data;
+ else
+ pmc->counter = (s32)data;
+ return 0;
+ } else if ((pmc = get_fixed_pmc(pmu, msr))) {
+ pmc->counter = data;
return 0;
} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
if (data == pmc->eventsel)
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 2b4a3d32c511..cfb8f1ec9a0a 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -114,6 +114,9 @@ static u64 __read_mostly host_xss;
bool __read_mostly enable_pml = 1;
module_param_named(pml, enable_pml, bool, S_IRUGO);

+static bool __read_mostly dump_invalid_vmcs = 0;
+module_param(dump_invalid_vmcs, bool, 0644);
+
#define MSR_BITMAP_MODE_X2APIC 1
#define MSR_BITMAP_MODE_X2APIC_APICV 2

@@ -5605,15 +5608,24 @@ static void vmx_dump_dtsel(char *name, uint32_t limit)

void dump_vmcs(void)
{
- u32 vmentry_ctl = vmcs_read32(VM_ENTRY_CONTROLS);
- u32 vmexit_ctl = vmcs_read32(VM_EXIT_CONTROLS);
- u32 cpu_based_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
- u32 pin_based_exec_ctrl = vmcs_read32(PIN_BASED_VM_EXEC_CONTROL);
- u32 secondary_exec_control = 0;
- unsigned long cr4 = vmcs_readl(GUEST_CR4);
- u64 efer = vmcs_read64(GUEST_IA32_EFER);
+ u32 vmentry_ctl, vmexit_ctl;
+ u32 cpu_based_exec_ctrl, pin_based_exec_ctrl, secondary_exec_control;
+ unsigned long cr4;
+ u64 efer;
int i, n;

+ if (!dump_invalid_vmcs) {
+ pr_warn_ratelimited("set kvm_intel.dump_invalid_vmcs=1 to dump internal KVM state.\n");
+ return;
+ }
+
+ vmentry_ctl = vmcs_read32(VM_ENTRY_CONTROLS);
+ vmexit_ctl = vmcs_read32(VM_EXIT_CONTROLS);
+ cpu_based_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
+ pin_based_exec_ctrl = vmcs_read32(PIN_BASED_VM_EXEC_CONTROL);
+ cr4 = vmcs_readl(GUEST_CR4);
+ efer = vmcs_read64(GUEST_IA32_EFER);
+ secondary_exec_control = 0;
if (cpu_has_secondary_exec_ctrls())
secondary_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);

diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index f879529906b4..9cd72decdfe4 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -314,6 +314,7 @@ void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr);
void pt_update_intercept_for_msr(struct vcpu_vmx *vmx);
+void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp);

#define POSTED_INTR_ON 0
#define POSTED_INTR_SN 1
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index efc8adf7ca0e..b07868eb1656 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -143,7 +143,7 @@ module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR);
* tuning, i.e. allows priveleged userspace to set an exact advancement time.
*/
static int __read_mostly lapic_timer_advance_ns = -1;
-module_param(lapic_timer_advance_ns, uint, S_IRUGO | S_IWUSR);
+module_param(lapic_timer_advance_ns, int, S_IRUGO | S_IWUSR);

static bool __read_mostly vector_hashing = true;
module_param(vector_hashing, bool, S_IRUGO);
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index 8dc0fc0b1382..296da58f3013 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -199,7 +199,7 @@ static inline p4d_t *early_p4d_offset(pgd_t *pgd, unsigned long addr)
if (!pgtable_l5_enabled())
return (p4d_t *)pgd;

- p4d = __pa_nodebug(pgd_val(*pgd)) & PTE_PFN_MASK;
+ p4d = pgd_val(*pgd) & PTE_PFN_MASK;
p4d += __START_KERNEL_map - phys_base;
return (p4d_t *)p4d + p4d_index(addr);
}
diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c
index d669c5e797e0..5aef69b7ff52 100644
--- a/arch/x86/mm/kaslr.c
+++ b/arch/x86/mm/kaslr.c
@@ -52,7 +52,7 @@ static __initdata struct kaslr_memory_region {
} kaslr_regions[] = {
{ &page_offset_base, 0 },
{ &vmalloc_base, 0 },
- { &vmemmap_base, 1 },
+ { &vmemmap_base, 0 },
};

/* Get size in bytes used by the memory region */
@@ -78,6 +78,7 @@ void __init kernel_randomize_memory(void)
unsigned long rand, memory_tb;
struct rnd_state rand_state;
unsigned long remain_entropy;
+ unsigned long vmemmap_size;

vaddr_start = pgtable_l5_enabled() ? __PAGE_OFFSET_BASE_L5 : __PAGE_OFFSET_BASE_L4;
vaddr = vaddr_start;
@@ -109,6 +110,14 @@ void __init kernel_randomize_memory(void)
if (memory_tb < kaslr_regions[0].size_tb)
kaslr_regions[0].size_tb = memory_tb;

+ /*
+ * Calculate the vmemmap region size in TBs, aligned to a TB
+ * boundary.
+ */
+ vmemmap_size = (kaslr_regions[0].size_tb << (TB_SHIFT - PAGE_SHIFT)) *
+ sizeof(struct page);
+ kaslr_regions[2].size_tb = DIV_ROUND_UP(vmemmap_size, 1UL << TB_SHIFT);
+
/* Calculate entropy available between regions */
remain_entropy = vaddr_end - vaddr_start;
for (i = 0; i < ARRAY_SIZE(kaslr_regions); i++)
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index adf28788cab5..133fed8e4a8b 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4476,9 +4476,12 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
ATA_HORKAGE_FIRMWARE_WARN },

- /* drives which fail FPDMA_AA activation (some may freeze afterwards) */
- { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA },
- { "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA },
+ /* drives which fail FPDMA_AA activation (some may freeze afterwards)
+ the ST disks also have LPM issues */
+ { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA |
+ ATA_HORKAGE_NOLPM, },
+ { "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA |
+ ATA_HORKAGE_NOLPM, },
{ "VB0250EAVER", "HPG7", ATA_HORKAGE_BROKEN_FPDMA_AA },

/* Blacklist entries taken from Silicon Image 3124/3132
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index ecf6f96df2ad..e6b07ece3910 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -594,7 +594,7 @@ int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- uint32_t rptr = amdgpu_ring_get_rptr(ring);
+ uint32_t rptr;
unsigned i;
int r;

@@ -602,6 +602,8 @@ int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
if (r)
return r;

+ rptr = amdgpu_ring_get_rptr(ring);
+
amdgpu_ring_write(ring, VCN_ENC_CMD_END);
amdgpu_ring_commit(ring);

diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 2fe8397241ea..1611bef19a2c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -715,6 +715,7 @@ static bool gmc_v9_0_keep_stolen_memory(struct amdgpu_device *adev)
case CHIP_VEGA10:
return true;
case CHIP_RAVEN:
+ return (adev->pdev->device == 0x15d8);
case CHIP_VEGA12:
case CHIP_VEGA20:
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index c9edddf9f88a..be70e6e5f9df 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -170,13 +170,16 @@ static void uvd_v6_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- uint32_t rptr = amdgpu_ring_get_rptr(ring);
+ uint32_t rptr;
unsigned i;
int r;

r = amdgpu_ring_alloc(ring, 16);
if (r)
return r;
+
+ rptr = amdgpu_ring_get_rptr(ring);
+
amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
amdgpu_ring_commit(ring);

diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index dc461df48da0..9682f39d57c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -175,7 +175,7 @@ static void uvd_v7_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- uint32_t rptr = amdgpu_ring_get_rptr(ring);
+ uint32_t rptr;
unsigned i;
int r;

@@ -185,6 +185,9 @@ static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
r = amdgpu_ring_alloc(ring, 16);
if (r)
return r;
+
+ rptr = amdgpu_ring_get_rptr(ring);
+
amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
amdgpu_ring_commit(ring);

diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 8db099b8077e..d94778915312 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -1580,6 +1580,50 @@ static void connector_bad_edid(struct drm_connector *connector,
}
}

+/* Get override or firmware EDID */
+static struct edid *drm_get_override_edid(struct drm_connector *connector)
+{
+ struct edid *override = NULL;
+
+ if (connector->override_edid)
+ override = drm_edid_duplicate(connector->edid_blob_ptr->data);
+
+ if (!override)
+ override = drm_load_edid_firmware(connector);
+
+ return IS_ERR(override) ? NULL : override;
+}
+
+/**
+ * drm_add_override_edid_modes - add modes from override/firmware EDID
+ * @connector: connector we're probing
+ *
+ * Add modes from the override/firmware EDID, if available. Only to be used from
+ * drm_helper_probe_single_connector_modes() as a fallback for when DDC probe
+ * failed during drm_get_edid() and caused the override/firmware EDID to be
+ * skipped.
+ *
+ * Return: The number of modes added or 0 if we couldn't find any.
+ */
+int drm_add_override_edid_modes(struct drm_connector *connector)
+{
+ struct edid *override;
+ int num_modes = 0;
+
+ override = drm_get_override_edid(connector);
+ if (override) {
+ drm_connector_update_edid_property(connector, override);
+ num_modes = drm_add_edid_modes(connector, override);
+ kfree(override);
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] adding %d modes via fallback override/firmware EDID\n",
+ connector->base.id, connector->name, num_modes);
+ }
+
+ return num_modes;
+}
+EXPORT_SYMBOL(drm_add_override_edid_modes);
+
/**
* drm_do_get_edid - get EDID data using a custom EDID block read function
* @connector: connector we're probing
@@ -1607,15 +1651,10 @@ struct edid *drm_do_get_edid(struct drm_connector *connector,
{
int i, j = 0, valid_extensions = 0;
u8 *edid, *new;
- struct edid *override = NULL;
-
- if (connector->override_edid)
- override = drm_edid_duplicate(connector->edid_blob_ptr->data);
-
- if (!override)
- override = drm_load_edid_firmware(connector);
+ struct edid *override;

- if (!IS_ERR_OR_NULL(override))
+ override = drm_get_override_edid(connector);
+ if (override)
return override;

if ((edid = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 6fd08e04b323..dd427c7ff967 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -479,6 +479,13 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,

count = (*connector_funcs->get_modes)(connector);

+ /*
+ * Fallback for when DDC probe failed in drm_get_edid() and thus skipped
+ * override/firmware EDID.
+ */
+ if (count == 0 && connector->status == connector_status_connected)
+ count = drm_add_override_edid_modes(connector);
+
if (count == 0 && connector->status == connector_status_connected)
count = drm_add_modes_noedid(connector, 1024, 768);
count += drm_helper_probe_add_cmdline_mode(connector);
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index e8ac04c33e29..b552d9d28127 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -300,10 +300,17 @@ static u32 *parse_csr_fw(struct drm_i915_private *dev_priv,
u32 dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes;
u32 i;
u32 *dmc_payload;
+ size_t fsize;

if (!fw)
return NULL;

+ fsize = sizeof(struct intel_css_header) +
+ sizeof(struct intel_package_header) +
+ sizeof(struct intel_dmc_header);
+ if (fsize > fw->size)
+ goto error_truncated;
+
/* Extract CSS Header information*/
css_header = (struct intel_css_header *)fw->data;
if (sizeof(struct intel_css_header) !=
@@ -363,6 +370,9 @@ static u32 *parse_csr_fw(struct drm_i915_private *dev_priv,
/* Convert dmc_offset into number of bytes. By default it is in dwords*/
dmc_offset *= 4;
readcount += dmc_offset;
+ fsize += dmc_offset;
+ if (fsize > fw->size)
+ goto error_truncated;

/* Extract dmc_header information. */
dmc_header = (struct intel_dmc_header *)&fw->data[readcount];
@@ -394,6 +404,10 @@ static u32 *parse_csr_fw(struct drm_i915_private *dev_priv,

/* fw_size is in dwords, so multiplied by 4 to convert into bytes. */
nbytes = dmc_header->fw_size * 4;
+ fsize += nbytes;
+ if (fsize > fw->size)
+ goto error_truncated;
+
if (nbytes > csr->max_fw_size) {
DRM_ERROR("DMC FW too big (%u bytes)\n", nbytes);
return NULL;
@@ -407,6 +421,10 @@ static u32 *parse_csr_fw(struct drm_i915_private *dev_priv,
}

return memcpy(dmc_payload, &fw->data[readcount], nbytes);
+
+error_truncated:
+ DRM_ERROR("Truncated DMC firmware, rejecting.\n");
+ return NULL;
}

static void intel_csr_runtime_pm_get(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 421aac80a838..cd8a22d6370e 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2444,10 +2444,14 @@ static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
* main surface.
*/
static const struct drm_format_info ccs_formats[] = {
- { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
- { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
- { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
- { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
+ { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
+ .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
+ { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
+ .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
+ { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
+ .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
+ { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
+ .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
};

static const struct drm_format_info *
@@ -11757,7 +11761,7 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
return 0;
}

-static bool intel_fuzzy_clock_check(int clock1, int clock2)
+bool intel_fuzzy_clock_check(int clock1, int clock2)
{
int diff;

diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index d5660ac1b0d6..18e17b422701 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1707,6 +1707,7 @@ int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
const struct dpll *dpll);
void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe);
int lpt_get_iclkip(struct drm_i915_private *dev_priv);
+bool intel_fuzzy_clock_check(int clock1, int clock2);

/* modesetting asserts */
void assert_panel_unlocked(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/intel_dsi_vbt.c b/drivers/gpu/drm/i915/intel_dsi_vbt.c
index 06a11c35a784..2ec792ce49a5 100644
--- a/drivers/gpu/drm/i915/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_vbt.c
@@ -871,6 +871,17 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
if (mipi_config->target_burst_mode_freq) {
u32 bitrate = intel_dsi_bitrate(intel_dsi);

+ /*
+ * Sometimes the VBT contains a slightly lower clock than
+ * the bitrate we have calculated; in this case just
+ * replace it with the calculated bitrate.
+ */
+ if (mipi_config->target_burst_mode_freq < bitrate &&
+ intel_fuzzy_clock_check(
+ mipi_config->target_burst_mode_freq,
+ bitrate))
+ mipi_config->target_burst_mode_freq = bitrate;
+
if (mipi_config->target_burst_mode_freq < bitrate) {
DRM_ERROR("Burst mode freq is less than computed\n");
return false;
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index e7b0884ba5a5..366d5554e8e9 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -909,6 +909,13 @@ static bool intel_sdvo_set_colorimetry(struct intel_sdvo *intel_sdvo,
return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_COLORIMETRY, &mode, 1);
}

+static bool intel_sdvo_set_audio_state(struct intel_sdvo *intel_sdvo,
+ u8 audio_state)
+{
+ return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_AUDIO_STAT,
+ &audio_state, 1);
+}
+
#if 0
static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo)
{
@@ -1366,11 +1373,6 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
else
sdvox |= SDVO_PIPE_SEL(crtc->pipe);

- if (crtc_state->has_audio) {
- WARN_ON_ONCE(INTEL_GEN(dev_priv) < 4);
- sdvox |= SDVO_AUDIO_ENABLE;
- }
-
if (INTEL_GEN(dev_priv) >= 4) {
/* done in crtc_mode_set as the dpll_md reg must be written early */
} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
@@ -1510,8 +1512,13 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
if (sdvox & HDMI_COLOR_RANGE_16_235)
pipe_config->limited_color_range = true;

- if (sdvox & SDVO_AUDIO_ENABLE)
- pipe_config->has_audio = true;
+ if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_AUDIO_STAT,
+ &val, 1)) {
+ u8 mask = SDVO_AUDIO_ELD_VALID | SDVO_AUDIO_PRESENCE_DETECT;
+
+ if ((val & mask) == mask)
+ pipe_config->has_audio = true;
+ }

if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ENCODE,
&val, 1)) {
@@ -1524,6 +1531,32 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
pipe_config->pixel_multiplier, encoder_pixel_multiplier);
}

+static void intel_sdvo_disable_audio(struct intel_sdvo *intel_sdvo)
+{
+ intel_sdvo_set_audio_state(intel_sdvo, 0);
+}
+
+static void intel_sdvo_enable_audio(struct intel_sdvo *intel_sdvo,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->base.adjusted_mode;
+ struct drm_connector *connector = conn_state->connector;
+ u8 *eld = connector->eld;
+
+ eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
+
+ intel_sdvo_set_audio_state(intel_sdvo, 0);
+
+ intel_sdvo_write_infoframe(intel_sdvo, SDVO_HBUF_INDEX_ELD,
+ SDVO_HBUF_TX_DISABLED,
+ eld, drm_eld_size(eld));
+
+ intel_sdvo_set_audio_state(intel_sdvo, SDVO_AUDIO_ELD_VALID |
+ SDVO_AUDIO_PRESENCE_DETECT);
+}
+
static void intel_disable_sdvo(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *conn_state)
@@ -1533,6 +1566,9 @@ static void intel_disable_sdvo(struct intel_encoder *encoder,
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
u32 temp;

+ if (old_crtc_state->has_audio)
+ intel_sdvo_disable_audio(intel_sdvo);
+
intel_sdvo_set_active_outputs(intel_sdvo, 0);
if (0)
intel_sdvo_set_encoder_power_state(intel_sdvo,
@@ -1618,6 +1654,9 @@ static void intel_enable_sdvo(struct intel_encoder *encoder,
intel_sdvo_set_encoder_power_state(intel_sdvo,
DRM_MODE_DPMS_ON);
intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output);
+
+ if (pipe_config->has_audio)
+ intel_sdvo_enable_audio(intel_sdvo, pipe_config, conn_state);
}

static enum drm_mode_status
@@ -2480,7 +2519,6 @@ static bool
intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
{
struct drm_encoder *encoder = &intel_sdvo->base.base;
- struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct drm_connector *connector;
struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
struct intel_connector *intel_connector;
@@ -2517,9 +2555,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
connector->connector_type = DRM_MODE_CONNECTOR_DVID;

- /* gen3 doesn't do the hdmi bits in the SDVO register */
- if (INTEL_GEN(dev_priv) >= 4 &&
- intel_sdvo_is_hdmi_connector(intel_sdvo, device)) {
+ if (intel_sdvo_is_hdmi_connector(intel_sdvo, device)) {
connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
intel_sdvo_connector->is_hdmi = true;
}
diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/intel_sdvo_regs.h
index db0ed499268a..e9ba3b047f93 100644
--- a/drivers/gpu/drm/i915/intel_sdvo_regs.h
+++ b/drivers/gpu/drm/i915/intel_sdvo_regs.h
@@ -707,6 +707,9 @@ struct intel_sdvo_enhancements_arg {
#define SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER 0x90
#define SDVO_CMD_SET_AUDIO_STAT 0x91
#define SDVO_CMD_GET_AUDIO_STAT 0x92
+ #define SDVO_AUDIO_ELD_VALID (1 << 0)
+ #define SDVO_AUDIO_PRESENCE_DETECT (1 << 1)
+ #define SDVO_AUDIO_CP_READY (1 << 2)
#define SDVO_CMD_SET_HBUF_INDEX 0x93
#define SDVO_HBUF_INDEX_ELD 0
#define SDVO_HBUF_INDEX_AVI_IF 1
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index 00cd9ab8948d..db28012dbf54 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -17,10 +17,21 @@ config DRM_NOUVEAU
select INPUT if ACPI && X86
select THERMAL if ACPI && X86
select ACPI_VIDEO if ACPI && X86
- select DRM_VM
help
Choose this option for open-source NVIDIA support.

+config NOUVEAU_LEGACY_CTX_SUPPORT
+ bool "Nouveau legacy context support"
+ depends on DRM_NOUVEAU
+ select DRM_VM
+ default y
+ help
+ There was a version of the nouveau DDX that relied on legacy
+ ctx ioctls not erroring out. That was a long time ago, so offer
+ a way to disable it now. For uapi compatibility with the old
+ nouveau DDX this should be on by default, but modern distros
+ should consider turning it off.
+
config NOUVEAU_PLATFORM_DRIVER
bool "Nouveau (NVIDIA) SoC GPUs"
depends on DRM_NOUVEAU && ARCH_TEGRA
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 5020265bfbd9..6ab9033f49da 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -1094,8 +1094,11 @@ nouveau_driver_fops = {
static struct drm_driver
driver_stub = {
.driver_features =
- DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER |
- DRIVER_KMS_LEGACY_CONTEXT,
+ DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER
+#if defined(CONFIG_NOUVEAU_LEGACY_CTX_SUPPORT)
+ | DRIVER_KMS_LEGACY_CONTEXT
+#endif
+ ,

.open = nouveau_drm_open,
.postclose = nouveau_drm_postclose,
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 1543c2f8d3d3..05d513d54555 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -169,7 +169,11 @@ nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev);

if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
+#if defined(CONFIG_NOUVEAU_LEGACY_CTX_SUPPORT)
return drm_legacy_mmap(filp, vma);
+#else
+ return -EINVAL;
+#endif

return ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index c0231a817d3b..6dc7d90a12b4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -2351,7 +2351,8 @@ static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,

cmd = container_of(header, typeof(*cmd), header);

- if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
+ if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX ||
+ cmd->body.type < SVGA3D_SHADERTYPE_MIN) {
DRM_ERROR("Illegal shader type %u.\n",
(unsigned) cmd->body.type);
return -EINVAL;
@@ -2587,6 +2588,10 @@ static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
if (view_type == vmw_view_max)
return -EINVAL;
cmd = container_of(header, typeof(*cmd), header);
+ if (unlikely(cmd->sid == SVGA3D_INVALID_ID)) {
+ DRM_ERROR("Invalid surface id.\n");
+ return -EINVAL;
+ }
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
&cmd->sid, &srf);
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 63a43726cce0..39eba8106d40 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1313,10 +1313,10 @@ static u32 __extract(u8 *report, unsigned offset, int n)
u32 hid_field_extract(const struct hid_device *hid, u8 *report,
unsigned offset, unsigned n)
{
- if (n > 256) {
- hid_warn(hid, "hid_field_extract() called with n (%d) > 256! (%s)\n",
+ if (n > 32) {
+ hid_warn(hid, "hid_field_extract() called with n (%d) > 32! (%s)\n",
n, current->comm);
- n = 256;
+ n = 32;
}

return __extract(report, offset, n);
@@ -1636,7 +1636,7 @@ static struct hid_report *hid_get_report(struct hid_report_enum *report_enum,
* Implement a generic .request() callback, using .raw_request()
* DO NOT USE in hid drivers directly, but through hid_hw_request instead.
*/
-void __hid_request(struct hid_device *hid, struct hid_report *report,
+int __hid_request(struct hid_device *hid, struct hid_report *report,
int reqtype)
{
char *buf;
@@ -1645,7 +1645,7 @@ void __hid_request(struct hid_device *hid, struct hid_report *report,

buf = hid_alloc_report_buf(report, GFP_KERNEL);
if (!buf)
- return;
+ return -ENOMEM;

len = hid_report_len(report);

@@ -1662,8 +1662,11 @@ void __hid_request(struct hid_device *hid, struct hid_report *report,
if (reqtype == HID_REQ_GET_REPORT)
hid_input_report(hid, report->type, buf, ret, 0);

+ ret = 0;
+
out:
kfree(buf);
+ return ret;
}
EXPORT_SYMBOL_GPL(__hid_request);

diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index b607286a0bc8..46c6efea1404 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -1557,52 +1557,71 @@ static void hidinput_close(struct input_dev *dev)
hid_hw_close(hid);
}

-static void hidinput_change_resolution_multipliers(struct hid_device *hid)
+static bool __hidinput_change_resolution_multipliers(struct hid_device *hid,
+ struct hid_report *report, bool use_logical_max)
{
- struct hid_report_enum *rep_enum;
- struct hid_report *rep;
struct hid_usage *usage;
+ bool update_needed = false;
int i, j;

- rep_enum = &hid->report_enum[HID_FEATURE_REPORT];
- list_for_each_entry(rep, &rep_enum->report_list, list) {
- bool update_needed = false;
+ if (report->maxfield == 0)
+ return false;

- if (rep->maxfield == 0)
- continue;
+ /*
+ * If we have more than one feature within this report we
+ * need to fill in the bits from the others before we can
+ * overwrite the ones for the Resolution Multiplier.
+ */
+ if (report->maxfield > 1) {
+ hid_hw_request(hid, report, HID_REQ_GET_REPORT);
+ hid_hw_wait(hid);
+ }

- /*
- * If we have more than one feature within this report we
- * need to fill in the bits from the others before we can
- * overwrite the ones for the Resolution Multiplier.
+ for (i = 0; i < report->maxfield; i++) {
+ __s32 value = use_logical_max ?
+ report->field[i]->logical_maximum :
+ report->field[i]->logical_minimum;
+
+ /* There is no good reason for a Resolution
+ * Multiplier to have a count other than 1.
+ * Ignore that case.
*/
- if (rep->maxfield > 1) {
- hid_hw_request(hid, rep, HID_REQ_GET_REPORT);
- hid_hw_wait(hid);
- }
+ if (report->field[i]->report_count != 1)
+ continue;

- for (i = 0; i < rep->maxfield; i++) {
- __s32 logical_max = rep->field[i]->logical_maximum;
+ for (j = 0; j < report->field[i]->maxusage; j++) {
+ usage = &report->field[i]->usage[j];

- /* There is no good reason for a Resolution
- * Multiplier to have a count other than 1.
- * Ignore that case.
- */
- if (rep->field[i]->report_count != 1)
+ if (usage->hid != HID_GD_RESOLUTION_MULTIPLIER)
continue;

- for (j = 0; j < rep->field[i]->maxusage; j++) {
- usage = &rep->field[i]->usage[j];
+ report->field[i]->value[j] = value;
+ update_needed = true;
+ }
+ }
+
+ return update_needed;
+}

- if (usage->hid != HID_GD_RESOLUTION_MULTIPLIER)
- continue;
+static void hidinput_change_resolution_multipliers(struct hid_device *hid)
+{
+ struct hid_report_enum *rep_enum;
+ struct hid_report *rep;
+ int ret;

- *rep->field[i]->value = logical_max;
- update_needed = true;
+ rep_enum = &hid->report_enum[HID_FEATURE_REPORT];
+ list_for_each_entry(rep, &rep_enum->report_list, list) {
+ bool update_needed = __hidinput_change_resolution_multipliers(hid,
+ rep, true);
+
+ if (update_needed) {
+ ret = __hid_request(hid, rep, HID_REQ_SET_REPORT);
+ if (ret) {
+ __hidinput_change_resolution_multipliers(hid,
+ rep, false);
+ return;
}
}
- if (update_needed)
- hid_hw_request(hid, rep, HID_REQ_SET_REPORT);
}

/* refresh our structs */
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index c02d4cad1893..1565a307170a 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -641,6 +641,13 @@ static void mt_store_field(struct hid_device *hdev,
if (*target != DEFAULT_TRUE &&
*target != DEFAULT_FALSE &&
*target != DEFAULT_ZERO) {
+ if (usage->contactid == DEFAULT_ZERO ||
+ usage->x == DEFAULT_ZERO ||
+ usage->y == DEFAULT_ZERO) {
+ hid_dbg(hdev,
+ "ignoring duplicate usage on incomplete");
+ return;
+ }
usage = mt_allocate_usage(hdev, application);
if (!usage)
return;
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 747730d32ab6..09b8e4aac82f 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -1236,13 +1236,13 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
/* Add back in missing bits of ID for non-USI pens */
wacom->id[0] |= (wacom->serial[0] >> 32) & 0xFFFFF;
}
- wacom->tool[0] = wacom_intuos_get_tool_type(wacom_intuos_id_mangle(wacom->id[0]));

for (i = 0; i < pen_frames; i++) {
unsigned char *frame = &data[i*pen_frame_len + 1];
bool valid = frame[0] & 0x80;
bool prox = frame[0] & 0x40;
bool range = frame[0] & 0x20;
+ bool invert = frame[0] & 0x10;

if (!valid)
continue;
@@ -1251,9 +1251,24 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
wacom->shared->stylus_in_proximity = false;
wacom_exit_report(wacom);
input_sync(pen_input);
+
+ wacom->tool[0] = 0;
+ wacom->id[0] = 0;
+ wacom->serial[0] = 0;
return;
}
+
if (range) {
+ if (!wacom->tool[0]) { /* first in range */
+ /* Going into range select tool */
+ if (invert)
+ wacom->tool[0] = BTN_TOOL_RUBBER;
+ else if (wacom->id[0])
+ wacom->tool[0] = wacom_intuos_get_tool_type(wacom->id[0]);
+ else
+ wacom->tool[0] = BTN_TOOL_PEN;
+ }
+
input_report_abs(pen_input, ABS_X, get_unaligned_le16(&frame[1]));
input_report_abs(pen_input, ABS_Y, get_unaligned_le16(&frame[3]));

@@ -1275,23 +1290,26 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
get_unaligned_le16(&frame[11]));
}
}
- input_report_abs(pen_input, ABS_PRESSURE, get_unaligned_le16(&frame[5]));
- if (wacom->features.type == INTUOSP2_BT) {
- input_report_abs(pen_input, ABS_DISTANCE,
- range ? frame[13] : wacom->features.distance_max);
- } else {
- input_report_abs(pen_input, ABS_DISTANCE,
- range ? frame[7] : wacom->features.distance_max);
- }

- input_report_key(pen_input, BTN_TOUCH, frame[0] & 0x01);
- input_report_key(pen_input, BTN_STYLUS, frame[0] & 0x02);
- input_report_key(pen_input, BTN_STYLUS2, frame[0] & 0x04);
+ if (wacom->tool[0]) {
+ input_report_abs(pen_input, ABS_PRESSURE, get_unaligned_le16(&frame[5]));
+ if (wacom->features.type == INTUOSP2_BT) {
+ input_report_abs(pen_input, ABS_DISTANCE,
+ range ? frame[13] : wacom->features.distance_max);
+ } else {
+ input_report_abs(pen_input, ABS_DISTANCE,
+ range ? frame[7] : wacom->features.distance_max);
+ }

- input_report_key(pen_input, wacom->tool[0], prox);
- input_event(pen_input, EV_MSC, MSC_SERIAL, wacom->serial[0]);
- input_report_abs(pen_input, ABS_MISC,
- wacom_intuos_id_mangle(wacom->id[0])); /* report tool id */
+ input_report_key(pen_input, BTN_TOUCH, frame[0] & 0x09);
+ input_report_key(pen_input, BTN_STYLUS, frame[0] & 0x02);
+ input_report_key(pen_input, BTN_STYLUS2, frame[0] & 0x04);
+
+ input_report_key(pen_input, wacom->tool[0], prox);
+ input_event(pen_input, EV_MSC, MSC_SERIAL, wacom->serial[0]);
+ input_report_abs(pen_input, ABS_MISC,
+ wacom_intuos_id_mangle(wacom->id[0])); /* report tool id */
+ }

wacom->shared->stylus_in_proximity = prox;

@@ -1353,11 +1371,17 @@ static void wacom_intuos_pro2_bt_touch(struct wacom_wac *wacom)
if (wacom->num_contacts_left <= 0) {
wacom->num_contacts_left = 0;
wacom->shared->touch_down = wacom_wac_finger_count_touches(wacom);
+ input_sync(touch_input);
}
}

- input_report_switch(touch_input, SW_MUTE_DEVICE, !(data[281] >> 7));
- input_sync(touch_input);
+ if (wacom->num_contacts_left == 0) {
+ // Be careful that we don't accidentally call input_sync with
+ // only a partial set of fingers processed
+ input_report_switch(touch_input, SW_MUTE_DEVICE, !(data[281] >> 7));
+ input_sync(touch_input);
+ }
+
}

static void wacom_intuos_pro2_bt_pad(struct wacom_wac *wacom)
@@ -1365,7 +1389,7 @@ static void wacom_intuos_pro2_bt_pad(struct wacom_wac *wacom)
struct input_dev *pad_input = wacom->pad_input;
unsigned char *data = wacom->data;

- int buttons = (data[282] << 1) | ((data[281] >> 6) & 0x01);
+ int buttons = data[282] | ((data[281] & 0x40) << 2);
int ring = data[285] & 0x7F;
bool ringstatus = data[285] & 0x80;
bool prox = buttons || ringstatus;
@@ -3814,7 +3838,7 @@ static void wacom_24hd_update_leds(struct wacom *wacom, int mask, int group)
static bool wacom_is_led_toggled(struct wacom *wacom, int button_count,
int mask, int group)
{
- int button_per_group;
+ int group_button;

/*
* 21UX2 has LED group 1 to the left and LED group 0
@@ -3824,9 +3848,12 @@ static bool wacom_is_led_toggled(struct wacom *wacom, int button_count,
if (wacom->wacom_wac.features.type == WACOM_21UX2)
group = 1 - group;

- button_per_group = button_count/wacom->led.count;
+ group_button = group * (button_count/wacom->led.count);
+
+ if (wacom->wacom_wac.features.type == INTUOSP2_BT)
+ group_button = 8;

- return mask & (1 << (group * button_per_group));
+ return mask & (1 << group_button);
}

static void wacom_update_led(struct wacom *wacom, int button_count, int mask,
diff --git a/drivers/i2c/busses/i2c-acorn.c b/drivers/i2c/busses/i2c-acorn.c
index f4a5ae69bf6a..fa3763e4b3ee 100644
--- a/drivers/i2c/busses/i2c-acorn.c
+++ b/drivers/i2c/busses/i2c-acorn.c
@@ -81,6 +81,7 @@ static struct i2c_algo_bit_data ioc_data = {

static struct i2c_adapter ioc_ops = {
.nr = 0,
+ .name = "ioc",
.algo_data = &ioc_data,
};

diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 045d93884164..7e63a15fa724 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -59,6 +59,15 @@

#include "arm-smmu-regs.h"

+/*
+ * Apparently, some Qualcomm arm64 platforms which appear to expose their SMMU
+ * global register space are still, in fact, using a hypervisor to mediate it
+ * by trapping and emulating register accesses. Sadly, some deployed versions
+ * of said trapping code have bugs wherein they go horribly wrong for stores
+ * using r31 (i.e. XZR/WZR) as the source register.
+ */
+#define QCOM_DUMMY_VAL -1
+
#define ARM_MMU500_ACTLR_CPRE (1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK (1 << 26)
@@ -422,7 +431,7 @@ static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu,
{
unsigned int spin_cnt, delay;

- writel_relaxed(0, sync);
+ writel_relaxed(QCOM_DUMMY_VAL, sync);
for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
if (!(readl_relaxed(status) & sTLBGSTATUS_GSACTIVE))
@@ -1760,8 +1769,8 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
}

/* Invalidate the TLB, just in case */
- writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
- writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
+ writel_relaxed(QCOM_DUMMY_VAL, gr0_base + ARM_SMMU_GR0_TLBIALLH);
+ writel_relaxed(QCOM_DUMMY_VAL, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);

reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);

diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
index 8f07fa6e1739..268f1b685084 100644
--- a/drivers/md/bcache/bset.c
+++ b/drivers/md/bcache/bset.c
@@ -887,12 +887,22 @@ unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
struct bset *i = bset_tree_last(b)->data;
struct bkey *m, *prev = NULL;
struct btree_iter iter;
+ struct bkey preceding_key_on_stack = ZERO_KEY;
+ struct bkey *preceding_key_p = &preceding_key_on_stack;

BUG_ON(b->ops->is_extents && !KEY_SIZE(k));

- m = bch_btree_iter_init(b, &iter, b->ops->is_extents
- ? PRECEDING_KEY(&START_KEY(k))
- : PRECEDING_KEY(k));
+ /*
+ * If k has preceding key, preceding_key_p will be set to address
+ * of k's preceding key; otherwise preceding_key_p will be set
+ * to NULL inside preceding_key().
+ */
+ if (b->ops->is_extents)
+ preceding_key(&START_KEY(k), &preceding_key_p);
+ else
+ preceding_key(k, &preceding_key_p);
+
+ m = bch_btree_iter_init(b, &iter, preceding_key_p);

if (b->ops->insert_fixup(b, k, &iter, replace_key))
return status;
diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h
index bac76aabca6d..c71365e7c1fa 100644
--- a/drivers/md/bcache/bset.h
+++ b/drivers/md/bcache/bset.h
@@ -434,20 +434,26 @@ static inline bool bch_cut_back(const struct bkey *where, struct bkey *k)
return __bch_cut_back(where, k);
}

-#define PRECEDING_KEY(_k) \
-({ \
- struct bkey *_ret = NULL; \
- \
- if (KEY_INODE(_k) || KEY_OFFSET(_k)) { \
- _ret = &KEY(KEY_INODE(_k), KEY_OFFSET(_k), 0); \
- \
- if (!_ret->low) \
- _ret->high--; \
- _ret->low--; \
- } \
- \
- _ret; \
-})
+/*
+ * Pointer '*preceding_key_p' points to a memory object used to store the
+ * preceding key of k. If the preceding key does not exist, '*preceding_key_p'
+ * is set to NULL. The caller of preceding_key() therefore needs to take care
+ * of the memory that '*preceding_key_p' points to before calling
+ * preceding_key(). Currently the only caller of preceding_key() is
+ * bch_btree_insert_key(), and it passes an on-stack variable, so the memory
+ * release is handled by the stack frame itself.
+ */
+static inline void preceding_key(struct bkey *k, struct bkey **preceding_key_p)
+{
+ if (KEY_INODE(k) || KEY_OFFSET(k)) {
+ (**preceding_key_p) = KEY(KEY_INODE(k), KEY_OFFSET(k), 0);
+ if (!(*preceding_key_p)->low)
+ (*preceding_key_p)->high--;
+ (*preceding_key_p)->low--;
+ } else {
+ (*preceding_key_p) = NULL;
+ }
+}

static inline bool bch_ptr_invalid(struct btree_keys *b, const struct bkey *k)
{
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 17bae9c14ca0..eb42dcf52277 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -431,8 +431,13 @@ STORE(bch_cached_dev)
bch_writeback_queue(dc);
}

+ /*
+ * Only set BCACHE_DEV_WB_RUNNING when the cached device is
+ * attached to a cache set; otherwise it doesn't make sense.
+ */
if (attr == &sysfs_writeback_percent)
- if (!test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))
+ if ((dc->disk.c != NULL) &&
+ (!test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)))
schedule_delayed_work(&dc->writeback_rate_update,
dc->writeback_rate_update_seconds * HZ);

diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
index fbdb4ecc7c50..7402c9834189 100644
--- a/drivers/media/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb-core/dvb_frontend.c
@@ -917,7 +917,7 @@ static void dvb_frontend_get_frequency_limits(struct dvb_frontend *fe,
"DVB: adapter %i frontend %u frequency limits undefined - fix the driver\n",
fe->dvb->num, fe->id);

- dprintk("frequency interval: tuner: %u...%u, frontend: %u...%u",
+ dev_dbg(fe->dvb->device, "frequency interval: tuner: %u...%u, frontend: %u...%u",
tuner_min, tuner_max, frontend_min, frontend_max);

/* If the standard is for satellite, convert frequencies to kHz */
diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
index de20bdaa148d..8b01257783dd 100644
--- a/drivers/misc/kgdbts.c
+++ b/drivers/misc/kgdbts.c
@@ -1135,7 +1135,7 @@ static void kgdbts_put_char(u8 chr)
static int param_set_kgdbts_var(const char *kmessage,
const struct kernel_param *kp)
{
- int len = strlen(kmessage);
+ size_t len = strlen(kmessage);

if (len >= MAX_CONFIG_LEN) {
printk(KERN_ERR "kgdbts: config string too long\n");
@@ -1155,7 +1155,7 @@ static int param_set_kgdbts_var(const char *kmessage,

strcpy(config, kmessage);
/* Chop out \n char as a result of echo */
- if (config[len - 1] == '\n')
+ if (len && config[len - 1] == '\n')
config[len - 1] = '\0';

/* Go and configure with the new params. */
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index a6535e226d84..d005ed12b4d1 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -3377,7 +3377,7 @@ static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
if (!err)
err = -ENODEV;

- dev_err(&pdev->dev, "failed to get macb_clk (%u)\n", err);
+ dev_err(&pdev->dev, "failed to get macb_clk (%d)\n", err);
return err;
}

@@ -3386,7 +3386,7 @@ static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
if (!err)
err = -ENODEV;

- dev_err(&pdev->dev, "failed to get hclk (%u)\n", err);
+ dev_err(&pdev->dev, "failed to get hclk (%d)\n", err);
return err;
}

@@ -3404,31 +3404,31 @@ static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,

err = clk_prepare_enable(*pclk);
if (err) {
- dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err);
+ dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err);
return err;
}

err = clk_prepare_enable(*hclk);
if (err) {
- dev_err(&pdev->dev, "failed to enable hclk (%u)\n", err);
+ dev_err(&pdev->dev, "failed to enable hclk (%d)\n", err);
goto err_disable_pclk;
}

err = clk_prepare_enable(*tx_clk);
if (err) {
- dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err);
+ dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
goto err_disable_hclk;
}

err = clk_prepare_enable(*rx_clk);
if (err) {
- dev_err(&pdev->dev, "failed to enable rx_clk (%u)\n", err);
+ dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
goto err_disable_txclk;
}

err = clk_prepare_enable(*tsu_clk);
if (err) {
- dev_err(&pdev->dev, "failed to enable tsu_clk (%u)\n", err);
+ dev_err(&pdev->dev, "failed to enable tsu_clk (%d)\n", err);
goto err_disable_rxclk;
}

@@ -3902,7 +3902,7 @@ static int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk,

err = clk_prepare_enable(*pclk);
if (err) {
- dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err);
+ dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err);
return err;
}

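The macb format-string fixes matter because the error codes being printed are negative errno values; %u shows their unsigned representation instead of the expected negative number. A tiny standalone illustration (the value is only an example):

#include <stdio.h>

int main(void)
{
	int err = -19;	/* -ENODEV */

	printf("failed to get clk (%u)\n", err);	/* typically prints 4294967277 */
	printf("failed to get clk (%d)\n", err);	/* prints -19 */
	return 0;
}
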
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 5bb9eb35d76d..491475d87736 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -313,7 +313,9 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
while (bds_to_clean && tx_frm_cnt < ENETC_DEFAULT_TX_WORK) {
bool is_eof = !!tx_swbd->skb;

- enetc_unmap_tx_buff(tx_ring, tx_swbd);
+ if (likely(tx_swbd->dma))
+ enetc_unmap_tx_buff(tx_ring, tx_swbd);
+
if (is_eof) {
napi_consume_skb(tx_swbd->skb, napi_budget);
tx_swbd->skb = NULL;
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index 3d8a70d3ea9b..3d71f1716390 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -437,17 +437,18 @@ static int ipheth_tx(struct sk_buff *skb, struct net_device *net)
dev);
dev->tx_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;

+ netif_stop_queue(net);
retval = usb_submit_urb(dev->tx_urb, GFP_ATOMIC);
if (retval) {
dev_err(&dev->intf->dev, "%s: usb_submit_urb: %d\n",
__func__, retval);
dev->net->stats.tx_errors++;
dev_kfree_skb_any(skb);
+ netif_wake_queue(net);
} else {
dev->net->stats.tx_packets++;
dev->net->stats.tx_bytes += skb->len;
dev_consume_skb_any(skb);
- netif_stop_queue(net);
}

return NETDEV_TX_OK;
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 7bbff0af29b2..99d5892ea98a 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -642,7 +642,7 @@ static struct attribute *nd_device_attributes[] = {
NULL,
};

-/**
+/*
* nd_device_attribute_group - generic attributes for all devices on an nd bus
*/
struct attribute_group nd_device_attribute_group = {
@@ -671,7 +671,7 @@ static umode_t nd_numa_attr_visible(struct kobject *kobj, struct attribute *a,
return a->mode;
}

-/**
+/*
* nd_numa_attribute_group - NUMA attributes for all devices on an nd bus
*/
struct attribute_group nd_numa_attribute_group = {
diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c
index 2030805aa216..edf278067e72 100644
--- a/drivers/nvdimm/label.c
+++ b/drivers/nvdimm/label.c
@@ -25,6 +25,8 @@ static guid_t nvdimm_btt2_guid;
static guid_t nvdimm_pfn_guid;
static guid_t nvdimm_dax_guid;

+static const char NSINDEX_SIGNATURE[] = "NAMESPACE_INDEX\0";
+
static u32 best_seq(u32 a, u32 b)
{
a &= NSINDEX_SEQ_MASK;
diff --git a/drivers/nvdimm/label.h b/drivers/nvdimm/label.h
index e9a2ad3c2150..4bb7add39580 100644
--- a/drivers/nvdimm/label.h
+++ b/drivers/nvdimm/label.h
@@ -38,8 +38,6 @@ enum {
ND_NSINDEX_INIT = 0x1,
};

-static const char NSINDEX_SIGNATURE[] = "NAMESPACE_INDEX\0";
-
/**
* struct nd_namespace_index - label set superblock
* @sig: NAMESPACE_INDEX\0
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 8782d86a8ca3..35d2202ee2fd 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1362,9 +1362,14 @@ static struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk,
{
#ifdef CONFIG_NVME_MULTIPATH
if (disk->fops == &nvme_ns_head_ops) {
+ struct nvme_ns *ns;
+
*head = disk->private_data;
*srcu_idx = srcu_read_lock(&(*head)->srcu);
- return nvme_find_path(*head);
+ ns = nvme_find_path(*head);
+ if (!ns)
+ srcu_read_unlock(&(*head)->srcu, *srcu_idx);
+ return ns;
}
#endif
*head = NULL;
@@ -1378,42 +1383,56 @@ static void nvme_put_ns_from_disk(struct nvme_ns_head *head, int idx)
srcu_read_unlock(&head->srcu, idx);
}

-static int nvme_ns_ioctl(struct nvme_ns *ns, unsigned cmd, unsigned long arg)
+static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
{
+ struct nvme_ns_head *head = NULL;
+ void __user *argp = (void __user *)arg;
+ struct nvme_ns *ns;
+ int srcu_idx, ret;
+
+ ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
+ if (unlikely(!ns))
+ return -EWOULDBLOCK;
+
+ /*
+ * Handle ioctls that apply to the controller instead of the namespace
+	 * separately and drop the ns SRCU reference early. This avoids a
+ * deadlock when deleting namespaces using the passthrough interface.
+ */
+ if (cmd == NVME_IOCTL_ADMIN_CMD || is_sed_ioctl(cmd)) {
+ struct nvme_ctrl *ctrl = ns->ctrl;
+
+ nvme_get_ctrl(ns->ctrl);
+ nvme_put_ns_from_disk(head, srcu_idx);
+
+ if (cmd == NVME_IOCTL_ADMIN_CMD)
+ ret = nvme_user_cmd(ctrl, NULL, argp);
+ else
+ ret = sed_ioctl(ctrl->opal_dev, cmd, argp);
+
+ nvme_put_ctrl(ctrl);
+ return ret;
+ }
+
switch (cmd) {
case NVME_IOCTL_ID:
force_successful_syscall_return();
- return ns->head->ns_id;
- case NVME_IOCTL_ADMIN_CMD:
- return nvme_user_cmd(ns->ctrl, NULL, (void __user *)arg);
+ ret = ns->head->ns_id;
+ break;
case NVME_IOCTL_IO_CMD:
- return nvme_user_cmd(ns->ctrl, ns, (void __user *)arg);
+ ret = nvme_user_cmd(ns->ctrl, ns, argp);
+ break;
case NVME_IOCTL_SUBMIT_IO:
- return nvme_submit_io(ns, (void __user *)arg);
+ ret = nvme_submit_io(ns, argp);
+ break;
default:
-#ifdef CONFIG_NVM
if (ns->ndev)
- return nvme_nvm_ioctl(ns, cmd, arg);
-#endif
- if (is_sed_ioctl(cmd))
- return sed_ioctl(ns->ctrl->opal_dev, cmd,
- (void __user *) arg);
- return -ENOTTY;
+ ret = nvme_nvm_ioctl(ns, cmd, arg);
+ else
+ ret = -ENOTTY;
}
-}

-static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg)
-{
- struct nvme_ns_head *head = NULL;
- struct nvme_ns *ns;
- int srcu_idx, ret;
-
- ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
- if (unlikely(!ns))
- ret = -EWOULDBLOCK;
- else
- ret = nvme_ns_ioctl(ns, cmd, arg);
nvme_put_ns_from_disk(head, srcu_idx);
return ret;
}
@@ -3680,6 +3699,7 @@ EXPORT_SYMBOL_GPL(nvme_start_ctrl);

void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
{
+ dev_pm_qos_hide_latency_tolerance(ctrl->device);
cdev_device_del(&ctrl->cdev, ctrl->device);
}
EXPORT_SYMBOL_GPL(nvme_uninit_ctrl);
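
The nvme_ioctl() rework takes a controller reference and drops the namespace SRCU read-side reference before running controller-level commands, so those commands can safely wait for namespace removal. A loose userspace analogue of that "pin, unlock early, then operate" pattern (every name here is invented for illustration):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;

struct ctrl {
	int refcount;	/* toy refcount; a real one needs its own synchronization */
};

static struct ctrl the_ctrl = { .refcount = 1 };

static void long_admin_operation(struct ctrl *c)
{
	/* May block or need table_lock for write (e.g. to delete an entry),
	 * which would deadlock if we still held it for read. */
	printf("admin op on ctrl with refcount %d\n", c->refcount);
}

int main(void)
{
	struct ctrl *c;

	pthread_rwlock_rdlock(&table_lock);
	c = &the_ctrl;				/* lookup under the read lock */
	c->refcount++;				/* pin the object ... */
	pthread_rwlock_unlock(&table_lock);	/* ... then drop the lock early */

	long_admin_operation(c);
	c->refcount--;
	return 0;
}
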
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 372d3f4a106a..693f2a856200 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -500,7 +500,7 @@ static int nvme_pci_map_queues(struct blk_mq_tag_set *set)
* affinity), so use the regular blk-mq cpu mapping
*/
map->queue_offset = qoff;
- if (i != HCTX_TYPE_POLL)
+ if (i != HCTX_TYPE_POLL && offset)
blk_mq_pci_map_queues(map, to_pci_dev(dev->dev), offset);
else
blk_mq_map_queues(map);
@@ -2400,7 +2400,7 @@ static void nvme_pci_disable(struct nvme_dev *dev)

static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
{
- bool dead = true;
+ bool dead = true, freeze = false;
struct pci_dev *pdev = to_pci_dev(dev->dev);

mutex_lock(&dev->shutdown_lock);
@@ -2408,8 +2408,10 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
u32 csts = readl(dev->bar + NVME_REG_CSTS);

if (dev->ctrl.state == NVME_CTRL_LIVE ||
- dev->ctrl.state == NVME_CTRL_RESETTING)
+ dev->ctrl.state == NVME_CTRL_RESETTING) {
+ freeze = true;
nvme_start_freeze(&dev->ctrl);
+ }
dead = !!((csts & NVME_CSTS_CFS) || !(csts & NVME_CSTS_RDY) ||
pdev->error_state != pci_channel_io_normal);
}
@@ -2418,10 +2420,8 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
* Give the controller a chance to complete all entered requests if
* doing a safe shutdown.
*/
- if (!dead) {
- if (shutdown)
- nvme_wait_freeze_timeout(&dev->ctrl, NVME_IO_TIMEOUT);
- }
+ if (!dead && shutdown && freeze)
+ nvme_wait_freeze_timeout(&dev->ctrl, NVME_IO_TIMEOUT);

nvme_stop_queues(&dev->ctrl);

diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c
index 7cb766dafe85..e120f933412a 100644
--- a/drivers/perf/arm_spe_pmu.c
+++ b/drivers/perf/arm_spe_pmu.c
@@ -855,16 +855,8 @@ static void *arm_spe_pmu_setup_aux(struct perf_event *event, void **pages,
if (!pglist)
goto out_free_buf;

- for (i = 0; i < nr_pages; ++i) {
- struct page *page = virt_to_page(pages[i]);
-
- if (PagePrivate(page)) {
- pr_warn("unexpected high-order page for auxbuf!");
- goto out_free_pglist;
- }
-
+ for (i = 0; i < nr_pages; ++i)
pglist[i] = virt_to_page(pages[i]);
- }

buf->base = vmap(pglist, nr_pages, VM_MAP, PAGE_KERNEL);
if (!buf->base)
diff --git a/drivers/platform/x86/pmc_atom.c b/drivers/platform/x86/pmc_atom.c
index c7039f52ad51..b1d804376237 100644
--- a/drivers/platform/x86/pmc_atom.c
+++ b/drivers/platform/x86/pmc_atom.c
@@ -398,12 +398,45 @@ static int pmc_dbgfs_register(struct pmc_dev *pmc)
*/
static const struct dmi_system_id critclk_systems[] = {
{
+ /* pmc_plt_clk0 is used for an external HSIC USB HUB */
.ident = "MPL CEC1x",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "MPL AG"),
DMI_MATCH(DMI_PRODUCT_NAME, "CEC10 Family"),
},
},
+ {
+ /* pmc_plt_clk0 - 3 are used for the 4 ethernet controllers */
+ .ident = "Lex 3I380D",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Lex BayTrail"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "3I380D"),
+ },
+ },
+ {
+ /* pmc_plt_clk* - are used for ethernet controllers */
+ .ident = "Beckhoff CB3163",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"),
+ DMI_MATCH(DMI_BOARD_NAME, "CB3163"),
+ },
+ },
+ {
+ /* pmc_plt_clk* - are used for ethernet controllers */
+ .ident = "Beckhoff CB6263",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"),
+ DMI_MATCH(DMI_BOARD_NAME, "CB6263"),
+ },
+ },
+ {
+ /* pmc_plt_clk* - are used for ethernet controllers */
+ .ident = "Beckhoff CB6363",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"),
+ DMI_MATCH(DMI_BOARD_NAME, "CB6363"),
+ },
+ },
{ /*sentinel*/ }
};

diff --git a/drivers/ras/cec.c b/drivers/ras/cec.c
index 2d9ec378a8bc..f85d6b7a1984 100644
--- a/drivers/ras/cec.c
+++ b/drivers/ras/cec.c
@@ -2,6 +2,7 @@
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
+#include <linux/workqueue.h>

#include <asm/mce.h>

@@ -123,16 +124,12 @@ static u64 dfs_pfn;
/* Amount of errors after which we offline */
static unsigned int count_threshold = COUNT_MASK;

-/*
- * The timer "decays" element count each timer_interval which is 24hrs by
- * default.
- */
-
-#define CEC_TIMER_DEFAULT_INTERVAL 24 * 60 * 60 /* 24 hrs */
-#define CEC_TIMER_MIN_INTERVAL 1 * 60 * 60 /* 1h */
-#define CEC_TIMER_MAX_INTERVAL 30 * 24 * 60 * 60 /* one month */
-static struct timer_list cec_timer;
-static u64 timer_interval = CEC_TIMER_DEFAULT_INTERVAL;
+/* Each element "decays" each decay_interval which is 24hrs by default. */
+#define CEC_DECAY_DEFAULT_INTERVAL 24 * 60 * 60 /* 24 hrs */
+#define CEC_DECAY_MIN_INTERVAL 1 * 60 * 60 /* 1h */
+#define CEC_DECAY_MAX_INTERVAL 30 * 24 * 60 * 60 /* one month */
+static struct delayed_work cec_work;
+static u64 decay_interval = CEC_DECAY_DEFAULT_INTERVAL;

/*
* Decrement decay value. We're using DECAY_BITS bits to denote decay of an
@@ -160,20 +157,21 @@ static void do_spring_cleaning(struct ce_array *ca)
/*
* @interval in seconds
*/
-static void cec_mod_timer(struct timer_list *t, unsigned long interval)
+static void cec_mod_work(unsigned long interval)
{
unsigned long iv;

- iv = interval * HZ + jiffies;
-
- mod_timer(t, round_jiffies(iv));
+ iv = interval * HZ;
+ mod_delayed_work(system_wq, &cec_work, round_jiffies(iv));
}

-static void cec_timer_fn(struct timer_list *unused)
+static void cec_work_fn(struct work_struct *work)
{
+ mutex_lock(&ce_mutex);
do_spring_cleaning(&ce_arr);
+ mutex_unlock(&ce_mutex);

- cec_mod_timer(&cec_timer, timer_interval);
+ cec_mod_work(decay_interval);
}

/*
@@ -183,32 +181,38 @@ static void cec_timer_fn(struct timer_list *unused)
*/
static int __find_elem(struct ce_array *ca, u64 pfn, unsigned int *to)
{
+ int min = 0, max = ca->n - 1;
u64 this_pfn;
- int min = 0, max = ca->n;

- while (min < max) {
- int tmp = (max + min) >> 1;
+ while (min <= max) {
+ int i = (min + max) >> 1;

- this_pfn = PFN(ca->array[tmp]);
+ this_pfn = PFN(ca->array[i]);

if (this_pfn < pfn)
- min = tmp + 1;
+ min = i + 1;
else if (this_pfn > pfn)
- max = tmp;
- else {
- min = tmp;
- break;
+ max = i - 1;
+ else if (this_pfn == pfn) {
+ if (to)
+ *to = i;
+
+ return i;
}
}

+ /*
+ * When the loop terminates without finding @pfn, min has the index of
+ * the element slot where the new @pfn should be inserted. The loop
+ * terminates when min > max, which means the min index points to the
+ * bigger element while the max index to the smaller element, in-between
+ * which the new @pfn belongs to.
+ *
+ * For more details, see exercise 1, Section 6.2.1 in TAOCP, vol. 3.
+ */
if (to)
*to = min;

- this_pfn = PFN(ca->array[min]);
-
- if (this_pfn == pfn)
- return min;
-
return -ENOKEY;
}

@@ -374,15 +378,15 @@ static int decay_interval_set(void *data, u64 val)
{
*(u64 *)data = val;

- if (val < CEC_TIMER_MIN_INTERVAL)
+ if (val < CEC_DECAY_MIN_INTERVAL)
return -EINVAL;

- if (val > CEC_TIMER_MAX_INTERVAL)
+ if (val > CEC_DECAY_MAX_INTERVAL)
return -EINVAL;

- timer_interval = val;
+ decay_interval = val;

- cec_mod_timer(&cec_timer, timer_interval);
+ cec_mod_work(decay_interval);
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(decay_interval_ops, u64_get, decay_interval_set, "%lld\n");
@@ -426,7 +430,7 @@ static int array_dump(struct seq_file *m, void *v)

seq_printf(m, "Flags: 0x%x\n", ca->flags);

- seq_printf(m, "Timer interval: %lld seconds\n", timer_interval);
+ seq_printf(m, "Decay interval: %lld seconds\n", decay_interval);
seq_printf(m, "Decays: %lld\n", ca->decays_done);

seq_printf(m, "Action threshold: %d\n", count_threshold);
@@ -472,7 +476,7 @@ static int __init create_debugfs_nodes(void)
}

decay = debugfs_create_file("decay_interval", S_IRUSR | S_IWUSR, d,
- &timer_interval, &decay_interval_ops);
+ &decay_interval, &decay_interval_ops);
if (!decay) {
pr_warn("Error creating decay_interval debugfs node!\n");
goto err;
@@ -508,8 +512,8 @@ void __init cec_init(void)
if (create_debugfs_nodes())
return;

- timer_setup(&cec_timer, cec_timer_fn, 0);
- cec_mod_timer(&cec_timer, CEC_TIMER_DEFAULT_INTERVAL);
+ INIT_DELAYED_WORK(&cec_work, cec_work_fn);
+ schedule_delayed_work(&cec_work, CEC_DECAY_DEFAULT_INTERVAL);

pr_info("Correctable Errors collector initialized.\n");
}
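
The cec.c rewrite replaces the old lookup with a textbook binary search whose exit value of min doubles as the insertion slot, as the new comment explains. A self-contained userspace sketch of that loop (the array type and names are illustrative, not the driver's):

#include <stdio.h>

/* Returns the index of @key, or -1 while storing the insertion slot in *to,
 * mirroring the rewritten __find_elem(). */
static int find_elem(const unsigned long *arr, int n, unsigned long key, int *to)
{
	int min = 0, max = n - 1;

	while (min <= max) {
		int i = (min + max) >> 1;

		if (arr[i] < key)
			min = i + 1;
		else if (arr[i] > key)
			max = i - 1;
		else {
			*to = i;
			return i;
		}
	}

	/* min > max: min now indexes the first element bigger than @key. */
	*to = min;
	return -1;
}

int main(void)
{
	unsigned long a[] = { 2, 5, 9, 17 };
	int slot;

	printf("%d\n", find_elem(a, 4, 9, &slot));		/* 2 */
	printf("%d %d\n", find_elem(a, 4, 7, &slot), slot);	/* -1 2 */
	return 0;
}
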
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index 039328d9ef13..30e6d78e82f0 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -830,7 +830,7 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
((u64)err_entry->data.err_warn_bitmap_hi << 32) |
(u64)err_entry->data.err_warn_bitmap_lo;
for (i = 0; i < BNX2FC_NUM_ERR_BITS; i++) {
- if (err_warn_bit_map & (u64) (1 << i)) {
+ if (err_warn_bit_map & ((u64)1 << i)) {
err_warn = i;
break;
}
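
The bnx2fc change casts before shifting: (1 << i) is computed as a 32-bit int, which is undefined once i reaches 31 and can never produce bits 32..63, whereas ((u64)1 << i) performs the shift in 64 bits. A standalone demonstration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t map = (uint64_t)1 << 40;	/* bit 40 set */
	int i;

	for (i = 0; i < 64; i++) {
		/* (1 << i) would be a 32-bit shift: undefined for i >= 31 and
		 * unable to reach bits 32..63 even when it "works". */
		if (map & ((uint64_t)1 << i)) {
			printf("first set bit: %d\n", i);
			break;
		}
	}
	return 0;
}
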
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index a09a742d7ec1..26a22e41204e 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -159,6 +159,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
int i;
int len = 0;
char tmp[LPFC_MAX_NVME_INFO_TMP_LEN] = {0};
+ unsigned long iflags = 0;

if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
len = scnprintf(buf, PAGE_SIZE, "NVME Disabled\n");
@@ -337,7 +338,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
phba->sli4_hba.io_xri_max,
lpfc_sli4_get_els_iocb_cnt(phba));
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
- goto buffer_done;
+ goto rcu_unlock_buf_done;

/* Port state is only one of two values for now. */
if (localport->port_id)
@@ -353,15 +354,15 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
wwn_to_u64(vport->fc_nodename.u.wwn),
localport->port_id, statep);
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
- goto buffer_done;
+ goto rcu_unlock_buf_done;

list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
nrport = NULL;
- spin_lock(&vport->phba->hbalock);
+ spin_lock_irqsave(&vport->phba->hbalock, iflags);
rport = lpfc_ndlp_get_nrport(ndlp);
if (rport)
nrport = rport->remoteport;
- spin_unlock(&vport->phba->hbalock);
+ spin_unlock_irqrestore(&vport->phba->hbalock, iflags);
if (!nrport)
continue;

@@ -380,39 +381,39 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,

/* Tab in to show lport ownership. */
if (strlcat(buf, "NVME RPORT ", PAGE_SIZE) >= PAGE_SIZE)
- goto buffer_done;
+ goto rcu_unlock_buf_done;
if (phba->brd_no >= 10) {
if (strlcat(buf, " ", PAGE_SIZE) >= PAGE_SIZE)
- goto buffer_done;
+ goto rcu_unlock_buf_done;
}

scnprintf(tmp, sizeof(tmp), "WWPN x%llx ",
nrport->port_name);
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
- goto buffer_done;
+ goto rcu_unlock_buf_done;

scnprintf(tmp, sizeof(tmp), "WWNN x%llx ",
nrport->node_name);
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
- goto buffer_done;
+ goto rcu_unlock_buf_done;

scnprintf(tmp, sizeof(tmp), "DID x%06x ",
nrport->port_id);
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
- goto buffer_done;
+ goto rcu_unlock_buf_done;

/* An NVME rport can have multiple roles. */
if (nrport->port_role & FC_PORT_ROLE_NVME_INITIATOR) {
if (strlcat(buf, "INITIATOR ", PAGE_SIZE) >= PAGE_SIZE)
- goto buffer_done;
+ goto rcu_unlock_buf_done;
}
if (nrport->port_role & FC_PORT_ROLE_NVME_TARGET) {
if (strlcat(buf, "TARGET ", PAGE_SIZE) >= PAGE_SIZE)
- goto buffer_done;
+ goto rcu_unlock_buf_done;
}
if (nrport->port_role & FC_PORT_ROLE_NVME_DISCOVERY) {
if (strlcat(buf, "DISCSRVC ", PAGE_SIZE) >= PAGE_SIZE)
- goto buffer_done;
+ goto rcu_unlock_buf_done;
}
if (nrport->port_role & ~(FC_PORT_ROLE_NVME_INITIATOR |
FC_PORT_ROLE_NVME_TARGET |
@@ -420,12 +421,12 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
scnprintf(tmp, sizeof(tmp), "UNKNOWN ROLE x%x",
nrport->port_role);
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
- goto buffer_done;
+ goto rcu_unlock_buf_done;
}

scnprintf(tmp, sizeof(tmp), "%s\n", statep);
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
- goto buffer_done;
+ goto rcu_unlock_buf_done;
}
rcu_read_unlock();

@@ -487,7 +488,13 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
atomic_read(&lport->cmpl_fcp_err));
strlcat(buf, tmp, PAGE_SIZE);

-buffer_done:
+ /* RCU is already unlocked. */
+ goto buffer_done;
+
+ rcu_unlock_buf_done:
+ rcu_read_unlock();
+
+ buffer_done:
len = strnlen(buf, PAGE_SIZE);

if (unlikely(len >= (PAGE_SIZE - 1))) {
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index fc077cb87900..965f8a1a8f67 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -7338,7 +7338,10 @@ int
lpfc_send_rrq(struct lpfc_hba *phba, struct lpfc_node_rrq *rrq)
{
struct lpfc_nodelist *ndlp = lpfc_findnode_did(rrq->vport,
- rrq->nlp_DID);
+ rrq->nlp_DID);
+ if (!ndlp)
+ return 1;
+
if (lpfc_test_rrq_active(phba, ndlp, rrq->xritag))
return lpfc_issue_els_rrq(rrq->vport, ndlp,
rrq->nlp_DID, rrq);
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index dc933b6d7800..363b21c4255e 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -994,15 +994,14 @@ lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
* @ndlp: Targets nodelist pointer for this exchange.
* @xritag the xri in the bitmap to test.
*
- * This function is called with hbalock held. This function
- * returns 0 = rrq not active for this xri
- * 1 = rrq is valid for this xri.
+ * This function returns:
+ * 0 = rrq not active for this xri
+ * 1 = rrq is valid for this xri.
**/
int
lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
uint16_t xritag)
{
- lockdep_assert_held(&phba->hbalock);
if (!ndlp)
return 0;
if (!ndlp->active_rrqs_xri_bitmap)
@@ -1105,10 +1104,11 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
* @phba: Pointer to HBA context object.
* @piocb: Pointer to the iocbq.
*
- * This function is called with the ring lock held. This function
- * gets a new driver sglq object from the sglq list. If the
- * list is not empty then it is successful, it returns pointer to the newly
- * allocated sglq object else it returns NULL.
+ * The driver calls this function with either the nvme ls ring lock
+ * or the fc els ring lock held depending on the iocb usage. This function
+ * gets a new driver sglq object from the sglq list. If the list is not empty
+ * then it is successful, it returns pointer to the newly allocated sglq
+ * object else it returns NULL.
**/
static struct lpfc_sglq *
__lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
@@ -1118,9 +1118,15 @@ __lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
struct lpfc_sglq *start_sglq = NULL;
struct lpfc_io_buf *lpfc_cmd;
struct lpfc_nodelist *ndlp;
+ struct lpfc_sli_ring *pring = NULL;
int found = 0;

- lockdep_assert_held(&phba->hbalock);
+ if (piocbq->iocb_flag & LPFC_IO_NVME_LS)
+ pring = phba->sli4_hba.nvmels_wq->pring;
+ else
+ pring = lpfc_phba_elsring(phba);
+
+ lockdep_assert_held(&pring->ring_lock);

if (piocbq->iocb_flag & LPFC_IO_FCP) {
lpfc_cmd = (struct lpfc_io_buf *) piocbq->context1;
@@ -1563,7 +1569,8 @@ lpfc_sli_ring_map(struct lpfc_hba *phba)
* @pring: Pointer to driver SLI ring object.
* @piocb: Pointer to the driver iocb object.
*
- * This function is called with hbalock held. The function adds the
+ * The driver calls this function with the hbalock held for SLI3 ports or
+ * the ring lock held for SLI4 ports. The function adds the
* new iocb to txcmplq of the given ring. This function always returns
* 0. If this function is called for ELS ring, this function checks if
* there is a vport associated with the ELS command. This function also
@@ -1573,7 +1580,10 @@ static int
lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *piocb)
{
- lockdep_assert_held(&phba->hbalock);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ lockdep_assert_held(&pring->ring_lock);
+ else
+ lockdep_assert_held(&phba->hbalock);

BUG_ON(!piocb);

@@ -2970,8 +2980,8 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
*
* This function looks up the iocb_lookup table to get the command iocb
* corresponding to the given response iocb using the iotag of the
- * response iocb. This function is called with the hbalock held
- * for sli3 devices or the ring_lock for sli4 devices.
+ * response iocb. The driver calls this function with the hbalock held
+ * for SLI3 ports or the ring lock held for SLI4 ports.
* This function returns the command iocb object if it finds the command
* iocb else returns NULL.
**/
@@ -2982,8 +2992,15 @@ lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
{
struct lpfc_iocbq *cmd_iocb = NULL;
uint16_t iotag;
- lockdep_assert_held(&phba->hbalock);
+ spinlock_t *temp_lock = NULL;
+ unsigned long iflag = 0;

+ if (phba->sli_rev == LPFC_SLI_REV4)
+ temp_lock = &pring->ring_lock;
+ else
+ temp_lock = &phba->hbalock;
+
+ spin_lock_irqsave(temp_lock, iflag);
iotag = prspiocb->iocb.ulpIoTag;

if (iotag != 0 && iotag <= phba->sli.last_iotag) {
@@ -2993,10 +3010,12 @@ lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
list_del_init(&cmd_iocb->list);
cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
pring->txcmplq_cnt--;
+ spin_unlock_irqrestore(temp_lock, iflag);
return cmd_iocb;
}
}

+ spin_unlock_irqrestore(temp_lock, iflag);
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0317 iotag x%x is out of "
"range: max iotag x%x wd0 x%x\n",
@@ -3012,8 +3031,8 @@ lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
* @iotag: IOCB tag.
*
* This function looks up the iocb_lookup table to get the command iocb
- * corresponding to the given iotag. This function is called with the
- * hbalock held.
+ * corresponding to the given iotag. The driver calls this function with
+ * the ring lock held because this function is an SLI4 port only helper.
* This function returns the command iocb object if it finds the command
* iocb else returns NULL.
**/
@@ -3022,8 +3041,15 @@ lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
struct lpfc_sli_ring *pring, uint16_t iotag)
{
struct lpfc_iocbq *cmd_iocb = NULL;
+ spinlock_t *temp_lock = NULL;
+ unsigned long iflag = 0;

- lockdep_assert_held(&phba->hbalock);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ temp_lock = &pring->ring_lock;
+ else
+ temp_lock = &phba->hbalock;
+
+ spin_lock_irqsave(temp_lock, iflag);
if (iotag != 0 && iotag <= phba->sli.last_iotag) {
cmd_iocb = phba->sli.iocbq_lookup[iotag];
if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
@@ -3031,10 +3057,12 @@ lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
list_del_init(&cmd_iocb->list);
cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
pring->txcmplq_cnt--;
+ spin_unlock_irqrestore(temp_lock, iflag);
return cmd_iocb;
}
}

+ spin_unlock_irqrestore(temp_lock, iflag);
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0372 iotag x%x lookup error: max iotag (x%x) "
"iocb_flag x%x\n",
@@ -3068,17 +3096,7 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
int rc = 1;
unsigned long iflag;

- /* Based on the iotag field, get the cmd IOCB from the txcmplq */
- if (phba->sli_rev == LPFC_SLI_REV4)
- spin_lock_irqsave(&pring->ring_lock, iflag);
- else
- spin_lock_irqsave(&phba->hbalock, iflag);
cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
- if (phba->sli_rev == LPFC_SLI_REV4)
- spin_unlock_irqrestore(&pring->ring_lock, iflag);
- else
- spin_unlock_irqrestore(&phba->hbalock, iflag);
-
if (cmdiocbp) {
if (cmdiocbp->iocb_cmpl) {
/*
@@ -3409,8 +3427,10 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
break;
}

+ spin_unlock_irqrestore(&phba->hbalock, iflag);
cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
&rspiocbq);
+ spin_lock_irqsave(&phba->hbalock, iflag);
if (unlikely(!cmdiocbq))
break;
if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
@@ -3604,9 +3624,12 @@ lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,

case LPFC_ABORT_IOCB:
cmdiocbp = NULL;
- if (irsp->ulpCommand != CMD_XRI_ABORTED_CX)
+ if (irsp->ulpCommand != CMD_XRI_ABORTED_CX) {
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
saveq);
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ }
if (cmdiocbp) {
/* Call the specified completion routine */
if (cmdiocbp->iocb_cmpl) {
@@ -13070,13 +13093,11 @@ lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
return NULL;

wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
- spin_lock_irqsave(&pring->ring_lock, iflags);
pring->stats.iocb_event++;
/* Look up the ELS command IOCB and create pseudo response IOCB */
cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
bf_get(lpfc_wcqe_c_request_tag, wcqe));
if (unlikely(!cmdiocbq)) {
- spin_unlock_irqrestore(&pring->ring_lock, iflags);
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"0386 ELS complete with no corresponding "
"cmdiocb: 0x%x 0x%x 0x%x 0x%x\n",
@@ -13086,6 +13107,7 @@ lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
return NULL;
}

+ spin_lock_irqsave(&pring->ring_lock, iflags);
/* Put the iocb back on the txcmplq */
lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
spin_unlock_irqrestore(&pring->ring_lock, iflags);
@@ -13856,9 +13878,9 @@ lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
/* Look up the FCP command IOCB and create pseudo response IOCB */
spin_lock_irqsave(&pring->ring_lock, iflags);
pring->stats.iocb_event++;
+ spin_unlock_irqrestore(&pring->ring_lock, iflags);
cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
bf_get(lpfc_wcqe_c_request_tag, wcqe));
- spin_unlock_irqrestore(&pring->ring_lock, iflags);
if (unlikely(!cmdiocbq)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"0374 FCP complete with no corresponding "
diff --git a/drivers/scsi/myrs.c b/drivers/scsi/myrs.c
index b8d54ef8cf6d..eb0dd566330a 100644
--- a/drivers/scsi/myrs.c
+++ b/drivers/scsi/myrs.c
@@ -818,7 +818,7 @@ static void myrs_log_event(struct myrs_hba *cs, struct myrs_event *ev)
unsigned char ev_type, *ev_msg;
struct Scsi_Host *shost = cs->host;
struct scsi_device *sdev;
- struct scsi_sense_hdr sshdr;
+ struct scsi_sense_hdr sshdr = {0};
unsigned char sense_info[4];
unsigned char cmd_specific[4];

diff --git a/drivers/scsi/qedi/qedi_dbg.c b/drivers/scsi/qedi/qedi_dbg.c
index 8fd28b056f73..3383314a3882 100644
--- a/drivers/scsi/qedi/qedi_dbg.c
+++ b/drivers/scsi/qedi/qedi_dbg.c
@@ -16,10 +16,6 @@ qedi_dbg_err(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
{
va_list va;
struct va_format vaf;
- char nfunc[32];
-
- memset(nfunc, 0, sizeof(nfunc));
- memcpy(nfunc, func, sizeof(nfunc) - 1);

va_start(va, fmt);

@@ -28,9 +24,9 @@ qedi_dbg_err(struct qedi_dbg_ctx *qedi, const char *func, u32 line,

if (likely(qedi) && likely(qedi->pdev))
pr_err("[%s]:[%s:%d]:%d: %pV", dev_name(&qedi->pdev->dev),
- nfunc, line, qedi->host_no, &vaf);
+ func, line, qedi->host_no, &vaf);
else
- pr_err("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
+ pr_err("[0000:00:00.0]:[%s:%d]: %pV", func, line, &vaf);

va_end(va);
}
@@ -41,10 +37,6 @@ qedi_dbg_warn(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
{
va_list va;
struct va_format vaf;
- char nfunc[32];
-
- memset(nfunc, 0, sizeof(nfunc));
- memcpy(nfunc, func, sizeof(nfunc) - 1);

va_start(va, fmt);

@@ -56,9 +48,9 @@ qedi_dbg_warn(struct qedi_dbg_ctx *qedi, const char *func, u32 line,

if (likely(qedi) && likely(qedi->pdev))
pr_warn("[%s]:[%s:%d]:%d: %pV", dev_name(&qedi->pdev->dev),
- nfunc, line, qedi->host_no, &vaf);
+ func, line, qedi->host_no, &vaf);
else
- pr_warn("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
+ pr_warn("[0000:00:00.0]:[%s:%d]: %pV", func, line, &vaf);

ret:
va_end(va);
@@ -70,10 +62,6 @@ qedi_dbg_notice(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
{
va_list va;
struct va_format vaf;
- char nfunc[32];
-
- memset(nfunc, 0, sizeof(nfunc));
- memcpy(nfunc, func, sizeof(nfunc) - 1);

va_start(va, fmt);

@@ -85,10 +73,10 @@ qedi_dbg_notice(struct qedi_dbg_ctx *qedi, const char *func, u32 line,

if (likely(qedi) && likely(qedi->pdev))
pr_notice("[%s]:[%s:%d]:%d: %pV",
- dev_name(&qedi->pdev->dev), nfunc, line,
+ dev_name(&qedi->pdev->dev), func, line,
qedi->host_no, &vaf);
else
- pr_notice("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
+ pr_notice("[0000:00:00.0]:[%s:%d]: %pV", func, line, &vaf);

ret:
va_end(va);
@@ -100,10 +88,6 @@ qedi_dbg_info(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
{
va_list va;
struct va_format vaf;
- char nfunc[32];
-
- memset(nfunc, 0, sizeof(nfunc));
- memcpy(nfunc, func, sizeof(nfunc) - 1);

va_start(va, fmt);

@@ -115,9 +99,9 @@ qedi_dbg_info(struct qedi_dbg_ctx *qedi, const char *func, u32 line,

if (likely(qedi) && likely(qedi->pdev))
pr_info("[%s]:[%s:%d]:%d: %pV", dev_name(&qedi->pdev->dev),
- nfunc, line, qedi->host_no, &vaf);
+ func, line, qedi->host_no, &vaf);
else
- pr_info("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
+ pr_info("[0000:00:00.0]:[%s:%d]: %pV", func, line, &vaf);

ret:
va_end(va);
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
index bf371e7b957d..c3d0d246df14 100644
--- a/drivers/scsi/qedi/qedi_iscsi.c
+++ b/drivers/scsi/qedi/qedi_iscsi.c
@@ -809,8 +809,6 @@ qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
struct qedi_endpoint *qedi_ep;
struct sockaddr_in *addr;
struct sockaddr_in6 *addr6;
- struct qed_dev *cdev = NULL;
- struct qedi_uio_dev *udev = NULL;
struct iscsi_path path_req;
u32 msg_type = ISCSI_KEVENT_IF_DOWN;
u32 iscsi_cid = QEDI_CID_RESERVED;
@@ -830,8 +828,6 @@ qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
}

qedi = iscsi_host_priv(shost);
- cdev = qedi->cdev;
- udev = qedi->udev;

if (test_bit(QEDI_IN_OFFLINE, &qedi->flags) ||
test_bit(QEDI_IN_RECOVERY, &qedi->flags)) {
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 91f576d743fe..d377e50a6c19 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -6838,6 +6838,78 @@ qla2x00_release_firmware(void)
mutex_unlock(&qla_fw_lock);
}

+static void qla_pci_error_cleanup(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+ struct qla_qpair *qpair = NULL;
+ struct scsi_qla_host *vp;
+ fc_port_t *fcport;
+ int i;
+ unsigned long flags;
+
+ ha->chip_reset++;
+
+ ha->base_qpair->chip_reset = ha->chip_reset;
+ for (i = 0; i < ha->max_qpairs; i++) {
+ if (ha->queue_pair_map[i])
+ ha->queue_pair_map[i]->chip_reset =
+ ha->base_qpair->chip_reset;
+ }
+
+ /* purge MBox commands */
+ if (atomic_read(&ha->num_pend_mbx_stage3)) {
+ clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
+ complete(&ha->mbx_intr_comp);
+ }
+
+ i = 0;
+
+ while (atomic_read(&ha->num_pend_mbx_stage3) ||
+ atomic_read(&ha->num_pend_mbx_stage2) ||
+ atomic_read(&ha->num_pend_mbx_stage1)) {
+ msleep(20);
+ i++;
+ if (i > 50)
+ break;
+ }
+
+ ha->flags.purge_mbox = 0;
+
+ mutex_lock(&ha->mq_lock);
+ list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem)
+ qpair->online = 0;
+ mutex_unlock(&ha->mq_lock);
+
+ qla2x00_mark_all_devices_lost(vha, 0);
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ list_for_each_entry(vp, &ha->vp_list, list) {
+ atomic_inc(&vp->vref_count);
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+ qla2x00_mark_all_devices_lost(vp, 0);
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ atomic_dec(&vp->vref_count);
+ }
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+ /* Clear all async request states across all VPs. */
+ list_for_each_entry(fcport, &vha->vp_fcports, list)
+ fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ list_for_each_entry(vp, &ha->vp_list, list) {
+ atomic_inc(&vp->vref_count);
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+ list_for_each_entry(fcport, &vp->vp_fcports, list)
+ fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ atomic_dec(&vp->vref_count);
+ }
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+}
+
+
static pci_ers_result_t
qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
@@ -6863,20 +6935,7 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
return PCI_ERS_RESULT_CAN_RECOVER;
case pci_channel_io_frozen:
ha->flags.eeh_busy = 1;
- /* For ISP82XX complete any pending mailbox cmd */
- if (IS_QLA82XX(ha)) {
- ha->flags.isp82xx_fw_hung = 1;
- ql_dbg(ql_dbg_aer, vha, 0x9001, "Pci channel io frozen\n");
- qla82xx_clear_pending_mbx(vha);
- }
- qla2x00_free_irqs(vha);
- pci_disable_device(pdev);
- /* Return back all IOs */
- qla2x00_abort_all_cmds(vha, DID_RESET << 16);
- if (ql2xmqsupport || ql2xnvmeenable) {
- set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
- qla2xxx_wake_dpc(vha);
- }
+ qla_pci_error_cleanup(vha);
return PCI_ERS_RESULT_NEED_RESET;
case pci_channel_io_perm_failure:
ha->flags.pci_channel_io_perm_failure = 1;
@@ -6930,122 +6989,14 @@ qla2xxx_pci_mmio_enabled(struct pci_dev *pdev)
return PCI_ERS_RESULT_RECOVERED;
}

-static uint32_t
-qla82xx_error_recovery(scsi_qla_host_t *base_vha)
-{
- uint32_t rval = QLA_FUNCTION_FAILED;
- uint32_t drv_active = 0;
- struct qla_hw_data *ha = base_vha->hw;
- int fn;
- struct pci_dev *other_pdev = NULL;
-
- ql_dbg(ql_dbg_aer, base_vha, 0x9006,
- "Entered %s.\n", __func__);
-
- set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
-
- if (base_vha->flags.online) {
- /* Abort all outstanding commands,
- * so as to be requeued later */
- qla2x00_abort_isp_cleanup(base_vha);
- }
-
-
- fn = PCI_FUNC(ha->pdev->devfn);
- while (fn > 0) {
- fn--;
- ql_dbg(ql_dbg_aer, base_vha, 0x9007,
- "Finding pci device at function = 0x%x.\n", fn);
- other_pdev =
- pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
- ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
- fn));
-
- if (!other_pdev)
- continue;
- if (atomic_read(&other_pdev->enable_cnt)) {
- ql_dbg(ql_dbg_aer, base_vha, 0x9008,
- "Found PCI func available and enable at 0x%x.\n",
- fn);
- pci_dev_put(other_pdev);
- break;
- }
- pci_dev_put(other_pdev);
- }
-
- if (!fn) {
- /* Reset owner */
- ql_dbg(ql_dbg_aer, base_vha, 0x9009,
- "This devfn is reset owner = 0x%x.\n",
- ha->pdev->devfn);
- qla82xx_idc_lock(ha);
-
- qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
- QLA8XXX_DEV_INITIALIZING);
-
- qla82xx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION,
- QLA82XX_IDC_VERSION);
-
- drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
- ql_dbg(ql_dbg_aer, base_vha, 0x900a,
- "drv_active = 0x%x.\n", drv_active);
-
- qla82xx_idc_unlock(ha);
- /* Reset if device is not already reset
- * drv_active would be 0 if a reset has already been done
- */
- if (drv_active)
- rval = qla82xx_start_firmware(base_vha);
- else
- rval = QLA_SUCCESS;
- qla82xx_idc_lock(ha);
-
- if (rval != QLA_SUCCESS) {
- ql_log(ql_log_info, base_vha, 0x900b,
- "HW State: FAILED.\n");
- qla82xx_clear_drv_active(ha);
- qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
- QLA8XXX_DEV_FAILED);
- } else {
- ql_log(ql_log_info, base_vha, 0x900c,
- "HW State: READY.\n");
- qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
- QLA8XXX_DEV_READY);
- qla82xx_idc_unlock(ha);
- ha->flags.isp82xx_fw_hung = 0;
- rval = qla82xx_restart_isp(base_vha);
- qla82xx_idc_lock(ha);
- /* Clear driver state register */
- qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, 0);
- qla82xx_set_drv_active(base_vha);
- }
- qla82xx_idc_unlock(ha);
- } else {
- ql_dbg(ql_dbg_aer, base_vha, 0x900d,
- "This devfn is not reset owner = 0x%x.\n",
- ha->pdev->devfn);
- if ((qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE) ==
- QLA8XXX_DEV_READY)) {
- ha->flags.isp82xx_fw_hung = 0;
- rval = qla82xx_restart_isp(base_vha);
- qla82xx_idc_lock(ha);
- qla82xx_set_drv_active(base_vha);
- qla82xx_idc_unlock(ha);
- }
- }
- clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
-
- return rval;
-}
-
static pci_ers_result_t
qla2xxx_pci_slot_reset(struct pci_dev *pdev)
{
pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT;
scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
struct qla_hw_data *ha = base_vha->hw;
- struct rsp_que *rsp;
- int rc, retries = 10;
+ int rc;
+ struct qla_qpair *qpair = NULL;

ql_dbg(ql_dbg_aer, base_vha, 0x9004,
"Slot Reset.\n");
@@ -7074,24 +7025,16 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
goto exit_slot_reset;
}

- rsp = ha->rsp_q_map[0];
- if (qla2x00_request_irqs(ha, rsp))
- goto exit_slot_reset;

if (ha->isp_ops->pci_config(base_vha))
goto exit_slot_reset;

- if (IS_QLA82XX(ha)) {
- if (qla82xx_error_recovery(base_vha) == QLA_SUCCESS) {
- ret = PCI_ERS_RESULT_RECOVERED;
- goto exit_slot_reset;
- } else
- goto exit_slot_reset;
- }
-
- while (ha->flags.mbox_busy && retries--)
- msleep(1000);
+ mutex_lock(&ha->mq_lock);
+ list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem)
+ qpair->online = 1;
+ mutex_unlock(&ha->mq_lock);

+ base_vha->flags.online = 1;
set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
if (ha->isp_ops->abort_isp(base_vha) == QLA_SUCCESS)
ret = PCI_ERS_RESULT_RECOVERED;
@@ -7115,13 +7058,13 @@ qla2xxx_pci_resume(struct pci_dev *pdev)
ql_dbg(ql_dbg_aer, base_vha, 0x900f,
"pci_resume.\n");

+ ha->flags.eeh_busy = 0;
+
ret = qla2x00_wait_for_hba_online(base_vha);
if (ret != QLA_SUCCESS) {
ql_log(ql_log_fatal, base_vha, 0x9002,
"The device failed to resume I/O from slot/link_reset.\n");
}
-
- ha->flags.eeh_busy = 0;
}

static void
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 6082b008969b..6b6413073584 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -215,6 +215,9 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Cherry Stream G230 2.0 (G85-231) and 3.0 (G85-232) */
{ USB_DEVICE(0x046a, 0x0023), .driver_info = USB_QUIRK_RESET_RESUME },

+ /* Logitech HD Webcam C270 */
+ { USB_DEVICE(0x046d, 0x0825), .driver_info = USB_QUIRK_RESET_RESUME },
+
/* Logitech HD Pro Webcams C920, C920-C, C925e and C930e */
{ USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
{ USB_DEVICE(0x046d, 0x0841), .driver_info = USB_QUIRK_DELAY_INIT },
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index 3f087962f498..445213378a6a 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -2664,8 +2664,10 @@ static void dwc2_free_dma_aligned_buffer(struct urb *urb)
return;

/* Restore urb->transfer_buffer from the end of the allocated area */
- memcpy(&stored_xfer_buffer, urb->transfer_buffer +
- urb->transfer_buffer_length, sizeof(urb->transfer_buffer));
+ memcpy(&stored_xfer_buffer,
+ PTR_ALIGN(urb->transfer_buffer + urb->transfer_buffer_length,
+ dma_get_cache_alignment()),
+ sizeof(urb->transfer_buffer));

if (usb_urb_dir_in(urb)) {
if (usb_pipeisoc(urb->pipe))
@@ -2697,6 +2699,7 @@ static int dwc2_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags)
* DMA
*/
kmalloc_size = urb->transfer_buffer_length +
+ (dma_get_cache_alignment() - 1) +
sizeof(urb->transfer_buffer);

kmalloc_ptr = kmalloc(kmalloc_size, mem_flags);
@@ -2707,7 +2710,8 @@ static int dwc2_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags)
* Position value of original urb->transfer_buffer pointer to the end
* of allocation for later referencing
*/
- memcpy(kmalloc_ptr + urb->transfer_buffer_length,
+ memcpy(PTR_ALIGN(kmalloc_ptr + urb->transfer_buffer_length,
+ dma_get_cache_alignment()),
&urb->transfer_buffer, sizeof(urb->transfer_buffer));

if (usb_urb_dir_out(urb))
@@ -2792,7 +2796,7 @@ static int dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
chan->dev_addr = dwc2_hcd_get_dev_addr(&urb->pipe_info);
chan->ep_num = dwc2_hcd_get_ep_num(&urb->pipe_info);
chan->speed = qh->dev_speed;
- chan->max_packet = dwc2_max_packet(qh->maxp);
+ chan->max_packet = qh->maxp;

chan->xfer_started = 0;
chan->halt_status = DWC2_HC_XFER_NO_HALT_STATUS;
@@ -2870,7 +2874,7 @@ static int dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
* This value may be modified when the transfer is started
* to reflect the actual transfer length
*/
- chan->multi_count = dwc2_hb_mult(qh->maxp);
+ chan->multi_count = qh->maxp_mult;

if (hsotg->params.dma_desc_enable) {
chan->desc_list_addr = qh->desc_list_dma;
@@ -3990,19 +3994,21 @@ static struct dwc2_hcd_urb *dwc2_hcd_urb_alloc(struct dwc2_hsotg *hsotg,

static void dwc2_hcd_urb_set_pipeinfo(struct dwc2_hsotg *hsotg,
struct dwc2_hcd_urb *urb, u8 dev_addr,
- u8 ep_num, u8 ep_type, u8 ep_dir, u16 mps)
+ u8 ep_num, u8 ep_type, u8 ep_dir,
+ u16 maxp, u16 maxp_mult)
{
if (dbg_perio() ||
ep_type == USB_ENDPOINT_XFER_BULK ||
ep_type == USB_ENDPOINT_XFER_CONTROL)
dev_vdbg(hsotg->dev,
- "addr=%d, ep_num=%d, ep_dir=%1x, ep_type=%1x, mps=%d\n",
- dev_addr, ep_num, ep_dir, ep_type, mps);
+ "addr=%d, ep_num=%d, ep_dir=%1x, ep_type=%1x, maxp=%d (%d mult)\n",
+ dev_addr, ep_num, ep_dir, ep_type, maxp, maxp_mult);
urb->pipe_info.dev_addr = dev_addr;
urb->pipe_info.ep_num = ep_num;
urb->pipe_info.pipe_type = ep_type;
urb->pipe_info.pipe_dir = ep_dir;
- urb->pipe_info.mps = mps;
+ urb->pipe_info.maxp = maxp;
+ urb->pipe_info.maxp_mult = maxp_mult;
}

/*
@@ -4093,8 +4099,9 @@ void dwc2_hcd_dump_state(struct dwc2_hsotg *hsotg)
dwc2_hcd_is_pipe_in(&urb->pipe_info) ?
"IN" : "OUT");
dev_dbg(hsotg->dev,
- " Max packet size: %d\n",
- dwc2_hcd_get_mps(&urb->pipe_info));
+ " Max packet size: %d (%d mult)\n",
+ dwc2_hcd_get_maxp(&urb->pipe_info),
+ dwc2_hcd_get_maxp_mult(&urb->pipe_info));
dev_dbg(hsotg->dev,
" transfer_buffer: %p\n",
urb->buf);
@@ -4661,8 +4668,10 @@ static void dwc2_dump_urb_info(struct usb_hcd *hcd, struct urb *urb,
}

dev_vdbg(hsotg->dev, " Speed: %s\n", speed);
- dev_vdbg(hsotg->dev, " Max packet size: %d\n",
- usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe)));
+ dev_vdbg(hsotg->dev, " Max packet size: %d (%d mult)\n",
+ usb_endpoint_maxp(&urb->ep->desc),
+ usb_endpoint_maxp_mult(&urb->ep->desc));
+
dev_vdbg(hsotg->dev, " Data buffer length: %d\n",
urb->transfer_buffer_length);
dev_vdbg(hsotg->dev, " Transfer buffer: %p, Transfer DMA: %08lx\n",
@@ -4745,8 +4754,8 @@ static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
dwc2_hcd_urb_set_pipeinfo(hsotg, dwc2_urb, usb_pipedevice(urb->pipe),
usb_pipeendpoint(urb->pipe), ep_type,
usb_pipein(urb->pipe),
- usb_maxpacket(urb->dev, urb->pipe,
- !(usb_pipein(urb->pipe))));
+ usb_endpoint_maxp(&ep->desc),
+ usb_endpoint_maxp_mult(&ep->desc));

buf = urb->transfer_buffer;

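The dwc2 hunks stash the original transfer_buffer pointer at a cache-aligned offset past the data and size the bounce buffer to leave room for that padding. A userspace sketch of the layout, with a fixed 64-byte stand-in for dma_get_cache_alignment() (an assumption for illustration only):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define CACHE_ALIGN	64	/* stand-in for dma_get_cache_alignment() */
#define PTR_ALIGN(p, a)	((void *)(((uintptr_t)(p) + (a) - 1) & ~((uintptr_t)(a) - 1)))

int main(void)
{
	size_t data_len = 100;
	void *orig_buf = malloc(data_len);
	/* Reserve room for worst-case alignment padding plus the stashed pointer. */
	char *bounce = malloc(data_len + (CACHE_ALIGN - 1) + sizeof(void *));
	void *restored;

	/* Stash the original pointer at a cache-aligned offset past the data,
	 * as the patched dwc2_alloc_dma_aligned_buffer() does. */
	memcpy(PTR_ALIGN(bounce + data_len, CACHE_ALIGN), &orig_buf, sizeof(orig_buf));

	/* Later, recover it from the same aligned offset. */
	memcpy(&restored, PTR_ALIGN(bounce + data_len, CACHE_ALIGN), sizeof(restored));
	printf("%s\n", restored == orig_buf ? "pointer recovered" : "mismatch");

	free(bounce);
	free(orig_buf);
	return 0;
}
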
diff --git a/drivers/usb/dwc2/hcd.h b/drivers/usb/dwc2/hcd.h
index c089ffa1f0a8..ce6445a06588 100644
--- a/drivers/usb/dwc2/hcd.h
+++ b/drivers/usb/dwc2/hcd.h
@@ -171,7 +171,8 @@ struct dwc2_hcd_pipe_info {
u8 ep_num;
u8 pipe_type;
u8 pipe_dir;
- u16 mps;
+ u16 maxp;
+ u16 maxp_mult;
};

struct dwc2_hcd_iso_packet_desc {
@@ -264,6 +265,7 @@ struct dwc2_hs_transfer_time {
* - USB_ENDPOINT_XFER_ISOC
* @ep_is_in: Endpoint direction
* @maxp: Value from wMaxPacketSize field of Endpoint Descriptor
+ * @maxp_mult: Multiplier for maxp
* @dev_speed: Device speed. One of the following values:
* - USB_SPEED_LOW
* - USB_SPEED_FULL
@@ -340,6 +342,7 @@ struct dwc2_qh {
u8 ep_type;
u8 ep_is_in;
u16 maxp;
+ u16 maxp_mult;
u8 dev_speed;
u8 data_toggle;
u8 ping_state;
@@ -503,9 +506,14 @@ static inline u8 dwc2_hcd_get_pipe_type(struct dwc2_hcd_pipe_info *pipe)
return pipe->pipe_type;
}

-static inline u16 dwc2_hcd_get_mps(struct dwc2_hcd_pipe_info *pipe)
+static inline u16 dwc2_hcd_get_maxp(struct dwc2_hcd_pipe_info *pipe)
+{
+ return pipe->maxp;
+}
+
+static inline u16 dwc2_hcd_get_maxp_mult(struct dwc2_hcd_pipe_info *pipe)
{
- return pipe->mps;
+ return pipe->maxp_mult;
}

static inline u8 dwc2_hcd_get_dev_addr(struct dwc2_hcd_pipe_info *pipe)
@@ -620,12 +628,6 @@ static inline bool dbg_urb(struct urb *urb)
static inline bool dbg_perio(void) { return false; }
#endif

-/* High bandwidth multiplier as encoded in highspeed endpoint descriptors */
-#define dwc2_hb_mult(wmaxpacketsize) (1 + (((wmaxpacketsize) >> 11) & 0x03))
-
-/* Packet size for any kind of endpoint descriptor */
-#define dwc2_max_packet(wmaxpacketsize) ((wmaxpacketsize) & 0x07ff)
-
/*
* Returns true if frame1 index is greater than frame2 index. The comparison
* is done modulo FRLISTEN_64_SIZE. This accounts for the rollover of the
diff --git a/drivers/usb/dwc2/hcd_intr.c b/drivers/usb/dwc2/hcd_intr.c
index 88b5dcf3aefc..a052d39b4375 100644
--- a/drivers/usb/dwc2/hcd_intr.c
+++ b/drivers/usb/dwc2/hcd_intr.c
@@ -1617,8 +1617,9 @@ static void dwc2_hc_ahberr_intr(struct dwc2_hsotg *hsotg,

dev_err(hsotg->dev, " Speed: %s\n", speed);

- dev_err(hsotg->dev, " Max packet size: %d\n",
- dwc2_hcd_get_mps(&urb->pipe_info));
+ dev_err(hsotg->dev, " Max packet size: %d (mult %d)\n",
+ dwc2_hcd_get_maxp(&urb->pipe_info),
+ dwc2_hcd_get_maxp_mult(&urb->pipe_info));
dev_err(hsotg->dev, " Data buffer length: %d\n", urb->length);
dev_err(hsotg->dev, " Transfer buffer: %p, Transfer DMA: %08lx\n",
urb->buf, (unsigned long)urb->dma);
diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c
index ea3aa640c15c..68bbac64b753 100644
--- a/drivers/usb/dwc2/hcd_queue.c
+++ b/drivers/usb/dwc2/hcd_queue.c
@@ -708,7 +708,7 @@ static void dwc2_hs_pmap_unschedule(struct dwc2_hsotg *hsotg,
static int dwc2_uframe_schedule_split(struct dwc2_hsotg *hsotg,
struct dwc2_qh *qh)
{
- int bytecount = dwc2_hb_mult(qh->maxp) * dwc2_max_packet(qh->maxp);
+ int bytecount = qh->maxp_mult * qh->maxp;
int ls_search_slice;
int err = 0;
int host_interval_in_sched;
@@ -1332,7 +1332,7 @@ static int dwc2_check_max_xfer_size(struct dwc2_hsotg *hsotg,
u32 max_channel_xfer_size;
int status = 0;

- max_xfer_size = dwc2_max_packet(qh->maxp) * dwc2_hb_mult(qh->maxp);
+ max_xfer_size = qh->maxp * qh->maxp_mult;
max_channel_xfer_size = hsotg->params.max_transfer_size;

if (max_xfer_size > max_channel_xfer_size) {
@@ -1517,8 +1517,9 @@ static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
u32 prtspd = (hprt & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT;
bool do_split = (prtspd == HPRT0_SPD_HIGH_SPEED &&
dev_speed != USB_SPEED_HIGH);
- int maxp = dwc2_hcd_get_mps(&urb->pipe_info);
- int bytecount = dwc2_hb_mult(maxp) * dwc2_max_packet(maxp);
+ int maxp = dwc2_hcd_get_maxp(&urb->pipe_info);
+ int maxp_mult = dwc2_hcd_get_maxp_mult(&urb->pipe_info);
+ int bytecount = maxp_mult * maxp;
char *speed, *type;

/* Initialize QH */
@@ -1531,6 +1532,7 @@ static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,

qh->data_toggle = DWC2_HC_PID_DATA0;
qh->maxp = maxp;
+ qh->maxp_mult = maxp_mult;
INIT_LIST_HEAD(&qh->qtd_list);
INIT_LIST_HEAD(&qh->qh_list_entry);

diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 83869065b802..a0aaf0635359 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1171,6 +1171,10 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1213, 0xff) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1214),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
+ { USB_DEVICE(TELIT_VENDOR_ID, 0x1260),
+ .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
+ { USB_DEVICE(TELIT_VENDOR_ID, 0x1261),
+ .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
{ USB_DEVICE(TELIT_VENDOR_ID, 0x1900), /* Telit LN940 (QMI) */
.driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1901, 0xff), /* Telit LN940 (MBIM) */
@@ -1772,6 +1776,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(ALINK_VENDOR_ID, SIMCOM_PRODUCT_SIM7100E),
.driver_info = RSVD(5) | RSVD(6) },
{ USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9003, 0xff) }, /* Simcom SIM7500/SIM7600 MBIM mode */
+ { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9011, 0xff), /* Simcom SIM7500/SIM7600 RNDIS mode */
+ .driver_info = RSVD(7) },
{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
.driver_info = NCTRL(0) | NCTRL(1) | RSVD(4) },
{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D),
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index bb3f9aa4a909..781e7949c45d 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -106,6 +106,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) },
{ USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530_PRODUCT_ID) },
{ USB_DEVICE(SMART_VENDOR_ID, SMART_PRODUCT_ID) },
+ { USB_DEVICE(AT_VENDOR_ID, AT_VTKIT3_PRODUCT_ID) },
{ } /* Terminating entry */
};

diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index 559941ca884d..b0175f17d1a2 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -155,3 +155,6 @@
#define SMART_VENDOR_ID 0x0b8c
#define SMART_PRODUCT_ID 0x2303

+/* Allied Telesis VT-Kit3 */
+#define AT_VENDOR_ID 0x0caa
+#define AT_VTKIT3_PRODUCT_ID 0x3001
diff --git a/drivers/usb/storage/unusual_realtek.h b/drivers/usb/storage/unusual_realtek.h
index 6b2140f966ef..7e14c2d7cf73 100644
--- a/drivers/usb/storage/unusual_realtek.h
+++ b/drivers/usb/storage/unusual_realtek.h
@@ -17,6 +17,11 @@ UNUSUAL_DEV(0x0bda, 0x0138, 0x0000, 0x9999,
"USB Card Reader",
USB_SC_DEVICE, USB_PR_DEVICE, init_realtek_cr, 0),

+UNUSUAL_DEV(0x0bda, 0x0153, 0x0000, 0x9999,
+ "Realtek",
+ "USB Card Reader",
+ USB_SC_DEVICE, USB_PR_DEVICE, init_realtek_cr, 0),
+
UNUSUAL_DEV(0x0bda, 0x0158, 0x0000, 0x9999,
"Realtek",
"USB Card Reader",
diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
index 848a785abe25..e791741d193b 100644
--- a/fs/f2fs/xattr.c
+++ b/fs/f2fs/xattr.c
@@ -202,12 +202,17 @@ static inline const struct xattr_handler *f2fs_xattr_handler(int index)
return handler;
}

-static struct f2fs_xattr_entry *__find_xattr(void *base_addr, int index,
- size_t len, const char *name)
+static struct f2fs_xattr_entry *__find_xattr(void *base_addr,
+ void *last_base_addr, int index,
+ size_t len, const char *name)
{
struct f2fs_xattr_entry *entry;

list_for_each_xattr(entry, base_addr) {
+ if ((void *)(entry) + sizeof(__u32) > last_base_addr ||
+ (void *)XATTR_NEXT_ENTRY(entry) > last_base_addr)
+ return NULL;
+
if (entry->e_name_index != index)
continue;
if (entry->e_name_len != len)
@@ -297,20 +302,22 @@ static int lookup_all_xattrs(struct inode *inode, struct page *ipage,
const char *name, struct f2fs_xattr_entry **xe,
void **base_addr, int *base_size)
{
- void *cur_addr, *txattr_addr, *last_addr = NULL;
+ void *cur_addr, *txattr_addr, *last_txattr_addr;
+ void *last_addr = NULL;
nid_t xnid = F2FS_I(inode)->i_xattr_nid;
- unsigned int size = xnid ? VALID_XATTR_BLOCK_SIZE : 0;
unsigned int inline_size = inline_xattr_size(inode);
int err = 0;

- if (!size && !inline_size)
+ if (!xnid && !inline_size)
return -ENODATA;

- *base_size = inline_size + size + XATTR_PADDING_SIZE;
+ *base_size = XATTR_SIZE(xnid, inode) + XATTR_PADDING_SIZE;
txattr_addr = f2fs_kzalloc(F2FS_I_SB(inode), *base_size, GFP_NOFS);
if (!txattr_addr)
return -ENOMEM;

+ last_txattr_addr = (void *)txattr_addr + XATTR_SIZE(xnid, inode);
+
/* read from inline xattr */
if (inline_size) {
err = read_inline_xattr(inode, ipage, txattr_addr);
@@ -337,7 +344,11 @@ static int lookup_all_xattrs(struct inode *inode, struct page *ipage,
else
cur_addr = txattr_addr;

- *xe = __find_xattr(cur_addr, index, len, name);
+ *xe = __find_xattr(cur_addr, last_txattr_addr, index, len, name);
+ if (!*xe) {
+ err = -EFAULT;
+ goto out;
+ }
check:
if (IS_XATTR_LAST_ENTRY(*xe)) {
err = -ENODATA;
@@ -581,7 +592,8 @@ static int __f2fs_setxattr(struct inode *inode, int index,
struct page *ipage, int flags)
{
struct f2fs_xattr_entry *here, *last;
- void *base_addr;
+ void *base_addr, *last_base_addr;
+ nid_t xnid = F2FS_I(inode)->i_xattr_nid;
int found, newsize;
size_t len;
__u32 new_hsize;
@@ -605,8 +617,14 @@ static int __f2fs_setxattr(struct inode *inode, int index,
if (error)
return error;

+ last_base_addr = (void *)base_addr + XATTR_SIZE(xnid, inode);
+
/* find entry with wanted name. */
- here = __find_xattr(base_addr, index, len, name);
+ here = __find_xattr(base_addr, last_base_addr, index, len, name);
+ if (!here) {
+ error = -EFAULT;
+ goto exit;
+ }

found = IS_XATTR_LAST_ENTRY(here) ? 0 : 1;

diff --git a/fs/f2fs/xattr.h b/fs/f2fs/xattr.h
index 9172ee082ca8..a90920e2f949 100644
--- a/fs/f2fs/xattr.h
+++ b/fs/f2fs/xattr.h
@@ -71,6 +71,8 @@ struct f2fs_xattr_entry {
entry = XATTR_NEXT_ENTRY(entry))
#define VALID_XATTR_BLOCK_SIZE (PAGE_SIZE - sizeof(struct node_footer))
#define XATTR_PADDING_SIZE (sizeof(__u32))
+#define XATTR_SIZE(x,i) (((x) ? VALID_XATTR_BLOCK_SIZE : 0) + \
+ (inline_xattr_size(i)))
#define MIN_OFFSET(i) XATTR_ALIGN(inline_xattr_size(i) + \
VALID_XATTR_BLOCK_SIZE)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 28269a0c5037..4e32a033394c 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2633,8 +2633,10 @@ static void io_ring_ctx_free(struct io_ring_ctx *ctx)
io_sqe_files_unregister(ctx);

#if defined(CONFIG_UNIX)
- if (ctx->ring_sock)
+ if (ctx->ring_sock) {
+ ctx->ring_sock->file = NULL; /* so that iput() is called */
sock_release(ctx->ring_sock);
+ }
#endif

io_mem_free(ctx->sq_ring);
diff --git a/fs/ocfs2/dcache.c b/fs/ocfs2/dcache.c
index 290373024d9d..e8ace3b54e9c 100644
--- a/fs/ocfs2/dcache.c
+++ b/fs/ocfs2/dcache.c
@@ -310,6 +310,18 @@ int ocfs2_dentry_attach_lock(struct dentry *dentry,

out_attach:
spin_lock(&dentry_attach_lock);
+ if (unlikely(dentry->d_fsdata && !alias)) {
+ /* d_fsdata is set by a racing thread which is doing
+ * the same thing as this thread is doing. Leave the racing
+ * thread going ahead and we return here.
+ */
+ spin_unlock(&dentry_attach_lock);
+ iput(dl->dl_inode);
+ ocfs2_lock_res_free(&dl->dl_lockres);
+ kfree(dl);
+ return 0;
+ }
+
dentry->d_fsdata = dl;
dl->dl_count++;
spin_unlock(&dentry_attach_lock);
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index 8dc1a081fb36..19b15fd28854 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -465,6 +465,7 @@ struct edid *drm_get_edid_switcheroo(struct drm_connector *connector,
struct i2c_adapter *adapter);
struct edid *drm_edid_duplicate(const struct edid *edid);
int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
+int drm_add_override_edid_modes(struct drm_connector *connector);

u8 drm_match_cea_mode(const struct drm_display_mode *to_match);
enum hdmi_picture_aspect drm_get_cea_aspect_ratio(const u8 video_code);
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 81f58b4a5418..b6a8df3b7e96 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -487,7 +487,7 @@ static inline struct cgroup_subsys_state *task_css(struct task_struct *task,
*
* Find the css for the (@task, @subsys_id) combination, increment a
* reference on and return it. This function is guaranteed to return a
- * valid css.
+ * valid css. The returned css may already have been offlined.
*/
static inline struct cgroup_subsys_state *
task_get_css(struct task_struct *task, int subsys_id)
@@ -497,7 +497,13 @@ task_get_css(struct task_struct *task, int subsys_id)
rcu_read_lock();
while (true) {
css = task_css(task, subsys_id);
- if (likely(css_tryget_online(css)))
+ /*
+ * Can't use css_tryget_online() here. A task which has
+ * PF_EXITING set may stay associated with an offline css.
+ * If such task calls this function, css_tryget_online()
+ * will keep failing.
+ */
+ if (likely(css_tryget(css)))
break;
cpu_relax();
}
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index e78281d07b70..63fedd85c6c5 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -101,6 +101,7 @@ enum cpuhp_state {
CPUHP_AP_IRQ_BCM2836_STARTING,
CPUHP_AP_IRQ_MIPS_GIC_STARTING,
CPUHP_AP_ARM_MVEBU_COHERENCY,
+ CPUHP_AP_MICROCODE_LOADER,
CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
CPUHP_AP_PERF_X86_STARTING,
CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
diff --git a/include/linux/hid.h b/include/linux/hid.h
index ac0c70b4ce10..5a24ebfb5b47 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -894,7 +894,7 @@ struct hid_field *hidinput_get_led_field(struct hid_device *hid);
unsigned int hidinput_count_leds(struct hid_device *hid);
__s32 hidinput_calc_abs_res(const struct hid_field *field, __u16 code);
void hid_output_report(struct hid_report *report, __u8 *data);
-void __hid_request(struct hid_device *hid, struct hid_report *rep, int reqtype);
+int __hid_request(struct hid_device *hid, struct hid_report *rep, int reqtype);
u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags);
struct hid_device *hid_allocate_device(void);
struct hid_report *hid_register_report(struct hid_device *device,
diff --git a/kernel/Makefile b/kernel/Makefile
index 6c57e78817da..62471e75a2b0 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -30,6 +30,7 @@ KCOV_INSTRUMENT_extable.o := n
# Don't self-instrument.
KCOV_INSTRUMENT_kcov.o := n
KASAN_SANITIZE_kcov.o := n
+CFLAGS_kcov.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)

# cond_syscall is currently not LTO compatible
CFLAGS_sys_ni.o = $(DISABLE_LTO)
diff --git a/kernel/cred.c b/kernel/cred.c
index 45d77284aed0..07e069d00696 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -450,6 +450,15 @@ int commit_creds(struct cred *new)
if (task->mm)
set_dumpable(task->mm, suid_dumpable);
task->pdeath_signal = 0;
+ /*
+ * If a task drops privileges and becomes nondumpable,
+ * the dumpability change must become visible before
+ * the credential change; otherwise, a __ptrace_may_access()
+ * racing with this change may be able to attach to a task it
+ * shouldn't be able to attach to (as if the task had dropped
+ * privileges without becoming nondumpable).
+ * Pairs with a read barrier in __ptrace_may_access().
+ */
smp_wmb();
}
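[Editor's note: the smp_wmb() added above pairs with the smp_rmb() added to __ptrace_may_access() in the next file: the dumpability change is made visible before the credential change, and the tracer reads them in the opposite order, so a tracer that observes the new credentials is guaranteed to also observe the cleared dumpable flag. A minimal userspace sketch of that pairing, using C11 fences and invented names (not kernel APIs), is:

	#include <stdatomic.h>
	#include <stdbool.h>

	static _Atomic int dumpable = 1;	/* stands in for the mm dumpability flag */
	static _Atomic int cred_uid = 0;	/* stands in for the task credentials */

	/* writer side, as in commit_creds(): dumpability first, then creds */
	static void drop_privileges(int new_uid)
	{
		atomic_store_explicit(&dumpable, 0, memory_order_relaxed);
		atomic_thread_fence(memory_order_release);	/* ~ smp_wmb() */
		atomic_store_explicit(&cred_uid, new_uid, memory_order_relaxed);
	}

	/* reader side, as in __ptrace_may_access(): creds first, then dumpability */
	static bool may_attach(int tracer_uid)
	{
		int uid = atomic_load_explicit(&cred_uid, memory_order_relaxed);
		atomic_thread_fence(memory_order_acquire);	/* ~ smp_rmb() */
		int dump = atomic_load_explicit(&dumpable, memory_order_relaxed);

		return tracer_uid == uid && dump;
	}
]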

diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 6f357f4fc859..c9b4646ad375 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -323,6 +323,16 @@ static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
return -EPERM;
ok:
rcu_read_unlock();
+ /*
+ * If a task drops privileges and becomes nondumpable (through a syscall
+ * like setresuid()) while we are trying to access it, we must ensure
+ * that the dumpability is read after the credentials; otherwise,
+ * we may be able to attach to a task that we shouldn't be able to
+ * attach to (as if the task had dropped privileges without becoming
+ * nondumpable).
+ * Pairs with a write barrier in commit_creds().
+ */
+ smp_rmb();
mm = task->mm;
if (mm &&
((get_dumpable(mm) != SUID_DUMP_USER) &&
@@ -704,6 +714,10 @@ static int ptrace_peek_siginfo(struct task_struct *child,
if (arg.nr < 0)
return -EINVAL;

+ /* Ensure arg.off fits in an unsigned long */
+ if (arg.off > ULONG_MAX)
+ return 0;
+
if (arg.flags & PTRACE_PEEKSIGINFO_SHARED)
pending = &child->signal->shared_pending;
else
@@ -711,18 +725,20 @@ static int ptrace_peek_siginfo(struct task_struct *child,

for (i = 0; i < arg.nr; ) {
kernel_siginfo_t info;
- s32 off = arg.off + i;
+ unsigned long off = arg.off + i;
+ bool found = false;

spin_lock_irq(&child->sighand->siglock);
list_for_each_entry(q, &pending->list, list) {
if (!off--) {
+ found = true;
copy_siginfo(&info, &q->info);
break;
}
}
spin_unlock_irq(&child->sighand->siglock);

- if (off >= 0) /* beyond the end of the list */
+ if (!found) /* beyond the end of the list */
break;

#ifdef CONFIG_COMPAT
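[Editor's note: two things change in ptrace_peek_siginfo() above: offsets beyond ULONG_MAX are rejected up front, and the per-iteration offset is kept in an unsigned long with an explicit "found" flag, so a large user-supplied arg.off can no longer be truncated into a small or negative s32 index. A stand-alone illustration of the truncation the old code allowed (values made up for the demo):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t arg_off = 0x100000005ULL;		/* huge offset from userspace */
		int32_t  old_off = (int32_t)arg_off;		/* old code: silently becomes 5 */
		unsigned long new_off = (unsigned long)arg_off;	/* kept wide on 64-bit */

		printf("s32 view: %d, unsigned long view: %lu\n", old_off, new_off);
		return 0;
	}

On 32-bit kernels the wide value would still wrap, which is what the explicit arg.off > ULONG_MAX check guards against.]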
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index f136c56c2805..7b2b0a3c720f 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -807,17 +807,18 @@ ktime_t ktime_get_coarse_with_offset(enum tk_offsets offs)
struct timekeeper *tk = &tk_core.timekeeper;
unsigned int seq;
ktime_t base, *offset = offsets[offs];
+ u64 nsecs;

WARN_ON(timekeeping_suspended);

do {
seq = read_seqcount_begin(&tk_core.seq);
base = ktime_add(tk->tkr_mono.base, *offset);
+ nsecs = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift;

} while (read_seqcount_retry(&tk_core.seq, seq));

- return base;
-
+ return base + nsecs;
}
EXPORT_SYMBOL_GPL(ktime_get_coarse_with_offset);
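[Editor's note: the point of the hunk above is that every field contributing to the returned value must be sampled inside the seqcount retry loop, and for the coarse clock the accumulated xtime_nsec has to be added to the base. A simplified userspace sketch of that read pattern, with illustrative names and a hand-rolled sequence counter rather than the kernel's seqcount API:

	#include <stdatomic.h>
	#include <stdint.h>

	struct coarse_tk {
		atomic_uint seq;	/* even = consistent, odd = update in flight */
		uint64_t base;		/* coarse base time in ns */
		uint64_t xtime_nsec;	/* accumulated nanoseconds << shift */
		unsigned int shift;
	};

	static uint64_t coarse_read(struct coarse_tk *tk)
	{
		unsigned int seq;
		uint64_t base, nsecs;

		do {
			seq = atomic_load_explicit(&tk->seq, memory_order_acquire);
			base  = tk->base;
			nsecs = tk->xtime_nsec >> tk->shift;	/* sampled under the same seq */
			atomic_thread_fence(memory_order_acquire);
		} while ((seq & 1) ||
			 atomic_load_explicit(&tk->seq, memory_order_relaxed) != seq);

		return base + nsecs;	/* what the fixed function now returns */
	}
]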

diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
index 0a200d42fa96..f50d57eac875 100644
--- a/kernel/trace/trace_events_hist.c
+++ b/kernel/trace/trace_events_hist.c
@@ -1815,6 +1815,9 @@ static u64 hist_field_var_ref(struct hist_field *hist_field,
struct hist_elt_data *elt_data;
u64 var_val = 0;

+ if (WARN_ON_ONCE(!elt))
+ return var_val;
+
elt_data = elt->private_data;
var_val = elt_data->var_ref_vals[hist_field->var_ref_idx];

diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index be78d99ee6bc..9cd4d0d22a9e 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -434,10 +434,17 @@ static int trace_uprobe_create(int argc, const char **argv)
ret = 0;
ref_ctr_offset = 0;

- /* argc must be >= 1 */
- if (argv[0][0] == 'r')
+ switch (argv[0][0]) {
+ case 'r':
is_return = true;
- else if (argv[0][0] != 'p' || argc < 2)
+ break;
+ case 'p':
+ break;
+ default:
+ return -ECANCELED;
+ }
+
+ if (argc < 2)
return -ECANCELED;

if (argv[0][1] == ':')
diff --git a/mm/list_lru.c b/mm/list_lru.c
index d3b538146efd..6468eb8d4d4d 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -353,7 +353,7 @@ static int __memcg_init_list_lru_node(struct list_lru_memcg *memcg_lrus,
}
return 0;
fail:
- __memcg_destroy_list_lru_node(memcg_lrus, begin, i - 1);
+ __memcg_destroy_list_lru_node(memcg_lrus, begin, i);
return -ENOMEM;
}

diff --git a/mm/vmscan.c b/mm/vmscan.c
index a815f73ee4d5..3fb1d75804de 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1502,7 +1502,7 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,

list_for_each_entry_safe(page, next, page_list, lru) {
if (page_is_file_cache(page) && !PageDirty(page) &&
- !__PageMovable(page)) {
+ !__PageMovable(page) && !PageUnevictable(page)) {
ClearPageActive(page);
list_move(&page->lru, &clean_pages);
}
diff --git a/net/core/skmsg.c b/net/core/skmsg.c
index cc94d921476c..93bffaad2135 100644
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -411,6 +411,7 @@ static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb)
sk_mem_charge(sk, skb->len);
copied = skb->len;
msg->sg.start = 0;
+ msg->sg.size = copied;
msg->sg.end = num_sge == MAX_MSG_FRAGS ? 0 : num_sge;
msg->skb = skb;

@@ -554,8 +555,10 @@ static void sk_psock_destroy_deferred(struct work_struct *gc)
struct sk_psock *psock = container_of(gc, struct sk_psock, gc);

/* No sk_callback_lock since already detached. */
- strp_stop(&psock->parser.strp);
- strp_done(&psock->parser.strp);
+
+ /* Parser has been stopped */
+ if (psock->progs.skb_parser)
+ strp_done(&psock->parser.strp);

cancel_work_sync(&psock->work);

diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
index 1bb7321a256d..3d1e15401384 100644
--- a/net/ipv4/tcp_bpf.c
+++ b/net/ipv4/tcp_bpf.c
@@ -27,7 +27,10 @@ static int tcp_bpf_wait_data(struct sock *sk, struct sk_psock *psock,
int flags, long timeo, int *err)
{
DEFINE_WAIT_FUNC(wait, woken_wake_function);
- int ret;
+ int ret = 0;
+
+ if (!timeo)
+ return ret;

add_wait_queue(sk_sleep(sk), &wait);
sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
@@ -528,8 +531,6 @@ static void tcp_bpf_remove(struct sock *sk, struct sk_psock *psock)
{
struct sk_psock_link *link;

- sk_psock_cork_free(psock);
- __sk_psock_purge_ingress_msg(psock);
while ((link = sk_psock_link_pop(psock))) {
sk_psock_unlink(sk, link);
sk_psock_free_link(link);
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index 8346a4f7c5d7..a99be508f93d 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -739,14 +739,20 @@ static void avc_audit_post_callback(struct audit_buffer *ab, void *a)
rc = security_sid_to_context_inval(sad->state, sad->ssid, &scontext,
&scontext_len);
if (!rc && scontext) {
- audit_log_format(ab, " srawcon=%s", scontext);
+ if (scontext_len && scontext[scontext_len - 1] == '\0')
+ scontext_len--;
+ audit_log_format(ab, " srawcon=");
+ audit_log_n_untrustedstring(ab, scontext, scontext_len);
kfree(scontext);
}

rc = security_sid_to_context_inval(sad->state, sad->tsid, &scontext,
&scontext_len);
if (!rc && scontext) {
- audit_log_format(ab, " trawcon=%s", scontext);
+ if (scontext_len && scontext[scontext_len - 1] == '\0')
+ scontext_len--;
+ audit_log_format(ab, " trawcon=");
+ audit_log_n_untrustedstring(ab, scontext, scontext_len);
kfree(scontext);
}
}
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 28bff30c2f15..614bc753822c 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -1048,15 +1048,24 @@ static int selinux_add_mnt_opt(const char *option, const char *val, int len,
if (token == Opt_error)
return -EINVAL;

- if (token != Opt_seclabel)
+ if (token != Opt_seclabel) {
val = kmemdup_nul(val, len, GFP_KERNEL);
+ if (!val) {
+ rc = -ENOMEM;
+ goto free_opt;
+ }
+ }
rc = selinux_add_opt(token, val, mnt_opts);
if (unlikely(rc)) {
kfree(val);
- if (*mnt_opts) {
- selinux_free_mnt_opts(*mnt_opts);
- *mnt_opts = NULL;
- }
+ goto free_opt;
+ }
+ return rc;
+
+free_opt:
+ if (*mnt_opts) {
+ selinux_free_mnt_opts(*mnt_opts);
+ *mnt_opts = NULL;
}
return rc;
}
@@ -2603,10 +2612,11 @@ static int selinux_sb_eat_lsm_opts(char *options, void **mnt_opts)
char *from = options;
char *to = options;
bool first = true;
+ int rc;

while (1) {
int len = opt_len(from);
- int token, rc;
+ int token;
char *arg = NULL;

token = match_opt_prefix(from, len, &arg);
@@ -2622,15 +2632,15 @@ static int selinux_sb_eat_lsm_opts(char *options, void **mnt_opts)
*q++ = c;
}
arg = kmemdup_nul(arg, q - arg, GFP_KERNEL);
+ if (!arg) {
+ rc = -ENOMEM;
+ goto free_opt;
+ }
}
rc = selinux_add_opt(token, arg, mnt_opts);
if (unlikely(rc)) {
kfree(arg);
- if (*mnt_opts) {
- selinux_free_mnt_opts(*mnt_opts);
- *mnt_opts = NULL;
- }
- return rc;
+ goto free_opt;
}
} else {
if (!first) { // copy with preceding comma
@@ -2648,6 +2658,13 @@ static int selinux_sb_eat_lsm_opts(char *options, void **mnt_opts)
}
*to = '\0';
return 0;
+
+free_opt:
+ if (*mnt_opts) {
+ selinux_free_mnt_opts(*mnt_opts);
+ *mnt_opts = NULL;
+ }
+ return rc;
}

static int selinux_sb_remount(struct super_block *sb, void *mnt_opts)
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 5c1613519d5a..c51f4bb6c1e7 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -67,6 +67,7 @@ static struct {
int len;
int opt;
} smk_mount_opts[] = {
+ {"smackfsdef", sizeof("smackfsdef") - 1, Opt_fsdefault},
A(fsdefault), A(fsfloor), A(fshat), A(fsroot), A(fstransmute)
};
#undef A
@@ -681,11 +682,12 @@ static int smack_fs_context_dup(struct fs_context *fc,
}

static const struct fs_parameter_spec smack_param_specs[] = {
- fsparam_string("fsdefault", Opt_fsdefault),
- fsparam_string("fsfloor", Opt_fsfloor),
- fsparam_string("fshat", Opt_fshat),
- fsparam_string("fsroot", Opt_fsroot),
- fsparam_string("fstransmute", Opt_fstransmute),
+ fsparam_string("smackfsdef", Opt_fsdefault),
+ fsparam_string("smackfsdefault", Opt_fsdefault),
+ fsparam_string("smackfsfloor", Opt_fsfloor),
+ fsparam_string("smackfshat", Opt_fshat),
+ fsparam_string("smackfsroot", Opt_fsroot),
+ fsparam_string("smackfstransmute", Opt_fstransmute),
{}
};

diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
index 38e7deab6384..c99e1b77a45b 100644
--- a/sound/core/seq/seq_clientmgr.c
+++ b/sound/core/seq/seq_clientmgr.c
@@ -1900,20 +1900,14 @@ static int snd_seq_ioctl_get_subscription(struct snd_seq_client *client,
int result;
struct snd_seq_client *sender = NULL;
struct snd_seq_client_port *sport = NULL;
- struct snd_seq_subscribers *p;

result = -EINVAL;
if ((sender = snd_seq_client_use_ptr(subs->sender.client)) == NULL)
goto __end;
if ((sport = snd_seq_port_use_ptr(sender, subs->sender.port)) == NULL)
goto __end;
- p = snd_seq_port_get_subscription(&sport->c_src, &subs->dest);
- if (p) {
- result = 0;
- *subs = p->info;
- } else
- result = -ENOENT;
-
+ result = snd_seq_port_get_subscription(&sport->c_src, &subs->dest,
+ subs);
__end:
if (sport)
snd_seq_port_unlock(sport);
diff --git a/sound/core/seq/seq_ports.c b/sound/core/seq/seq_ports.c
index da31aa8e216e..16289aefb443 100644
--- a/sound/core/seq/seq_ports.c
+++ b/sound/core/seq/seq_ports.c
@@ -635,20 +635,23 @@ int snd_seq_port_disconnect(struct snd_seq_client *connector,


/* get matched subscriber */
-struct snd_seq_subscribers *snd_seq_port_get_subscription(struct snd_seq_port_subs_info *src_grp,
- struct snd_seq_addr *dest_addr)
+int snd_seq_port_get_subscription(struct snd_seq_port_subs_info *src_grp,
+ struct snd_seq_addr *dest_addr,
+ struct snd_seq_port_subscribe *subs)
{
- struct snd_seq_subscribers *s, *found = NULL;
+ struct snd_seq_subscribers *s;
+ int err = -ENOENT;

down_read(&src_grp->list_mutex);
list_for_each_entry(s, &src_grp->list_head, src_list) {
if (addr_match(dest_addr, &s->info.dest)) {
- found = s;
+ *subs = s->info;
+ err = 0;
break;
}
}
up_read(&src_grp->list_mutex);
- return found;
+ return err;
}

/*
diff --git a/sound/core/seq/seq_ports.h b/sound/core/seq/seq_ports.h
index 26bd71f36c41..06003b36652e 100644
--- a/sound/core/seq/seq_ports.h
+++ b/sound/core/seq/seq_ports.h
@@ -135,7 +135,8 @@ int snd_seq_port_subscribe(struct snd_seq_client_port *port,
struct snd_seq_port_subscribe *info);

/* get matched subscriber */
-struct snd_seq_subscribers *snd_seq_port_get_subscription(struct snd_seq_port_subs_info *src_grp,
- struct snd_seq_addr *dest_addr);
+int snd_seq_port_get_subscription(struct snd_seq_port_subs_info *src_grp,
+ struct snd_seq_addr *dest_addr,
+ struct snd_seq_port_subscribe *subs);

#endif
diff --git a/sound/firewire/motu/motu-stream.c b/sound/firewire/motu/motu-stream.c
index 73e7a5e527fc..483a8771d502 100644
--- a/sound/firewire/motu/motu-stream.c
+++ b/sound/firewire/motu/motu-stream.c
@@ -345,7 +345,7 @@ static void destroy_stream(struct snd_motu *motu,
}

amdtp_stream_destroy(stream);
- fw_iso_resources_free(resources);
+ fw_iso_resources_destroy(resources);
}

int snd_motu_stream_init_duplex(struct snd_motu *motu)
diff --git a/sound/firewire/oxfw/oxfw.c b/sound/firewire/oxfw/oxfw.c
index 3d27f3378d5d..b4bef574929d 100644
--- a/sound/firewire/oxfw/oxfw.c
+++ b/sound/firewire/oxfw/oxfw.c
@@ -148,9 +148,6 @@ static int detect_quirks(struct snd_oxfw *oxfw)
oxfw->midi_input_ports = 0;
oxfw->midi_output_ports = 0;

- /* Output stream exists but no data channels are useful. */
- oxfw->has_output = false;
-
return snd_oxfw_scs1x_add(oxfw);
}

diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 239697421118..d0e543ff6b64 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -4084,18 +4084,19 @@ static struct coef_fw alc225_pre_hsmode[] = {
static void alc_headset_mode_unplugged(struct hda_codec *codec)
{
static struct coef_fw coef0255[] = {
+ WRITE_COEF(0x1b, 0x0c0b), /* LDO and MISC control */
WRITE_COEF(0x45, 0xd089), /* UAJ function set to menual mode */
UPDATE_COEFEX(0x57, 0x05, 1<<14, 0), /* Direct Drive HP Amp control(Set to verb control)*/
WRITE_COEF(0x06, 0x6104), /* Set MIC2 Vref gate with HP */
WRITE_COEFEX(0x57, 0x03, 0x8aa6), /* Direct Drive HP Amp control */
{}
};
- static struct coef_fw coef0255_1[] = {
- WRITE_COEF(0x1b, 0x0c0b), /* LDO and MISC control */
- {}
- };
static struct coef_fw coef0256[] = {
WRITE_COEF(0x1b, 0x0c4b), /* LDO and MISC control */
+ WRITE_COEF(0x45, 0xd089), /* UAJ function set to menual mode */
+ WRITE_COEF(0x06, 0x6104), /* Set MIC2 Vref gate with HP */
+ WRITE_COEFEX(0x57, 0x03, 0x09a3), /* Direct Drive HP Amp control */
+ UPDATE_COEFEX(0x57, 0x05, 1<<14, 0), /* Direct Drive HP Amp control(Set to verb control)*/
{}
};
static struct coef_fw coef0233[] = {
@@ -4158,13 +4159,11 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)

switch (codec->core.vendor_id) {
case 0x10ec0255:
- alc_process_coef_fw(codec, coef0255_1);
alc_process_coef_fw(codec, coef0255);
break;
case 0x10ec0236:
case 0x10ec0256:
alc_process_coef_fw(codec, coef0256);
- alc_process_coef_fw(codec, coef0255);
break;
case 0x10ec0234:
case 0x10ec0274:
@@ -4217,6 +4216,12 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin,
WRITE_COEF(0x06, 0x6100), /* Set MIC2 Vref gate to normal */
{}
};
+ static struct coef_fw coef0256[] = {
+ UPDATE_COEFEX(0x57, 0x05, 1<<14, 1<<14), /* Direct Drive HP Amp control(Set to verb control)*/
+ WRITE_COEFEX(0x57, 0x03, 0x09a3),
+ WRITE_COEF(0x06, 0x6100), /* Set MIC2 Vref gate to normal */
+ {}
+ };
static struct coef_fw coef0233[] = {
UPDATE_COEF(0x35, 0, 1<<14),
WRITE_COEF(0x06, 0x2100),
@@ -4264,14 +4269,19 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin,
};

switch (codec->core.vendor_id) {
- case 0x10ec0236:
case 0x10ec0255:
- case 0x10ec0256:
alc_write_coef_idx(codec, 0x45, 0xc489);
snd_hda_set_pin_ctl_cache(codec, hp_pin, 0);
alc_process_coef_fw(codec, coef0255);
snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50);
break;
+ case 0x10ec0236:
+ case 0x10ec0256:
+ alc_write_coef_idx(codec, 0x45, 0xc489);
+ snd_hda_set_pin_ctl_cache(codec, hp_pin, 0);
+ alc_process_coef_fw(codec, coef0256);
+ snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50);
+ break;
case 0x10ec0234:
case 0x10ec0274:
case 0x10ec0294:
@@ -4353,6 +4363,14 @@ static void alc_headset_mode_default(struct hda_codec *codec)
WRITE_COEF(0x49, 0x0049),
{}
};
+ static struct coef_fw coef0256[] = {
+ WRITE_COEF(0x45, 0xc489),
+ WRITE_COEFEX(0x57, 0x03, 0x0da3),
+ WRITE_COEF(0x49, 0x0049),
+ UPDATE_COEFEX(0x57, 0x05, 1<<14, 0), /* Direct Drive HP Amp control(Set to verb control)*/
+ WRITE_COEF(0x06, 0x6100),
+ {}
+ };
static struct coef_fw coef0233[] = {
WRITE_COEF(0x06, 0x2100),
WRITE_COEF(0x32, 0x4ea3),
@@ -4403,11 +4421,16 @@ static void alc_headset_mode_default(struct hda_codec *codec)
alc_process_coef_fw(codec, alc225_pre_hsmode);
alc_process_coef_fw(codec, coef0225);
break;
- case 0x10ec0236:
case 0x10ec0255:
- case 0x10ec0256:
alc_process_coef_fw(codec, coef0255);
break;
+ case 0x10ec0236:
+ case 0x10ec0256:
+ alc_write_coef_idx(codec, 0x1b, 0x0e4b);
+ alc_write_coef_idx(codec, 0x45, 0xc089);
+ msleep(50);
+ alc_process_coef_fw(codec, coef0256);
+ break;
case 0x10ec0234:
case 0x10ec0274:
case 0x10ec0294:
@@ -4451,8 +4474,7 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
};
static struct coef_fw coef0256[] = {
WRITE_COEF(0x45, 0xd489), /* Set to CTIA type */
- WRITE_COEF(0x1b, 0x0c6b),
- WRITE_COEFEX(0x57, 0x03, 0x8ea6),
+ WRITE_COEF(0x1b, 0x0e6b),
{}
};
static struct coef_fw coef0233[] = {
@@ -4570,8 +4592,7 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
};
static struct coef_fw coef0256[] = {
WRITE_COEF(0x45, 0xe489), /* Set to OMTP Type */
- WRITE_COEF(0x1b, 0x0c6b),
- WRITE_COEFEX(0x57, 0x03, 0x8ea6),
+ WRITE_COEF(0x1b, 0x0e6b),
{}
};
static struct coef_fw coef0233[] = {
@@ -4703,13 +4724,37 @@ static void alc_determine_headset_type(struct hda_codec *codec)
};

switch (codec->core.vendor_id) {
- case 0x10ec0236:
case 0x10ec0255:
+ alc_process_coef_fw(codec, coef0255);
+ msleep(300);
+ val = alc_read_coef_idx(codec, 0x46);
+ is_ctia = (val & 0x0070) == 0x0070;
+ break;
+ case 0x10ec0236:
case 0x10ec0256:
+ alc_write_coef_idx(codec, 0x1b, 0x0e4b);
+ alc_write_coef_idx(codec, 0x06, 0x6104);
+ alc_write_coefex_idx(codec, 0x57, 0x3, 0x09a3);
+
+ snd_hda_codec_write(codec, 0x21, 0,
+ AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
+ msleep(80);
+ snd_hda_codec_write(codec, 0x21, 0,
+ AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
+
alc_process_coef_fw(codec, coef0255);
msleep(300);
val = alc_read_coef_idx(codec, 0x46);
is_ctia = (val & 0x0070) == 0x0070;
+
+ alc_write_coefex_idx(codec, 0x57, 0x3, 0x0da3);
+ alc_update_coefex_idx(codec, 0x57, 0x5, 1<<14, 0);
+
+ snd_hda_codec_write(codec, 0x21, 0,
+ AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
+ msleep(80);
+ snd_hda_codec_write(codec, 0x21, 0,
+ AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE);
break;
case 0x10ec0234:
case 0x10ec0274:
@@ -6166,15 +6211,13 @@ static const struct hda_fixup alc269_fixups[] = {
.chain_id = ALC269_FIXUP_THINKPAD_ACPI,
},
[ALC255_FIXUP_ACER_MIC_NO_PRESENCE] = {
- .type = HDA_FIXUP_VERBS,
- .v.verbs = (const struct hda_verb[]) {
- /* Enable the Mic */
- { 0x20, AC_VERB_SET_COEF_INDEX, 0x45 },
- { 0x20, AC_VERB_SET_PROC_COEF, 0x5089 },
- {}
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */
+ { }
},
.chained = true,
- .chain_id = ALC269_FIXUP_LIFEBOOK_EXTMIC
+ .chain_id = ALC255_FIXUP_HEADSET_MODE
},
[ALC255_FIXUP_ASUS_MIC_NO_PRESENCE] = {
.type = HDA_FIXUP_PINS,
@@ -7219,10 +7262,6 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
{0x18, 0x02a11030},
{0x19, 0x0181303F},
{0x21, 0x0221102f}),
- SND_HDA_PIN_QUIRK(0x10ec0255, 0x1025, "Acer", ALC255_FIXUP_ACER_MIC_NO_PRESENCE,
- {0x12, 0x90a60140},
- {0x14, 0x90170120},
- {0x21, 0x02211030}),
SND_HDA_PIN_QUIRK(0x10ec0255, 0x1025, "Acer", ALC255_FIXUP_ACER_MIC_NO_PRESENCE,
{0x12, 0x90a601c0},
{0x14, 0x90171120},
diff --git a/sound/pci/ice1712/ews.c b/sound/pci/ice1712/ews.c
index 7646c93e8268..d492dde88e16 100644
--- a/sound/pci/ice1712/ews.c
+++ b/sound/pci/ice1712/ews.c
@@ -826,7 +826,7 @@ static int snd_ice1712_6fire_read_pca(struct snd_ice1712 *ice, unsigned char reg

snd_i2c_lock(ice->i2c);
byte = reg;
- if (snd_i2c_sendbytes(spec->i2cdevs[EWS_I2C_6FIRE], &byte, 1)) {
+ if (snd_i2c_sendbytes(spec->i2cdevs[EWS_I2C_6FIRE], &byte, 1) != 1) {
snd_i2c_unlock(ice->i2c);
dev_err(ice->card->dev, "cannot send pca\n");
return -EIO;
diff --git a/sound/soc/codecs/cs42xx8.c b/sound/soc/codecs/cs42xx8.c
index ebb9e0cf8364..28a4ac36c4f8 100644
--- a/sound/soc/codecs/cs42xx8.c
+++ b/sound/soc/codecs/cs42xx8.c
@@ -558,6 +558,7 @@ static int cs42xx8_runtime_resume(struct device *dev)
msleep(5);

regcache_cache_only(cs42xx8->regmap, false);
+ regcache_mark_dirty(cs42xx8->regmap);

ret = regcache_sync(cs42xx8->regmap);
if (ret) {
diff --git a/sound/soc/fsl/fsl_asrc.c b/sound/soc/fsl/fsl_asrc.c
index 0b937924d2e4..ea035c12a325 100644
--- a/sound/soc/fsl/fsl_asrc.c
+++ b/sound/soc/fsl/fsl_asrc.c
@@ -282,8 +282,8 @@ static int fsl_asrc_config_pair(struct fsl_asrc_pair *pair)
return -EINVAL;
}

- if ((outrate > 8000 && outrate < 30000) &&
- (outrate/inrate > 24 || inrate/outrate > 8)) {
+ if ((outrate >= 8000 && outrate <= 30000) &&
+ (outrate > 24 * inrate || inrate > 8 * outrate)) {
pair_err("exceed supported ratio range [1/24, 8] for \
inrate/outrate: %d/%d\n", inrate, outrate);
return -EINVAL;
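[Editor's note: besides widening the outrate window to include the 8000 and 30000 endpoints, the ratio test above is rewritten as a cross-multiplication, because integer division rounds toward zero and lets slightly out-of-range pairs through. A small arithmetic demo, with rates picked purely for illustration:

	#include <stdio.h>

	int main(void)
	{
		int inrate = 1200, outrate = 29000;	/* real ratio ~24.17, above the 24 limit */

		printf("old test outrate/inrate > 24 : %d\n", outrate / inrate > 24);	/* 0: 29000/1200 == 24 */
		printf("new test outrate > 24*inrate : %d\n", outrate > 24 * inrate);	/* 1: 29000 > 28800 */
		return 0;
	}
]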
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index fe99b02bbf17..a7b4fab92f26 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -228,7 +228,10 @@ static void soc_init_card_debugfs(struct snd_soc_card *card)

static void soc_cleanup_card_debugfs(struct snd_soc_card *card)
{
+ if (!card->debugfs_card_root)
+ return;
debugfs_remove_recursive(card->debugfs_card_root);
+ card->debugfs_card_root = NULL;
}

static void snd_soc_debugfs_init(void)
@@ -2034,8 +2037,10 @@ static void soc_check_tplg_fes(struct snd_soc_card *card)
static int soc_cleanup_card_resources(struct snd_soc_card *card)
{
/* free the ALSA card at first; this syncs with pending operations */
- if (card->snd_card)
+ if (card->snd_card) {
snd_card_free(card->snd_card);
+ card->snd_card = NULL;
+ }

/* remove and free each DAI */
soc_remove_dai_links(card);
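[Editor's note: both soc-core hunks above, and the soc-dapm hunk below, follow the same pattern: bail out if the resource is already gone and reset the pointer after freeing it, so a cleanup path that can run more than once stays harmless. A generic sketch of the idiom (illustrative types, not ASoC code):

	#include <stdlib.h>

	struct card { void *snd_card; };

	static void card_cleanup(struct card *card)
	{
		if (!card->snd_card)
			return;			/* already cleaned up: nothing to do */
		free(card->snd_card);
		card->snd_card = NULL;		/* make a second cleanup call a no-op */
	}
]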
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 0382a47b30bd..5d9d7678e4fa 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -2192,7 +2192,10 @@ static void dapm_debugfs_add_widget(struct snd_soc_dapm_widget *w)

static void dapm_debugfs_cleanup(struct snd_soc_dapm_context *dapm)
{
+ if (!dapm->debugfs_dapm)
+ return;
debugfs_remove_recursive(dapm->debugfs_dapm);
+ dapm->debugfs_dapm = NULL;
}

#else
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index d2be5a06c339..ed8ef5c82256 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -873,6 +873,8 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
}
}

+ set_max_rlimit();
+
obj = __bpf_object__open_xattr(&attr, bpf_flags);
if (IS_ERR_OR_NULL(obj)) {
p_err("failed to open object file");
@@ -952,8 +954,6 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
goto err_close_obj;
}

- set_max_rlimit();
-
err = bpf_object__load(obj);
if (err) {
p_err("failed to load object file");
diff --git a/tools/io_uring/Makefile b/tools/io_uring/Makefile
index f79522fc37b5..00f146c54c53 100644
--- a/tools/io_uring/Makefile
+++ b/tools/io_uring/Makefile
@@ -8,7 +8,7 @@ all: io_uring-cp io_uring-bench
$(CC) $(CFLAGS) -o $@ $^

io_uring-bench: syscall.o io_uring-bench.o
- $(CC) $(CFLAGS) $(LDLIBS) -o $@ $^
+ $(CC) $(CFLAGS) -o $@ $^ $(LDLIBS)

io_uring-cp: setup.o syscall.o queue.o

diff --git a/tools/kvm/kvm_stat/kvm_stat b/tools/kvm/kvm_stat/kvm_stat
index 2ed395b817cb..bc508dae286c 100755
--- a/tools/kvm/kvm_stat/kvm_stat
+++ b/tools/kvm/kvm_stat/kvm_stat
@@ -575,8 +575,12 @@ class TracepointProvider(Provider):
def update_fields(self, fields_filter):
"""Refresh fields, applying fields_filter"""
self.fields = [field for field in self._get_available_fields()
- if self.is_field_wanted(fields_filter, field) or
- ARCH.tracepoint_is_child(field)]
+ if self.is_field_wanted(fields_filter, field)]
+ # add parents for child fields - otherwise we won't see any output!
+ for field in self._fields:
+ parent = ARCH.tracepoint_is_child(field)
+ if (parent and parent not in self._fields):
+ self.fields.append(parent)

@staticmethod
def _get_online_cpus():
@@ -735,8 +739,12 @@ class DebugfsProvider(Provider):
def update_fields(self, fields_filter):
"""Refresh fields, applying fields_filter"""
self._fields = [field for field in self._get_available_fields()
- if self.is_field_wanted(fields_filter, field) or
- ARCH.debugfs_is_child(field)]
+ if self.is_field_wanted(fields_filter, field)]
+ # add parents for child fields - otherwise we won't see any output!
+ for field in self._fields:
+ parent = ARCH.debugfs_is_child(field)
+ if (parent and parent not in self._fields):
+ self.fields.append(parent)

@property
def fields(self):
diff --git a/tools/kvm/kvm_stat/kvm_stat.txt b/tools/kvm/kvm_stat/kvm_stat.txt
index 0811d860fe75..c057ba52364e 100644
--- a/tools/kvm/kvm_stat/kvm_stat.txt
+++ b/tools/kvm/kvm_stat/kvm_stat.txt
@@ -34,6 +34,8 @@ INTERACTIVE COMMANDS
*c*:: clear filter

*f*:: filter by regular expression
+ :: *Note*: Child events pull in their parents, and parents' stats summarize
+ all child events, not just the filtered ones

*g*:: filter by guest name/PID

diff --git a/tools/testing/selftests/bpf/bpf_helpers.h b/tools/testing/selftests/bpf/bpf_helpers.h
index c81fc350f7ad..a43a52cdd3f0 100644
--- a/tools/testing/selftests/bpf/bpf_helpers.h
+++ b/tools/testing/selftests/bpf/bpf_helpers.h
@@ -246,7 +246,7 @@ static int (*bpf_skb_change_type)(void *ctx, __u32 type) =
(void *) BPF_FUNC_skb_change_type;
static unsigned int (*bpf_get_hash_recalc)(void *ctx) =
(void *) BPF_FUNC_get_hash_recalc;
-static unsigned long long (*bpf_get_current_task)(void *ctx) =
+static unsigned long long (*bpf_get_current_task)(void) =
(void *) BPF_FUNC_get_current_task;
static int (*bpf_skb_change_tail)(void *ctx, __u32 len, __u64 flags) =
(void *) BPF_FUNC_skb_change_tail;
diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c
index 93f99c6b7d79..a2542ed42819 100644
--- a/tools/testing/selftests/kvm/dirty_log_test.c
+++ b/tools/testing/selftests/kvm/dirty_log_test.c
@@ -292,7 +292,7 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
* A little more than 1G of guest page sized pages. Cover the
* case where the size is not aligned to 64 pages.
*/
- guest_num_pages = (1ul << (30 - guest_page_shift)) + 3;
+ guest_num_pages = (1ul << (30 - guest_page_shift)) + 16;
host_page_size = getpagesize();
host_num_pages = (guest_num_pages * guest_page_size) / host_page_size +
!!((guest_num_pages * guest_page_size) % host_page_size);
diff --git a/tools/testing/selftests/kvm/lib/aarch64/processor.c b/tools/testing/selftests/kvm/lib/aarch64/processor.c
index e8c42506a09d..fa6cd340137c 100644
--- a/tools/testing/selftests/kvm/lib/aarch64/processor.c
+++ b/tools/testing/selftests/kvm/lib/aarch64/processor.c
@@ -226,7 +226,7 @@ struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
uint64_t extra_pg_pages = (extra_mem_pages / ptrs_per_4k_pte) * 2;
struct kvm_vm *vm;

- vm = vm_create(VM_MODE_P52V48_4K, DEFAULT_GUEST_PHY_PAGES + extra_pg_pages, O_RDWR);
+ vm = vm_create(VM_MODE_P40V48_4K, DEFAULT_GUEST_PHY_PAGES + extra_pg_pages, O_RDWR);

kvm_vm_elf_load(vm, program_invocation_name, 0, 0);
vm_vcpu_add_default(vm, vcpuid, guest_code);
diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c b/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c
index 9a21e912097c..63b9fc3fdfbe 100644
--- a/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c
+++ b/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c
@@ -58,9 +58,8 @@ static void test_hv_cpuid(struct kvm_cpuid2 *hv_cpuid_entries,
TEST_ASSERT(entry->flags == 0,
".flags field should be zero");

- TEST_ASSERT(entry->padding[0] == entry->padding[1]
- == entry->padding[2] == 0,
- ".index field should be zero");
+ TEST_ASSERT(!entry->padding[0] && !entry->padding[1] &&
+ !entry->padding[2], "padding should be zero");

/*
* If needed for debug:
diff --git a/tools/testing/selftests/net/fib_rule_tests.sh b/tools/testing/selftests/net/fib_rule_tests.sh
index 4b7e107865bf..1ba069967fa2 100755
--- a/tools/testing/selftests/net/fib_rule_tests.sh
+++ b/tools/testing/selftests/net/fib_rule_tests.sh
@@ -55,7 +55,7 @@ setup()

$IP link add dummy0 type dummy
$IP link set dev dummy0 up
- $IP address add 198.51.100.1/24 dev dummy0
+ $IP address add 192.51.100.1/24 dev dummy0
$IP -6 address add 2001:db8:1::1/64 dev dummy0

set +e
diff --git a/tools/testing/selftests/timers/adjtick.c b/tools/testing/selftests/timers/adjtick.c
index 0caca3a06bd2..54d8d87f36b3 100644
--- a/tools/testing/selftests/timers/adjtick.c
+++ b/tools/testing/selftests/timers/adjtick.c
@@ -136,6 +136,7 @@ int check_tick_adj(long tickval)

eppm = get_ppm_drift();
printf("%lld usec, %lld ppm", systick + (systick * eppm / MILLION), eppm);
+ fflush(stdout);

tx1.modes = 0;
adjtimex(&tx1);
diff --git a/tools/testing/selftests/timers/leapcrash.c b/tools/testing/selftests/timers/leapcrash.c
index 830c462f605d..dc80728ed191 100644
--- a/tools/testing/selftests/timers/leapcrash.c
+++ b/tools/testing/selftests/timers/leapcrash.c
@@ -101,6 +101,7 @@ int main(void)
}
clear_time_state();
printf(".");
+ fflush(stdout);
}
printf("[OK]\n");
return ksft_exit_pass();
diff --git a/tools/testing/selftests/timers/mqueue-lat.c b/tools/testing/selftests/timers/mqueue-lat.c
index 1867db5d6f5e..7916cf5cc6ff 100644
--- a/tools/testing/selftests/timers/mqueue-lat.c
+++ b/tools/testing/selftests/timers/mqueue-lat.c
@@ -102,6 +102,7 @@ int main(int argc, char **argv)
int ret;

printf("Mqueue latency : ");
+ fflush(stdout);

ret = mqueue_lat_test();
if (ret < 0) {
diff --git a/tools/testing/selftests/timers/nanosleep.c b/tools/testing/selftests/timers/nanosleep.c
index 8adb0bb51d4d..71b5441c2fd9 100644
--- a/tools/testing/selftests/timers/nanosleep.c
+++ b/tools/testing/selftests/timers/nanosleep.c
@@ -142,6 +142,7 @@ int main(int argc, char **argv)
continue;

printf("Nanosleep %-31s ", clockstring(clockid));
+ fflush(stdout);

length = 10;
while (length <= (NSEC_PER_SEC * 10)) {
diff --git a/tools/testing/selftests/timers/nsleep-lat.c b/tools/testing/selftests/timers/nsleep-lat.c
index c3c3dc10db17..eb3e79ed7b4a 100644
--- a/tools/testing/selftests/timers/nsleep-lat.c
+++ b/tools/testing/selftests/timers/nsleep-lat.c
@@ -155,6 +155,7 @@ int main(int argc, char **argv)
continue;

printf("nsleep latency %-26s ", clockstring(clockid));
+ fflush(stdout);

length = 10;
while (length <= (NSEC_PER_SEC * 10)) {
diff --git a/tools/testing/selftests/timers/raw_skew.c b/tools/testing/selftests/timers/raw_skew.c
index dcf73c5dab6e..b41d8dd0c40c 100644
--- a/tools/testing/selftests/timers/raw_skew.c
+++ b/tools/testing/selftests/timers/raw_skew.c
@@ -112,6 +112,7 @@ int main(int argv, char **argc)
printf("WARNING: ADJ_OFFSET in progress, this will cause inaccurate results\n");

printf("Estimating clock drift: ");
+ fflush(stdout);
sleep(120);

get_monotonic_and_raw(&mon, &raw);
diff --git a/tools/testing/selftests/timers/set-tai.c b/tools/testing/selftests/timers/set-tai.c
index 70fed27d8fd3..8c4179ee2ca2 100644
--- a/tools/testing/selftests/timers/set-tai.c
+++ b/tools/testing/selftests/timers/set-tai.c
@@ -55,6 +55,7 @@ int main(int argc, char **argv)
printf("tai offset started at %i\n", ret);

printf("Checking tai offsets can be properly set: ");
+ fflush(stdout);
for (i = 1; i <= 60; i++) {
ret = set_tai(i);
ret = get_tai();
diff --git a/tools/testing/selftests/timers/set-tz.c b/tools/testing/selftests/timers/set-tz.c
index 877fd5532fee..62bd33eb16f0 100644
--- a/tools/testing/selftests/timers/set-tz.c
+++ b/tools/testing/selftests/timers/set-tz.c
@@ -65,6 +65,7 @@ int main(int argc, char **argv)
printf("tz_minuteswest started at %i, dst at %i\n", min, dst);

printf("Checking tz_minuteswest can be properly set: ");
+ fflush(stdout);
for (i = -15*60; i < 15*60; i += 30) {
ret = set_tz(i, dst);
ret = get_tz_min();
@@ -76,6 +77,7 @@ int main(int argc, char **argv)
printf("[OK]\n");

printf("Checking invalid tz_minuteswest values are caught: ");
+ fflush(stdout);

if (!set_tz(-15*60-1, dst)) {
printf("[FAILED] %i didn't return failure!\n", -15*60-1);
diff --git a/tools/testing/selftests/timers/threadtest.c b/tools/testing/selftests/timers/threadtest.c
index 759c9c06f1a0..cf3e48919874 100644
--- a/tools/testing/selftests/timers/threadtest.c
+++ b/tools/testing/selftests/timers/threadtest.c
@@ -163,6 +163,7 @@ int main(int argc, char **argv)
strftime(buf, 255, "%a, %d %b %Y %T %z", localtime(&start));
printf("%s\n", buf);
printf("Testing consistency with %i threads for %ld seconds: ", thread_count, runtime);
+ fflush(stdout);

/* spawn */
for (i = 0; i < thread_count; i++)
diff --git a/tools/testing/selftests/timers/valid-adjtimex.c b/tools/testing/selftests/timers/valid-adjtimex.c
index d9d3ab93b31a..5397de708d3c 100644
--- a/tools/testing/selftests/timers/valid-adjtimex.c
+++ b/tools/testing/selftests/timers/valid-adjtimex.c
@@ -123,6 +123,7 @@ int validate_freq(void)
/* Set the leap second insert flag */

printf("Testing ADJ_FREQ... ");
+ fflush(stdout);
for (i = 0; i < NUM_FREQ_VALID; i++) {
tx.modes = ADJ_FREQUENCY;
tx.freq = valid_freq[i];
@@ -250,6 +251,7 @@ int set_bad_offset(long sec, long usec, int use_nano)
int validate_set_offset(void)
{
printf("Testing ADJ_SETOFFSET... ");
+ fflush(stdout);

/* Test valid values */
if (set_offset(NSEC_PER_SEC - 1, 1))
diff --git a/virt/kvm/arm/aarch32.c b/virt/kvm/arm/aarch32.c
index 5abbe9b3c652..6880236974b8 100644
--- a/virt/kvm/arm/aarch32.c
+++ b/virt/kvm/arm/aarch32.c
@@ -25,127 +25,6 @@
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>

-/*
- * stolen from arch/arm/kernel/opcodes.c
- *
- * condition code lookup table
- * index into the table is test code: EQ, NE, ... LT, GT, AL, NV
- *
- * bit position in short is condition code: NZCV
- */
-static const unsigned short cc_map[16] = {
- 0xF0F0, /* EQ == Z set */
- 0x0F0F, /* NE */
- 0xCCCC, /* CS == C set */
- 0x3333, /* CC */
- 0xFF00, /* MI == N set */
- 0x00FF, /* PL */
- 0xAAAA, /* VS == V set */
- 0x5555, /* VC */
- 0x0C0C, /* HI == C set && Z clear */
- 0xF3F3, /* LS == C clear || Z set */
- 0xAA55, /* GE == (N==V) */
- 0x55AA, /* LT == (N!=V) */
- 0x0A05, /* GT == (!Z && (N==V)) */
- 0xF5FA, /* LE == (Z || (N!=V)) */
- 0xFFFF, /* AL always */
- 0 /* NV */
-};
-
-/*
- * Check if a trapped instruction should have been executed or not.
- */
-bool __hyp_text kvm_condition_valid32(const struct kvm_vcpu *vcpu)
-{
- unsigned long cpsr;
- u32 cpsr_cond;
- int cond;
-
- /* Top two bits non-zero? Unconditional. */
- if (kvm_vcpu_get_hsr(vcpu) >> 30)
- return true;
-
- /* Is condition field valid? */
- cond = kvm_vcpu_get_condition(vcpu);
- if (cond == 0xE)
- return true;
-
- cpsr = *vcpu_cpsr(vcpu);
-
- if (cond < 0) {
- /* This can happen in Thumb mode: examine IT state. */
- unsigned long it;
-
- it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3);
-
- /* it == 0 => unconditional. */
- if (it == 0)
- return true;
-
- /* The cond for this insn works out as the top 4 bits. */
- cond = (it >> 4);
- }
-
- cpsr_cond = cpsr >> 28;
-
- if (!((cc_map[cond] >> cpsr_cond) & 1))
- return false;
-
- return true;
-}
-
-/**
- * adjust_itstate - adjust ITSTATE when emulating instructions in IT-block
- * @vcpu: The VCPU pointer
- *
- * When exceptions occur while instructions are executed in Thumb IF-THEN
- * blocks, the ITSTATE field of the CPSR is not advanced (updated), so we have
- * to do this little bit of work manually. The fields map like this:
- *
- * IT[7:0] -> CPSR[26:25],CPSR[15:10]
- */
-static void __hyp_text kvm_adjust_itstate(struct kvm_vcpu *vcpu)
-{
- unsigned long itbits, cond;
- unsigned long cpsr = *vcpu_cpsr(vcpu);
- bool is_arm = !(cpsr & PSR_AA32_T_BIT);
-
- if (is_arm || !(cpsr & PSR_AA32_IT_MASK))
- return;
-
- cond = (cpsr & 0xe000) >> 13;
- itbits = (cpsr & 0x1c00) >> (10 - 2);
- itbits |= (cpsr & (0x3 << 25)) >> 25;
-
- /* Perform ITAdvance (see page A2-52 in ARM DDI 0406C) */
- if ((itbits & 0x7) == 0)
- itbits = cond = 0;
- else
- itbits = (itbits << 1) & 0x1f;
-
- cpsr &= ~PSR_AA32_IT_MASK;
- cpsr |= cond << 13;
- cpsr |= (itbits & 0x1c) << (10 - 2);
- cpsr |= (itbits & 0x3) << 25;
- *vcpu_cpsr(vcpu) = cpsr;
-}
-
-/**
- * kvm_skip_instr - skip a trapped instruction and proceed to the next
- * @vcpu: The vcpu pointer
- */
-void __hyp_text kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr)
-{
- bool is_thumb;
-
- is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_AA32_T_BIT);
- if (is_thumb && !is_wide_instr)
- *vcpu_pc(vcpu) += 2;
- else
- *vcpu_pc(vcpu) += 4;
- kvm_adjust_itstate(vcpu);
-}
-
/*
* Table taken from ARMv8 ARM DDI0487B-B, table G1-10.
*/
diff --git a/virt/kvm/arm/hyp/aarch32.c b/virt/kvm/arm/hyp/aarch32.c
new file mode 100644
index 000000000000..d31f267961e7
--- /dev/null
+++ b/virt/kvm/arm/hyp/aarch32.c
@@ -0,0 +1,136 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hyp portion of the (not much of an) Emulation layer for 32bit guests.
+ *
+ * Copyright (C) 2012,2013 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@xxxxxxx>
+ *
+ * based on arch/arm/kvm/emulate.c
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@xxxxxxxxxxxxxxxxxxxxxx>
+ */
+
+#include <linux/kvm_host.h>
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_hyp.h>
+
+/*
+ * stolen from arch/arm/kernel/opcodes.c
+ *
+ * condition code lookup table
+ * index into the table is test code: EQ, NE, ... LT, GT, AL, NV
+ *
+ * bit position in short is condition code: NZCV
+ */
+static const unsigned short cc_map[16] = {
+ 0xF0F0, /* EQ == Z set */
+ 0x0F0F, /* NE */
+ 0xCCCC, /* CS == C set */
+ 0x3333, /* CC */
+ 0xFF00, /* MI == N set */
+ 0x00FF, /* PL */
+ 0xAAAA, /* VS == V set */
+ 0x5555, /* VC */
+ 0x0C0C, /* HI == C set && Z clear */
+ 0xF3F3, /* LS == C clear || Z set */
+ 0xAA55, /* GE == (N==V) */
+ 0x55AA, /* LT == (N!=V) */
+ 0x0A05, /* GT == (!Z && (N==V)) */
+ 0xF5FA, /* LE == (Z || (N!=V)) */
+ 0xFFFF, /* AL always */
+ 0 /* NV */
+};
+
+/*
+ * Check if a trapped instruction should have been executed or not.
+ */
+bool __hyp_text kvm_condition_valid32(const struct kvm_vcpu *vcpu)
+{
+ unsigned long cpsr;
+ u32 cpsr_cond;
+ int cond;
+
+ /* Top two bits non-zero? Unconditional. */
+ if (kvm_vcpu_get_hsr(vcpu) >> 30)
+ return true;
+
+ /* Is condition field valid? */
+ cond = kvm_vcpu_get_condition(vcpu);
+ if (cond == 0xE)
+ return true;
+
+ cpsr = *vcpu_cpsr(vcpu);
+
+ if (cond < 0) {
+ /* This can happen in Thumb mode: examine IT state. */
+ unsigned long it;
+
+ it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3);
+
+ /* it == 0 => unconditional. */
+ if (it == 0)
+ return true;
+
+ /* The cond for this insn works out as the top 4 bits. */
+ cond = (it >> 4);
+ }
+
+ cpsr_cond = cpsr >> 28;
+
+ if (!((cc_map[cond] >> cpsr_cond) & 1))
+ return false;
+
+ return true;
+}
+
+/**
+ * adjust_itstate - adjust ITSTATE when emulating instructions in IT-block
+ * @vcpu: The VCPU pointer
+ *
+ * When exceptions occur while instructions are executed in Thumb IF-THEN
+ * blocks, the ITSTATE field of the CPSR is not advanced (updated), so we have
+ * to do this little bit of work manually. The fields map like this:
+ *
+ * IT[7:0] -> CPSR[26:25],CPSR[15:10]
+ */
+static void __hyp_text kvm_adjust_itstate(struct kvm_vcpu *vcpu)
+{
+ unsigned long itbits, cond;
+ unsigned long cpsr = *vcpu_cpsr(vcpu);
+ bool is_arm = !(cpsr & PSR_AA32_T_BIT);
+
+ if (is_arm || !(cpsr & PSR_AA32_IT_MASK))
+ return;
+
+ cond = (cpsr & 0xe000) >> 13;
+ itbits = (cpsr & 0x1c00) >> (10 - 2);
+ itbits |= (cpsr & (0x3 << 25)) >> 25;
+
+ /* Perform ITAdvance (see page A2-52 in ARM DDI 0406C) */
+ if ((itbits & 0x7) == 0)
+ itbits = cond = 0;
+ else
+ itbits = (itbits << 1) & 0x1f;
+
+ cpsr &= ~PSR_AA32_IT_MASK;
+ cpsr |= cond << 13;
+ cpsr |= (itbits & 0x1c) << (10 - 2);
+ cpsr |= (itbits & 0x3) << 25;
+ *vcpu_cpsr(vcpu) = cpsr;
+}
+
+/**
+ * kvm_skip_instr - skip a trapped instruction and proceed to the next
+ * @vcpu: The vcpu pointer
+ */
+void __hyp_text kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr)
+{
+ bool is_thumb;
+
+ is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_AA32_T_BIT);
+ if (is_thumb && !is_wide_instr)
+ *vcpu_pc(vcpu) += 2;
+ else
+ *vcpu_pc(vcpu) += 4;
+ kvm_adjust_itstate(vcpu);
+}