Re: Linux 3.10.5

From: Greg KH
Date: Sun Aug 04 2013 - 05:42:24 EST


diff --git a/Makefile b/Makefile
index b4df9b2..f8349d0 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 3
PATCHLEVEL = 10
-SUBLEVEL = 4
+SUBLEVEL = 5
EXTRAVERSION =
NAME = Unicycling Gorilla

diff --git a/arch/arm/boot/compressed/atags_to_fdt.c b/arch/arm/boot/compressed/atags_to_fdt.c
index aabc02a..d1153c8 100644
--- a/arch/arm/boot/compressed/atags_to_fdt.c
+++ b/arch/arm/boot/compressed/atags_to_fdt.c
@@ -53,6 +53,17 @@ static const void *getprop(const void *fdt, const char *node_path,
return fdt_getprop(fdt, offset, property, len);
}

+static uint32_t get_cell_size(const void *fdt)
+{
+ int len;
+ uint32_t cell_size = 1;
+ const uint32_t *size_len = getprop(fdt, "/", "#size-cells", &len);
+
+ if (size_len)
+ cell_size = fdt32_to_cpu(*size_len);
+ return cell_size;
+}
+
static void merge_fdt_bootargs(void *fdt, const char *fdt_cmdline)
{
char cmdline[COMMAND_LINE_SIZE];
@@ -95,9 +106,11 @@ static void merge_fdt_bootargs(void *fdt, const char *fdt_cmdline)
int atags_to_fdt(void *atag_list, void *fdt, int total_space)
{
struct tag *atag = atag_list;
- uint32_t mem_reg_property[2 * NR_BANKS];
+ /* In the case of 64 bits memory size, need to reserve 2 cells for
+ * address and size for each bank */
+ uint32_t mem_reg_property[2 * 2 * NR_BANKS];
int memcount = 0;
- int ret;
+ int ret, memsize;

/* make sure we've got an aligned pointer */
if ((u32)atag_list & 0x3)
@@ -137,8 +150,25 @@ int atags_to_fdt(void *atag_list, void *fdt, int total_space)
continue;
if (!atag->u.mem.size)
continue;
- mem_reg_property[memcount++] = cpu_to_fdt32(atag->u.mem.start);
- mem_reg_property[memcount++] = cpu_to_fdt32(atag->u.mem.size);
+ memsize = get_cell_size(fdt);
+
+ if (memsize == 2) {
+ /* if memsize is 2, that means that
+ * each data needs 2 cells of 32 bits,
+ * so the data are 64 bits */
+ uint64_t *mem_reg_prop64 =
+ (uint64_t *)mem_reg_property;
+ mem_reg_prop64[memcount++] =
+ cpu_to_fdt64(atag->u.mem.start);
+ mem_reg_prop64[memcount++] =
+ cpu_to_fdt64(atag->u.mem.size);
+ } else {
+ mem_reg_property[memcount++] =
+ cpu_to_fdt32(atag->u.mem.start);
+ mem_reg_property[memcount++] =
+ cpu_to_fdt32(atag->u.mem.size);
+ }
+
} else if (atag->hdr.tag == ATAG_INITRD2) {
uint32_t initrd_start, initrd_size;
initrd_start = atag->u.initrd.start;
@@ -150,8 +180,10 @@ int atags_to_fdt(void *atag_list, void *fdt, int total_space)
}
}

- if (memcount)
- setprop(fdt, "/memory", "reg", mem_reg_property, 4*memcount);
+ if (memcount) {
+ setprop(fdt, "/memory", "reg", mem_reg_property,
+ 4 * memcount * memsize);
+ }

return fdt_pack(fdt);
}
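
For reference, a small standalone sketch (not from the kernel tree) of the cell packing the atags_to_fdt change above now handles: when the root node's "#size-cells" property is 2, each memory bank's address and size occupy two big-endian 32-bit FDT cells instead of one, so banks above 4 GiB survive the ATAG-to-FDT conversion.

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>  /* htonl(): FDT cells are big-endian */

    /* Pack one memory bank into "reg" cells for #size-cells = 1 or 2. */
    static int pack_bank(uint32_t *cells, int size_cells,
                         uint64_t start, uint64_t size)
    {
            if (size_cells == 2) {
                    cells[0] = htonl(start >> 32);
                    cells[1] = htonl(start & 0xffffffff);
                    cells[2] = htonl(size >> 32);
                    cells[3] = htonl(size & 0xffffffff);
                    return 4;       /* cells consumed */
            }
            cells[0] = htonl(start);
            cells[1] = htonl(size);
            return 2;
    }

    int main(void)
    {
            uint32_t reg[4];
            int i, n = pack_bank(reg, 2, 0x880000000ULL, 0x40000000ULL);

            /* prints: 00000008 80000000 00000000 40000000 */
            for (i = 0; i < n; i++)
                    printf("%08x ", ntohl(reg[i]));
            printf("\n");
            return 0;
    }

This is also why the hunk sizes mem_reg_property for the worst case (two cells for address plus two for size per bank) and passes 4 * memcount * memsize bytes to setprop().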
diff --git a/arch/powerpc/include/asm/module.h b/arch/powerpc/include/asm/module.h
index c1df590..49fa55b 100644
--- a/arch/powerpc/include/asm/module.h
+++ b/arch/powerpc/include/asm/module.h
@@ -82,10 +82,9 @@ struct exception_table_entry;
void sort_ex_table(struct exception_table_entry *start,
struct exception_table_entry *finish);

-#ifdef CONFIG_MODVERSIONS
+#if defined(CONFIG_MODVERSIONS) && defined(CONFIG_PPC64)
#define ARCH_RELOCATES_KCRCTAB
-
-extern const unsigned long reloc_start[];
+#define reloc_start PHYSICAL_START
#endif
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_MODULE_H */
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 654e479..f096e72 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -38,9 +38,6 @@ jiffies = jiffies_64 + 4;
#endif
SECTIONS
{
- . = 0;
- reloc_start = .;
-
. = KERNELBASE;

/*
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index b44577b..ec94e11 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -48,9 +48,20 @@ int acpi_suspend_lowlevel(void)
#ifndef CONFIG_64BIT
native_store_gdt((struct desc_ptr *)&header->pmode_gdt);

+ /*
+ * We have to check that we can write back the value, and not
+ * just read it. At least on 90 nm Pentium M (Family 6, Model
+ * 13), reading an invalid MSR is not guaranteed to trap, see
+ * Erratum X4 in "Intel Pentium M Processor on 90 nm Process
+ * with 2-MB L2 Cache and Intel® Processor A100 and A110 on 90
+ * nm process with 512-KB L2 Cache Specification Update".
+ */
if (!rdmsr_safe(MSR_EFER,
&header->pmode_efer_low,
- &header->pmode_efer_high))
+ &header->pmode_efer_high) &&
+ !wrmsr_safe(MSR_EFER,
+ header->pmode_efer_low,
+ header->pmode_efer_high))
header->pmode_behavior |= (1 << WAKEUP_BEHAVIOR_RESTORE_EFER);
#endif /* !CONFIG_64BIT */

@@ -61,7 +72,10 @@ int acpi_suspend_lowlevel(void)
}
if (!rdmsr_safe(MSR_IA32_MISC_ENABLE,
&header->pmode_misc_en_low,
- &header->pmode_misc_en_high))
+ &header->pmode_misc_en_high) &&
+ !wrmsr_safe(MSR_IA32_MISC_ENABLE,
+ header->pmode_misc_en_low,
+ header->pmode_misc_en_high))
header->pmode_behavior |=
(1 << WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE);
header->realmode_flags = acpi_realmode_flags;
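
For reference, the pattern the two hunks above add, written out as a stand-alone helper (illustrative name; rdmsr_safe()/wrmsr_safe() are the kernel accessors already used in the hunk and return non-zero when the access faults): an MSR is only flagged for restore on wakeup if it can be both read and written back.

    /* Illustrative helper, not part of the patch. */
    static bool msr_round_trips(u32 msr, u32 *lo, u32 *hi)
    {
            if (rdmsr_safe(msr, lo, hi))
                    return false;   /* read faulted: MSR absent */
            if (wrmsr_safe(msr, *lo, *hi))
                    return false;   /* write faults (see the erratum above) */
            return true;            /* safe to save now and restore on resume */
    }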
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index fa72a39..3982357 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -510,8 +510,9 @@ generic_get_free_region(unsigned long base, unsigned long size, int replace_reg)
static void generic_get_mtrr(unsigned int reg, unsigned long *base,
unsigned long *size, mtrr_type *type)
{
- unsigned int mask_lo, mask_hi, base_lo, base_hi;
- unsigned int tmp, hi;
+ u32 mask_lo, mask_hi, base_lo, base_hi;
+ unsigned int hi;
+ u64 tmp, mask;

/*
* get_mtrr doesn't need to update mtrr_state, also it could be called
@@ -532,18 +533,18 @@ static void generic_get_mtrr(unsigned int reg, unsigned long *base,
rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);

/* Work out the shifted address mask: */
- tmp = mask_hi << (32 - PAGE_SHIFT) | mask_lo >> PAGE_SHIFT;
- mask_lo = size_or_mask | tmp;
+ tmp = (u64)mask_hi << (32 - PAGE_SHIFT) | mask_lo >> PAGE_SHIFT;
+ mask = size_or_mask | tmp;

/* Expand tmp with high bits to all 1s: */
- hi = fls(tmp);
+ hi = fls64(tmp);
if (hi > 0) {
- tmp |= ~((1<<(hi - 1)) - 1);
+ tmp |= ~((1ULL<<(hi - 1)) - 1);

- if (tmp != mask_lo) {
+ if (tmp != mask) {
printk(KERN_WARNING "mtrr: your BIOS has configured an incorrect mask, fixing it.\n");
add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
- mask_lo = tmp;
+ mask = tmp;
}
}

@@ -551,8 +552,8 @@ static void generic_get_mtrr(unsigned int reg, unsigned long *base,
* This works correctly if size is a power of two, i.e. a
* contiguous range:
*/
- *size = -mask_lo;
- *base = base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
+ *size = -mask;
+ *base = (u64)base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
*type = base_lo & 0xff;

out_put_cpu:
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index 726bf96..ca22b73 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -305,7 +305,8 @@ int mtrr_add_page(unsigned long base, unsigned long size,
return -EINVAL;
}

- if (base & size_or_mask || size & size_or_mask) {
+ if ((base | (base + size - 1)) >>
+ (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) {
pr_warning("mtrr: base or size exceeds the MTRR width\n");
return -EINVAL;
}
@@ -583,6 +584,7 @@ static struct syscore_ops mtrr_syscore_ops = {

int __initdata changed_by_mtrr_cleanup;

+#define SIZE_OR_MASK_BITS(n) (~((1ULL << ((n) - PAGE_SHIFT)) - 1))
/**
* mtrr_bp_init - initialize mtrrs on the boot CPU
*
@@ -600,7 +602,7 @@ void __init mtrr_bp_init(void)

if (cpu_has_mtrr) {
mtrr_if = &generic_mtrr_ops;
- size_or_mask = 0xff000000; /* 36 bits */
+ size_or_mask = SIZE_OR_MASK_BITS(36);
size_and_mask = 0x00f00000;
phys_addr = 36;

@@ -619,7 +621,7 @@ void __init mtrr_bp_init(void)
boot_cpu_data.x86_mask == 0x4))
phys_addr = 36;

- size_or_mask = ~((1ULL << (phys_addr - PAGE_SHIFT)) - 1);
+ size_or_mask = SIZE_OR_MASK_BITS(phys_addr);
size_and_mask = ~size_or_mask & 0xfffff00000ULL;
} else if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR &&
boot_cpu_data.x86 == 6) {
@@ -627,7 +629,7 @@ void __init mtrr_bp_init(void)
* VIA C* family have Intel style MTRRs,
* but don't support PAE
*/
- size_or_mask = 0xfff00000; /* 32 bits */
+ size_or_mask = SIZE_OR_MASK_BITS(32);
size_and_mask = 0;
phys_addr = 32;
}
@@ -637,21 +639,21 @@ void __init mtrr_bp_init(void)
if (cpu_has_k6_mtrr) {
/* Pre-Athlon (K6) AMD CPU MTRRs */
mtrr_if = mtrr_ops[X86_VENDOR_AMD];
- size_or_mask = 0xfff00000; /* 32 bits */
+ size_or_mask = SIZE_OR_MASK_BITS(32);
size_and_mask = 0;
}
break;
case X86_VENDOR_CENTAUR:
if (cpu_has_centaur_mcr) {
mtrr_if = mtrr_ops[X86_VENDOR_CENTAUR];
- size_or_mask = 0xfff00000; /* 32 bits */
+ size_or_mask = SIZE_OR_MASK_BITS(32);
size_and_mask = 0;
}
break;
case X86_VENDOR_CYRIX:
if (cpu_has_cyrix_arr) {
mtrr_if = mtrr_ops[X86_VENDOR_CYRIX];
- size_or_mask = 0xfff00000; /* 32 bits */
+ size_or_mask = SIZE_OR_MASK_BITS(32);
size_and_mask = 0;
}
break;
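
For reference, a stand-alone example (not from the kernel) of the arithmetic the MTRR changes above widen to 64 bits: SIZE_OR_MASK_BITS(36) expands to 0xffffffffff000000, which the old u32 constant 0xff000000 silently truncated, and generic_get_mtrr() recovers a region's size as -mask, which only gives the right answer when the whole mask is carried in a 64-bit type.

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            const unsigned phys_bits = 36, page_shift = 12;
            const uint64_t region_bytes = 64ULL << 20;      /* a 64 MiB MTRR */

            /* all bits above the addressable range, in page units */
            uint64_t size_or_mask = ~((1ULL << (phys_bits - page_shift)) - 1);

            /* the hardware mask for the region, converted from bytes to pages */
            uint64_t hw_mask = (~(region_bytes - 1) >> page_shift)
                               & ((1ULL << (phys_bits - page_shift)) - 1);

            /* widen to all-ones above phys_bits; -mask == size because a
             * valid MTRR mask is contiguous */
            uint64_t mask = size_or_mask | hw_mask;
            uint64_t size_pages = -mask;

            printf("size = %llu MiB\n",
                   (unsigned long long)((size_pages << page_shift) >> 20));
            return 0;
    }

This prints "size = 64 MiB"; with the old 32-bit intermediates the upper mask bits were lost and large or high regions decoded to the wrong size.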
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 321d65e..a836860 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -513,7 +513,7 @@ ENTRY(phys_base)
#include "../../x86/xen/xen-head.S"

.section .bss, "aw", @nobits
- .align L1_CACHE_BYTES
+ .align PAGE_SIZE
ENTRY(idt_table)
.skip IDT_ENTRIES * 16

diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c
index 5e6301e..2cf0244 100644
--- a/drivers/acpi/acpi_memhotplug.c
+++ b/drivers/acpi/acpi_memhotplug.c
@@ -283,6 +283,7 @@ static int acpi_memory_device_add(struct acpi_device *device,
/* Get the range from the _CRS */
result = acpi_memory_get_device_resources(mem_device);
if (result) {
+ device->driver_data = NULL;
kfree(mem_device);
return result;
}
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 14807e5..af658b2 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -237,10 +237,12 @@ static void acpi_scan_bus_device_check(acpi_handle handle, u32 ost_source)

mutex_lock(&acpi_scan_lock);

- acpi_bus_get_device(handle, &device);
- if (device) {
- dev_warn(&device->dev, "Attempt to re-insert\n");
- goto out;
+ if (ost_source != ACPI_NOTIFY_BUS_CHECK) {
+ acpi_bus_get_device(handle, &device);
+ if (device) {
+ dev_warn(&device->dev, "Attempt to re-insert\n");
+ goto out;
+ }
}
acpi_evaluate_hotplug_ost(handle, ost_source,
ACPI_OST_SC_INSERT_IN_PROGRESS, NULL);
@@ -1890,6 +1892,9 @@ static acpi_status acpi_bus_device_attach(acpi_handle handle, u32 lvl_not_used,
if (acpi_bus_get_device(handle, &device))
return AE_CTRL_DEPTH;

+ if (device->handler)
+ return AE_OK;
+
ret = acpi_scan_attach_handler(device);
if (ret)
return ret > 0 ? AE_OK : AE_CTRL_DEPTH;
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 440eadf..0e4b96b 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -450,6 +450,14 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
},
{
.callback = video_ignore_initial_backlight,
+ .ident = "Fujitsu E753",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E753"),
+ },
+ },
+ {
+ .callback = video_ignore_initial_backlight,
.ident = "HP Pavilion dm4",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index a5a3ebc..78eabff 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -107,7 +107,7 @@ config SATA_FSL
If unsure, say N.

config SATA_INIC162X
- tristate "Initio 162x SATA support"
+ tristate "Initio 162x SATA support (Very Experimental)"
depends on PCI
help
This option enables support for Initio 162x Serial ATA.
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 8eae659..b92913a 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -330,7 +330,7 @@ static const struct pci_device_id piix_pci_tbl[] = {
/* SATA Controller IDE (Wellsburg) */
{ 0x8086, 0x8d00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
/* SATA Controller IDE (Wellsburg) */
- { 0x8086, 0x8d08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+ { 0x8086, 0x8d08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb },
/* SATA Controller IDE (Wellsburg) */
{ 0x8086, 0x8d60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
/* SATA Controller IDE (Wellsburg) */
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
index 1e6827c..74456fa 100644
--- a/drivers/ata/sata_inic162x.c
+++ b/drivers/ata/sata_inic162x.c
@@ -6,6 +6,18 @@
*
* This file is released under GPL v2.
*
+ * **** WARNING ****
+ *
+ * This driver never worked properly and unfortunately data corruption is
+ * relatively common. There isn't anyone working on the driver and there's
+ * no support from the vendor. Do not use this driver in any production
+ * environment.
+ *
+ * http://thread.gmane.org/gmane.linux.debian.devel.bugs.rc/378525/focus=54491
+ * https://bugzilla.kernel.org/show_bug.cgi?id=60565
+ *
+ * *****************
+ *
* This controller is eccentric and easily locks up if something isn't
* right. Documentation is available at initio's website but it only
* documents registers (not programming model).
@@ -807,6 +819,8 @@ static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)

ata_print_version_once(&pdev->dev, DRV_VERSION);

+ dev_alert(&pdev->dev, "inic162x support is broken with common data corruption issues and will be disabled by default, contact linux-ide@xxxxxxxxxxxxxxx if in production use\n");
+
/* alloc host */
host = ata_host_alloc_pinfo(&pdev->dev, ppi, NR_PORTS);
hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index a941dcf..d0c81d1 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -1717,7 +1717,7 @@ int regmap_async_complete(struct regmap *map)
int ret;

/* Nothing to do with no async support */
- if (!map->bus->async_write)
+ if (!map->bus || !map->bus->async_write)
return 0;

trace_regmap_async_complete_start(map->dev);
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index dd5b2fe..d81dfca 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -647,7 +647,18 @@ static int dispatch_discard_io(struct xen_blkif *blkif,
int status = BLKIF_RSP_OKAY;
struct block_device *bdev = blkif->vbd.bdev;
unsigned long secure;
+ struct phys_req preq;
+
+ preq.sector_number = req->u.discard.sector_number;
+ preq.nr_sects = req->u.discard.nr_sectors;

+ err = xen_vbd_translate(&preq, blkif, WRITE);
+ if (err) {
+ pr_warn(DRV_PFX "access denied: DISCARD [%llu->%llu] on dev=%04x\n",
+ preq.sector_number,
+ preq.sector_number + preq.nr_sects, blkif->vbd.pdevice);
+ goto fail_response;
+ }
blkif->st_ds_req++;

xen_blkif_get(blkif);
@@ -658,7 +669,7 @@ static int dispatch_discard_io(struct xen_blkif *blkif,
err = blkdev_issue_discard(bdev, req->u.discard.sector_number,
req->u.discard.nr_sectors,
GFP_KERNEL, secure);
-
+fail_response:
if (err == -EOPNOTSUPP) {
pr_debug(DRV_PFX "discard op failed, not supported\n");
status = BLKIF_RSP_EOPNOTSUPP;
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 07f2840..6d6a0b4 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -103,10 +103,10 @@ struct pstate_adjust_policy {
static struct pstate_adjust_policy default_policy = {
.sample_rate_ms = 10,
.deadband = 0,
- .setpoint = 109,
- .p_gain_pct = 17,
+ .setpoint = 97,
+ .p_gain_pct = 20,
.d_gain_pct = 0,
- .i_gain_pct = 4,
+ .i_gain_pct = 0,
};

struct perf_limits {
@@ -468,12 +468,12 @@ static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
static inline int intel_pstate_get_scaled_busy(struct cpudata *cpu)
{
int32_t busy_scaled;
- int32_t core_busy, turbo_pstate, current_pstate;
+ int32_t core_busy, max_pstate, current_pstate;

core_busy = int_tofp(cpu->samples[cpu->sample_ptr].core_pct_busy);
- turbo_pstate = int_tofp(cpu->pstate.turbo_pstate);
+ max_pstate = int_tofp(cpu->pstate.max_pstate);
current_pstate = int_tofp(cpu->pstate.current_pstate);
- busy_scaled = mul_fp(core_busy, div_fp(turbo_pstate, current_pstate));
+ busy_scaled = mul_fp(core_busy, div_fp(max_pstate, current_pstate));

return fp_toint(busy_scaled);
}
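
For reference, the effect of the busy-scaling change above in plain numbers (illustrative values): the quantity fed to the controller is now the core busy percentage rescaled against the maximum non-turbo P-state rather than the turbo P-state. For example, 60 % busy at P-state 16 with max_pstate 24 gives busy_scaled = 60 * 24 / 16 = 90, compared against the new setpoint of 97; the old code divided by turbo_pstate, so the same load produced a larger figure on parts with a high turbo ratio. The controller constants are retuned alongside (setpoint 109 -> 97, p_gain 17 -> 20, i_gain 4 -> 0).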
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 5996521..84573b4 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -429,7 +429,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
dma_addr_t src_dma, dst_dma;
int ret = 0;

- desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
+ desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
if (!desc) {
dev_err(jrdev, "unable to allocate key input memory\n");
return -ENOMEM;
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 7ef316f..ac1b43a 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -54,6 +54,7 @@
#define FW_CDEV_KERNEL_VERSION 5
#define FW_CDEV_VERSION_EVENT_REQUEST2 4
#define FW_CDEV_VERSION_ALLOCATE_REGION_END 4
+#define FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW 5

struct client {
u32 version;
@@ -1005,6 +1006,8 @@ static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
a->channel, a->speed, a->header_size, cb, client);
if (IS_ERR(context))
return PTR_ERR(context);
+ if (client->version < FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW)
+ context->drop_overflow_headers = true;

/* We only support one context at this time. */
spin_lock_irq(&client->lock);
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 9e1db64..afb701e 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -2749,8 +2749,11 @@ static void copy_iso_headers(struct iso_context *ctx, const u32 *dma_hdr)
{
u32 *ctx_hdr;

- if (ctx->header_length + ctx->base.header_size > PAGE_SIZE)
+ if (ctx->header_length + ctx->base.header_size > PAGE_SIZE) {
+ if (ctx->base.drop_overflow_headers)
+ return;
flush_iso_completions(ctx);
+ }

ctx_hdr = ctx->header + ctx->header_length;
ctx->last_timestamp = (u16)le32_to_cpu((__force __le32)dma_hdr[0]);
@@ -2910,8 +2913,11 @@ static int handle_it_packet(struct context *context,

sync_it_packet_for_cpu(context, d);

- if (ctx->header_length + 4 > PAGE_SIZE)
+ if (ctx->header_length + 4 > PAGE_SIZE) {
+ if (ctx->base.drop_overflow_headers)
+ return 1;
flush_iso_completions(ctx);
+ }

ctx_hdr = ctx->header + ctx->header_length;
ctx->last_timestamp = le16_to_cpu(last->res_count);
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 3b315ba..f968590 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1511,6 +1511,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
dev_priv->dev = dev;
dev_priv->info = info;

+ spin_lock_init(&dev_priv->irq_lock);
+ spin_lock_init(&dev_priv->gpu_error.lock);
+ spin_lock_init(&dev_priv->rps.lock);
+ mutex_init(&dev_priv->dpio_lock);
+ mutex_init(&dev_priv->rps.hw_lock);
+ mutex_init(&dev_priv->modeset_restore_lock);
+
i915_dump_device_info(dev_priv);

if (i915_get_bridge_dev(dev)) {
@@ -1601,6 +1608,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
intel_detect_pch(dev);

intel_irq_init(dev);
+ intel_pm_init(dev);
+ intel_gt_sanitize(dev);
intel_gt_init(dev);

/* Try to make sure MCHBAR is enabled before poking at it */
@@ -1626,14 +1635,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (!IS_I945G(dev) && !IS_I945GM(dev))
pci_enable_msi(dev->pdev);

- spin_lock_init(&dev_priv->irq_lock);
- spin_lock_init(&dev_priv->gpu_error.lock);
- spin_lock_init(&dev_priv->rps.lock);
- mutex_init(&dev_priv->dpio_lock);
-
- mutex_init(&dev_priv->rps.hw_lock);
- mutex_init(&dev_priv->modeset_restore_lock);
-
dev_priv->num_plane = 1;
if (IS_VALLEYVIEW(dev))
dev_priv->num_plane = 2;
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index a2e4953..bc6cd31 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -685,7 +685,7 @@ static int i915_drm_thaw(struct drm_device *dev)
{
int error = 0;

- intel_gt_reset(dev);
+ intel_gt_sanitize(dev);

if (drm_core_check_feature(dev, DRIVER_MODESET)) {
mutex_lock(&dev->struct_mutex);
@@ -711,7 +711,7 @@ int i915_resume(struct drm_device *dev)

pci_set_master(dev->pdev);

- intel_gt_reset(dev);
+ intel_gt_sanitize(dev);

/*
* Platforms with opregion should have sane BIOS, older ones (gen3 and
@@ -1247,21 +1247,21 @@ hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)

#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
+ unsigned long irqflags; \
u##x val = 0; \
+ spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
if (IS_GEN5(dev_priv->dev)) \
ilk_dummy_write(dev_priv); \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
- unsigned long irqflags; \
- spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
if (dev_priv->forcewake_count == 0) \
dev_priv->gt.force_wake_get(dev_priv); \
val = read##y(dev_priv->regs + reg); \
if (dev_priv->forcewake_count == 0) \
dev_priv->gt.force_wake_put(dev_priv); \
- spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
} else { \
val = read##y(dev_priv->regs + reg); \
} \
+ spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
trace_i915_reg_rw(false, reg, val, sizeof(val)); \
return val; \
}
@@ -1274,8 +1274,10 @@ __i915_read(64, q)

#define __i915_write(x, y) \
void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
+ unsigned long irqflags; \
u32 __fifo_ret = 0; \
trace_i915_reg_rw(true, reg, val, sizeof(val)); \
+ spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
} \
@@ -1287,6 +1289,7 @@ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
gen6_gt_check_fifodbg(dev_priv); \
} \
hsw_unclaimed_reg_check(dev_priv, reg); \
+ spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
}
__i915_write(8, b)
__i915_write(16, w)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 9669a0b..47d8b68 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -491,6 +491,7 @@ enum intel_sbi_destination {
#define QUIRK_PIPEA_FORCE (1<<0)
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
+#define QUIRK_NO_PCH_PWM_ENABLE (1<<3)

struct intel_fbdev;
struct intel_fbc_work;
@@ -1474,9 +1475,10 @@ void i915_hangcheck_elapsed(unsigned long data);
void i915_handle_error(struct drm_device *dev, bool wedged);

extern void intel_irq_init(struct drm_device *dev);
+extern void intel_pm_init(struct drm_device *dev);
extern void intel_hpd_init(struct drm_device *dev);
extern void intel_gt_init(struct drm_device *dev);
-extern void intel_gt_reset(struct drm_device *dev);
+extern void intel_gt_sanitize(struct drm_device *dev);

void i915_error_state_free(struct kref *error_ref);

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 34118b0..0a30088 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1881,6 +1881,10 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
u32 seqno = intel_ring_get_seqno(ring);

BUG_ON(ring == NULL);
+ if (obj->ring != ring && obj->last_write_seqno) {
+ /* Keep the seqno relative to the current ring */
+ obj->last_write_seqno = seqno;
+ }
obj->ring = ring;

/* Add a reference if we're newly entering the active list. */
@@ -2134,7 +2138,17 @@ void i915_gem_restore_fences(struct drm_device *dev)

for (i = 0; i < dev_priv->num_fence_regs; i++) {
struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
- i915_gem_write_fence(dev, i, reg->obj);
+
+ /*
+ * Commit delayed tiling changes if we have an object still
+ * attached to the fence, otherwise just clear the fence.
+ */
+ if (reg->obj) {
+ i915_gem_object_update_fence(reg->obj, reg,
+ reg->obj->tiling_mode);
+ } else {
+ i915_gem_write_fence(dev, i, NULL);
+ }
}
}

@@ -2534,7 +2548,6 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
drm_i915_private_t *dev_priv = dev->dev_private;
int fence_reg;
int fence_pitch_shift;
- uint64_t val;

if (INTEL_INFO(dev)->gen >= 6) {
fence_reg = FENCE_REG_SANDYBRIDGE_0;
@@ -2544,8 +2557,23 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
}

+ fence_reg += reg * 8;
+
+ /* To w/a incoherency with non-atomic 64-bit register updates,
+ * we split the 64-bit update into two 32-bit writes. In order
+ * for a partial fence not to be evaluated between writes, we
+ * precede the update with write to turn off the fence register,
+ * and only enable the fence as the last step.
+ *
+ * For extra levels of paranoia, we make sure each step lands
+ * before applying the next step.
+ */
+ I915_WRITE(fence_reg, 0);
+ POSTING_READ(fence_reg);
+
if (obj) {
u32 size = obj->gtt_space->size;
+ uint64_t val;

val = (uint64_t)((obj->gtt_offset + size - 4096) &
0xfffff000) << 32;
@@ -2554,12 +2582,16 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
if (obj->tiling_mode == I915_TILING_Y)
val |= 1 << I965_FENCE_TILING_Y_SHIFT;
val |= I965_FENCE_REG_VALID;
- } else
- val = 0;

- fence_reg += reg * 8;
- I915_WRITE64(fence_reg, val);
- POSTING_READ(fence_reg);
+ I915_WRITE(fence_reg + 4, val >> 32);
+ POSTING_READ(fence_reg + 4);
+
+ I915_WRITE(fence_reg + 0, val);
+ POSTING_READ(fence_reg);
+ } else {
+ I915_WRITE(fence_reg + 4, 0);
+ POSTING_READ(fence_reg + 4);
+ }
}

static void i915_write_fence_reg(struct drm_device *dev, int reg,
@@ -2654,6 +2686,10 @@ static void i915_gem_write_fence(struct drm_device *dev, int reg,
if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
mb();

+ WARN(obj && (!obj->stride || !obj->tiling_mode),
+ "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
+ obj->stride, obj->tiling_mode);
+
switch (INTEL_INFO(dev)->gen) {
case 7:
case 6:
@@ -2713,6 +2749,7 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
fence->obj = NULL;
list_del_init(&fence->lru_list);
}
+ obj->fence_dirty = false;
}

static int
@@ -2842,7 +2879,6 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
return 0;

i915_gem_object_update_fence(obj, reg, enable);
- obj->fence_dirty = false;

return 0;
}
@@ -4457,7 +4493,7 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list)
if (obj->pages_pin_count == 0)
cnt += obj->base.size >> PAGE_SHIFT;
- list_for_each_entry(obj, &dev_priv->mm.inactive_list, gtt_list)
+ list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list)
if (obj->pin_count == 0 && obj->pages_pin_count == 0)
cnt += obj->base.size >> PAGE_SHIFT;
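
For reference, the ordering the fence-register change above enforces, sketched with generic MMIO accessors rather than the driver's I915_WRITE macros (illustrative only): since the 64-bit fence register cannot be updated atomically, the fence is disabled first, the high dword is landed, and the low dword (which carries the VALID bit) is written last, with a posting read after each step.

    /* Illustrative sketch, not part of the patch. */
    static void write_fence_split(void __iomem *reg, u64 val)
    {
            writel(0, reg);                         /* turn the old fence off */
            readl(reg);                             /* posting read */
            writel(upper_32_bits(val), reg + 4);
            readl(reg + 4);
            writel(lower_32_bits(val), reg);        /* VALID re-enabled last */
            readl(reg);
    }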

diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 56746dc..e1f4e6e 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -8146,15 +8146,20 @@ static void intel_set_config_restore_state(struct drm_device *dev,
}

static bool
-is_crtc_connector_off(struct drm_crtc *crtc, struct drm_connector *connectors,
- int num_connectors)
+is_crtc_connector_off(struct drm_mode_set *set)
{
int i;

- for (i = 0; i < num_connectors; i++)
- if (connectors[i].encoder &&
- connectors[i].encoder->crtc == crtc &&
- connectors[i].dpms != DRM_MODE_DPMS_ON)
+ if (set->num_connectors == 0)
+ return false;
+
+ if (WARN_ON(set->connectors == NULL))
+ return false;
+
+ for (i = 0; i < set->num_connectors; i++)
+ if (set->connectors[i]->encoder &&
+ set->connectors[i]->encoder->crtc == set->crtc &&
+ set->connectors[i]->dpms != DRM_MODE_DPMS_ON)
return true;

return false;
@@ -8167,10 +8172,8 @@ intel_set_config_compute_mode_changes(struct drm_mode_set *set,

/* We should be able to check here if the fb has the same properties
* and then just flip_or_move it */
- if (set->connectors != NULL &&
- is_crtc_connector_off(set->crtc, *set->connectors,
- set->num_connectors)) {
- config->mode_changed = true;
+ if (is_crtc_connector_off(set)) {
+ config->mode_changed = true;
} else if (set->crtc->fb != set->fb) {
/* If we have no fb then treat it as a full mode set */
if (set->crtc->fb == NULL) {
@@ -8914,6 +8917,17 @@ static void quirk_invert_brightness(struct drm_device *dev)
DRM_INFO("applying inverted panel brightness quirk\n");
}

+/*
+ * Some machines (Dell XPS13) suffer broken backlight controls if
+ * BLM_PCH_PWM_ENABLE is set.
+ */
+static void quirk_no_pcm_pwm_enable(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ dev_priv->quirks |= QUIRK_NO_PCH_PWM_ENABLE;
+ DRM_INFO("applying no-PCH_PWM_ENABLE quirk\n");
+}
+
struct intel_quirk {
int device;
int subsystem_vendor;
@@ -8983,6 +8997,11 @@ static struct intel_quirk intel_quirks[] = {

/* Acer Aspire 4736Z */
{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
+
+ /* Dell XPS13 HD Sandy Bridge */
+ { 0x0116, 0x1028, 0x052e, quirk_no_pcm_pwm_enable },
+ /* Dell XPS13 HD and XPS13 FHD Ivy Bridge */
+ { 0x0166, 0x1028, 0x058b, quirk_no_pcm_pwm_enable },
};

static void intel_init_quirks(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index eb5e6e9..33cb87f 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -354,7 +354,8 @@ void intel_panel_enable_backlight(struct drm_device *dev,
POSTING_READ(reg);
I915_WRITE(reg, tmp | BLM_PWM_ENABLE);

- if (HAS_PCH_SPLIT(dev)) {
+ if (HAS_PCH_SPLIT(dev) &&
+ !(dev_priv->quirks & QUIRK_NO_PCH_PWM_ENABLE)) {
tmp = I915_READ(BLC_PWM_PCH_CTL1);
tmp |= BLM_PCH_PWM_ENABLE;
tmp &= ~BLM_PCH_OVERRIDE_ENABLE;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index aa01128..2cfe9f6 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4486,7 +4486,7 @@ static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
gen6_gt_check_fifodbg(dev_priv);
}

-void intel_gt_reset(struct drm_device *dev)
+void intel_gt_sanitize(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;

@@ -4497,6 +4497,10 @@ void intel_gt_reset(struct drm_device *dev)
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
__gen6_gt_force_wake_mt_reset(dev_priv);
}
+
+ /* BIOS often leaves RC6 enabled, but disable it for hw init */
+ if (INTEL_INFO(dev)->gen >= 6)
+ intel_disable_gt_powersave(dev);
}

void intel_gt_init(struct drm_device *dev)
@@ -4505,18 +4509,51 @@ void intel_gt_init(struct drm_device *dev)

spin_lock_init(&dev_priv->gt_lock);

- intel_gt_reset(dev);
-
if (IS_VALLEYVIEW(dev)) {
dev_priv->gt.force_wake_get = vlv_force_wake_get;
dev_priv->gt.force_wake_put = vlv_force_wake_put;
- } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
+ } else if (IS_HASWELL(dev)) {
dev_priv->gt.force_wake_get = __gen6_gt_force_wake_mt_get;
dev_priv->gt.force_wake_put = __gen6_gt_force_wake_mt_put;
+ } else if (IS_IVYBRIDGE(dev)) {
+ u32 ecobus;
+
+ /* IVB configs may use multi-threaded forcewake */
+
+ /* A small trick here - if the bios hasn't configured
+ * MT forcewake, and if the device is in RC6, then
+ * force_wake_mt_get will not wake the device and the
+ * ECOBUS read will return zero. Which will be
+ * (correctly) interpreted by the test below as MT
+ * forcewake being disabled.
+ */
+ mutex_lock(&dev->struct_mutex);
+ __gen6_gt_force_wake_mt_get(dev_priv);
+ ecobus = I915_READ_NOTRACE(ECOBUS);
+ __gen6_gt_force_wake_mt_put(dev_priv);
+ mutex_unlock(&dev->struct_mutex);
+
+ if (ecobus & FORCEWAKE_MT_ENABLE) {
+ dev_priv->gt.force_wake_get =
+ __gen6_gt_force_wake_mt_get;
+ dev_priv->gt.force_wake_put =
+ __gen6_gt_force_wake_mt_put;
+ } else {
+ DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
+ DRM_INFO("when using vblank-synced partial screen updates.\n");
+ dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
+ dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
+ }
} else if (IS_GEN6(dev)) {
dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
}
+}
+
+void intel_pm_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
intel_gen6_powersave_work);
}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 1d5d613..1424f20 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -490,9 +490,6 @@ cleanup_pipe_control(struct intel_ring_buffer *ring)
struct pipe_control *pc = ring->private;
struct drm_i915_gem_object *obj;

- if (!ring->private)
- return;
-
obj = pc->obj;

kunmap(sg_page(obj->pages->sgl));
@@ -500,7 +497,6 @@ cleanup_pipe_control(struct intel_ring_buffer *ring)
drm_gem_object_unreference(&obj->base);

kfree(pc);
- ring->private = NULL;
}

static int init_render_ring(struct intel_ring_buffer *ring)
@@ -571,7 +567,10 @@ static void render_ring_cleanup(struct intel_ring_buffer *ring)
if (HAS_BROKEN_CS_TLB(dev))
drm_gem_object_unreference(to_gem_object(ring->private));

- cleanup_pipe_control(ring);
+ if (INTEL_INFO(dev)->gen >= 5)
+ cleanup_pipe_control(ring);
+
+ ring->private = NULL;
}

static void
diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c
index 8e47a9b..22aa996 100644
--- a/drivers/gpu/drm/nouveau/nv17_fence.c
+++ b/drivers/gpu/drm/nouveau/nv17_fence.c
@@ -76,7 +76,7 @@ nv17_fence_context_new(struct nouveau_channel *chan)
struct ttm_mem_reg *mem = &priv->bo->bo.mem;
struct nouveau_object *object;
u32 start = mem->start * PAGE_SIZE;
- u32 limit = mem->start + mem->size - 1;
+ u32 limit = start + mem->size - 1;
int ret = 0;

fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
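
For reference: the old limit was computed from mem->start, a raw page index, while mem->size is in bytes; the fix derives the limit from the byte offset already held in start (mem->start * PAGE_SIZE), so both ends of the DMA window are in the same units.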
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index f9701e5..0ee3638 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -39,6 +39,8 @@ nv50_fence_context_new(struct nouveau_channel *chan)
struct nv10_fence_chan *fctx;
struct ttm_mem_reg *mem = &priv->bo->bo.mem;
struct nouveau_object *object;
+ u32 start = mem->start * PAGE_SIZE;
+ u32 limit = start + mem->size - 1;
int ret, i;

fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
@@ -51,26 +53,28 @@ nv50_fence_context_new(struct nouveau_channel *chan)
fctx->base.sync = nv17_fence_sync;

ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
- NvSema, 0x0002,
+ NvSema, 0x003d,
&(struct nv_dma_class) {
.flags = NV_DMA_TARGET_VRAM |
NV_DMA_ACCESS_RDWR,
- .start = mem->start * PAGE_SIZE,
- .limit = mem->size - 1,
+ .start = start,
+ .limit = limit,
}, sizeof(struct nv_dma_class),
&object);

/* dma objects for display sync channel semaphore blocks */
for (i = 0; !ret && i < dev->mode_config.num_crtc; i++) {
struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i);
+ u32 start = bo->bo.mem.start * PAGE_SIZE;
+ u32 limit = start + bo->bo.mem.size - 1;

ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
NvEvoSema0 + i, 0x003d,
&(struct nv_dma_class) {
.flags = NV_DMA_TARGET_VRAM |
NV_DMA_ACCESS_RDWR,
- .start = bo->bo.offset,
- .limit = bo->bo.offset + 0xfff,
+ .start = start,
+ .limit = limit,
}, sizeof(struct nv_dma_class),
&object);
}
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index fb441a7..15da7ef 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -1222,12 +1222,17 @@ int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
int r;

mutex_lock(&ctx->mutex);
+ /* reset data block */
+ ctx->data_block = 0;
/* reset reg block */
ctx->reg_block = 0;
/* reset fb window */
ctx->fb_base = 0;
/* reset io mode */
ctx->io_mode = ATOM_IO_MM;
+ /* reset divmul */
+ ctx->divmul[0] = 0;
+ ctx->divmul[1] = 0;
r = atom_execute_table_locked(ctx, index, params);
mutex_unlock(&ctx->mutex);
return r;
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 064023b..32501f6 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -44,6 +44,41 @@ static char *pre_emph_names[] = {
};

/***** radeon AUX functions *****/
+
+/* Atom needs data in little endian format
+ * so swap as appropriate when copying data to
+ * or from atom. Note that atom operates on
+ * dw units.
+ */
+static void radeon_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le)
+{
+#ifdef __BIG_ENDIAN
+ u8 src_tmp[20], dst_tmp[20]; /* used for byteswapping */
+ u32 *dst32, *src32;
+ int i;
+
+ memcpy(src_tmp, src, num_bytes);
+ src32 = (u32 *)src_tmp;
+ dst32 = (u32 *)dst_tmp;
+ if (to_le) {
+ for (i = 0; i < ((num_bytes + 3) / 4); i++)
+ dst32[i] = cpu_to_le32(src32[i]);
+ memcpy(dst, dst_tmp, num_bytes);
+ } else {
+ u8 dws = num_bytes & ~3;
+ for (i = 0; i < ((num_bytes + 3) / 4); i++)
+ dst32[i] = le32_to_cpu(src32[i]);
+ memcpy(dst, dst_tmp, dws);
+ if (num_bytes % 4) {
+ for (i = 0; i < (num_bytes % 4); i++)
+ dst[dws+i] = dst_tmp[dws+i];
+ }
+ }
+#else
+ memcpy(dst, src, num_bytes);
+#endif
+}
+
union aux_channel_transaction {
PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION v1;
PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 v2;
@@ -65,10 +100,10 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,

base = (unsigned char *)(rdev->mode_info.atom_context->scratch + 1);

- memcpy(base, send, send_bytes);
+ radeon_copy_swap(base, send, send_bytes, true);

- args.v1.lpAuxRequest = 0 + 4;
- args.v1.lpDataOut = 16 + 4;
+ args.v1.lpAuxRequest = cpu_to_le16((u16)(0 + 4));
+ args.v1.lpDataOut = cpu_to_le16((u16)(16 + 4));
args.v1.ucDataOutLen = 0;
args.v1.ucChannelID = chan->rec.i2c_id;
args.v1.ucDelay = delay / 10;
@@ -102,7 +137,7 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
recv_bytes = recv_size;

if (recv && recv_size)
- memcpy(recv, base + 16, recv_bytes);
+ radeon_copy_swap(recv, base + 16, recv_bytes, false);

return recv_bytes;
}
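
For reference, a stand-alone illustration (simplified relative to radeon_copy_swap() above, which also preserves trailing partial-dword bytes on the receive path) of converting a buffer to the little-endian dword layout Atom expects, independent of host byte order:

    #include <stdint.h>
    #include <string.h>

    /* Store one 32-bit value as little-endian bytes on any host. */
    static void put_le32(uint8_t *dst, uint32_t v)
    {
            dst[0] = v;
            dst[1] = v >> 8;
            dst[2] = v >> 16;
            dst[3] = v >> 24;
    }

    /* Swap a buffer dword-by-dword into little-endian order.  The last
     * partial dword is zero-padded here for simplicity, so dst must have
     * room for n rounded up to a multiple of 4. */
    static void to_atom_le(uint8_t *dst, const uint8_t *src, size_t n)
    {
            size_t i;

            for (i = 0; i < n; i += 4) {
                    uint32_t v = 0;

                    memcpy(&v, src + i, (n - i < 4) ? n - i : 4);
                    put_le32(dst + i, v);
            }
    }

On a little-endian host this is a plain copy (matching the #else branch in the hunk); on big-endian it byte-swaps each dword, which is what keeps AUX/DP transactions working there.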
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index b9c6f76..bb9ea36 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -157,9 +157,9 @@ static void evergreen_audio_set_dto(struct drm_encoder *encoder, u32 clock)
* number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE
* is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
*/
+ WREG32(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL(radeon_crtc->crtc_id));
WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100);
WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100);
- WREG32(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL(radeon_crtc->crtc_id));
}


@@ -177,6 +177,9 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
uint32_t offset;
ssize_t err;

+ if (!dig || !dig->afmt)
+ return;
+
/* Silent, r600_hdmi_enable will raise WARN for us */
if (!dig->afmt->enabled)
return;
@@ -280,6 +283,9 @@ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;

+ if (!dig || !dig->afmt)
+ return;
+
/* Silent, r600_hdmi_enable will raise WARN for us */
if (enable && dig->afmt->enabled)
return;
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 6948eb8..b60004e 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2986,7 +2986,7 @@ void r600_uvd_fence_emit(struct radeon_device *rdev,
struct radeon_fence *fence)
{
struct radeon_ring *ring = &rdev->ring[fence->ring];
- uint32_t addr = rdev->fence_drv[fence->ring].gpu_addr;
+ uint64_t addr = rdev->fence_drv[fence->ring].gpu_addr;

radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));
radeon_ring_write(ring, fence->seq);
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index e73b2a7..f48240b 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -266,6 +266,9 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
uint32_t offset;
ssize_t err;

+ if (!dig || !dig->afmt)
+ return;
+
/* Silent, r600_hdmi_enable will raise WARN for us */
if (!dig->afmt->enabled)
return;
@@ -448,6 +451,9 @@ void r600_hdmi_enable(struct drm_encoder *encoder, bool enable)
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
u32 hdmi = HDMI0_ERROR_ACK;

+ if (!dig || !dig->afmt)
+ return;
+
/* Silent, r600_hdmi_enable will raise WARN for us */
if (enable && dig->afmt->enabled)
return;
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 142ce6c..f4dcfdd 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -408,6 +408,7 @@ struct radeon_sa_manager {
uint64_t gpu_addr;
void *cpu_ptr;
uint32_t domain;
+ uint32_t align;
};

struct radeon_sa_bo;
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 78edadc..68ce360 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -147,7 +147,7 @@ static uint16_t combios_get_table_offset(struct drm_device *dev,
enum radeon_combios_table_offset table)
{
struct radeon_device *rdev = dev->dev_private;
- int rev;
+ int rev, size;
uint16_t offset = 0, check_offset;

if (!rdev->bios)
@@ -156,174 +156,106 @@ static uint16_t combios_get_table_offset(struct drm_device *dev,
switch (table) {
/* absolute offset tables */
case COMBIOS_ASIC_INIT_1_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0xc);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0xc;
break;
case COMBIOS_BIOS_SUPPORT_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x14);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x14;
break;
case COMBIOS_DAC_PROGRAMMING_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x2a);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x2a;
break;
case COMBIOS_MAX_COLOR_DEPTH_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x2c);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x2c;
break;
case COMBIOS_CRTC_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x2e);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x2e;
break;
case COMBIOS_PLL_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x30);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x30;
break;
case COMBIOS_TV_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x32);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x32;
break;
case COMBIOS_DFP_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x34);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x34;
break;
case COMBIOS_HW_CONFIG_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x36);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x36;
break;
case COMBIOS_MULTIMEDIA_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x38);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x38;
break;
case COMBIOS_TV_STD_PATCH_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x3e);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x3e;
break;
case COMBIOS_LCD_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x40);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x40;
break;
case COMBIOS_MOBILE_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x42);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x42;
break;
case COMBIOS_PLL_INIT_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x46);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x46;
break;
case COMBIOS_MEM_CONFIG_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x48);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x48;
break;
case COMBIOS_SAVE_MASK_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x4a);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x4a;
break;
case COMBIOS_HARDCODED_EDID_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x4c);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x4c;
break;
case COMBIOS_ASIC_INIT_2_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x4e);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x4e;
break;
case COMBIOS_CONNECTOR_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x50);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x50;
break;
case COMBIOS_DYN_CLK_1_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x52);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x52;
break;
case COMBIOS_RESERVED_MEM_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x54);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x54;
break;
case COMBIOS_EXT_TMDS_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x58);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x58;
break;
case COMBIOS_MEM_CLK_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x5a);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x5a;
break;
case COMBIOS_EXT_DAC_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x5c);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x5c;
break;
case COMBIOS_MISC_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x5e);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x5e;
break;
case COMBIOS_CRT_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x60);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x60;
break;
case COMBIOS_INTEGRATED_SYSTEM_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x62);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x62;
break;
case COMBIOS_COMPONENT_VIDEO_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x64);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x64;
break;
case COMBIOS_FAN_SPEED_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x66);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x66;
break;
case COMBIOS_OVERDRIVE_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x68);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x68;
break;
case COMBIOS_OEM_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x6a);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x6a;
break;
case COMBIOS_DYN_CLK_2_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x6c);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x6c;
break;
case COMBIOS_POWER_CONNECTOR_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x6e);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x6e;
break;
case COMBIOS_I2C_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x70);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x70;
break;
/* relative offset tables */
case COMBIOS_ASIC_INIT_3_TABLE: /* offset from misc info */
@@ -439,11 +371,16 @@ static uint16_t combios_get_table_offset(struct drm_device *dev,
}
break;
default:
+ check_offset = 0;
break;
}

- return offset;
+ size = RBIOS8(rdev->bios_header_start + 0x6);
+ /* check absolute offset tables */
+ if (table < COMBIOS_ASIC_INIT_3_TABLE && check_offset && check_offset < size)
+ offset = RBIOS16(rdev->bios_header_start + check_offset);

+ return offset;
}

bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev)
@@ -965,16 +902,22 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
dac = RBIOS8(dac_info + 0x3) & 0xf;
p_dac->ps2_pdac_adj = (bg << 8) | (dac);
}
- /* if the values are all zeros, use the table */
- if (p_dac->ps2_pdac_adj)
+ /* if the values are zeros, use the table */
+ if ((dac == 0) || (bg == 0))
+ found = 0;
+ else
found = 1;
}

/* quirks */
+ /* Radeon 7000 (RV100) */
+ if (((dev->pdev->device == 0x5159) &&
+ (dev->pdev->subsystem_vendor == 0x174B) &&
+ (dev->pdev->subsystem_device == 0x7c28)) ||
/* Radeon 9100 (R200) */
- if ((dev->pdev->device == 0x514D) &&
+ ((dev->pdev->device == 0x514D) &&
(dev->pdev->subsystem_vendor == 0x174B) &&
- (dev->pdev->subsystem_device == 0x7149)) {
+ (dev->pdev->subsystem_device == 0x7149))) {
/* vbios value is bad, use the default */
found = 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 43ec4a4..5ce190b 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -467,6 +467,7 @@ int radeon_vm_manager_init(struct radeon_device *rdev)
size *= 2;
r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
RADEON_GPU_PAGE_ALIGN(size),
+ RADEON_GPU_PAGE_SIZE,
RADEON_GEM_DOMAIN_VRAM);
if (r) {
dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 5a99d43..1fe12ab 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -241,9 +241,6 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
{
int r = 0;

- INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
- INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);
-
spin_lock_init(&rdev->irq.lock);
r = drm_vblank_init(rdev->ddev, rdev->num_crtc);
if (r) {
@@ -265,6 +262,10 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
rdev->irq.installed = false;
return r;
}
+
+ INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
+ INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);
+
DRM_INFO("radeon: irq initialized.\n");
return 0;
}
@@ -284,8 +285,8 @@ void radeon_irq_kms_fini(struct radeon_device *rdev)
rdev->irq.installed = false;
if (rdev->msi_enabled)
pci_disable_msi(rdev->pdev);
+ flush_work(&rdev->hotplug_work);
}
- flush_work(&rdev->hotplug_work);
}

/**
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index e2cb80a..2943823 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -158,7 +158,7 @@ static inline void * radeon_sa_bo_cpu_addr(struct radeon_sa_bo *sa_bo)

extern int radeon_sa_bo_manager_init(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager,
- unsigned size, u32 domain);
+ unsigned size, u32 align, u32 domain);
extern void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager);
extern int radeon_sa_bo_manager_start(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 82434018..83f6295 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -224,6 +224,7 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
}
r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo,
RADEON_IB_POOL_SIZE*64*1024,
+ RADEON_GPU_PAGE_SIZE,
RADEON_GEM_DOMAIN_GTT);
if (r) {
return r;
diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
index 0abe5a9..f0bac68 100644
--- a/drivers/gpu/drm/radeon/radeon_sa.c
+++ b/drivers/gpu/drm/radeon/radeon_sa.c
@@ -49,7 +49,7 @@ static void radeon_sa_bo_try_free(struct radeon_sa_manager *sa_manager);

int radeon_sa_bo_manager_init(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager,
- unsigned size, u32 domain)
+ unsigned size, u32 align, u32 domain)
{
int i, r;

@@ -57,13 +57,14 @@ int radeon_sa_bo_manager_init(struct radeon_device *rdev,
sa_manager->bo = NULL;
sa_manager->size = size;
sa_manager->domain = domain;
+ sa_manager->align = align;
sa_manager->hole = &sa_manager->olist;
INIT_LIST_HEAD(&sa_manager->olist);
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
INIT_LIST_HEAD(&sa_manager->flist[i]);
}

- r = radeon_bo_create(rdev, size, RADEON_GPU_PAGE_SIZE, true,
+ r = radeon_bo_create(rdev, size, align, true,
domain, NULL, &sa_manager->bo);
if (r) {
dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r);
@@ -317,7 +318,7 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
unsigned tries[RADEON_NUM_RINGS];
int i, r;

- BUG_ON(align > RADEON_GPU_PAGE_SIZE);
+ BUG_ON(align > sa_manager->align);
BUG_ON(size > sa_manager->size);

*sa_bo = kmalloc(sizeof(struct radeon_sa_bo), GFP_KERNEL);
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index 4c605c7..deb5c25 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -562,7 +562,7 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
struct hv_hotadd_state *has)
{
int ret = 0;
- int i, nid, t;
+ int i, nid;
unsigned long start_pfn;
unsigned long processed_pfn;
unsigned long total_pfn = pfn_count;
@@ -607,14 +607,11 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,

/*
* Wait for the memory block to be onlined.
+ * Since the hot add has succeeded, it is ok to
+ * proceed even if the pages in the hot added region
+ * have not been "onlined" within the allowed time.
*/
- t = wait_for_completion_timeout(&dm_device.ol_waitevent, 5*HZ);
- if (t == 0) {
- pr_info("hot_add memory timedout\n");
- has->ha_end_pfn -= HA_CHUNK;
- has->covered_end_pfn -= processed_pfn;
- break;
- }
+ wait_for_completion_timeout(&dm_device.ol_waitevent, 5*HZ);

}

@@ -978,6 +975,14 @@ static void post_status(struct hv_dynmem_device *dm)
dm->num_pages_ballooned +
compute_balloon_floor();

+ /*
+ * If our transaction ID is no longer current, just don't
+ * send the status. This can happen if we were interrupted
+ * after we picked our transaction ID.
+ */
+ if (status.hdr.trans_id != atomic_read(&trans_id))
+ return;
+
vmbus_sendpacket(dm->dev->channel, &status,
sizeof(struct dm_status),
(unsigned long)NULL,
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 41712f0..5849dc0 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -388,6 +388,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
init_waitqueue_head(&isert_conn->conn_wait_comp_err);
kref_init(&isert_conn->conn_kref);
kref_get(&isert_conn->conn_kref);
+ mutex_init(&isert_conn->conn_mutex);

cma_id->context = isert_conn;
isert_conn->conn_cm_id = cma_id;
@@ -540,15 +541,32 @@ isert_disconnect_work(struct work_struct *work)
struct isert_conn, conn_logout_work);

pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
-
+ mutex_lock(&isert_conn->conn_mutex);
isert_conn->state = ISER_CONN_DOWN;

if (isert_conn->post_recv_buf_count == 0 &&
atomic_read(&isert_conn->post_send_buf_count) == 0) {
pr_debug("Calling wake_up(&isert_conn->conn_wait);\n");
- wake_up(&isert_conn->conn_wait);
+ mutex_unlock(&isert_conn->conn_mutex);
+ goto wake_up;
+ }
+ if (!isert_conn->conn_cm_id) {
+ mutex_unlock(&isert_conn->conn_mutex);
+ isert_put_conn(isert_conn);
+ return;
+ }
+ if (!isert_conn->logout_posted) {
+ pr_debug("Calling rdma_disconnect for !logout_posted from"
+ " isert_disconnect_work\n");
+ rdma_disconnect(isert_conn->conn_cm_id);
+ mutex_unlock(&isert_conn->conn_mutex);
+ iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
+ goto wake_up;
}
+ mutex_unlock(&isert_conn->conn_mutex);

+wake_up:
+ wake_up(&isert_conn->conn_wait);
isert_put_conn(isert_conn);
}

@@ -934,16 +952,11 @@ isert_handle_scsi_cmd(struct isert_conn *isert_conn,
}

sequence_cmd:
- rc = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
+ rc = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);

if (!rc && dump_payload == false && unsol_data)
iscsit_set_unsoliticed_dataout(cmd);

- if (rc == CMDSN_ERROR_CANNOT_RECOVER)
- return iscsit_add_reject_from_cmd(
- ISCSI_REASON_PROTOCOL_ERROR,
- 1, 0, (unsigned char *)hdr, cmd);
-
return 0;
}

@@ -1184,14 +1197,12 @@ isert_put_cmd(struct isert_cmd *isert_cmd)
{
struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
struct isert_conn *isert_conn = isert_cmd->conn;
- struct iscsi_conn *conn;
+ struct iscsi_conn *conn = isert_conn->conn;

pr_debug("Entering isert_put_cmd: %p\n", isert_cmd);

switch (cmd->iscsi_opcode) {
case ISCSI_OP_SCSI_CMD:
- conn = isert_conn->conn;
-
spin_lock_bh(&conn->cmd_lock);
if (!list_empty(&cmd->i_conn_node))
list_del(&cmd->i_conn_node);
@@ -1201,16 +1212,18 @@ isert_put_cmd(struct isert_cmd *isert_cmd)
iscsit_stop_dataout_timer(cmd);

isert_unmap_cmd(isert_cmd, isert_conn);
- /*
- * Fall-through
- */
+ transport_generic_free_cmd(&cmd->se_cmd, 0);
+ break;
case ISCSI_OP_SCSI_TMFUNC:
+ spin_lock_bh(&conn->cmd_lock);
+ if (!list_empty(&cmd->i_conn_node))
+ list_del(&cmd->i_conn_node);
+ spin_unlock_bh(&conn->cmd_lock);
+
transport_generic_free_cmd(&cmd->se_cmd, 0);
break;
case ISCSI_OP_REJECT:
case ISCSI_OP_NOOP_OUT:
- conn = isert_conn->conn;
-
spin_lock_bh(&conn->cmd_lock);
if (!list_empty(&cmd->i_conn_node))
list_del(&cmd->i_conn_node);
@@ -1222,6 +1235,9 @@ isert_put_cmd(struct isert_cmd *isert_cmd)
* associated cmd->se_cmd needs to be released.
*/
if (cmd->se_cmd.se_tfo != NULL) {
+ pr_debug("Calling transport_generic_free_cmd from"
+ " isert_put_cmd for 0x%02x\n",
+ cmd->iscsi_opcode);
transport_generic_free_cmd(&cmd->se_cmd, 0);
break;
}
@@ -1318,8 +1334,8 @@ isert_do_control_comp(struct work_struct *work)
atomic_dec(&isert_conn->post_send_buf_count);

cmd->i_state = ISTATE_SENT_STATUS;
- complete(&cmd->reject_comp);
isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev);
+ break;
case ISTATE_SEND_LOGOUTRSP:
pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");
/*
@@ -1345,7 +1361,8 @@ isert_response_completion(struct iser_tx_desc *tx_desc,
struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;

if (cmd->i_state == ISTATE_SEND_TASKMGTRSP ||
- cmd->i_state == ISTATE_SEND_LOGOUTRSP) {
+ cmd->i_state == ISTATE_SEND_LOGOUTRSP ||
+ cmd->i_state == ISTATE_SEND_REJECT) {
isert_unmap_tx_desc(tx_desc, ib_dev);

INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp);
@@ -1419,7 +1436,11 @@ isert_cq_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn)
pr_debug("isert_cq_comp_err >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
pr_debug("Calling wake_up from isert_cq_comp_err\n");

- isert_conn->state = ISER_CONN_TERMINATING;
+ mutex_lock(&isert_conn->conn_mutex);
+ if (isert_conn->state != ISER_CONN_DOWN)
+ isert_conn->state = ISER_CONN_TERMINATING;
+ mutex_unlock(&isert_conn->conn_mutex);
+
wake_up(&isert_conn->conn_wait_comp_err);
}
}
@@ -1637,11 +1658,25 @@ isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
struct isert_cmd, iscsi_cmd);
struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
+ struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+ struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
+ struct iscsi_reject *hdr =
+ (struct iscsi_reject *)&isert_cmd->tx_desc.iscsi_header;

isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
- iscsit_build_reject(cmd, conn, (struct iscsi_reject *)
- &isert_cmd->tx_desc.iscsi_header);
+ iscsit_build_reject(cmd, conn, hdr);
isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
+
+ hton24(hdr->dlength, ISCSI_HDR_LEN);
+ isert_cmd->sense_buf_dma = ib_dma_map_single(ib_dev,
+ (void *)cmd->buf_ptr, ISCSI_HDR_LEN,
+ DMA_TO_DEVICE);
+ isert_cmd->sense_buf_len = ISCSI_HDR_LEN;
+ tx_dsg->addr = isert_cmd->sense_buf_dma;
+ tx_dsg->length = ISCSI_HDR_LEN;
+ tx_dsg->lkey = isert_conn->conn_mr->lkey;
+ isert_cmd->tx_desc.num_sge = 2;
+
isert_init_send_wr(isert_cmd, send_wr);

pr_debug("Posting Reject IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
@@ -2175,6 +2210,17 @@ isert_free_np(struct iscsi_np *np)
kfree(isert_np);
}

+static int isert_check_state(struct isert_conn *isert_conn, int state)
+{
+ int ret;
+
+ mutex_lock(&isert_conn->conn_mutex);
+ ret = (isert_conn->state == state);
+ mutex_unlock(&isert_conn->conn_mutex);
+
+ return ret;
+}
+
static void isert_free_conn(struct iscsi_conn *conn)
{
struct isert_conn *isert_conn = conn->context;
@@ -2184,26 +2230,43 @@ static void isert_free_conn(struct iscsi_conn *conn)
* Decrement post_send_buf_count for special case when called
* from isert_do_control_comp() -> iscsit_logout_post_handler()
*/
+ mutex_lock(&isert_conn->conn_mutex);
if (isert_conn->logout_posted)
atomic_dec(&isert_conn->post_send_buf_count);

- if (isert_conn->conn_cm_id)
+ if (isert_conn->conn_cm_id && isert_conn->state != ISER_CONN_DOWN) {
+ pr_debug("Calling rdma_disconnect from isert_free_conn\n");
rdma_disconnect(isert_conn->conn_cm_id);
+ }
/*
* Only wait for conn_wait_comp_err if the isert_conn made it
* into full feature phase..
*/
- if (isert_conn->state > ISER_CONN_INIT) {
+ if (isert_conn->state == ISER_CONN_UP) {
pr_debug("isert_free_conn: Before wait_event comp_err %d\n",
isert_conn->state);
+ mutex_unlock(&isert_conn->conn_mutex);
+
wait_event(isert_conn->conn_wait_comp_err,
- isert_conn->state == ISER_CONN_TERMINATING);
- pr_debug("isert_free_conn: After wait_event #1 >>>>>>>>>>>>\n");
+ (isert_check_state(isert_conn, ISER_CONN_TERMINATING)));
+
+ wait_event(isert_conn->conn_wait,
+ (isert_check_state(isert_conn, ISER_CONN_DOWN)));
+
+ isert_put_conn(isert_conn);
+ return;
+ }
+ if (isert_conn->state == ISER_CONN_INIT) {
+ mutex_unlock(&isert_conn->conn_mutex);
+ isert_put_conn(isert_conn);
+ return;
}
+ pr_debug("isert_free_conn: wait_event conn_wait %d\n",
+ isert_conn->state);
+ mutex_unlock(&isert_conn->conn_mutex);

- pr_debug("isert_free_conn: wait_event conn_wait %d\n", isert_conn->state);
- wait_event(isert_conn->conn_wait, isert_conn->state == ISER_CONN_DOWN);
- pr_debug("isert_free_conn: After wait_event #2 >>>>>>>>>>>>>>>>>>>>\n");
+ wait_event(isert_conn->conn_wait,
+ (isert_check_state(isert_conn, ISER_CONN_DOWN)));

isert_put_conn(isert_conn);
}
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index b104f4c..5795c82 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -102,6 +102,7 @@ struct isert_conn {
struct ib_qp *conn_qp;
struct isert_device *conn_device;
struct work_struct conn_logout_work;
+ struct mutex conn_mutex;
wait_queue_head_t conn_wait;
wait_queue_head_t conn_wait_comp_err;
struct kref conn_kref;
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index aa04f02..81a79b7 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1644,7 +1644,10 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kern
}

if (!dmi) {
+ unsigned noio_flag;
+ noio_flag = memalloc_noio_save();
dmi = __vmalloc(param_kernel->data_size, GFP_NOIO | __GFP_REPEAT | __GFP_HIGH, PAGE_KERNEL);
+ memalloc_noio_restore(noio_flag);
if (dmi)
*param_flags |= DM_PARAMS_VMALLOC;
}
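
The hunk above wraps the __vmalloc() fallback in memalloc_noio_save()/memalloc_noio_restore(). A minimal sketch of that pattern follows, with the likely rationale spelled out in the comment; size and buf are placeholders, and only the three helper calls come from the patch itself:

/*
 * Sketch, not part of the patch: GFP_NOIO in the gfp mask only covers the
 * pages __vmalloc() allocates directly; allocations made on its behalf
 * (page tables, for instance) may still use GFP_KERNEL and recurse into
 * I/O.  memalloc_noio_save() flags the whole task so that every allocation
 * in this scope is implicitly treated as GFP_NOIO, and the restore call
 * drops the flag again.
 */
unsigned noio_flag;
void *buf;

noio_flag = memalloc_noio_save();
buf = __vmalloc(size, GFP_NOIO | __GFP_REPEAT | __GFP_HIGH, PAGE_KERNEL);
memalloc_noio_restore(noio_flag);
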
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index bdf26f5..5adede1 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -1561,7 +1561,6 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
unsigned long flags;
int r;

-again:
bdev = NULL;
mode = 0;
r = 0;
@@ -1579,7 +1578,7 @@ again:
}

if ((pgpath && m->queue_io) || (!pgpath && m->queue_if_no_path))
- r = -EAGAIN;
+ r = -ENOTCONN;
else if (!bdev)
r = -EIO;

@@ -1591,11 +1590,8 @@ again:
if (!r && ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT)
r = scsi_verify_blk_ioctl(NULL, cmd);

- if (r == -EAGAIN && !fatal_signal_pending(current)) {
+ if (r == -ENOTCONN && !fatal_signal_pending(current))
queue_work(kmultipathd, &m->process_queued_ios);
- msleep(10);
- goto again;
- }

return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
}
diff --git a/drivers/md/dm-verity.c b/drivers/md/dm-verity.c
index b948fd8..0d2e812 100644
--- a/drivers/md/dm-verity.c
+++ b/drivers/md/dm-verity.c
@@ -831,9 +831,8 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
for (i = v->levels - 1; i >= 0; i--) {
sector_t s;
v->hash_level_block[i] = hash_position;
- s = verity_position_at_level(v, v->data_blocks, i);
- s = (s >> v->hash_per_block_bits) +
- !!(s & ((1 << v->hash_per_block_bits) - 1));
+ s = (v->data_blocks + ((sector_t)1 << ((i + 1) * v->hash_per_block_bits)) - 1)
+ >> ((i + 1) * v->hash_per_block_bits);
if (hash_position + s < hash_position) {
ti->error = "Hash device offset overflow";
r = -E2BIG;
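
The verity_ctr() hunk above replaces the nested verity_position_at_level() arithmetic with a direct per-level block count. A short worked note on the closed form; the example figures are illustrative only and not taken from the patch:

/*
 * One hash block holds 2^hash_per_block_bits hashes, so level i of the
 * tree (level 0 being the one closest to the data) needs
 *
 *     s = DIV_ROUND_UP(data_blocks, 1ULL << ((i + 1) * hash_per_block_bits))
 *
 * blocks, which is exactly what the add-then-shift expression computes.
 * Example (illustrative numbers): with 4 KiB blocks and 32-byte digests,
 * hash_per_block_bits = 7 (128 hashes per block); for data_blocks =
 * 1,000,000 the loop reserves 1 block at level 2, 62 at level 1 and 7813
 * at level 0, walking from the top level down.
 */
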
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index d5370a9..33f2010 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -386,10 +386,12 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
struct mapped_device *md = bdev->bd_disk->private_data;
- struct dm_table *map = dm_get_live_table(md);
+ struct dm_table *map;
struct dm_target *tgt;
int r = -ENOTTY;

+retry:
+ map = dm_get_live_table(md);
if (!map || !dm_table_get_size(map))
goto out;

@@ -410,6 +412,11 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
out:
dm_table_put(map);

+ if (r == -ENOTCONN) {
+ msleep(10);
+ goto retry;
+ }
+
return r;
}
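
Read together with the dm-mpath hunk further up, the retry for a multipath device that is currently queueing I/O now lives in dm_blk_ioctl() rather than inside multipath_ioctl(). A condensed, comment-only sketch of the combined flow; the rationale given is the usual reading of this change, not text from the patch:

/*
 * dm_blk_ioctl() after this patch, in outline:
 *
 *     retry:
 *         map = dm_get_live_table(md);
 *         r = tgt->type->ioctl(tgt, cmd, arg);   // mpath: -ENOTCONN while queueing
 *         dm_table_put(map);
 *         if (r == -ENOTCONN) { msleep(10); goto retry; }
 *
 * The old loop slept inside multipath_ioctl() with the live table
 * reference still held, so a table reload that could have supplied a
 * usable path could not complete.  Retrying above, after dm_table_put(),
 * releases the table on every pass and avoids that deadlock.
 */
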

diff --git a/drivers/md/md.c b/drivers/md/md.c
index 9b82377..51f0345 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -7697,20 +7697,6 @@ static int remove_and_add_spares(struct mddev *mddev,
continue;

rdev->recovery_offset = 0;
- if (rdev->saved_raid_disk >= 0 && mddev->in_sync) {
- spin_lock_irq(&mddev->write_lock);
- if (mddev->in_sync)
- /* OK, this device, which is in_sync,
- * will definitely be noticed before
- * the next write, so recovery isn't
- * needed.
- */
- rdev->recovery_offset = mddev->recovery_cp;
- spin_unlock_irq(&mddev->write_lock);
- }
- if (mddev->ro && rdev->recovery_offset != MaxSector)
- /* not safe to add this disk now */
- continue;
if (mddev->pers->
hot_add_disk(mddev, rdev) == 0) {
if (sysfs_link_rdev(mddev, rdev))
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 6e17f81..6f48244 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1848,6 +1848,36 @@ static int process_checks(struct r1bio *r1_bio)
int i;
int vcnt;

+ /* Fix variable parts of all bios */
+ vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9);
+ for (i = 0; i < conf->raid_disks * 2; i++) {
+ int j;
+ int size;
+ struct bio *b = r1_bio->bios[i];
+ if (b->bi_end_io != end_sync_read)
+ continue;
+ /* fixup the bio for reuse */
+ bio_reset(b);
+ b->bi_vcnt = vcnt;
+ b->bi_size = r1_bio->sectors << 9;
+ b->bi_sector = r1_bio->sector +
+ conf->mirrors[i].rdev->data_offset;
+ b->bi_bdev = conf->mirrors[i].rdev->bdev;
+ b->bi_end_io = end_sync_read;
+ b->bi_private = r1_bio;
+
+ size = b->bi_size;
+ for (j = 0; j < vcnt ; j++) {
+ struct bio_vec *bi;
+ bi = &b->bi_io_vec[j];
+ bi->bv_offset = 0;
+ if (size > PAGE_SIZE)
+ bi->bv_len = PAGE_SIZE;
+ else
+ bi->bv_len = size;
+ size -= PAGE_SIZE;
+ }
+ }
for (primary = 0; primary < conf->raid_disks * 2; primary++)
if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
test_bit(BIO_UPTODATE, &r1_bio->bios[primary]->bi_flags)) {
@@ -1856,12 +1886,10 @@ static int process_checks(struct r1bio *r1_bio)
break;
}
r1_bio->read_disk = primary;
- vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9);
for (i = 0; i < conf->raid_disks * 2; i++) {
int j;
struct bio *pbio = r1_bio->bios[primary];
struct bio *sbio = r1_bio->bios[i];
- int size;

if (sbio->bi_end_io != end_sync_read)
continue;
@@ -1887,27 +1915,6 @@ static int process_checks(struct r1bio *r1_bio)
rdev_dec_pending(conf->mirrors[i].rdev, mddev);
continue;
}
- /* fixup the bio for reuse */
- bio_reset(sbio);
- sbio->bi_vcnt = vcnt;
- sbio->bi_size = r1_bio->sectors << 9;
- sbio->bi_sector = r1_bio->sector +
- conf->mirrors[i].rdev->data_offset;
- sbio->bi_bdev = conf->mirrors[i].rdev->bdev;
- sbio->bi_end_io = end_sync_read;
- sbio->bi_private = r1_bio;
-
- size = sbio->bi_size;
- for (j = 0; j < vcnt ; j++) {
- struct bio_vec *bi;
- bi = &sbio->bi_io_vec[j];
- bi->bv_offset = 0;
- if (size > PAGE_SIZE)
- bi->bv_len = PAGE_SIZE;
- else
- bi->bv_len = size;
- size -= PAGE_SIZE;
- }

bio_copy_data(sbio, pbio);
}
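
The process_checks() hunk above moves the bio re-initialisation ahead of the primary selection and applies it to every end_sync_read bio, the eventual primary included, before bio_copy_data() runs. A small arithmetic note on the vcnt expression it relocates; the numbers are only an example:

/*
 * vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9)
 *
 * is a ceiling division of the resync range (in 512-byte sectors) by the
 * number of sectors that fit in one page.  With 4 KiB pages,
 * PAGE_SIZE / 512 = 8 and PAGE_SHIFT - 9 = 3, so 17 sectors give
 * (17 + 7) >> 3 = 3 bio_vecs; the inner loop then caps each bv_len at
 * PAGE_SIZE and leaves the last one holding the 512-byte remainder.
 */
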
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index d61eb7e..081bb33 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -2268,12 +2268,18 @@ static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
d = r10_bio->devs[1].devnum;
wbio = r10_bio->devs[1].bio;
wbio2 = r10_bio->devs[1].repl_bio;
+ /* Need to test wbio2->bi_end_io before we call
+ * generic_make_request as if the former is NULL,
+ * the latter is free to free wbio2.
+ */
+ if (wbio2 && !wbio2->bi_end_io)
+ wbio2 = NULL;
if (wbio->bi_end_io) {
atomic_inc(&conf->mirrors[d].rdev->nr_pending);
md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio));
generic_make_request(wbio);
}
- if (wbio2 && wbio2->bi_end_io) {
+ if (wbio2) {
atomic_inc(&conf->mirrors[d].replacement->nr_pending);
md_sync_acct(conf->mirrors[d].replacement->bdev,
bio_sectors(wbio2));
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 05e4a10..a35b846 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3462,6 +3462,7 @@ static void handle_stripe(struct stripe_head *sh)
test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) {
set_bit(STRIPE_SYNCING, &sh->state);
clear_bit(STRIPE_INSYNC, &sh->state);
+ clear_bit(STRIPE_REPLACED, &sh->state);
}
spin_unlock(&sh->stripe_lock);
}
@@ -3607,19 +3608,23 @@ static void handle_stripe(struct stripe_head *sh)
handle_parity_checks5(conf, sh, &s, disks);
}

- if (s.replacing && s.locked == 0
- && !test_bit(STRIPE_INSYNC, &sh->state)) {
+ if ((s.replacing || s.syncing) && s.locked == 0
+ && !test_bit(STRIPE_COMPUTE_RUN, &sh->state)
+ && !test_bit(STRIPE_REPLACED, &sh->state)) {
/* Write out to replacement devices where possible */
for (i = 0; i < conf->raid_disks; i++)
- if (test_bit(R5_UPTODATE, &sh->dev[i].flags) &&
- test_bit(R5_NeedReplace, &sh->dev[i].flags)) {
+ if (test_bit(R5_NeedReplace, &sh->dev[i].flags)) {
+ WARN_ON(!test_bit(R5_UPTODATE, &sh->dev[i].flags));
set_bit(R5_WantReplace, &sh->dev[i].flags);
set_bit(R5_LOCKED, &sh->dev[i].flags);
s.locked++;
}
- set_bit(STRIPE_INSYNC, &sh->state);
+ if (s.replacing)
+ set_bit(STRIPE_INSYNC, &sh->state);
+ set_bit(STRIPE_REPLACED, &sh->state);
}
if ((s.syncing || s.replacing) && s.locked == 0 &&
+ !test_bit(STRIPE_COMPUTE_RUN, &sh->state) &&
test_bit(STRIPE_INSYNC, &sh->state)) {
md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
clear_bit(STRIPE_SYNCING, &sh->state);
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index b0b663b..70c4932 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -306,6 +306,7 @@ enum {
STRIPE_SYNC_REQUESTED,
STRIPE_SYNCING,
STRIPE_INSYNC,
+ STRIPE_REPLACED,
STRIPE_PREREAD_ACTIVE,
STRIPE_DELAYED,
STRIPE_DEGRADED,
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index c97e9d3..e70b4ff 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -1008,19 +1008,6 @@ static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
return;
}

-static void rtl_lps_change_work_callback(struct work_struct *work)
-{
- struct rtl_works *rtlworks =
- container_of(work, struct rtl_works, lps_change_work);
- struct ieee80211_hw *hw = rtlworks->hw;
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- if (rtlpriv->enter_ps)
- rtl_lps_enter(hw);
- else
- rtl_lps_leave(hw);
-}
-
static void _rtl_pci_init_trx_var(struct ieee80211_hw *hw)
{
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
diff --git a/drivers/net/wireless/rtlwifi/ps.c b/drivers/net/wireless/rtlwifi/ps.c
index 884bcea..71e917d 100644
--- a/drivers/net/wireless/rtlwifi/ps.c
+++ b/drivers/net/wireless/rtlwifi/ps.c
@@ -611,6 +611,18 @@ void rtl_swlps_rf_sleep(struct ieee80211_hw *hw)
MSECS(sleep_intv * mac->vif->bss_conf.beacon_int - 40));
}

+void rtl_lps_change_work_callback(struct work_struct *work)
+{
+ struct rtl_works *rtlworks =
+ container_of(work, struct rtl_works, lps_change_work);
+ struct ieee80211_hw *hw = rtlworks->hw;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (rtlpriv->enter_ps)
+ rtl_lps_enter(hw);
+ else
+ rtl_lps_leave(hw);
+}

void rtl_swlps_wq_callback(void *data)
{
diff --git a/drivers/net/wireless/rtlwifi/ps.h b/drivers/net/wireless/rtlwifi/ps.h
index 4d682b7..88bd76e 100644
--- a/drivers/net/wireless/rtlwifi/ps.h
+++ b/drivers/net/wireless/rtlwifi/ps.h
@@ -49,5 +49,6 @@ void rtl_swlps_rf_awake(struct ieee80211_hw *hw);
void rtl_swlps_rf_sleep(struct ieee80211_hw *hw);
void rtl_p2p_ps_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state);
void rtl_p2p_info(struct ieee80211_hw *hw, void *data, unsigned int len);
+void rtl_lps_change_work_callback(struct work_struct *work);

#endif
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index a3532e0..1feebdc 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -1070,6 +1070,8 @@ int rtl_usb_probe(struct usb_interface *intf,
spin_lock_init(&rtlpriv->locks.usb_lock);
INIT_WORK(&rtlpriv->works.fill_h2c_cmd,
rtl_fill_h2c_cmd_work_callback);
+ INIT_WORK(&rtlpriv->works.lps_change_work,
+ rtl_lps_change_work_callback);

rtlpriv->usb_data_index = 0;
init_completion(&rtlpriv->firmware_loading_complete);
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 1db10141..0c01b8e 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -276,8 +276,7 @@ no_skb:
break;
}

- __skb_fill_page_desc(skb, 0, page, 0, 0);
- skb_shinfo(skb)->nr_frags = 1;
+ skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);
__skb_queue_tail(&np->rx_batch, skb);
}

@@ -822,7 +821,6 @@ static RING_IDX xennet_fill_frags(struct netfront_info *np,
struct sk_buff_head *list)
{
struct skb_shared_info *shinfo = skb_shinfo(skb);
- int nr_frags = shinfo->nr_frags;
RING_IDX cons = np->rx.rsp_cons;
struct sk_buff *nskb;

@@ -831,19 +829,21 @@ static RING_IDX xennet_fill_frags(struct netfront_info *np,
RING_GET_RESPONSE(&np->rx, ++cons);
skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];

- __skb_fill_page_desc(skb, nr_frags,
- skb_frag_page(nfrag),
- rx->offset, rx->status);
+ if (shinfo->nr_frags == MAX_SKB_FRAGS) {
+ unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;

- skb->data_len += rx->status;
+ BUG_ON(pull_to <= skb_headlen(skb));
+ __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
+ }
+ BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS);
+
+ skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(nfrag),
+ rx->offset, rx->status, PAGE_SIZE);

skb_shinfo(nskb)->nr_frags = 0;
kfree_skb(nskb);
-
- nr_frags++;
}

- shinfo->nr_frags = nr_frags;
return cons;
}

@@ -929,7 +929,8 @@ static int handle_incoming_queue(struct net_device *dev,
while ((skb = __skb_dequeue(rxq)) != NULL) {
int pull_to = NETFRONT_SKB_CB(skb)->pull_to;

- __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
+ if (pull_to > skb_headlen(skb))
+ __pskb_pull_tail(skb, pull_to - skb_headlen(skb));

/* Ethernet work: Delayed to here as it peeks the header. */
skb->protocol = eth_type_trans(skb, dev);
@@ -1015,16 +1016,10 @@ err:
skb_shinfo(skb)->frags[0].page_offset = rx->offset;
skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
skb->data_len = rx->status;
+ skb->len += rx->status;

i = xennet_fill_frags(np, skb, &tmpq);

- /*
- * Truesize is the actual allocation size, even if the
- * allocation is only partially used.
- */
- skb->truesize += PAGE_SIZE * skb_shinfo(skb)->nr_frags;
- skb->len += skb->data_len;
-
if (rx->flags & XEN_NETRXF_csum_blank)
skb->ip_summed = CHECKSUM_PARTIAL;
else if (rx->flags & XEN_NETRXF_data_validated)
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
index 9bb020a..0d30ca8 100644
--- a/drivers/scsi/isci/task.c
+++ b/drivers/scsi/isci/task.c
@@ -491,6 +491,7 @@ int isci_task_abort_task(struct sas_task *task)
struct isci_tmf tmf;
int ret = TMF_RESP_FUNC_FAILED;
unsigned long flags;
+ int target_done_already = 0;

/* Get the isci_request reference from the task. Note that
* this check does not depend on the pending request list
@@ -505,9 +506,11 @@ int isci_task_abort_task(struct sas_task *task)
/* If task is already done, the request isn't valid */
if (!(task->task_state_flags & SAS_TASK_STATE_DONE) &&
(task->task_state_flags & SAS_TASK_AT_INITIATOR) &&
- old_request)
+ old_request) {
idev = isci_get_device(task->dev->lldd_dev);
-
+ target_done_already = test_bit(IREQ_COMPLETE_IN_TARGET,
+ &old_request->flags);
+ }
spin_unlock(&task->task_state_lock);
spin_unlock_irqrestore(&ihost->scic_lock, flags);

@@ -561,7 +564,7 @@ int isci_task_abort_task(struct sas_task *task)

if (task->task_proto == SAS_PROTOCOL_SMP ||
sas_protocol_ata(task->task_proto) ||
- test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags) ||
+ target_done_already ||
test_bit(IDEV_GONE, &idev->flags)) {

spin_unlock_irqrestore(&ihost->scic_lock, flags);
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 15e4080..51cd27a 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -419,6 +419,8 @@ qla2x00_start_scsi(srb_t *sp)
__constant_cpu_to_le16(CF_SIMPLE_TAG);
break;
}
+ } else {
+ cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);
}

/* Load SCSI command packet. */
@@ -1308,11 +1310,11 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
fcp_cmnd->task_attribute = TSK_ORDERED;
break;
default:
- fcp_cmnd->task_attribute = 0;
+ fcp_cmnd->task_attribute = TSK_SIMPLE;
break;
}
} else {
- fcp_cmnd->task_attribute = 0;
+ fcp_cmnd->task_attribute = TSK_SIMPLE;
}

cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
@@ -1527,7 +1529,12 @@ qla24xx_start_scsi(srb_t *sp)
case ORDERED_QUEUE_TAG:
cmd_pkt->task = TSK_ORDERED;
break;
+ default:
+ cmd_pkt->task = TSK_SIMPLE;
+ break;
}
+ } else {
+ cmd_pkt->task = TSK_SIMPLE;
}

/* Load SCSI command packet. */
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 1b1125e..610417e 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -828,10 +828,17 @@ static int scsi_setup_flush_cmnd(struct scsi_device *sdp, struct request *rq)

static void sd_unprep_fn(struct request_queue *q, struct request *rq)
{
+ struct scsi_cmnd *SCpnt = rq->special;
+
if (rq->cmd_flags & REQ_DISCARD) {
free_page((unsigned long)rq->buffer);
rq->buffer = NULL;
}
+ if (SCpnt->cmnd != rq->cmd) {
+ mempool_free(SCpnt->cmnd, sd_cdb_pool);
+ SCpnt->cmnd = NULL;
+ SCpnt->cmd_len = 0;
+ }
}

/**
@@ -1710,21 +1717,6 @@ static int sd_done(struct scsi_cmnd *SCpnt)
if (rq_data_dir(SCpnt->request) == READ && scsi_prot_sg_count(SCpnt))
sd_dif_complete(SCpnt, good_bytes);

- if (scsi_host_dif_capable(sdkp->device->host, sdkp->protection_type)
- == SD_DIF_TYPE2_PROTECTION && SCpnt->cmnd != SCpnt->request->cmd) {
-
- /* We have to print a failed command here as the
- * extended CDB gets freed before scsi_io_completion()
- * is called.
- */
- if (result)
- scsi_print_command(SCpnt);
-
- mempool_free(SCpnt->cmnd, sd_cdb_pool);
- SCpnt->cmnd = NULL;
- SCpnt->cmd_len = 0;
- }
-
return good_bytes;
}

diff --git a/drivers/staging/android/logger.c b/drivers/staging/android/logger.c
index 9bd8747..34519ea 100644
--- a/drivers/staging/android/logger.c
+++ b/drivers/staging/android/logger.c
@@ -469,7 +469,7 @@ static ssize_t logger_aio_write(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t ppos)
{
struct logger_log *log = file_get_log(iocb->ki_filp);
- size_t orig = log->w_off;
+ size_t orig;
struct logger_entry header;
struct timespec now;
ssize_t ret = 0;
@@ -490,6 +490,8 @@ static ssize_t logger_aio_write(struct kiocb *iocb, const struct iovec *iov,

mutex_lock(&log->mutex);

+ orig = log->w_off;
+
/*
* Fix up any readers, pulling them forward to the first readable
* entry after (what will be) the new write offset. We do this now
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index 924c54c..0ae406a 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -1401,22 +1401,19 @@ static int do_cmd_ioctl(struct comedi_device *dev,
DPRINTK("subdevice busy\n");
return -EBUSY;
}
- s->busy = file;

/* make sure channel/gain list isn't too long */
if (cmd.chanlist_len > s->len_chanlist) {
DPRINTK("channel/gain list too long %u > %d\n",
cmd.chanlist_len, s->len_chanlist);
- ret = -EINVAL;
- goto cleanup;
+ return -EINVAL;
}

/* make sure channel/gain list isn't too short */
if (cmd.chanlist_len < 1) {
DPRINTK("channel/gain list too short %u < 1\n",
cmd.chanlist_len);
- ret = -EINVAL;
- goto cleanup;
+ return -EINVAL;
}

async->cmd = cmd;
@@ -1426,8 +1423,7 @@ static int do_cmd_ioctl(struct comedi_device *dev,
kmalloc(async->cmd.chanlist_len * sizeof(int), GFP_KERNEL);
if (!async->cmd.chanlist) {
DPRINTK("allocation failed\n");
- ret = -ENOMEM;
- goto cleanup;
+ return -ENOMEM;
}

if (copy_from_user(async->cmd.chanlist, user_chanlist,
@@ -1479,6 +1475,9 @@ static int do_cmd_ioctl(struct comedi_device *dev,

comedi_set_subdevice_runflags(s, ~0, SRF_USER | SRF_RUNNING);

+ /* set s->busy _after_ setting SRF_RUNNING flag to avoid race with
+ * comedi_read() or comedi_write() */
+ s->busy = file;
ret = s->do_cmd(dev, s);
if (ret == 0)
return 0;
@@ -1693,6 +1692,7 @@ static int do_cancel_ioctl(struct comedi_device *dev, unsigned int arg,
void *file)
{
struct comedi_subdevice *s;
+ int ret;

if (arg >= dev->n_subdevices)
return -EINVAL;
@@ -1709,7 +1709,11 @@ static int do_cancel_ioctl(struct comedi_device *dev, unsigned int arg,
if (s->busy != file)
return -EBUSY;

- return do_cancel(dev, s);
+ ret = do_cancel(dev, s);
+ if (comedi_get_subdevice_runflags(s) & SRF_USER)
+ wake_up_interruptible(&s->async->wait_head);
+
+ return ret;
}

/*
@@ -2041,11 +2045,13 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,

if (!comedi_is_subdevice_running(s)) {
if (count == 0) {
+ mutex_lock(&dev->mutex);
if (comedi_is_subdevice_in_error(s))
retval = -EPIPE;
else
retval = 0;
do_become_nonbusy(dev, s);
+ mutex_unlock(&dev->mutex);
}
break;
}
@@ -2144,11 +2150,13 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,

if (n == 0) {
if (!comedi_is_subdevice_running(s)) {
+ mutex_lock(&dev->mutex);
do_become_nonbusy(dev, s);
if (comedi_is_subdevice_in_error(s))
retval = -EPIPE;
else
retval = 0;
+ mutex_unlock(&dev->mutex);
break;
}
if (file->f_flags & O_NONBLOCK) {
@@ -2186,9 +2194,11 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
buf += n;
break; /* makes device work like a pipe */
}
- if (comedi_is_subdevice_idle(s) &&
- async->buf_read_count - async->buf_write_count == 0) {
- do_become_nonbusy(dev, s);
+ if (comedi_is_subdevice_idle(s)) {
+ mutex_lock(&dev->mutex);
+ if (async->buf_read_count - async->buf_write_count == 0)
+ do_become_nonbusy(dev, s);
+ mutex_unlock(&dev->mutex);
}
set_current_state(TASK_RUNNING);
remove_wait_queue(&async->wait_head, &wait);
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index d7705e5..012ff8b 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -628,25 +628,18 @@ static void __exit iscsi_target_cleanup_module(void)
}

static int iscsit_add_reject(
+ struct iscsi_conn *conn,
u8 reason,
- int fail_conn,
- unsigned char *buf,
- struct iscsi_conn *conn)
+ unsigned char *buf)
{
struct iscsi_cmd *cmd;
- struct iscsi_reject *hdr;
- int ret;

cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
if (!cmd)
return -1;

cmd->iscsi_opcode = ISCSI_OP_REJECT;
- if (fail_conn)
- cmd->cmd_flags |= ICF_REJECT_FAIL_CONN;
-
- hdr = (struct iscsi_reject *) cmd->pdu;
- hdr->reason = reason;
+ cmd->reject_reason = reason;

cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
if (!cmd->buf_ptr) {
@@ -662,23 +655,16 @@ static int iscsit_add_reject(
cmd->i_state = ISTATE_SEND_REJECT;
iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);

- ret = wait_for_completion_interruptible(&cmd->reject_comp);
- if (ret != 0)
- return -1;
-
- return (!fail_conn) ? 0 : -1;
+ return -1;
}

-int iscsit_add_reject_from_cmd(
+static int iscsit_add_reject_from_cmd(
+ struct iscsi_cmd *cmd,
u8 reason,
- int fail_conn,
- int add_to_conn,
- unsigned char *buf,
- struct iscsi_cmd *cmd)
+ bool add_to_conn,
+ unsigned char *buf)
{
struct iscsi_conn *conn;
- struct iscsi_reject *hdr;
- int ret;

if (!cmd->conn) {
pr_err("cmd->conn is NULL for ITT: 0x%08x\n",
@@ -688,11 +674,7 @@ int iscsit_add_reject_from_cmd(
conn = cmd->conn;

cmd->iscsi_opcode = ISCSI_OP_REJECT;
- if (fail_conn)
- cmd->cmd_flags |= ICF_REJECT_FAIL_CONN;
-
- hdr = (struct iscsi_reject *) cmd->pdu;
- hdr->reason = reason;
+ cmd->reject_reason = reason;

cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
if (!cmd->buf_ptr) {
@@ -709,8 +691,6 @@ int iscsit_add_reject_from_cmd(

cmd->i_state = ISTATE_SEND_REJECT;
iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
-
- ret = wait_for_completion_interruptible(&cmd->reject_comp);
/*
* Perform the kref_put now if se_cmd has already been setup by
* scsit_setup_scsi_cmd()
@@ -719,12 +699,19 @@ int iscsit_add_reject_from_cmd(
pr_debug("iscsi reject: calling target_put_sess_cmd >>>>>>\n");
target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
}
- if (ret != 0)
- return -1;
+ return -1;
+}

- return (!fail_conn) ? 0 : -1;
+static int iscsit_add_reject_cmd(struct iscsi_cmd *cmd, u8 reason,
+ unsigned char *buf)
+{
+ return iscsit_add_reject_from_cmd(cmd, reason, true, buf);
+}
+
+int iscsit_reject_cmd(struct iscsi_cmd *cmd, u8 reason, unsigned char *buf)
+{
+ return iscsit_add_reject_from_cmd(cmd, reason, false, buf);
}
-EXPORT_SYMBOL(iscsit_add_reject_from_cmd);

/*
* Map some portion of the allocated scatterlist to an iovec, suitable for
@@ -844,8 +831,8 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
!(hdr->flags & ISCSI_FLAG_CMD_FINAL)) {
pr_err("ISCSI_FLAG_CMD_WRITE & ISCSI_FLAG_CMD_FINAL"
" not set. Bad iSCSI Initiator.\n");
- return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_INVALID,
- 1, 1, buf, cmd);
+ return iscsit_add_reject_cmd(cmd,
+ ISCSI_REASON_BOOKMARK_INVALID, buf);
}

if (((hdr->flags & ISCSI_FLAG_CMD_READ) ||
@@ -865,8 +852,8 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
pr_err("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE"
" set when Expected Data Transfer Length is 0 for"
" CDB: 0x%02x. Bad iSCSI Initiator.\n", hdr->cdb[0]);
- return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_INVALID,
- 1, 1, buf, cmd);
+ return iscsit_add_reject_cmd(cmd,
+ ISCSI_REASON_BOOKMARK_INVALID, buf);
}
done:

@@ -875,62 +862,62 @@ done:
pr_err("ISCSI_FLAG_CMD_READ and/or ISCSI_FLAG_CMD_WRITE"
" MUST be set if Expected Data Transfer Length is not 0."
" Bad iSCSI Initiator\n");
- return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_INVALID,
- 1, 1, buf, cmd);
+ return iscsit_add_reject_cmd(cmd,
+ ISCSI_REASON_BOOKMARK_INVALID, buf);
}

if ((hdr->flags & ISCSI_FLAG_CMD_READ) &&
(hdr->flags & ISCSI_FLAG_CMD_WRITE)) {
pr_err("Bidirectional operations not supported!\n");
- return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_INVALID,
- 1, 1, buf, cmd);
+ return iscsit_add_reject_cmd(cmd,
+ ISCSI_REASON_BOOKMARK_INVALID, buf);
}

if (hdr->opcode & ISCSI_OP_IMMEDIATE) {
pr_err("Illegally set Immediate Bit in iSCSI Initiator"
" Scsi Command PDU.\n");
- return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_INVALID,
- 1, 1, buf, cmd);
+ return iscsit_add_reject_cmd(cmd,
+ ISCSI_REASON_BOOKMARK_INVALID, buf);
}

if (payload_length && !conn->sess->sess_ops->ImmediateData) {
pr_err("ImmediateData=No but DataSegmentLength=%u,"
" protocol error.\n", payload_length);
- return iscsit_add_reject_from_cmd(ISCSI_REASON_PROTOCOL_ERROR,
- 1, 1, buf, cmd);
+ return iscsit_add_reject_cmd(cmd,
+ ISCSI_REASON_PROTOCOL_ERROR, buf);
}

- if ((be32_to_cpu(hdr->data_length )== payload_length) &&
+ if ((be32_to_cpu(hdr->data_length) == payload_length) &&
(!(hdr->flags & ISCSI_FLAG_CMD_FINAL))) {
pr_err("Expected Data Transfer Length and Length of"
" Immediate Data are the same, but ISCSI_FLAG_CMD_FINAL"
" bit is not set protocol error\n");
- return iscsit_add_reject_from_cmd(ISCSI_REASON_PROTOCOL_ERROR,
- 1, 1, buf, cmd);
+ return iscsit_add_reject_cmd(cmd,
+ ISCSI_REASON_PROTOCOL_ERROR, buf);
}

if (payload_length > be32_to_cpu(hdr->data_length)) {
pr_err("DataSegmentLength: %u is greater than"
" EDTL: %u, protocol error.\n", payload_length,
hdr->data_length);
- return iscsit_add_reject_from_cmd(ISCSI_REASON_PROTOCOL_ERROR,
- 1, 1, buf, cmd);
+ return iscsit_add_reject_cmd(cmd,
+ ISCSI_REASON_PROTOCOL_ERROR, buf);
}

if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
pr_err("DataSegmentLength: %u is greater than"
" MaxXmitDataSegmentLength: %u, protocol error.\n",
payload_length, conn->conn_ops->MaxXmitDataSegmentLength);
- return iscsit_add_reject_from_cmd(ISCSI_REASON_PROTOCOL_ERROR,
- 1, 1, buf, cmd);
+ return iscsit_add_reject_cmd(cmd,
+ ISCSI_REASON_PROTOCOL_ERROR, buf);
}

if (payload_length > conn->sess->sess_ops->FirstBurstLength) {
pr_err("DataSegmentLength: %u is greater than"
" FirstBurstLength: %u, protocol error.\n",
payload_length, conn->sess->sess_ops->FirstBurstLength);
- return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_INVALID,
- 1, 1, buf, cmd);
+ return iscsit_add_reject_cmd(cmd,
+ ISCSI_REASON_BOOKMARK_INVALID, buf);
}

data_direction = (hdr->flags & ISCSI_FLAG_CMD_WRITE) ? DMA_TO_DEVICE :
@@ -985,9 +972,8 @@ done:

dr = iscsit_allocate_datain_req();
if (!dr)
- return iscsit_add_reject_from_cmd(
- ISCSI_REASON_BOOKMARK_NO_RESOURCES,
- 1, 1, buf, cmd);
+ return iscsit_add_reject_cmd(cmd,
+ ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);

iscsit_attach_datain_req(cmd, dr);
}
@@ -1015,18 +1001,16 @@ done:
cmd->sense_reason = target_setup_cmd_from_cdb(&cmd->se_cmd, hdr->cdb);
if (cmd->sense_reason) {
if (cmd->sense_reason == TCM_OUT_OF_RESOURCES) {
- return iscsit_add_reject_from_cmd(
- ISCSI_REASON_BOOKMARK_NO_RESOURCES,
- 1, 1, buf, cmd);
+ return iscsit_add_reject_cmd(cmd,
+ ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
}

goto attach_cmd;
}

if (iscsit_build_pdu_and_seq_lists(cmd, payload_length) < 0) {
- return iscsit_add_reject_from_cmd(
- ISCSI_REASON_BOOKMARK_NO_RESOURCES,
- 1, 1, buf, cmd);
+ return iscsit_add_reject_cmd(cmd,
+ ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
}

attach_cmd:
@@ -1068,17 +1052,13 @@ int iscsit_process_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
* be acknowledged. (See below)
*/
if (!cmd->immediate_data) {
- cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
- if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
- if (!cmd->sense_reason)
- return 0;
-
+ cmdsn_ret = iscsit_sequence_cmd(conn, cmd,
+ (unsigned char *)hdr, hdr->cmdsn);
+ if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
+ return -1;
+ else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
return 0;
- } else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) {
- return iscsit_add_reject_from_cmd(
- ISCSI_REASON_PROTOCOL_ERROR,
- 1, 0, (unsigned char *)hdr, cmd);
}
}

@@ -1103,6 +1083,9 @@ int iscsit_process_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
* iscsit_check_received_cmdsn() in iscsit_get_immediate_data() below.
*/
if (cmd->sense_reason) {
+ if (cmd->reject_reason)
+ return 0;
+
target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
return 1;
}
@@ -1111,10 +1094,8 @@ int iscsit_process_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
* the backend memory allocation.
*/
cmd->sense_reason = transport_generic_new_cmd(&cmd->se_cmd);
- if (cmd->sense_reason) {
- target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
+ if (cmd->sense_reason)
return 1;
- }

return 0;
}
@@ -1124,6 +1105,7 @@ static int
iscsit_get_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr,
bool dump_payload)
{
+ struct iscsi_conn *conn = cmd->conn;
int cmdsn_ret = 0, immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
/*
* Special case for Unsupported SAM WRITE Opcodes and ImmediateData=Yes.
@@ -1140,20 +1122,25 @@ after_immediate_data:
* DataCRC, check against ExpCmdSN/MaxCmdSN if
* Immediate Bit is not set.
*/
- cmdsn_ret = iscsit_sequence_cmd(cmd->conn, cmd, hdr->cmdsn);
+ cmdsn_ret = iscsit_sequence_cmd(cmd->conn, cmd,
+ (unsigned char *)hdr, hdr->cmdsn);
+ if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) {
+ return -1;
+ } else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
+ target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
+ return 0;
+ }

if (cmd->sense_reason) {
- if (iscsit_dump_data_payload(cmd->conn,
- cmd->first_burst_len, 1) < 0)
- return -1;
+ int rc;
+
+ rc = iscsit_dump_data_payload(cmd->conn,
+ cmd->first_burst_len, 1);
+ target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
+ return rc;
} else if (cmd->unsolicited_data)
iscsit_set_unsoliticed_dataout(cmd);

- if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
- return iscsit_add_reject_from_cmd(
- ISCSI_REASON_PROTOCOL_ERROR,
- 1, 0, (unsigned char *)hdr, cmd);
-
} else if (immed_ret == IMMEDIATE_DATA_ERL1_CRC_FAILURE) {
/*
* Immediate Data failed DataCRC and ERL>=1,
@@ -1184,15 +1171,14 @@ iscsit_handle_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,

rc = iscsit_setup_scsi_cmd(conn, cmd, buf);
if (rc < 0)
- return rc;
+ return 0;
/*
* Allocation iovecs needed for struct socket operations for
* traditional iSCSI block I/O.
*/
if (iscsit_allocate_iovecs(cmd) < 0) {
- return iscsit_add_reject_from_cmd(
- ISCSI_REASON_BOOKMARK_NO_RESOURCES,
- 1, 0, buf, cmd);
+ return iscsit_add_reject_cmd(cmd,
+ ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
}
immed_data = cmd->immediate_data;

@@ -1283,8 +1269,8 @@ iscsit_check_dataout_hdr(struct iscsi_conn *conn, unsigned char *buf,

if (!payload_length) {
pr_err("DataOUT payload is ZERO, protocol error.\n");
- return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
- buf, conn);
+ return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
+ buf);
}

/* iSCSI write */
@@ -1301,8 +1287,8 @@ iscsit_check_dataout_hdr(struct iscsi_conn *conn, unsigned char *buf,
pr_err("DataSegmentLength: %u is greater than"
" MaxXmitDataSegmentLength: %u\n", payload_length,
conn->conn_ops->MaxXmitDataSegmentLength);
- return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
- buf, conn);
+ return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
+ buf);
}

cmd = iscsit_find_cmd_from_itt_or_dump(conn, hdr->itt,
@@ -1325,8 +1311,7 @@ iscsit_check_dataout_hdr(struct iscsi_conn *conn, unsigned char *buf,
if (cmd->data_direction != DMA_TO_DEVICE) {
pr_err("Command ITT: 0x%08x received DataOUT for a"
" NON-WRITE command.\n", cmd->init_task_tag);
- return iscsit_add_reject_from_cmd(ISCSI_REASON_PROTOCOL_ERROR,
- 1, 0, buf, cmd);
+ return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, buf);
}
se_cmd = &cmd->se_cmd;
iscsit_mod_dataout_timer(cmd);
@@ -1335,8 +1320,7 @@ iscsit_check_dataout_hdr(struct iscsi_conn *conn, unsigned char *buf,
pr_err("DataOut Offset: %u, Length %u greater than"
" iSCSI Command EDTL %u, protocol error.\n",
hdr->offset, payload_length, cmd->se_cmd.data_length);
- return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_INVALID,
- 1, 0, buf, cmd);
+ return iscsit_reject_cmd(cmd, ISCSI_REASON_BOOKMARK_INVALID, buf);
}

if (cmd->unsolicited_data) {
@@ -1528,7 +1512,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)

rc = iscsit_check_dataout_hdr(conn, buf, &cmd);
if (rc < 0)
- return rc;
+ return 0;
else if (!cmd)
return 0;

@@ -1557,8 +1541,8 @@ int iscsit_handle_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
if (hdr->itt == RESERVED_ITT && !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
pr_err("NOPOUT ITT is reserved, but Immediate Bit is"
" not set, protocol error.\n");
- return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
- buf, conn);
+ return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
+ (unsigned char *)hdr);
}

if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
@@ -1566,8 +1550,8 @@ int iscsit_handle_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
" greater than MaxXmitDataSegmentLength: %u, protocol"
" error.\n", payload_length,
conn->conn_ops->MaxXmitDataSegmentLength);
- return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
- buf, conn);
+ return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
+ (unsigned char *)hdr);
}

pr_debug("Got NOPOUT Ping %s ITT: 0x%08x, TTT: 0x%08x,"
@@ -1584,9 +1568,9 @@ int iscsit_handle_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
*/
if (hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
if (!cmd)
- return iscsit_add_reject(
+ return iscsit_reject_cmd(cmd,
ISCSI_REASON_BOOKMARK_NO_RESOURCES,
- 1, buf, conn);
+ (unsigned char *)hdr);

cmd->iscsi_opcode = ISCSI_OP_NOOP_OUT;
cmd->i_state = ISTATE_SEND_NOPIN;
@@ -1700,15 +1684,14 @@ int iscsit_handle_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
return 0;
}

- cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
+ cmdsn_ret = iscsit_sequence_cmd(conn, cmd,
+ (unsigned char *)hdr, hdr->cmdsn);
if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
ret = 0;
goto ping_out;
}
if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
- return iscsit_add_reject_from_cmd(
- ISCSI_REASON_PROTOCOL_ERROR,
- 1, 0, buf, cmd);
+ return -1;

return 0;
}
@@ -1757,8 +1740,8 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
struct se_tmr_req *se_tmr;
struct iscsi_tmr_req *tmr_req;
struct iscsi_tm *hdr;
- int out_of_order_cmdsn = 0;
- int ret;
+ int out_of_order_cmdsn = 0, ret;
+ bool sess_ref = false;
u8 function;

hdr = (struct iscsi_tm *) buf;
@@ -1782,8 +1765,8 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
pr_err("Task Management Request TASK_REASSIGN not"
" issued as immediate command, bad iSCSI Initiator"
"implementation\n");
- return iscsit_add_reject_from_cmd(ISCSI_REASON_PROTOCOL_ERROR,
- 1, 1, buf, cmd);
+ return iscsit_add_reject_cmd(cmd,
+ ISCSI_REASON_PROTOCOL_ERROR, buf);
}
if ((function != ISCSI_TM_FUNC_ABORT_TASK) &&
be32_to_cpu(hdr->refcmdsn) != ISCSI_RESERVED_TAG)
@@ -1795,9 +1778,9 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
if (!cmd->tmr_req) {
pr_err("Unable to allocate memory for"
" Task Management command!\n");
- return iscsit_add_reject_from_cmd(
- ISCSI_REASON_BOOKMARK_NO_RESOURCES,
- 1, 1, buf, cmd);
+ return iscsit_add_reject_cmd(cmd,
+ ISCSI_REASON_BOOKMARK_NO_RESOURCES,
+ buf);
}

/*
@@ -1814,6 +1797,9 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
conn->sess->se_sess, 0, DMA_NONE,
MSG_SIMPLE_TAG, cmd->sense_buffer + 2);

+ target_get_sess_cmd(conn->sess->se_sess, &cmd->se_cmd, true);
+ sess_ref = true;
+
switch (function) {
case ISCSI_TM_FUNC_ABORT_TASK:
tcm_function = TMR_ABORT_TASK;
@@ -1839,17 +1825,15 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
default:
pr_err("Unknown iSCSI TMR Function:"
" 0x%02x\n", function);
- return iscsit_add_reject_from_cmd(
- ISCSI_REASON_BOOKMARK_NO_RESOURCES,
- 1, 1, buf, cmd);
+ return iscsit_add_reject_cmd(cmd,
+ ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
}

ret = core_tmr_alloc_req(&cmd->se_cmd, cmd->tmr_req,
tcm_function, GFP_KERNEL);
if (ret < 0)
- return iscsit_add_reject_from_cmd(
- ISCSI_REASON_BOOKMARK_NO_RESOURCES,
- 1, 1, buf, cmd);
+ return iscsit_add_reject_cmd(cmd,
+ ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);

cmd->tmr_req->se_tmr_req = cmd->se_cmd.se_tmr_req;
}
@@ -1908,9 +1892,8 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
break;

if (iscsit_check_task_reassign_expdatasn(tmr_req, conn) < 0)
- return iscsit_add_reject_from_cmd(
- ISCSI_REASON_BOOKMARK_INVALID, 1, 1,
- buf, cmd);
+ return iscsit_add_reject_cmd(cmd,
+ ISCSI_REASON_BOOKMARK_INVALID, buf);
break;
default:
pr_err("Unknown TMR function: 0x%02x, protocol"
@@ -1928,15 +1911,13 @@ attach:
spin_unlock_bh(&conn->cmd_lock);

if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
- int cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
+ int cmdsn_ret = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);
if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP)
out_of_order_cmdsn = 1;
else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP)
return 0;
else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
- return iscsit_add_reject_from_cmd(
- ISCSI_REASON_PROTOCOL_ERROR,
- 1, 0, buf, cmd);
+ return -1;
}
iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));

@@ -1956,6 +1937,11 @@ attach:
* For connection recovery, this is also the default action for
* TMR TASK_REASSIGN.
*/
+ if (sess_ref) {
+ pr_debug("Handle TMR, using sess_ref=true check\n");
+ target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
+ }
+
iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
return 0;
}
@@ -1981,8 +1967,7 @@ static int iscsit_handle_text_cmd(
pr_err("Unable to accept text parameter length: %u"
"greater than MaxXmitDataSegmentLength %u.\n",
payload_length, conn->conn_ops->MaxXmitDataSegmentLength);
- return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
- buf, conn);
+ return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR, buf);
}

pr_debug("Got Text Request: ITT: 0x%08x, CmdSN: 0x%08x,"
@@ -2084,8 +2069,8 @@ static int iscsit_handle_text_cmd(

cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
if (!cmd)
- return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES,
- 1, buf, conn);
+ return iscsit_add_reject(conn,
+ ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);

cmd->iscsi_opcode = ISCSI_OP_TEXT;
cmd->i_state = ISTATE_SEND_TEXTRSP;
@@ -2103,11 +2088,10 @@ static int iscsit_handle_text_cmd(
iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));

if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
- cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
+ cmdsn_ret = iscsit_sequence_cmd(conn, cmd,
+ (unsigned char *)hdr, hdr->cmdsn);
if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
- return iscsit_add_reject_from_cmd(
- ISCSI_REASON_PROTOCOL_ERROR,
- 1, 0, buf, cmd);
+ return -1;

return 0;
}
@@ -2292,14 +2276,11 @@ iscsit_handle_logout_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
if (ret < 0)
return ret;
} else {
- cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
- if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
+ cmdsn_ret = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);
+ if (cmdsn_ret == CMDSN_LOWER_THAN_EXP)
logout_remove = 0;
- } else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) {
- return iscsit_add_reject_from_cmd(
- ISCSI_REASON_PROTOCOL_ERROR,
- 1, 0, buf, cmd);
- }
+ else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
+ return -1;
}

return logout_remove;
@@ -2323,8 +2304,8 @@ static int iscsit_handle_snack(
if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
pr_err("Initiator sent SNACK request while in"
" ErrorRecoveryLevel=0.\n");
- return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
- buf, conn);
+ return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
+ buf);
}
/*
* SNACK_DATA and SNACK_R2T are both 0, so check which function to
@@ -2348,13 +2329,13 @@ static int iscsit_handle_snack(
case ISCSI_FLAG_SNACK_TYPE_RDATA:
/* FIXME: Support R-Data SNACK */
pr_err("R-Data SNACK Not Supported.\n");
- return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
- buf, conn);
+ return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
+ buf);
default:
pr_err("Unknown SNACK type 0x%02x, protocol"
" error.\n", hdr->flags & 0x0f);
- return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
- buf, conn);
+ return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
+ buf);
}

return 0;
@@ -2426,14 +2407,14 @@ static int iscsit_handle_immediate_data(
pr_err("Unable to recover from"
" Immediate Data digest failure while"
" in ERL=0.\n");
- iscsit_add_reject_from_cmd(
+ iscsit_reject_cmd(cmd,
ISCSI_REASON_DATA_DIGEST_ERROR,
- 1, 0, (unsigned char *)hdr, cmd);
+ (unsigned char *)hdr);
return IMMEDIATE_DATA_CANNOT_RECOVER;
} else {
- iscsit_add_reject_from_cmd(
+ iscsit_reject_cmd(cmd,
ISCSI_REASON_DATA_DIGEST_ERROR,
- 0, 0, (unsigned char *)hdr, cmd);
+ (unsigned char *)hdr);
return IMMEDIATE_DATA_ERL1_CRC_FAILURE;
}
} else {
@@ -3533,6 +3514,7 @@ iscsit_build_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
struct iscsi_reject *hdr)
{
hdr->opcode = ISCSI_OP_REJECT;
+ hdr->reason = cmd->reject_reason;
hdr->flags |= ISCSI_FLAG_CMD_FINAL;
hton24(hdr->dlength, ISCSI_HDR_LEN);
hdr->ffffffff = cpu_to_be32(0xffffffff);
@@ -3806,18 +3788,11 @@ check_rsp_state:
case ISTATE_SEND_STATUS_RECOVERY:
case ISTATE_SEND_TEXTRSP:
case ISTATE_SEND_TASKMGTRSP:
+ case ISTATE_SEND_REJECT:
spin_lock_bh(&cmd->istate_lock);
cmd->i_state = ISTATE_SENT_STATUS;
spin_unlock_bh(&cmd->istate_lock);
break;
- case ISTATE_SEND_REJECT:
- if (cmd->cmd_flags & ICF_REJECT_FAIL_CONN) {
- cmd->cmd_flags &= ~ICF_REJECT_FAIL_CONN;
- complete(&cmd->reject_comp);
- goto err;
- }
- complete(&cmd->reject_comp);
- break;
default:
pr_err("Unknown Opcode: 0x%02x ITT:"
" 0x%08x, i_state: %d on CID: %hu\n",
@@ -3922,8 +3897,7 @@ static int iscsi_target_rx_opcode(struct iscsi_conn *conn, unsigned char *buf)
case ISCSI_OP_SCSI_CMD:
cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
if (!cmd)
- return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES,
- 1, buf, conn);
+ goto reject;

ret = iscsit_handle_scsi_cmd(conn, cmd, buf);
break;
@@ -3935,16 +3909,14 @@ static int iscsi_target_rx_opcode(struct iscsi_conn *conn, unsigned char *buf)
if (hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
if (!cmd)
- return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES,
- 1, buf, conn);
+ goto reject;
}
ret = iscsit_handle_nop_out(conn, cmd, buf);
break;
case ISCSI_OP_SCSI_TMFUNC:
cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
if (!cmd)
- return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES,
- 1, buf, conn);
+ goto reject;

ret = iscsit_handle_task_mgt_cmd(conn, cmd, buf);
break;
@@ -3954,8 +3926,7 @@ static int iscsi_target_rx_opcode(struct iscsi_conn *conn, unsigned char *buf)
case ISCSI_OP_LOGOUT:
cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
if (!cmd)
- return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES,
- 1, buf, conn);
+ goto reject;

ret = iscsit_handle_logout_cmd(conn, cmd, buf);
if (ret > 0)
@@ -3987,6 +3958,8 @@ static int iscsi_target_rx_opcode(struct iscsi_conn *conn, unsigned char *buf)
}

return ret;
+reject:
+ return iscsit_add_reject(conn, ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
}

int iscsi_target_rx_thread(void *arg)
@@ -4086,8 +4059,8 @@ restart:
(!(opcode & ISCSI_OP_LOGOUT)))) {
pr_err("Received illegal iSCSI Opcode: 0x%02x"
" while in Discovery Session, rejecting.\n", opcode);
- iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
- buffer, conn);
+ iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
+ buffer);
goto transport_err;
}

diff --git a/drivers/target/iscsi/iscsi_target.h b/drivers/target/iscsi/iscsi_target.h
index a0050b2..2c437cb 100644
--- a/drivers/target/iscsi/iscsi_target.h
+++ b/drivers/target/iscsi/iscsi_target.h
@@ -15,7 +15,7 @@ extern struct iscsi_np *iscsit_add_np(struct __kernel_sockaddr_storage *,
extern int iscsit_reset_np_thread(struct iscsi_np *, struct iscsi_tpg_np *,
struct iscsi_portal_group *);
extern int iscsit_del_np(struct iscsi_np *);
-extern int iscsit_add_reject_from_cmd(u8, int, int, unsigned char *, struct iscsi_cmd *);
+extern int iscsit_reject_cmd(struct iscsi_cmd *cmd, u8, unsigned char *);
extern void iscsit_set_unsoliticed_dataout(struct iscsi_cmd *);
extern int iscsit_logout_closesession(struct iscsi_cmd *, struct iscsi_conn *);
extern int iscsit_logout_closeconnection(struct iscsi_cmd *, struct iscsi_conn *);
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 8d8b3ff..421344d 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -474,7 +474,7 @@ static ssize_t __iscsi_##prefix##_store_##name( \
if (!capable(CAP_SYS_ADMIN)) \
return -EPERM; \
\
- snprintf(auth->name, PAGE_SIZE, "%s", page); \
+ snprintf(auth->name, sizeof(auth->name), "%s", page); \
if (!strncmp("NULL", auth->name, 4)) \
auth->naf_flags &= ~flags; \
else \
diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
index 60ec4b9..8907dcd 100644
--- a/drivers/target/iscsi/iscsi_target_core.h
+++ b/drivers/target/iscsi/iscsi_target_core.h
@@ -132,7 +132,6 @@ enum cmd_flags_table {
ICF_CONTIG_MEMORY = 0x00000020,
ICF_ATTACHED_TO_RQUEUE = 0x00000040,
ICF_OOO_CMDSN = 0x00000080,
- ICF_REJECT_FAIL_CONN = 0x00000100,
};

/* struct iscsi_cmd->i_state */
@@ -366,6 +365,8 @@ struct iscsi_cmd {
u8 maxcmdsn_inc;
/* Immediate Unsolicited Dataout */
u8 unsolicited_data;
+ /* Reject reason code */
+ u8 reject_reason;
/* CID contained in logout PDU when opcode == ISCSI_INIT_LOGOUT_CMND */
u16 logout_cid;
/* Command flags */
@@ -446,7 +447,6 @@ struct iscsi_cmd {
struct list_head datain_list;
/* R2T List */
struct list_head cmd_r2t_list;
- struct completion reject_comp;
/* Timer for DataOUT */
struct timer_list dataout_timer;
/* Iovecs for SCSI data payload RX/TX w/ kernel level sockets */
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
index dcb199d..08bd878 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.c
+++ b/drivers/target/iscsi/iscsi_target_erl0.c
@@ -746,13 +746,12 @@ int iscsit_check_post_dataout(
if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
pr_err("Unable to recover from DataOUT CRC"
" failure while ERL=0, closing session.\n");
- iscsit_add_reject_from_cmd(ISCSI_REASON_DATA_DIGEST_ERROR,
- 1, 0, buf, cmd);
+ iscsit_reject_cmd(cmd, ISCSI_REASON_DATA_DIGEST_ERROR,
+ buf);
return DATAOUT_CANNOT_RECOVER;
}

- iscsit_add_reject_from_cmd(ISCSI_REASON_DATA_DIGEST_ERROR,
- 0, 0, buf, cmd);
+ iscsit_reject_cmd(cmd, ISCSI_REASON_DATA_DIGEST_ERROR, buf);
return iscsit_dataout_post_crc_failed(cmd, buf);
}
}
@@ -909,6 +908,7 @@ void iscsit_cause_connection_reinstatement(struct iscsi_conn *conn, int sleep)
wait_for_completion(&conn->conn_wait_comp);
complete(&conn->conn_post_wait_comp);
}
+EXPORT_SYMBOL(iscsit_cause_connection_reinstatement);

void iscsit_fall_back_to_erl0(struct iscsi_session *sess)
{
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index 40d9dbc..586c268 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.c
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -162,9 +162,8 @@ static int iscsit_handle_r2t_snack(
" protocol error.\n", cmd->init_task_tag, begrun,
(begrun + runlength), cmd->acked_data_sn);

- return iscsit_add_reject_from_cmd(
- ISCSI_REASON_PROTOCOL_ERROR,
- 1, 0, buf, cmd);
+ return iscsit_reject_cmd(cmd,
+ ISCSI_REASON_PROTOCOL_ERROR, buf);
}

if (runlength) {
@@ -173,8 +172,8 @@ static int iscsit_handle_r2t_snack(
" with BegRun: 0x%08x, RunLength: 0x%08x, exceeds"
" current R2TSN: 0x%08x, protocol error.\n",
cmd->init_task_tag, begrun, runlength, cmd->r2t_sn);
- return iscsit_add_reject_from_cmd(
- ISCSI_REASON_BOOKMARK_INVALID, 1, 0, buf, cmd);
+ return iscsit_reject_cmd(cmd,
+ ISCSI_REASON_BOOKMARK_INVALID, buf);
}
last_r2tsn = (begrun + runlength);
} else
@@ -433,8 +432,7 @@ static int iscsit_handle_recovery_datain(
" protocol error.\n", cmd->init_task_tag, begrun,
(begrun + runlength), cmd->acked_data_sn);

- return iscsit_add_reject_from_cmd(ISCSI_REASON_PROTOCOL_ERROR,
- 1, 0, buf, cmd);
+ return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, buf);
}

/*
@@ -445,14 +443,14 @@ static int iscsit_handle_recovery_datain(
pr_err("Initiator requesting BegRun: 0x%08x, RunLength"
": 0x%08x greater than maximum DataSN: 0x%08x.\n",
begrun, runlength, (cmd->data_sn - 1));
- return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_INVALID,
- 1, 0, buf, cmd);
+ return iscsit_reject_cmd(cmd, ISCSI_REASON_BOOKMARK_INVALID,
+ buf);
}

dr = iscsit_allocate_datain_req();
if (!dr)
- return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_NO_RESOURCES,
- 1, 0, buf, cmd);
+ return iscsit_reject_cmd(cmd, ISCSI_REASON_BOOKMARK_NO_RESOURCES,
+ buf);

dr->data_sn = dr->begrun = begrun;
dr->runlength = runlength;
@@ -1090,7 +1088,7 @@ int iscsit_handle_ooo_cmdsn(

ooo_cmdsn = iscsit_allocate_ooo_cmdsn();
if (!ooo_cmdsn)
- return CMDSN_ERROR_CANNOT_RECOVER;
+ return -ENOMEM;

ooo_cmdsn->cmd = cmd;
ooo_cmdsn->batch_count = (batch) ?
@@ -1101,10 +1099,10 @@ int iscsit_handle_ooo_cmdsn(

if (iscsit_attach_ooo_cmdsn(sess, ooo_cmdsn) < 0) {
kmem_cache_free(lio_ooo_cache, ooo_cmdsn);
- return CMDSN_ERROR_CANNOT_RECOVER;
+ return -ENOMEM;
}

- return CMDSN_HIGHER_THAN_EXP;
+ return 0;
}

static int iscsit_set_dataout_timeout_values(
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 08a3bac..96e7fdb 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -178,7 +178,6 @@ struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp_mask)
INIT_LIST_HEAD(&cmd->i_conn_node);
INIT_LIST_HEAD(&cmd->datain_list);
INIT_LIST_HEAD(&cmd->cmd_r2t_list);
- init_completion(&cmd->reject_comp);
spin_lock_init(&cmd->datain_lock);
spin_lock_init(&cmd->dataout_timeout_lock);
spin_lock_init(&cmd->istate_lock);
@@ -284,13 +283,12 @@ static inline int iscsit_check_received_cmdsn(struct iscsi_session *sess, u32 cm
* Commands may be received out of order if MC/S is in use.
* Ensure they are executed in CmdSN order.
*/
-int iscsit_sequence_cmd(
- struct iscsi_conn *conn,
- struct iscsi_cmd *cmd,
- __be32 cmdsn)
+int iscsit_sequence_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+ unsigned char *buf, __be32 cmdsn)
{
- int ret;
- int cmdsn_ret;
+ int ret, cmdsn_ret;
+ bool reject = false;
+ u8 reason = ISCSI_REASON_BOOKMARK_NO_RESOURCES;

mutex_lock(&conn->sess->cmdsn_mutex);

@@ -300,9 +298,19 @@ int iscsit_sequence_cmd(
ret = iscsit_execute_cmd(cmd, 0);
if ((ret >= 0) && !list_empty(&conn->sess->sess_ooo_cmdsn_list))
iscsit_execute_ooo_cmdsns(conn->sess);
+ else if (ret < 0) {
+ reject = true;
+ ret = CMDSN_ERROR_CANNOT_RECOVER;
+ }
break;
case CMDSN_HIGHER_THAN_EXP:
ret = iscsit_handle_ooo_cmdsn(conn->sess, cmd, be32_to_cpu(cmdsn));
+ if (ret < 0) {
+ reject = true;
+ ret = CMDSN_ERROR_CANNOT_RECOVER;
+ break;
+ }
+ ret = CMDSN_HIGHER_THAN_EXP;
break;
case CMDSN_LOWER_THAN_EXP:
cmd->i_state = ISTATE_REMOVE;
@@ -310,11 +318,16 @@ int iscsit_sequence_cmd(
ret = cmdsn_ret;
break;
default:
+ reason = ISCSI_REASON_PROTOCOL_ERROR;
+ reject = true;
ret = cmdsn_ret;
break;
}
mutex_unlock(&conn->sess->cmdsn_mutex);

+ if (reject)
+ iscsit_reject_cmd(cmd, reason, buf);
+
return ret;
}
EXPORT_SYMBOL(iscsit_sequence_cmd);
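
The iscsit_sequence_cmd() rework above only records, inside the switch, whether a reject is needed; iscsit_reject_cmd() then runs after sess->cmdsn_mutex has been dropped. A minimal userspace sketch of that decide-under-lock, act-after-unlock pattern follows (the mutex, return codes and the printf standing in for the reject helper are illustrative, not the kernel API):

/* Sketch only: decide under the lock, act after dropping it. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t cmdsn_mutex = PTHREAD_MUTEX_INITIALIZER;

static int handle_cmd(int cmdsn_ret)
{
	bool reject = false;
	int reason = 0;			/* stand-in for "no resources" */
	int ret;

	pthread_mutex_lock(&cmdsn_mutex);
	switch (cmdsn_ret) {
	case 0:				/* in window: execute normally */
		ret = 0;
		break;
	default:			/* remember to reject, don't do it here */
		reject = true;
		reason = 1;		/* stand-in for "protocol error" */
		ret = -1;
		break;
	}
	pthread_mutex_unlock(&cmdsn_mutex);

	if (reject)			/* sending the reject may itself block */
		printf("reject cmd, reason %d\n", reason);
	return ret;
}

int main(void)
{
	handle_cmd(0);
	handle_cmd(2);
	return 0;
}
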
diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h
index a442265..e4fc34a 100644
--- a/drivers/target/iscsi/iscsi_target_util.h
+++ b/drivers/target/iscsi/iscsi_target_util.h
@@ -13,7 +13,8 @@ extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, gfp_t);
extern struct iscsi_seq *iscsit_get_seq_holder_for_datain(struct iscsi_cmd *, u32);
extern struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsi_cmd *);
extern struct iscsi_r2t *iscsit_get_holder_for_r2tsn(struct iscsi_cmd *, u32);
-int iscsit_sequence_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, __be32 cmdsn);
+extern int iscsit_sequence_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+ unsigned char * ,__be32 cmdsn);
extern int iscsit_check_unsolicited_dataout(struct iscsi_cmd *, unsigned char *);
extern struct iscsi_cmd *iscsit_find_cmd_from_itt(struct iscsi_conn *, itt_t);
extern struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump(struct iscsi_conn *,
diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
index 121aeb9..f597e88 100644
--- a/drivers/tty/tty_port.c
+++ b/drivers/tty/tty_port.c
@@ -256,10 +256,9 @@ void tty_port_tty_hangup(struct tty_port *port, bool check_clocal)
{
struct tty_struct *tty = tty_port_tty_get(port);

- if (tty && (!check_clocal || !C_CLOCAL(tty))) {
+ if (tty && (!check_clocal || !C_CLOCAL(tty)))
tty_hangup(tty);
- tty_kref_put(tty);
- }
+ tty_kref_put(tty);
}
EXPORT_SYMBOL_GPL(tty_port_tty_hangup);

diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index feef935..b93fc88 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -668,6 +668,15 @@ resubmit:
static inline int
hub_clear_tt_buffer (struct usb_device *hdev, u16 devinfo, u16 tt)
{
+ /* Need to clear both directions for control ep */
+ if (((devinfo >> 11) & USB_ENDPOINT_XFERTYPE_MASK) ==
+ USB_ENDPOINT_XFER_CONTROL) {
+ int status = usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0),
+ HUB_CLEAR_TT_BUFFER, USB_RT_PORT,
+ devinfo ^ 0x8000, tt, NULL, 0, 1000);
+ if (status)
+ return status;
+ }
return usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0),
HUB_CLEAR_TT_BUFFER, USB_RT_PORT, devinfo,
tt, NULL, 0, 1000);
@@ -2846,6 +2855,15 @@ static int usb_disable_function_remotewakeup(struct usb_device *udev)
USB_CTRL_SET_TIMEOUT);
}

+/* Count of wakeup-enabled devices at or below udev */
+static unsigned wakeup_enabled_descendants(struct usb_device *udev)
+{
+ struct usb_hub *hub = usb_hub_to_struct_hub(udev);
+
+ return udev->do_remote_wakeup +
+ (hub ? hub->wakeup_enabled_descendants : 0);
+}
+
/*
* usb_port_suspend - suspend a usb device's upstream port
* @udev: device that's no longer in active use, not a root hub
@@ -2886,8 +2904,8 @@ static int usb_disable_function_remotewakeup(struct usb_device *udev)
* Linux (2.6) currently has NO mechanisms to initiate that: no khubd
* timer, no SRP, no requests through sysfs.
*
- * If Runtime PM isn't enabled or used, non-SuperSpeed devices really get
- * suspended only when their bus goes into global suspend (i.e., the root
+ * If Runtime PM isn't enabled or used, non-SuperSpeed devices may not get
+ * suspended until their bus goes into global suspend (i.e., the root
* hub is suspended). Nevertheless, we change @udev->state to
* USB_STATE_SUSPENDED as this is the device's "logical" state. The actual
* upstream port setting is stored in @udev->port_is_suspended.
@@ -2958,15 +2976,21 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
/* see 7.1.7.6 */
if (hub_is_superspeed(hub->hdev))
status = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_U3);
- else if (PMSG_IS_AUTO(msg))
- status = set_port_feature(hub->hdev, port1,
- USB_PORT_FEAT_SUSPEND);
+
/*
* For system suspend, we do not need to enable the suspend feature
* on individual USB-2 ports. The devices will automatically go
* into suspend a few ms after the root hub stops sending packets.
* The USB 2.0 spec calls this "global suspend".
+ *
+ * However, many USB hubs have a bug: They don't relay wakeup requests
+ * from a downstream port if the port's suspend feature isn't on.
+ * Therefore we will turn on the suspend feature if udev or any of its
+ * descendants is enabled for remote wakeup.
*/
+ else if (PMSG_IS_AUTO(msg) || wakeup_enabled_descendants(udev) > 0)
+ status = set_port_feature(hub->hdev, port1,
+ USB_PORT_FEAT_SUSPEND);
else {
really_suspend = false;
status = 0;
@@ -3001,15 +3025,16 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
if (!PMSG_IS_AUTO(msg))
status = 0;
} else {
- /* device has up to 10 msec to fully suspend */
dev_dbg(&udev->dev, "usb %ssuspend, wakeup %d\n",
(PMSG_IS_AUTO(msg) ? "auto-" : ""),
udev->do_remote_wakeup);
- usb_set_device_state(udev, USB_STATE_SUSPENDED);
if (really_suspend) {
udev->port_is_suspended = 1;
+
+ /* device has up to 10 msec to fully suspend */
msleep(10);
}
+ usb_set_device_state(udev, USB_STATE_SUSPENDED);
}

/*
@@ -3291,7 +3316,11 @@ static int hub_suspend(struct usb_interface *intf, pm_message_t msg)
unsigned port1;
int status;

- /* Warn if children aren't already suspended */
+ /*
+ * Warn if children aren't already suspended.
+ * Also, add up the number of wakeup-enabled descendants.
+ */
+ hub->wakeup_enabled_descendants = 0;
for (port1 = 1; port1 <= hdev->maxchild; port1++) {
struct usb_device *udev;

@@ -3301,6 +3330,9 @@ static int hub_suspend(struct usb_interface *intf, pm_message_t msg)
if (PMSG_IS_AUTO(msg))
return -EBUSY;
}
+ if (udev)
+ hub->wakeup_enabled_descendants +=
+ wakeup_enabled_descendants(udev);
}

if (hdev->do_remote_wakeup && hub->quirk_check_port_auto_suspend) {
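
The hub.c hunks above add up wakeup-enabled devices below each port (hub_suspend() caches the count per hub) and use that to decide whether the USB-2 port suspend feature must be set: always for runtime suspend, and for system suspend only when a wakeup source sits below the port, since some hubs drop wakeup requests from ports left in global suspend. A small standalone sketch of that decision, with struct dev standing in for usb_device/usb_hub and a recursive count in place of the cached counter:

/* Sketch only: does this port need USB_PORT_FEAT_SUSPEND? */
#include <stdbool.h>
#include <stdio.h>

struct dev {
	bool do_remote_wakeup;
	struct dev *child[4];
	int nchild;
};

/* Count wakeup-enabled devices at or below d. */
static int wakeup_enabled_below(struct dev *d)
{
	int i, n = d->do_remote_wakeup ? 1 : 0;

	for (i = 0; i < d->nchild; i++)
		n += wakeup_enabled_below(d->child[i]);
	return n;
}

static bool need_port_suspend_feature(struct dev *d, bool runtime_pm)
{
	/* Runtime suspend always sets the feature; system suspend only
	 * does so when a wakeup source sits at or below this port. */
	return runtime_pm || wakeup_enabled_below(d) > 0;
}

int main(void)
{
	struct dev mouse = { .do_remote_wakeup = true };
	struct dev hub = { .child = { &mouse }, .nchild = 1 };

	printf("set PORT_SUSPEND: %d\n", need_port_suspend_feature(&hub, false));
	return 0;
}
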
diff --git a/drivers/usb/core/hub.h b/drivers/usb/core/hub.h
index 80ab9ee..f608b39 100644
--- a/drivers/usb/core/hub.h
+++ b/drivers/usb/core/hub.h
@@ -59,6 +59,9 @@ struct usb_hub {
struct usb_tt tt; /* Transaction Translator */

unsigned mA_per_port; /* current for each child */
+#ifdef CONFIG_PM
+ unsigned wakeup_enabled_descendants;
+#endif

unsigned limited_power:1;
unsigned quiescing:1;
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index c35d49d..358375e 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -450,7 +450,7 @@ static int dwc3_probe(struct platform_device *pdev)
}

if (IS_ERR(dwc->usb3_phy)) {
- ret = PTR_ERR(dwc->usb2_phy);
+ ret = PTR_ERR(dwc->usb3_phy);

/*
* if -ENXIO is returned, it means PHY layer wasn't
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index b69d322..27dad99 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -759,8 +759,8 @@ struct dwc3 {

struct dwc3_event_type {
u32 is_devspec:1;
- u32 type:6;
- u32 reserved8_31:25;
+ u32 type:7;
+ u32 reserved8_31:24;
} __packed;

#define DWC3_DEPEVT_XFERCOMPLETE 0x01
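
The dwc3_event_type change above widens the type field to 7 bits and shrinks the reserved field so the three bitfields still cover exactly 32 bits (1 + 7 + 24). A throwaway check of that arithmetic, with a plain userspace struct standing in for the packed kernel one:

/* Sketch only: the bitfield widths must sum to 32. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct evt {
	uint32_t is_devspec:1;
	uint32_t type:7;
	uint32_t reserved8_31:24;
};

int main(void)
{
	assert(1 + 7 + 24 == 32);
	printf("sizeof(struct evt) = %zu\n", sizeof(struct evt));
	return 0;
}
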
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index b5e5b35..f77083f 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1584,6 +1584,7 @@ err1:
__dwc3_gadget_ep_disable(dwc->eps[0]);

err0:
+ dwc->gadget_driver = NULL;
spin_unlock_irqrestore(&dwc->lock, flags);

return ret;
diff --git a/drivers/usb/gadget/udc-core.c b/drivers/usb/gadget/udc-core.c
index ffd8fa5..5514822 100644
--- a/drivers/usb/gadget/udc-core.c
+++ b/drivers/usb/gadget/udc-core.c
@@ -105,7 +105,7 @@ void usb_gadget_set_state(struct usb_gadget *gadget,
enum usb_device_state state)
{
gadget->state = state;
- sysfs_notify(&gadget->dev.kobj, NULL, "status");
+ sysfs_notify(&gadget->dev.kobj, NULL, "state");
}
EXPORT_SYMBOL_GPL(usb_gadget_set_state);

diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index 9ab4a4d..ca6289b 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -858,6 +858,7 @@ static int ehci_hub_control (
ehci->reset_done[wIndex] = jiffies
+ msecs_to_jiffies(20);
usb_hcd_start_port_resume(&hcd->self, wIndex);
+ set_bit(wIndex, &ehci->resuming_ports);
/* check the port again */
mod_timer(&ehci_to_hcd(ehci)->rh_timer,
ehci->reset_done[wIndex]);
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index cc24e39..f00cb20 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -93,7 +93,6 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
}
if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
pdev->device == PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI) {
- xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
xhci->quirks |= XHCI_EP_LIMIT_QUIRK;
xhci->limit_active_eps = 64;
xhci->quirks |= XHCI_SW_BW_CHECKING;
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 1969c00..cc3bfc5 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -434,7 +434,7 @@ static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,

/* A ring has pending URBs if its TD list is not empty */
if (!(ep->ep_state & EP_HAS_STREAMS)) {
- if (!(list_empty(&ep->ring->td_list)))
+ if (ep->ring && !(list_empty(&ep->ring->td_list)))
xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0);
return;
}
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index d8f640b..9a550b6 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -1171,9 +1171,6 @@ static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
}

xhci = hcd_to_xhci(hcd);
- if (xhci->xhc_state & XHCI_STATE_HALTED)
- return -ENODEV;
-
if (check_virt_dev) {
if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
printk(KERN_DEBUG "xHCI %s called with unaddressed "
@@ -1189,6 +1186,9 @@ static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
}
}

+ if (xhci->xhc_state & XHCI_STATE_HALTED)
+ return -ENODEV;
+
return 1;
}

@@ -4697,6 +4697,13 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)

get_quirks(dev, xhci);

+ /* In xhci controllers which follow xhci 1.0 spec gives a spurious
+ * success event after a short transfer. This quirk will ignore such
+ * spurious event.
+ */
+ if (xhci->hci_version > 0x96)
+ xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
+
/* Make sure the HC is halted. */
retval = xhci_halt(xhci);
if (retval)
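
The xhci.c hunk above keys the XHCI_SPURIOUS_SUCCESS quirk off the controller's interface version rather than a single PCI ID (HCIVERSION is BCD, e.g. 0x0096 for 0.96 and 0x0100 for 1.0). An illustrative version gate, with the helper name being a stand-in:

/* Sketch only: which controllers get the spurious-success quirk. */
#include <stdbool.h>
#include <stdio.h>

static bool wants_spurious_success_quirk(unsigned int hci_version)
{
	return hci_version > 0x96;	/* anything newer than 0.96 */
}

int main(void)
{
	printf("0.96: %d, 1.00: %d\n",
	       wants_spurious_success_quirk(0x96),
	       wants_spurious_success_quirk(0x100));
	return 0;
}
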
diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c
index c21386e..de98906 100644
--- a/drivers/usb/misc/sisusbvga/sisusb.c
+++ b/drivers/usb/misc/sisusbvga/sisusb.c
@@ -3247,6 +3247,7 @@ static const struct usb_device_id sisusb_table[] = {
{ USB_DEVICE(0x0711, 0x0903) },
{ USB_DEVICE(0x0711, 0x0918) },
{ USB_DEVICE(0x0711, 0x0920) },
+ { USB_DEVICE(0x0711, 0x0950) },
{ USB_DEVICE(0x182d, 0x021c) },
{ USB_DEVICE(0x182d, 0x0269) },
{ }
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 7260ec6..b65e657 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -735,9 +735,34 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(FTDI_VID, FTDI_NDI_AURORA_SCU_PID),
.driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk },
{ USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) },
- { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_SERIAL_VX7_PID) },
- { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_CT29B_PID) },
- { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_RTS01_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_S03_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_59_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_57A_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_57B_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_29A_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_29B_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_29F_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_62B_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_S01_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_63_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_29C_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_81B_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_82B_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_K5D_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_K4Y_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_K5G_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_S05_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_60_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_61_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_62_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_63B_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_64_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_65_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_92_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_92D_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_W5R_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_A5R_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_PW1_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_MAXSTREAM_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_PHI_FISCO_PID) },
{ USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) },
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 6dd7925..1b8af46 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -815,11 +815,35 @@
/*
* RT Systems programming cables for various ham radios
*/
-#define RTSYSTEMS_VID 0x2100 /* Vendor ID */
-#define RTSYSTEMS_SERIAL_VX7_PID 0x9e52 /* Serial converter for VX-7 Radios using FT232RL */
-#define RTSYSTEMS_CT29B_PID 0x9e54 /* CT29B Radio Cable */
-#define RTSYSTEMS_RTS01_PID 0x9e57 /* USB-RTS01 Radio Cable */
-
+#define RTSYSTEMS_VID 0x2100 /* Vendor ID */
+#define RTSYSTEMS_USB_S03_PID 0x9001 /* RTS-03 USB to Serial Adapter */
+#define RTSYSTEMS_USB_59_PID 0x9e50 /* USB-59 USB to 8 pin plug */
+#define RTSYSTEMS_USB_57A_PID 0x9e51 /* USB-57A USB to 4pin 3.5mm plug */
+#define RTSYSTEMS_USB_57B_PID 0x9e52 /* USB-57B USB to extended 4pin 3.5mm plug */
+#define RTSYSTEMS_USB_29A_PID 0x9e53 /* USB-29A USB to 3.5mm stereo plug */
+#define RTSYSTEMS_USB_29B_PID 0x9e54 /* USB-29B USB to 6 pin mini din */
+#define RTSYSTEMS_USB_29F_PID 0x9e55 /* USB-29F USB to 6 pin modular plug */
+#define RTSYSTEMS_USB_62B_PID 0x9e56 /* USB-62B USB to 8 pin mini din plug*/
+#define RTSYSTEMS_USB_S01_PID 0x9e57 /* USB-RTS01 USB to 3.5 mm stereo plug*/
+#define RTSYSTEMS_USB_63_PID 0x9e58 /* USB-63 USB to 9 pin female*/
+#define RTSYSTEMS_USB_29C_PID 0x9e59 /* USB-29C USB to 4 pin modular plug*/
+#define RTSYSTEMS_USB_81B_PID 0x9e5A /* USB-81 USB to 8 pin mini din plug*/
+#define RTSYSTEMS_USB_82B_PID 0x9e5B /* USB-82 USB to 2.5 mm stereo plug*/
+#define RTSYSTEMS_USB_K5D_PID 0x9e5C /* USB-K5D USB to 8 pin modular plug*/
+#define RTSYSTEMS_USB_K4Y_PID 0x9e5D /* USB-K4Y USB to 2.5/3.5 mm plugs*/
+#define RTSYSTEMS_USB_K5G_PID 0x9e5E /* USB-K5G USB to 8 pin modular plug*/
+#define RTSYSTEMS_USB_S05_PID 0x9e5F /* USB-RTS05 USB to 2.5 mm stereo plug*/
+#define RTSYSTEMS_USB_60_PID 0x9e60 /* USB-60 USB to 6 pin din*/
+#define RTSYSTEMS_USB_61_PID 0x9e61 /* USB-61 USB to 6 pin mini din*/
+#define RTSYSTEMS_USB_62_PID 0x9e62 /* USB-62 USB to 8 pin mini din*/
+#define RTSYSTEMS_USB_63B_PID 0x9e63 /* USB-63 USB to 9 pin female*/
+#define RTSYSTEMS_USB_64_PID 0x9e64 /* USB-64 USB to 9 pin male*/
+#define RTSYSTEMS_USB_65_PID 0x9e65 /* USB-65 USB to 9 pin female null modem*/
+#define RTSYSTEMS_USB_92_PID 0x9e66 /* USB-92 USB to 12 pin plug*/
+#define RTSYSTEMS_USB_92D_PID 0x9e67 /* USB-92D USB to 12 pin plug data*/
+#define RTSYSTEMS_USB_W5R_PID 0x9e68 /* USB-W5R USB to 8 pin modular plug*/
+#define RTSYSTEMS_USB_A5R_PID 0x9e69 /* USB-A5R USB to 8 pin modular plug*/
+#define RTSYSTEMS_USB_PW1_PID 0x9e6A /* USB-PW1 USB to 8 pin modular plug*/

/*
* Physik Instrumente
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 7e99808..62b86a6 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -914,20 +914,20 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
status = mos7840_get_reg_sync(port, mos7840_port->SpRegOffset, &Data);
if (status < 0) {
dev_dbg(&port->dev, "Reading Spreg failed\n");
- return -1;
+ goto err;
}
Data |= 0x80;
status = mos7840_set_reg_sync(port, mos7840_port->SpRegOffset, Data);
if (status < 0) {
dev_dbg(&port->dev, "writing Spreg failed\n");
- return -1;
+ goto err;
}

Data &= ~0x80;
status = mos7840_set_reg_sync(port, mos7840_port->SpRegOffset, Data);
if (status < 0) {
dev_dbg(&port->dev, "writing Spreg failed\n");
- return -1;
+ goto err;
}
/* End of block to be checked */

@@ -936,7 +936,7 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
&Data);
if (status < 0) {
dev_dbg(&port->dev, "Reading Controlreg failed\n");
- return -1;
+ goto err;
}
Data |= 0x08; /* Driver done bit */
Data |= 0x20; /* rx_disable */
@@ -944,7 +944,7 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
mos7840_port->ControlRegOffset, Data);
if (status < 0) {
dev_dbg(&port->dev, "writing Controlreg failed\n");
- return -1;
+ goto err;
}
/* do register settings here */
/* Set all regs to the device default values. */
@@ -955,21 +955,21 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
status = mos7840_set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data);
if (status < 0) {
dev_dbg(&port->dev, "disabling interrupts failed\n");
- return -1;
+ goto err;
}
/* Set FIFO_CONTROL_REGISTER to the default value */
Data = 0x00;
status = mos7840_set_uart_reg(port, FIFO_CONTROL_REGISTER, Data);
if (status < 0) {
dev_dbg(&port->dev, "Writing FIFO_CONTROL_REGISTER failed\n");
- return -1;
+ goto err;
}

Data = 0xcf;
status = mos7840_set_uart_reg(port, FIFO_CONTROL_REGISTER, Data);
if (status < 0) {
dev_dbg(&port->dev, "Writing FIFO_CONTROL_REGISTER failed\n");
- return -1;
+ goto err;
}

Data = 0x03;
@@ -1114,6 +1114,15 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
/* mos7840_change_port_settings(mos7840_port,old_termios); */

return 0;
+err:
+ for (j = 0; j < NUM_URBS; ++j) {
+ urb = mos7840_port->write_urb_pool[j];
+ if (!urb)
+ continue;
+ kfree(urb->transfer_buffer);
+ usb_free_urb(urb);
+ }
+ return status;
}
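
The new mos7840_open() error path above frees whatever part of the write URB pool was already set up before the register access failed, instead of returning -1 and leaking it. A generic standalone form of the same goto-cleanup pattern (NUM_URBS and the malloc'd buffers are stand-ins for the URB pool):

/* Sketch only: partial-allocation cleanup via a single error label. */
#include <stdio.h>
#include <stdlib.h>

#define NUM_URBS 4

static int open_port(void)
{
	char *buf[NUM_URBS] = { NULL };
	int i, status = 0;

	for (i = 0; i < NUM_URBS; i++) {
		buf[i] = malloc(32);
		if (!buf[i]) {
			status = -1;
			goto err;
		}
	}
	/* ... device setup would go here; any failure jumps to err ... */
	return 0;
err:
	for (i = 0; i < NUM_URBS; i++)
		free(buf[i]);		/* free(NULL) is a no-op */
	return status;
}

int main(void)
{
	printf("open_port() = %d\n", open_port());
	return 0;
}
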

/*****************************************************************************
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index e581c25..01f79f1 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -371,7 +371,7 @@ static int ti_startup(struct usb_serial *serial)
usb_set_serial_data(serial, tdev);

/* determine device type */
- if (usb_match_id(serial->interface, ti_id_table_3410))
+ if (serial->type == &ti_1port_device)
tdev->td_is_3410 = 1;
dev_dbg(&dev->dev, "%s - device type is %s\n", __func__,
tdev->td_is_3410 ? "3410" : "5052");
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 179933528..c015f2c 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -665,6 +665,13 @@ UNUSUAL_DEV( 0x054c, 0x016a, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_FIX_INQUIRY ),

+/* Submitted by Ren Bigcren <bigcren.ren@xxxxxxxxxxxxxx> */
+UNUSUAL_DEV( 0x054c, 0x02a5, 0x0100, 0x0100,
+ "Sony Corp.",
+ "MicroVault Flash Drive",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_NO_READ_CAPACITY_16 ),
+
/* floppy reports multiple luns */
UNUSUAL_DEV( 0x055d, 0x2020, 0x0000, 0x0210,
"SAMSUNG",
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
index 45c8efa..34924fb 100644
--- a/drivers/xen/evtchn.c
+++ b/drivers/xen/evtchn.c
@@ -377,18 +377,12 @@ static long evtchn_ioctl(struct file *file,
if (unbind.port >= NR_EVENT_CHANNELS)
break;

- spin_lock_irq(&port_user_lock);
-
rc = -ENOTCONN;
- if (get_port_user(unbind.port) != u) {
- spin_unlock_irq(&port_user_lock);
+ if (get_port_user(unbind.port) != u)
break;
- }

disable_irq(irq_from_evtchn(unbind.port));

- spin_unlock_irq(&port_user_lock);
-
evtchn_unbind_from_user(u, unbind.port);

rc = 0;
@@ -488,26 +482,15 @@ static int evtchn_release(struct inode *inode, struct file *filp)
int i;
struct per_user_data *u = filp->private_data;

- spin_lock_irq(&port_user_lock);
-
- free_page((unsigned long)u->ring);
-
for (i = 0; i < NR_EVENT_CHANNELS; i++) {
if (get_port_user(i) != u)
continue;

disable_irq(irq_from_evtchn(i));
- }
-
- spin_unlock_irq(&port_user_lock);
-
- for (i = 0; i < NR_EVENT_CHANNELS; i++) {
- if (get_port_user(i) != u)
- continue;
-
evtchn_unbind_from_user(get_port_user(i), i);
}

+ free_page((unsigned long)u->ring);
kfree(u->name);
kfree(u);

diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index df472ab..0b272d0 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -7298,6 +7298,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
int err = 0;
int ret;
int level;
+ bool root_dropped = false;

path = btrfs_alloc_path();
if (!path) {
@@ -7355,6 +7356,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
while (1) {
btrfs_tree_lock(path->nodes[level]);
btrfs_set_lock_blocking(path->nodes[level]);
+ path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;

ret = btrfs_lookup_extent_info(trans, root,
path->nodes[level]->start,
@@ -7370,6 +7372,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
break;

btrfs_tree_unlock(path->nodes[level]);
+ path->locks[level] = 0;
WARN_ON(wc->refs[level] != 1);
level--;
}
@@ -7471,12 +7474,22 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
free_extent_buffer(root->commit_root);
kfree(root);
}
+ root_dropped = true;
out_end_trans:
btrfs_end_transaction_throttle(trans, tree_root);
out_free:
kfree(wc);
btrfs_free_path(path);
out:
+ /*
+ * So if we need to stop dropping the snapshot for whatever reason we
+ * need to make sure to add it back to the dead root list so that we
+ * keep trying to do the work later. This also cleans up roots if we
+ * don't have it in the radix (like when we recover after a power fail
+ * or unmount) so we don't leak memory.
+ */
+ if (root_dropped == false)
+ btrfs_add_dead_root(root);
if (err)
btrfs_std_error(root->fs_info, err);
return err;
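
The root_dropped flag added above implements "if the snapshot drop stopped early for any reason, put the root back on the dead-roots list so the work is retried later and nothing is leaked". A minimal model of that track-completion-and-requeue idea, with the list and item types as stand-ins:

/* Sketch only: requeue unfinished work instead of dropping it. */
#include <stdbool.h>
#include <stdio.h>

struct work_item { const char *name; bool queued; };

static void requeue(struct work_item *w)
{
	w->queued = true;		/* models btrfs_add_dead_root() */
}

static int drop_snapshot(struct work_item *w, bool hit_error)
{
	bool dropped = false;

	w->queued = false;
	if (hit_error)
		goto out;		/* stopped before finishing */
	/* ... walk and free the tree ... */
	dropped = true;
out:
	if (!dropped)
		requeue(w);		/* try again later */
	return hit_error ? -1 : 0;
}

int main(void)
{
	struct work_item w = { "snap", true };

	drop_snapshot(&w, true);
	printf("requeued after error: %d\n", w.queued);
	return 0;
}
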
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 79bd479..eb84c2d 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -2501,7 +2501,7 @@ again:
ret = scrub_extent(sctx, extent_logical, extent_len,
extent_physical, extent_dev, flags,
generation, extent_mirror_num,
- extent_physical);
+ extent_logical - logical + physical);
if (ret)
goto out;

diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 84ce601..baf149a 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -802,9 +802,10 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
flags = O_WRONLY|O_LARGEFILE;
}
*filp = dentry_open(&path, flags, current_cred());
- if (IS_ERR(*filp))
+ if (IS_ERR(*filp)) {
host_err = PTR_ERR(*filp);
- else {
+ *filp = NULL;
+ } else {
host_err = ima_file_check(*filp, may_flags);

if (may_flags & NFSD_MAY_64BIT_COOKIE)
diff --git a/fs/super.c b/fs/super.c
index 7465d43..68307c0 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -336,19 +336,19 @@ EXPORT_SYMBOL(deactivate_super);
* and want to turn it into a full-blown active reference. grab_super()
* is called with sb_lock held and drops it. Returns 1 in case of
* success, 0 if we had failed (superblock contents was already dead or
- * dying when grab_super() had been called).
+ * dying when grab_super() had been called). Note that this is only
+ * called for superblocks not in rundown mode (== ones still on ->fs_supers
+ * of their type), so increment of ->s_count is OK here.
*/
static int grab_super(struct super_block *s) __releases(sb_lock)
{
- if (atomic_inc_not_zero(&s->s_active)) {
- spin_unlock(&sb_lock);
- return 1;
- }
- /* it's going away */
s->s_count++;
spin_unlock(&sb_lock);
- /* wait for it to die */
down_write(&s->s_umount);
+ if ((s->s_flags & MS_BORN) && atomic_inc_not_zero(&s->s_active)) {
+ put_super(s);
+ return 1;
+ }
up_write(&s->s_umount);
put_super(s);
return 0;
@@ -463,11 +463,6 @@ retry:
destroy_super(s);
s = NULL;
}
- down_write(&old->s_umount);
- if (unlikely(!(old->s_flags & MS_BORN))) {
- deactivate_locked_super(old);
- goto retry;
- }
return old;
}
}
@@ -660,10 +655,10 @@ restart:
if (hlist_unhashed(&sb->s_instances))
continue;
if (sb->s_bdev == bdev) {
- if (grab_super(sb)) /* drops sb_lock */
- return sb;
- else
+ if (!grab_super(sb))
goto restart;
+ up_write(&sb->s_umount);
+ return sb;
}
}
spin_unlock(&sb_lock);
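
The reworked grab_super() above takes a passive reference (s_count) under sb_lock, drops the lock, acquires s_umount, and only then tries to promote to an active reference if the superblock is both born and still active. A simplified single-threaded model of that promote-after-locking pattern (the real code uses atomic_inc_not_zero() and real locks, both elided here):

/* Sketch only: passive ref first, promote to active only after checking. */
#include <stdbool.h>
#include <stdio.h>

struct sb { int s_count; int s_active; bool born; };

static bool grab(struct sb *s)
{
	s->s_count++;			/* passive ref, taken under sb_lock */
	/* ... drop sb_lock, take s_umount ... */
	if (s->born && s->s_active > 0) {
		s->s_active++;		/* promote to an active reference */
		s->s_count--;		/* models put_super() */
		return true;
	}
	/* ... release s_umount ... */
	s->s_count--;
	return false;
}

int main(void)
{
	struct sb dead = { .s_count = 1, .s_active = 0, .born = true };
	struct sb live = { .s_count = 1, .s_active = 1, .born = true };

	printf("dead: %d, live: %d\n", grab(&dead), grab(&live));
	return 0;
}
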
diff --git a/include/linux/firewire.h b/include/linux/firewire.h
index 191501a..217e4b4 100644
--- a/include/linux/firewire.h
+++ b/include/linux/firewire.h
@@ -434,6 +434,7 @@ struct fw_iso_context {
int type;
int channel;
int speed;
+ bool drop_overflow_headers;
size_t header_size;
union {
fw_iso_callback_t sc;
diff --git a/include/target/iscsi/iscsi_transport.h b/include/target/iscsi/iscsi_transport.h
index 23a87d0..c5aade5 100644
--- a/include/target/iscsi/iscsi_transport.h
+++ b/include/target/iscsi/iscsi_transport.h
@@ -34,8 +34,6 @@ extern void iscsit_put_transport(struct iscsit_transport *);
/*
* From iscsi_target.c
*/
-extern int iscsit_add_reject_from_cmd(u8, int, int, unsigned char *,
- struct iscsi_cmd *);
extern int iscsit_setup_scsi_cmd(struct iscsi_conn *, struct iscsi_cmd *,
unsigned char *);
extern void iscsit_set_unsoliticed_dataout(struct iscsi_cmd *);
@@ -67,6 +65,10 @@ extern int iscsit_logout_post_handler(struct iscsi_cmd *, struct iscsi_conn *);
*/
extern void iscsit_increment_maxcmdsn(struct iscsi_cmd *, struct iscsi_session *);
/*
+ * From iscsi_target_erl0.c
+ */
+extern void iscsit_cause_connection_reinstatement(struct iscsi_conn *, int);
+/*
* From iscsi_target_erl1.c
*/
extern void iscsit_stop_dataout_timer(struct iscsi_cmd *);
@@ -80,4 +82,5 @@ extern int iscsit_tmr_post_handler(struct iscsi_cmd *, struct iscsi_conn *);
* From iscsi_target_util.c
*/
extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, gfp_t);
-extern int iscsit_sequence_cmd(struct iscsi_conn *, struct iscsi_cmd *, __be32);
+extern int iscsit_sequence_cmd(struct iscsi_conn *, struct iscsi_cmd *,
+ unsigned char *, __be32);
diff --git a/include/uapi/linux/firewire-cdev.h b/include/uapi/linux/firewire-cdev.h
index d500369..1db453e 100644
--- a/include/uapi/linux/firewire-cdev.h
+++ b/include/uapi/linux/firewire-cdev.h
@@ -215,8 +215,8 @@ struct fw_cdev_event_request2 {
* with the %FW_CDEV_ISO_INTERRUPT bit set, when explicitly requested with
* %FW_CDEV_IOC_FLUSH_ISO, or when there have been so many completed packets
* without the interrupt bit set that the kernel's internal buffer for @header
- * is about to overflow. (In the last case, kernels with ABI version < 5 drop
- * header data up to the next interrupt packet.)
+ * is about to overflow. (In the last case, ABI versions < 5 drop header data
+ * up to the next interrupt packet.)
*
* Isochronous transmit events (context type %FW_CDEV_ISO_CONTEXT_TRANSMIT):
*
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 0b936d8..f7bc3ce 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1163,18 +1163,17 @@ void tracing_reset_current(int cpu)
tracing_reset(&global_trace.trace_buffer, cpu);
}

+/* Must have trace_types_lock held */
void tracing_reset_all_online_cpus(void)
{
struct trace_array *tr;

- mutex_lock(&trace_types_lock);
list_for_each_entry(tr, &ftrace_trace_arrays, list) {
tracing_reset_online_cpus(&tr->trace_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
tracing_reset_online_cpus(&tr->max_buffer);
#endif
}
- mutex_unlock(&trace_types_lock);
}

#define SAVED_CMDLINES 128
@@ -2956,7 +2955,6 @@ static int tracing_release(struct inode *inode, struct file *file)

iter = m->private;
tr = iter->tr;
- trace_array_put(tr);

mutex_lock(&trace_types_lock);

@@ -2971,6 +2969,9 @@ static int tracing_release(struct inode *inode, struct file *file)
if (!iter->snapshot)
/* reenable tracing if it was previously enabled */
tracing_start_tr(tr);
+
+ __trace_array_put(tr);
+
mutex_unlock(&trace_types_lock);

mutex_destroy(&iter->mutex);
@@ -3395,6 +3396,7 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
static int tracing_trace_options_open(struct inode *inode, struct file *file)
{
struct trace_array *tr = inode->i_private;
+ int ret;

if (tracing_disabled)
return -ENODEV;
@@ -3402,7 +3404,11 @@ static int tracing_trace_options_open(struct inode *inode, struct file *file)
if (trace_array_get(tr) < 0)
return -ENODEV;

- return single_open(file, tracing_trace_options_show, inode->i_private);
+ ret = single_open(file, tracing_trace_options_show, inode->i_private);
+ if (ret < 0)
+ trace_array_put(tr);
+
+ return ret;
}

static const struct file_operations tracing_iter_fops = {
@@ -3906,6 +3912,7 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
iter = kzalloc(sizeof(*iter), GFP_KERNEL);
if (!iter) {
ret = -ENOMEM;
+ __trace_array_put(tr);
goto out;
}

@@ -4652,21 +4659,24 @@ static int tracing_snapshot_open(struct inode *inode, struct file *file)
ret = PTR_ERR(iter);
} else {
/* Writes still need the seq_file to hold the private data */
+ ret = -ENOMEM;
m = kzalloc(sizeof(*m), GFP_KERNEL);
if (!m)
- return -ENOMEM;
+ goto out;
iter = kzalloc(sizeof(*iter), GFP_KERNEL);
if (!iter) {
kfree(m);
- return -ENOMEM;
+ goto out;
}
+ ret = 0;
+
iter->tr = tr;
iter->trace_buffer = &tc->tr->max_buffer;
iter->cpu_file = tc->cpu;
m->private = iter;
file->private_data = m;
}
-
+out:
if (ret < 0)
trace_array_put(tr);

@@ -4896,8 +4906,6 @@ static int tracing_buffers_open(struct inode *inode, struct file *filp)

mutex_lock(&trace_types_lock);

- tr->ref++;
-
info->iter.tr = tr;
info->iter.cpu_file = tc->cpu;
info->iter.trace = tr->current_trace;
@@ -5276,9 +5284,10 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
}

static const struct file_operations tracing_stats_fops = {
- .open = tracing_open_generic,
+ .open = tracing_open_generic_tc,
.read = tracing_stats_read,
.llseek = generic_file_llseek,
+ .release = tracing_release_generic_tc,
};

#ifdef CONFIG_DYNAMIC_FTRACE
@@ -5926,8 +5935,10 @@ static int new_instance_create(const char *name)
goto out_free_tr;

ret = event_trace_add_tracer(tr->dir, tr);
- if (ret)
+ if (ret) {
+ debugfs_remove_recursive(tr->dir);
goto out_free_tr;
+ }

init_tracer_debugfs(tr, tr->dir);

diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 6dfd48b..6953263 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -1221,6 +1221,7 @@ show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)

static int ftrace_event_avail_open(struct inode *inode, struct file *file);
static int ftrace_event_set_open(struct inode *inode, struct file *file);
+static int ftrace_event_release(struct inode *inode, struct file *file);

static const struct seq_operations show_event_seq_ops = {
.start = t_start,
@@ -1248,7 +1249,7 @@ static const struct file_operations ftrace_set_event_fops = {
.read = seq_read,
.write = ftrace_event_write,
.llseek = seq_lseek,
- .release = seq_release,
+ .release = ftrace_event_release,
};

static const struct file_operations ftrace_enable_fops = {
@@ -1326,6 +1327,15 @@ ftrace_event_open(struct inode *inode, struct file *file,
return ret;
}

+static int ftrace_event_release(struct inode *inode, struct file *file)
+{
+ struct trace_array *tr = inode->i_private;
+
+ trace_array_put(tr);
+
+ return seq_release(inode, file);
+}
+
static int
ftrace_event_avail_open(struct inode *inode, struct file *file)
{
@@ -1339,12 +1349,19 @@ ftrace_event_set_open(struct inode *inode, struct file *file)
{
const struct seq_operations *seq_ops = &show_set_event_seq_ops;
struct trace_array *tr = inode->i_private;
+ int ret;
+
+ if (trace_array_get(tr) < 0)
+ return -ENODEV;

if ((file->f_mode & FMODE_WRITE) &&
(file->f_flags & O_TRUNC))
ftrace_clear_events(tr);

- return ftrace_event_open(inode, file, seq_ops);
+ ret = ftrace_event_open(inode, file, seq_ops);
+ if (ret < 0)
+ trace_array_put(tr);
+ return ret;
}

static struct event_subsystem *
diff --git a/mm/memory.c b/mm/memory.c
index 61a262b..5e50800 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1101,6 +1101,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
spinlock_t *ptl;
pte_t *start_pte;
pte_t *pte;
+ unsigned long range_start = addr;

again:
init_rss_vec(rss);
@@ -1206,12 +1207,14 @@ again:
force_flush = 0;

#ifdef HAVE_GENERIC_MMU_GATHER
- tlb->start = addr;
- tlb->end = end;
+ tlb->start = range_start;
+ tlb->end = addr;
#endif
tlb_flush_mmu(tlb);
- if (addr != end)
+ if (addr != end) {
+ range_start = addr;
goto again;
+ }
}

return addr;
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 7431001..4baf12e 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -732,7 +732,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
if (prev) {
vma = prev;
next = vma->vm_next;
- continue;
+ if (mpol_equal(vma_policy(vma), new_pol))
+ continue;
+ /* vma_merge() joined vma && vma->next, case 8 */
+ goto replace;
}
if (vma->vm_start != vmstart) {
err = split_vma(vma->vm_mm, vma, vmstart, 1);
@@ -744,6 +747,7 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
if (err)
goto out;
}
+ replace:
err = vma_replace_policy(vma, new_pol);
if (err)
goto out;
diff --git a/mm/mmap.c b/mm/mmap.c
index f681e18..7dbe397 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -865,7 +865,7 @@ again: remove_next = 1 + (end > next->vm_end);
if (next->anon_vma)
anon_vma_merge(vma, next);
mm->map_count--;
- vma_set_policy(vma, vma_policy(next));
+ mpol_put(vma_policy(next));
kmem_cache_free(vm_area_cachep, next);
/*
* In mprotect's case 6 (see comments on vma_merge),
diff --git a/net/sunrpc/xprtrdma/svc_rdma_marshal.c b/net/sunrpc/xprtrdma/svc_rdma_marshal.c
index 8d2eddd..65b1462 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_marshal.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_marshal.c
@@ -98,6 +98,7 @@ void svc_rdma_rcl_chunk_counts(struct rpcrdma_read_chunk *ch,
*/
static u32 *decode_write_list(u32 *va, u32 *vaend)
{
+ unsigned long start, end;
int nchunks;

struct rpcrdma_write_array *ary =
@@ -113,9 +114,12 @@ static u32 *decode_write_list(u32 *va, u32 *vaend)
return NULL;
}
nchunks = ntohl(ary->wc_nchunks);
- if (((unsigned long)&ary->wc_array[0] +
- (sizeof(struct rpcrdma_write_chunk) * nchunks)) >
- (unsigned long)vaend) {
+
+ start = (unsigned long)&ary->wc_array[0];
+ end = (unsigned long)vaend;
+ if (nchunks < 0 ||
+ nchunks > (SIZE_MAX - start) / sizeof(struct rpcrdma_write_chunk) ||
+ (start + (sizeof(struct rpcrdma_write_chunk) * nchunks)) > end) {
dprintk("svcrdma: ary=%p, wc_nchunks=%d, vaend=%p\n",
ary, nchunks, vaend);
return NULL;
@@ -129,6 +133,7 @@ static u32 *decode_write_list(u32 *va, u32 *vaend)

static u32 *decode_reply_array(u32 *va, u32 *vaend)
{
+ unsigned long start, end;
int nchunks;
struct rpcrdma_write_array *ary =
(struct rpcrdma_write_array *)va;
@@ -143,9 +148,12 @@ static u32 *decode_reply_array(u32 *va, u32 *vaend)
return NULL;
}
nchunks = ntohl(ary->wc_nchunks);
- if (((unsigned long)&ary->wc_array[0] +
- (sizeof(struct rpcrdma_write_chunk) * nchunks)) >
- (unsigned long)vaend) {
+
+ start = (unsigned long)&ary->wc_array[0];
+ end = (unsigned long)vaend;
+ if (nchunks < 0 ||
+ nchunks > (SIZE_MAX - start) / sizeof(struct rpcrdma_write_chunk) ||
+ (start + (sizeof(struct rpcrdma_write_chunk) * nchunks)) > end) {
dprintk("svcrdma: ary=%p, wc_nchunks=%d, vaend=%p\n",
ary, nchunks, vaend);
return NULL;
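
The decode_write_list()/decode_reply_array() checks above guard "start + nchunks * chunk_size <= end" against a negative or oversized nchunks overflowing the multiplication before the comparison. A standalone form of that overflow-safe bounds check (function and parameter names are illustrative):

/* Sketch only: reject counts that would overflow before comparing. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool chunks_fit(unsigned long start, unsigned long end,
		       int nchunks, size_t chunk_size)
{
	if (nchunks < 0)
		return false;
	if ((size_t)nchunks > (SIZE_MAX - start) / chunk_size)
		return false;	/* start + nchunks * chunk_size would wrap */
	return start + (size_t)nchunks * chunk_size <= end;
}

int main(void)
{
	printf("%d %d\n",
	       chunks_fit(0x1000, 0x2000, 16, 16),	/* fits */
	       chunks_fit(0x1000, 0x2000, -1, 16));	/* rejected */
	return 0;
}
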
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 403010c..051c03d 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -3495,9 +3495,12 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1028, 0x05f5, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x05f6, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x05f8, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x05f9, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x05fb, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x0606, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x0608, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x0609, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x0613, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
SND_PCI_QUIRK(0x103c, 0x18e6, "HP", ALC269_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x1973, "HP Pavilion", ALC269_FIXUP_HP_MUTE_LED_MIC1),
diff --git a/sound/soc/codecs/max98088.c b/sound/soc/codecs/max98088.c
index 3eeada5..566a367 100644
--- a/sound/soc/codecs/max98088.c
+++ b/sound/soc/codecs/max98088.c
@@ -1612,7 +1612,7 @@ static int max98088_dai2_digital_mute(struct snd_soc_dai *codec_dai, int mute)

static void max98088_sync_cache(struct snd_soc_codec *codec)
{
- u16 *reg_cache = codec->reg_cache;
+ u8 *reg_cache = codec->reg_cache;
int i;

if (!codec->cache_sync)
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
index 92bbfec..ea47938 100644
--- a/sound/soc/codecs/sgtl5000.c
+++ b/sound/soc/codecs/sgtl5000.c
@@ -37,7 +37,7 @@
static const u16 sgtl5000_regs[SGTL5000_MAX_REG_OFFSET] = {
[SGTL5000_CHIP_CLK_CTRL] = 0x0008,
[SGTL5000_CHIP_I2S_CTRL] = 0x0010,
- [SGTL5000_CHIP_SSS_CTRL] = 0x0008,
+ [SGTL5000_CHIP_SSS_CTRL] = 0x0010,
[SGTL5000_CHIP_DAC_VOL] = 0x3c3c,
[SGTL5000_CHIP_PAD_STRENGTH] = 0x015f,
[SGTL5000_CHIP_ANA_HP_CTRL] = 0x1818,
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index e971028..730dd0c 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -1600,7 +1600,6 @@ static int wm8962_put_hp_sw(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
- u16 *reg_cache = codec->reg_cache;
int ret;

/* Apply the update (if any) */
@@ -1609,16 +1608,19 @@ static int wm8962_put_hp_sw(struct snd_kcontrol *kcontrol,
return 0;

/* If the left PGA is enabled hit that VU bit... */
- if (snd_soc_read(codec, WM8962_PWR_MGMT_2) & WM8962_HPOUTL_PGA_ENA)
- return snd_soc_write(codec, WM8962_HPOUTL_VOLUME,
- reg_cache[WM8962_HPOUTL_VOLUME]);
+ ret = snd_soc_read(codec, WM8962_PWR_MGMT_2);
+ if (ret & WM8962_HPOUTL_PGA_ENA) {
+ snd_soc_write(codec, WM8962_HPOUTL_VOLUME,
+ snd_soc_read(codec, WM8962_HPOUTL_VOLUME));
+ return 1;
+ }

/* ...otherwise the right. The VU is stereo. */
- if (snd_soc_read(codec, WM8962_PWR_MGMT_2) & WM8962_HPOUTR_PGA_ENA)
- return snd_soc_write(codec, WM8962_HPOUTR_VOLUME,
- reg_cache[WM8962_HPOUTR_VOLUME]);
+ if (ret & WM8962_HPOUTR_PGA_ENA)
+ snd_soc_write(codec, WM8962_HPOUTR_VOLUME,
+ snd_soc_read(codec, WM8962_HPOUTR_VOLUME));

- return 0;
+ return 1;
}

/* The VU bits for the speakers are in a different register to the mute
@@ -3374,7 +3376,6 @@ static int wm8962_probe(struct snd_soc_codec *codec)
int ret;
struct wm8962_priv *wm8962 = snd_soc_codec_get_drvdata(codec);
struct wm8962_pdata *pdata = dev_get_platdata(codec->dev);
- u16 *reg_cache = codec->reg_cache;
int i, trigger, irq_pol;
bool dmicclk, dmicdat;

@@ -3432,8 +3433,9 @@ static int wm8962_probe(struct snd_soc_codec *codec)

/* Put the speakers into mono mode? */
if (pdata->spk_mono)
- reg_cache[WM8962_CLASS_D_CONTROL_2]
- |= WM8962_SPK_MONO;
+ snd_soc_update_bits(codec, WM8962_CLASS_D_CONTROL_2,
+ WM8962_SPK_MONO_MASK, WM8962_SPK_MONO);
+

/* Micbias setup, detection enable and detection
* threasholds. */
diff --git a/sound/soc/tegra/tegra20_ac97.c b/sound/soc/tegra/tegra20_ac97.c
index 2f70ea7..05676c0 100644
--- a/sound/soc/tegra/tegra20_ac97.c
+++ b/sound/soc/tegra/tegra20_ac97.c
@@ -399,9 +399,9 @@ static int tegra20_ac97_platform_probe(struct platform_device *pdev)
ac97->capture_dma_data.slave_id = of_dma[1];

ac97->playback_dma_data.addr = mem->start + TEGRA20_AC97_FIFO_TX1;
- ac97->capture_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- ac97->capture_dma_data.maxburst = 4;
- ac97->capture_dma_data.slave_id = of_dma[0];
+ ac97->playback_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ ac97->playback_dma_data.maxburst = 4;
+ ac97->playback_dma_data.slave_id = of_dma[1];

ret = snd_soc_register_component(&pdev->dev, &tegra20_ac97_component,
&tegra20_ac97_dai, 1);
diff --git a/sound/soc/tegra/tegra20_spdif.c b/sound/soc/tegra/tegra20_spdif.c
index 5eaa12c..551b3c9 100644
--- a/sound/soc/tegra/tegra20_spdif.c
+++ b/sound/soc/tegra/tegra20_spdif.c
@@ -323,8 +323,8 @@ static int tegra20_spdif_platform_probe(struct platform_device *pdev)
}

spdif->playback_dma_data.addr = mem->start + TEGRA20_SPDIF_DATA_OUT;
- spdif->capture_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- spdif->capture_dma_data.maxburst = 4;
+ spdif->playback_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ spdif->playback_dma_data.maxburst = 4;
spdif->playback_dma_data.slave_id = dmareq->start;

pm_runtime_enable(&pdev->dev);
diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
index 5a1f648..274e178 100644
--- a/tools/hv/hv_kvp_daemon.c
+++ b/tools/hv/hv_kvp_daemon.c
@@ -1016,9 +1016,10 @@ kvp_get_ip_info(int family, char *if_name, int op,

if (sn_offset == 0)
strcpy(sn_str, cidr_mask);
- else
+ else {
+ strcat((char *)ip_buffer->sub_net, ";");
strcat(sn_str, cidr_mask);
- strcat((char *)ip_buffer->sub_net, ";");
+ }
sn_offset += strlen(sn_str) + 1;
}

diff --git a/tools/perf/config/utilities.mak b/tools/perf/config/utilities.mak
index 8ef3bd3..3e89719 100644
--- a/tools/perf/config/utilities.mak
+++ b/tools/perf/config/utilities.mak
@@ -173,7 +173,7 @@ _ge-abspath = $(if $(is-executable),$(1))
# Usage: absolute-executable-path-or-empty = $(call get-executable-or-default,variable,default)
#
define get-executable-or-default
-$(if $($(1)),$(call _ge_attempt,$($(1)),$(1)),$(call _ge_attempt,$(2),$(1)))
+$(if $($(1)),$(call _ge_attempt,$($(1)),$(1)),$(call _ge_attempt,$(2)))
endef
_ge_attempt = $(if $(get-executable),$(get-executable),$(_gea_warn)$(call _gea_err,$(2)))
_gea_warn = $(warning The path '$(1)' is not executable.)