Re: [3.5.y.z extended stable] Linux 3.5.7.6

From: Herton Ronaldo Krzesinski
Date: Fri Feb 22 2013 - 13:50:44 EST


diff --git a/MAINTAINERS b/MAINTAINERS
index 0d67d1b..5927882 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2663,7 +2663,7 @@ S: Maintained
F: drivers/net/ethernet/i825xx/eexpress.*

ETHERNET BRIDGE
-M: Stephen Hemminger <shemminger@xxxxxxxxxx>
+M: Stephen Hemminger <stephen@xxxxxxxxxxxxxxxxxx>
L: bridge@xxxxxxxxxxxxxxxxxxxxxxxxxx
L: netdev@xxxxxxxxxxxxxxx
W: http://www.linuxfoundation.org/en/Net:Bridge
@@ -4385,7 +4385,7 @@ S: Maintained

MARVELL GIGABIT ETHERNET DRIVERS (skge/sky2)
M: Mirko Lindner <mlindner@xxxxxxxxxxx>
-M: Stephen Hemminger <shemminger@xxxxxxxxxx>
+M: Stephen Hemminger <stephen@xxxxxxxxxxxxxxxxxx>
L: netdev@xxxxxxxxxxxxxxx
S: Maintained
F: drivers/net/ethernet/marvell/sk*
@@ -4630,7 +4630,7 @@ S: Supported
F: drivers/infiniband/hw/nes/

NETEM NETWORK EMULATOR
-M: Stephen Hemminger <shemminger@xxxxxxxxxx>
+M: Stephen Hemminger <stephen@xxxxxxxxxxxxxxxxxx>
L: netem@xxxxxxxxxxxxxxxxxxxxxxxxxx
S: Maintained
F: net/sched/sch_netem.c
diff --git a/Makefile b/Makefile
index 66e3cb7..fc43551 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 5
SUBLEVEL = 7
-EXTRAVERSION = .5
+EXTRAVERSION = .6
NAME = Saber-toothed Squirrel

# *DOCUMENTATION*
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index c5531db..747ab28 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -121,6 +121,9 @@ static int s390_next_ktime(ktime_t expires,
nsecs = ktime_to_ns(ktime_add(timespec_to_ktime(ts), expires));
do_div(nsecs, 125);
S390_lowcore.clock_comparator = sched_clock_base_cc + (nsecs << 9);
+ /* Program the maximum value if we have an overflow (== year 2042) */
+ if (unlikely(S390_lowcore.clock_comparator < sched_clock_base_cc))
+ S390_lowcore.clock_comparator = -1ULL;
set_clock_comparator(S390_lowcore.clock_comparator);
return 0;
}
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index 20e5f7b..f6d477a 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -204,7 +204,7 @@ sysexit_from_sys_call:
testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
jnz ia32_ret_from_sys_call
TRACE_IRQS_ON
- sti
+ ENABLE_INTERRUPTS(CLBR_NONE)
movl %eax,%esi /* second arg, syscall return value */
cmpl $-MAX_ERRNO,%eax /* is it an error ? */
jbe 1f
@@ -214,7 +214,7 @@ sysexit_from_sys_call:
call __audit_syscall_exit
movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
- cli
+ DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
jz \exit
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index c3520d7..3f3dd52 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -142,6 +142,11 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

+static inline unsigned long pud_pfn(pud_t pud)
+{
+ return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
+}
+
#define pte_page(pte) pfn_to_page(pte_pfn(pte))

static inline int pmd_large(pmd_t pte)
diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
index c17e982..d14fee3 100644
--- a/arch/x86/kernel/apic/x2apic_phys.c
+++ b/arch/x86/kernel/apic/x2apic_phys.c
@@ -20,18 +20,19 @@ static int set_x2apic_phys_mode(char *arg)
}
early_param("x2apic_phys", set_x2apic_phys_mode);

-static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
+static bool x2apic_fadt_phys(void)
{
- if (x2apic_phys)
- return x2apic_enabled();
- else if ((acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) &&
- (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL) &&
- x2apic_enabled()) {
+ if ((acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) &&
+ (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL)) {
printk(KERN_DEBUG "System requires x2apic physical mode\n");
- return 1;
+ return true;
}
- else
- return 0;
+ return false;
+}
+
+static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
+{
+ return x2apic_enabled() && (x2apic_phys || x2apic_fadt_phys());
}

static void
@@ -114,7 +115,7 @@ static void init_x2apic_ldr(void)

static int x2apic_phys_probe(void)
{
- if (x2apic_mode && x2apic_phys)
+ if (x2apic_mode && (x2apic_phys || x2apic_fadt_phys()))
return 1;

return apic == &apic_x2apic_phys;
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 76dcd9d..c6b10e2 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -747,13 +747,15 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
return;
}
#endif
+ /* Kernel addresses are always protection faults: */
+ if (address >= TASK_SIZE)
+ error_code |= PF_PROT;

- if (unlikely(show_unhandled_signals))
+ if (likely(show_unhandled_signals))
show_signal_msg(regs, error_code, address, tsk);

- /* Kernel addresses are always protection faults: */
tsk->thread.cr2 = address;
- tsk->thread.error_code = error_code | (address >= TASK_SIZE);
+ tsk->thread.error_code = error_code;
tsk->thread.trap_nr = X86_TRAP_PF;

force_sig_info_fault(SIGSEGV, si_code, address, tsk, 0);
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 3baff25..ce42da7 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -829,6 +829,9 @@ int kern_addr_valid(unsigned long addr)
if (pud_none(*pud))
return 0;

+ if (pud_large(*pud))
+ return pfn_valid(pud_pfn(*pud));
+
pmd = pmd_offset(pud, addr);
if (pmd_none(*pmd))
return 0;
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 6fcd4ad..3705bb0 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -86,7 +86,7 @@ EXPORT_SYMBOL(efi_enabled);

static int __init setup_noefi(char *arg)
{
- clear_bit(EFI_BOOT, &x86_efi_facility);
+ clear_bit(EFI_RUNTIME_SERVICES, &x86_efi_facility);
return 0;
}
early_param("noefi", setup_noefi);
diff --git a/drivers/atm/iphase.h b/drivers/atm/iphase.h
index 6a0955e..53ecac5 100644
--- a/drivers/atm/iphase.h
+++ b/drivers/atm/iphase.h
@@ -636,82 +636,82 @@ struct rx_buf_desc {
#define SEG_BASE IPHASE5575_FRAG_CONTROL_REG_BASE
#define REASS_BASE IPHASE5575_REASS_CONTROL_REG_BASE

-typedef volatile u_int freg_t;
+typedef volatile u_int ffreg_t;
typedef u_int rreg_t;

typedef struct _ffredn_t {
- freg_t idlehead_high; /* Idle cell header (high) */
- freg_t idlehead_low; /* Idle cell header (low) */
- freg_t maxrate; /* Maximum rate */
- freg_t stparms; /* Traffic Management Parameters */
- freg_t abrubr_abr; /* ABRUBR Priority Byte 1, TCR Byte 0 */
- freg_t rm_type; /* */
- u_int filler5[0x17 - 0x06];
- freg_t cmd_reg; /* Command register */
- u_int filler18[0x20 - 0x18];
- freg_t cbr_base; /* CBR Pointer Base */
- freg_t vbr_base; /* VBR Pointer Base */
- freg_t abr_base; /* ABR Pointer Base */
- freg_t ubr_base; /* UBR Pointer Base */
- u_int filler24;
- freg_t vbrwq_base; /* VBR Wait Queue Base */
- freg_t abrwq_base; /* ABR Wait Queue Base */
- freg_t ubrwq_base; /* UBR Wait Queue Base */
- freg_t vct_base; /* Main VC Table Base */
- freg_t vcte_base; /* Extended Main VC Table Base */
- u_int filler2a[0x2C - 0x2A];
- freg_t cbr_tab_beg; /* CBR Table Begin */
- freg_t cbr_tab_end; /* CBR Table End */
- freg_t cbr_pointer; /* CBR Pointer */
- u_int filler2f[0x30 - 0x2F];
- freg_t prq_st_adr; /* Packet Ready Queue Start Address */
- freg_t prq_ed_adr; /* Packet Ready Queue End Address */
- freg_t prq_rd_ptr; /* Packet Ready Queue read pointer */
- freg_t prq_wr_ptr; /* Packet Ready Queue write pointer */
- freg_t tcq_st_adr; /* Transmit Complete Queue Start Address*/
- freg_t tcq_ed_adr; /* Transmit Complete Queue End Address */
- freg_t tcq_rd_ptr; /* Transmit Complete Queue read pointer */
- freg_t tcq_wr_ptr; /* Transmit Complete Queue write pointer*/
- u_int filler38[0x40 - 0x38];
- freg_t queue_base; /* Base address for PRQ and TCQ */
- freg_t desc_base; /* Base address of descriptor table */
- u_int filler42[0x45 - 0x42];
- freg_t mode_reg_0; /* Mode register 0 */
- freg_t mode_reg_1; /* Mode register 1 */
- freg_t intr_status_reg;/* Interrupt Status register */
- freg_t mask_reg; /* Mask Register */
- freg_t cell_ctr_high1; /* Total cell transfer count (high) */
- freg_t cell_ctr_lo1; /* Total cell transfer count (low) */
- freg_t state_reg; /* Status register */
- u_int filler4c[0x58 - 0x4c];
- freg_t curr_desc_num; /* Contains the current descriptor num */
- freg_t next_desc; /* Next descriptor */
- freg_t next_vc; /* Next VC */
- u_int filler5b[0x5d - 0x5b];
- freg_t present_slot_cnt;/* Present slot count */
- u_int filler5e[0x6a - 0x5e];
- freg_t new_desc_num; /* New descriptor number */
- freg_t new_vc; /* New VC */
- freg_t sched_tbl_ptr; /* Schedule table pointer */
- freg_t vbrwq_wptr; /* VBR wait queue write pointer */
- freg_t vbrwq_rptr; /* VBR wait queue read pointer */
- freg_t abrwq_wptr; /* ABR wait queue write pointer */
- freg_t abrwq_rptr; /* ABR wait queue read pointer */
- freg_t ubrwq_wptr; /* UBR wait queue write pointer */
- freg_t ubrwq_rptr; /* UBR wait queue read pointer */
- freg_t cbr_vc; /* CBR VC */
- freg_t vbr_sb_vc; /* VBR SB VC */
- freg_t abr_sb_vc; /* ABR SB VC */
- freg_t ubr_sb_vc; /* UBR SB VC */
- freg_t vbr_next_link; /* VBR next link */
- freg_t abr_next_link; /* ABR next link */
- freg_t ubr_next_link; /* UBR next link */
- u_int filler7a[0x7c-0x7a];
- freg_t out_rate_head; /* Out of rate head */
- u_int filler7d[0xca-0x7d]; /* pad out to full address space */
- freg_t cell_ctr_high1_nc;/* Total cell transfer count (high) */
- freg_t cell_ctr_lo1_nc;/* Total cell transfer count (low) */
- u_int fillercc[0x100-0xcc]; /* pad out to full address space */
+ ffreg_t idlehead_high; /* Idle cell header (high) */
+ ffreg_t idlehead_low; /* Idle cell header (low) */
+ ffreg_t maxrate; /* Maximum rate */
+ ffreg_t stparms; /* Traffic Management Parameters */
+ ffreg_t abrubr_abr; /* ABRUBR Priority Byte 1, TCR Byte 0 */
+ ffreg_t rm_type; /* */
+ u_int filler5[0x17 - 0x06];
+ ffreg_t cmd_reg; /* Command register */
+ u_int filler18[0x20 - 0x18];
+ ffreg_t cbr_base; /* CBR Pointer Base */
+ ffreg_t vbr_base; /* VBR Pointer Base */
+ ffreg_t abr_base; /* ABR Pointer Base */
+ ffreg_t ubr_base; /* UBR Pointer Base */
+ u_int filler24;
+ ffreg_t vbrwq_base; /* VBR Wait Queue Base */
+ ffreg_t abrwq_base; /* ABR Wait Queue Base */
+ ffreg_t ubrwq_base; /* UBR Wait Queue Base */
+ ffreg_t vct_base; /* Main VC Table Base */
+ ffreg_t vcte_base; /* Extended Main VC Table Base */
+ u_int filler2a[0x2C - 0x2A];
+ ffreg_t cbr_tab_beg; /* CBR Table Begin */
+ ffreg_t cbr_tab_end; /* CBR Table End */
+ ffreg_t cbr_pointer; /* CBR Pointer */
+ u_int filler2f[0x30 - 0x2F];
+ ffreg_t prq_st_adr; /* Packet Ready Queue Start Address */
+ ffreg_t prq_ed_adr; /* Packet Ready Queue End Address */
+ ffreg_t prq_rd_ptr; /* Packet Ready Queue read pointer */
+ ffreg_t prq_wr_ptr; /* Packet Ready Queue write pointer */
+ ffreg_t tcq_st_adr; /* Transmit Complete Queue Start Address*/
+ ffreg_t tcq_ed_adr; /* Transmit Complete Queue End Address */
+ ffreg_t tcq_rd_ptr; /* Transmit Complete Queue read pointer */
+ ffreg_t tcq_wr_ptr; /* Transmit Complete Queue write pointer*/
+ u_int filler38[0x40 - 0x38];
+ ffreg_t queue_base; /* Base address for PRQ and TCQ */
+ ffreg_t desc_base; /* Base address of descriptor table */
+ u_int filler42[0x45 - 0x42];
+ ffreg_t mode_reg_0; /* Mode register 0 */
+ ffreg_t mode_reg_1; /* Mode register 1 */
+ ffreg_t intr_status_reg;/* Interrupt Status register */
+ ffreg_t mask_reg; /* Mask Register */
+ ffreg_t cell_ctr_high1; /* Total cell transfer count (high) */
+ ffreg_t cell_ctr_lo1; /* Total cell transfer count (low) */
+ ffreg_t state_reg; /* Status register */
+ u_int filler4c[0x58 - 0x4c];
+ ffreg_t curr_desc_num; /* Contains the current descriptor num */
+ ffreg_t next_desc; /* Next descriptor */
+ ffreg_t next_vc; /* Next VC */
+ u_int filler5b[0x5d - 0x5b];
+ ffreg_t present_slot_cnt;/* Present slot count */
+ u_int filler5e[0x6a - 0x5e];
+ ffreg_t new_desc_num; /* New descriptor number */
+ ffreg_t new_vc; /* New VC */
+ ffreg_t sched_tbl_ptr; /* Schedule table pointer */
+ ffreg_t vbrwq_wptr; /* VBR wait queue write pointer */
+ ffreg_t vbrwq_rptr; /* VBR wait queue read pointer */
+ ffreg_t abrwq_wptr; /* ABR wait queue write pointer */
+ ffreg_t abrwq_rptr; /* ABR wait queue read pointer */
+ ffreg_t ubrwq_wptr; /* UBR wait queue write pointer */
+ ffreg_t ubrwq_rptr; /* UBR wait queue read pointer */
+ ffreg_t cbr_vc; /* CBR VC */
+ ffreg_t vbr_sb_vc; /* VBR SB VC */
+ ffreg_t abr_sb_vc; /* ABR SB VC */
+ ffreg_t ubr_sb_vc; /* UBR SB VC */
+ ffreg_t vbr_next_link; /* VBR next link */
+ ffreg_t abr_next_link; /* ABR next link */
+ ffreg_t ubr_next_link; /* UBR next link */
+ u_int filler7a[0x7c-0x7a];
+ ffreg_t out_rate_head; /* Out of rate head */
+ u_int filler7d[0xca-0x7d]; /* pad out to full address space */
+ ffreg_t cell_ctr_high1_nc;/* Total cell transfer count (high) */
+ ffreg_t cell_ctr_lo1_nc;/* Total cell transfer count (low) */
+ u_int fillercc[0x100-0xcc]; /* pad out to full address space */
} ffredn_t;

typedef struct _rfredn_t {
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index cdf2f54..f77e341 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1808,7 +1808,8 @@ static void virtcons_remove(struct virtio_device *vdev)
/* Disable interrupts for vqs */
vdev->config->reset(vdev);
/* Finish up work that's lined up */
- cancel_work_sync(&portdev->control_work);
+ if (use_multiport(portdev))
+ cancel_work_sync(&portdev->control_work);

list_for_each_entry_safe(port, port2, &portdev->ports, list)
unplug_port(port);
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 2aa7fdb..ccac010 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1147,14 +1147,18 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) {
radeon_wait_for_vblank(rdev, i);
tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
}
} else {
tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
if (!(tmp & EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE)) {
radeon_wait_for_vblank(rdev, i);
tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
}
}
/* wait for the next frame */
@@ -1179,6 +1183,8 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
blackout &= ~BLACKOUT_MODE_MASK;
WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
}
+ /* wait for the MC to settle */
+ udelay(100);
}

void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
@@ -1212,11 +1218,15 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
if (ASIC_IS_DCE6(rdev)) {
tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
} else {
tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
}
/* wait for the next frame */
frame_count = radeon_get_vblank_counter(rdev, i);
@@ -1853,9 +1863,20 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
WREG32(HDP_ADDR_CONFIG, gb_addr_config);

- tmp = gb_addr_config & NUM_PIPES_MASK;
- tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends,
- EVERGREEN_MAX_BACKENDS, disabled_rb_mask);
+ if ((rdev->config.evergreen.max_backends == 1) &&
+ (rdev->flags & RADEON_IS_IGP)) {
+ if ((disabled_rb_mask & 3) == 1) {
+ /* RB0 disabled, RB1 enabled */
+ tmp = 0x11111111;
+ } else {
+ /* RB1 disabled, RB0 enabled */
+ tmp = 0x00000000;
+ }
+ } else {
+ tmp = gb_addr_config & NUM_PIPES_MASK;
+ tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends,
+ EVERGREEN_MAX_BACKENDS, disabled_rb_mask);
+ }
WREG32(GB_BACKEND_MAP, tmp);

WREG32(CGTS_SYS_TCC_DISABLE, 0);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index bff6272..0c7476d 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1383,12 +1383,15 @@ u32 r6xx_remap_render_backend(struct radeon_device *rdev,
u32 disabled_rb_mask)
{
u32 rendering_pipe_num, rb_num_width, req_rb_num;
- u32 pipe_rb_ratio, pipe_rb_remain;
+ u32 pipe_rb_ratio, pipe_rb_remain, tmp;
u32 data = 0, mask = 1 << (max_rb_num - 1);
unsigned i, j;

/* mask out the RBs that don't exist on that asic */
- disabled_rb_mask |= (0xff << max_rb_num) & 0xff;
+ tmp = disabled_rb_mask | ((0xff << max_rb_num) & 0xff);
+ /* make sure at least one RB is available */
+ if ((tmp & 0xff) != 0xff)
+ disabled_rb_mask = tmp;

rendering_pipe_num = 1 << tiling_pipe_num;
req_rb_num = total_max_rb_num - r600_count_pipe_bits(disabled_rb_mask);
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 558e5c0..7139341 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -2455,6 +2455,14 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
1),
ATOM_DEVICE_CRT1_SUPPORT);
}
+ /* RV100 board with external TDMS bit mis-set.
+ * Actually uses internal TMDS, clear the bit.
+ */
+ if (dev->pdev->device == 0x5159 &&
+ dev->pdev->subsystem_vendor == 0x1014 &&
+ dev->pdev->subsystem_device == 0x029A) {
+ tmp &= ~(1 << 4);
+ }
if ((tmp >> 4) & 0x1) {
devices |= ATOM_DEVICE_DFP2_SUPPORT;
radeon_add_legacy_encoder(dev,
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 4ea672e..70eb18a 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -1111,8 +1111,10 @@ radeon_user_framebuffer_create(struct drm_device *dev,
}

radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL);
- if (radeon_fb == NULL)
+ if (radeon_fb == NULL) {
+ drm_gem_object_unreference_unlocked(obj);
return ERR_PTR(-ENOMEM);
+ }

ret = radeon_framebuffer_init(dev, radeon_fb, mode_cmd, obj);
if (ret) {
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 7843b36..727ebfe 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -219,6 +219,9 @@ int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsi
{
int r;

+ /* make sure we aren't trying to allocate more space than there is on the ring */
+ if (ndw > (ring->ring_size / 4))
+ return -ENOMEM;
/* Align requested size with padding so unlock_commit can
* pad safely */
ndw = (ndw + ring->align_mask) & ~ring->align_mask;
diff --git a/drivers/gpu/drm/radeon/reg_srcs/cayman b/drivers/gpu/drm/radeon/reg_srcs/cayman
index 0f656b1..a072fa8 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/cayman
+++ b/drivers/gpu/drm/radeon/reg_srcs/cayman
@@ -1,5 +1,6 @@
cayman 0x9400
0x0000802C GRBM_GFX_INDEX
+0x00008040 WAIT_UNTIL
0x000084FC CP_STRMOUT_CNTL
0x000085F0 CP_COHER_CNTL
0x000085F4 CP_COHER_SIZE
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 5ddfcc7..dc48cd1 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -296,6 +296,9 @@
#define USB_VENDOR_ID_EZKEY 0x0518
#define USB_DEVICE_ID_BTC_8193 0x0002

+#define USB_VENDOR_ID_FORMOSA 0x147a
+#define USB_DEVICE_ID_FORMOSA_IR_RECEIVER 0xe03e
+
#define USB_VENDOR_ID_FREESCALE 0x15A2
#define USB_DEVICE_ID_FREESCALE_MX28 0x004F

diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 8865fa3..1a4bc41 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -70,6 +70,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS },
diff --git a/drivers/isdn/gigaset/capi.c b/drivers/isdn/gigaset/capi.c
index 27e4a3e..f45b5b0 100644
--- a/drivers/isdn/gigaset/capi.c
+++ b/drivers/isdn/gigaset/capi.c
@@ -248,6 +248,8 @@ static inline void dump_rawmsg(enum debuglevel level, const char *tag,
CAPIMSG_APPID(data), CAPIMSG_MSGID(data), l,
CAPIMSG_CONTROL(data));
l -= 12;
+ if (l <= 0)
+ return;
dbgline = kmalloc(3 * l, GFP_ATOMIC);
if (!dbgline)
return;
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index 21a3d77..64647d4 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -446,8 +446,12 @@ static void c_can_setup_receive_object(struct net_device *dev, int iface,

priv->write_reg(priv, &priv->regs->ifregs[iface].mask1,
IFX_WRITE_LOW_16BIT(mask));
+
+ /* According to C_CAN documentation, the reserved bit
+ * in IFx_MASK2 register is fixed 1
+ */
priv->write_reg(priv, &priv->regs->ifregs[iface].mask2,
- IFX_WRITE_HIGH_16BIT(mask));
+ IFX_WRITE_HIGH_16BIT(mask) | BIT(13));

priv->write_reg(priv, &priv->regs->ifregs[iface].arb1,
IFX_WRITE_LOW_16BIT(id));
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 15f8b00..4da6a86 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -1136,14 +1136,26 @@ static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

-#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
- tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
- MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
- MII_TG3_AUXCTL_ACTL_TX_6DB)
+static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
+{
+ u32 val;
+ int err;

-#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
- tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
- MII_TG3_AUXCTL_ACTL_TX_6DB);
+ err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
+
+ if (err)
+ return err;
+
+ if (enable)
+ val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
+ else
+ val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
+
+ err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
+ val | MII_TG3_AUXCTL_ACTL_TX_6DB);
+
+ return err;
+}

static int tg3_bmcr_reset(struct tg3 *tp)
{
@@ -2076,7 +2088,7 @@ static void tg3_phy_apply_otp(struct tg3 *tp)

otp = tp->phy_otp;

- if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
+ if (tg3_phy_toggle_auxctl_smdsp(tp, true))
return;

phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
@@ -2101,7 +2113,7 @@ static void tg3_phy_apply_otp(struct tg3 *tp)
((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

- TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+ tg3_phy_toggle_auxctl_smdsp(tp, false);
}

static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
@@ -2137,9 +2149,9 @@ static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)

if (!tp->setlpicnt) {
if (current_link_up == 1 &&
- !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
+ !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
- TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+ tg3_phy_toggle_auxctl_smdsp(tp, false);
}

val = tr32(TG3_CPMU_EEE_MODE);
@@ -2155,11 +2167,11 @@ static void tg3_phy_eee_enable(struct tg3 *tp)
(GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
tg3_flag(tp, 57765_CLASS)) &&
- !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
+ !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
val = MII_TG3_DSP_TAP26_ALNOKO |
MII_TG3_DSP_TAP26_RMRXSTO;
tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
- TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+ tg3_phy_toggle_auxctl_smdsp(tp, false);
}

val = tr32(TG3_CPMU_EEE_MODE);
@@ -2303,7 +2315,7 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
tg3_writephy(tp, MII_CTRL1000,
CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);

- err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
+ err = tg3_phy_toggle_auxctl_smdsp(tp, true);
if (err)
return err;

@@ -2324,7 +2336,7 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);

- TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+ tg3_phy_toggle_auxctl_smdsp(tp, false);

tg3_writephy(tp, MII_CTRL1000, phy9_orig);

@@ -2413,10 +2425,10 @@ static int tg3_phy_reset(struct tg3 *tp)

out:
if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
- !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
+ !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
tg3_phydsp_write(tp, 0x201f, 0x2aaa);
tg3_phydsp_write(tp, 0x000a, 0x0323);
- TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+ tg3_phy_toggle_auxctl_smdsp(tp, false);
}

if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
@@ -2425,14 +2437,14 @@ out:
}

if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
- if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
+ if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
tg3_phydsp_write(tp, 0x000a, 0x310b);
tg3_phydsp_write(tp, 0x201f, 0x9506);
tg3_phydsp_write(tp, 0x401f, 0x14e2);
- TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+ tg3_phy_toggle_auxctl_smdsp(tp, false);
}
} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
- if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
+ if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
@@ -2441,7 +2453,7 @@ out:
} else
tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);

- TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+ tg3_phy_toggle_auxctl_smdsp(tp, false);
}
}

@@ -3858,7 +3870,7 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
tw32(TG3_CPMU_EEE_MODE,
tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);

- err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
+ err = tg3_phy_toggle_auxctl_smdsp(tp, true);
if (!err) {
u32 err2;

@@ -3891,7 +3903,7 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
MII_TG3_DSP_CH34TP2_HIBW01);
}

- err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+ err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
if (!err)
err = err2;
}
@@ -6594,6 +6606,9 @@ static void tg3_poll_controller(struct net_device *dev)
int i;
struct tg3 *tp = netdev_priv(dev);

+ if (tg3_irq_sync(tp))
+ return;
+
for (i = 0; i < tp->irq_cnt; i++)
tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
}
@@ -15556,6 +15571,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
tp->pm_cap = pm_cap;
tp->rx_mode = TG3_DEF_RX_MODE;
tp->tx_mode = TG3_DEF_TX_MODE;
+ tp->irq_sync = 1;

if (tg3_debug > 0)
tp->msg_enable = tg3_debug;
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 8b0a0e4..8a3cd87 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -546,6 +546,10 @@ static int desc_get_rx_status(struct xgmac_priv *priv, struct xgmac_dma_desc *p)
return -1;
}

+ /* All frames should fit into a single buffer */
+ if (!(status & RXDESC_FIRST_SEG) || !(status & RXDESC_LAST_SEG))
+ return -1;
+
/* Check if packet has checksum already */
if ((status & RXDESC_FRAME_TYPE) && (status & RXDESC_EXT_STATUS) &&
!(ext_status & RXDESC_IP_PAYLOAD_MASK))
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index c5c4c0e..793ee6b 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -573,6 +573,11 @@ static inline u8 is_udp_pkt(struct sk_buff *skb)
return val;
}

+static inline bool is_ipv4_pkt(struct sk_buff *skb)
+{
+ return skb->protocol == ntohs(ETH_P_IP) && ip_hdr(skb)->version == 4;
+}
+
static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
{
u32 addr;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index bd5cf7e..dc36f5c 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -576,6 +576,11 @@ static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
return vlan_tag;
}

+static int be_vlan_tag_chk(struct be_adapter *adapter, struct sk_buff *skb)
+{
+ return vlan_tx_tag_present(skb) || adapter->pvid;
+}
+
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
@@ -703,33 +708,56 @@ dma_err:
return 0;
}

+static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
+ struct sk_buff *skb)
+{
+ u16 vlan_tag = 0;
+
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (unlikely(!skb))
+ return skb;
+
+ if (vlan_tx_tag_present(skb)) {
+ vlan_tag = be_get_tx_vlan_tag(adapter, skb);
+ __vlan_put_tag(skb, vlan_tag);
+ skb->vlan_tci = 0;
+ }
+
+ return skb;
+}
+
static netdev_tx_t be_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
struct be_queue_info *txq = &txo->q;
+ struct iphdr *ip = NULL;
u32 wrb_cnt = 0, copied = 0;
- u32 start = txq->head;
+ u32 start = txq->head, eth_hdr_len;
bool dummy_wrb, stopped = false;

- /* For vlan tagged pkts, BE
- * 1) calculates checksum even when CSO is not requested
- * 2) calculates checksum wrongly for padded pkt less than
- * 60 bytes long.
- * As a workaround disable TX vlan offloading in such cases.
+ eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
+ VLAN_ETH_HLEN : ETH_HLEN;
+
+ /* HW has a bug which considers padding bytes as legal
+ * and modifies the IPv4 hdr's 'tot_len' field
*/
- if (unlikely(vlan_tx_tag_present(skb) &&
- (skb->ip_summed != CHECKSUM_PARTIAL || skb->len <= 60))) {
- skb = skb_share_check(skb, GFP_ATOMIC);
- if (unlikely(!skb))
- goto tx_drop;
+ if (skb->len <= 60 && be_vlan_tag_chk(adapter, skb) &&
+ is_ipv4_pkt(skb)) {
+ ip = (struct iphdr *)ip_hdr(skb);
+ pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
+ }

- skb = __vlan_put_tag(skb, be_get_tx_vlan_tag(adapter, skb));
+ /* HW has a bug wherein it will calculate CSUM for VLAN
+ * pkts even though it is disabled.
+ * Manually insert VLAN in pkt.
+ */
+ if (skb->ip_summed != CHECKSUM_PARTIAL &&
+ be_vlan_tag_chk(adapter, skb)) {
+ skb = be_insert_vlan_in_pkt(adapter, skb);
if (unlikely(!skb))
goto tx_drop;
-
- skb->vlan_tci = 0;
}

wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
index 76edbc1..cccbbe8 100644
--- a/drivers/net/ethernet/intel/e1000e/defines.h
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
@@ -233,6 +233,7 @@
#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */
#define E1000_CTRL_LANPHYPC_OVERRIDE 0x00010000 /* SW control of LANPHYPC */
#define E1000_CTRL_LANPHYPC_VALUE 0x00020000 /* SW value of LANPHYPC */
+#define E1000_CTRL_MEHE 0x00080000 /* Memory Error Handling Enable */
#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */
@@ -391,6 +392,12 @@

#define E1000_PBS_16K E1000_PBA_16K

+/* Uncorrectable/correctable ECC Error counts and enable bits */
+#define E1000_PBECCSTS_CORR_ERR_CNT_MASK 0x000000FF
+#define E1000_PBECCSTS_UNCORR_ERR_CNT_MASK 0x0000FF00
+#define E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT 8
+#define E1000_PBECCSTS_ECC_ENABLE 0x00010000
+
#define IFS_MAX 80
#define IFS_MIN 40
#define IFS_RATIO 4
@@ -410,6 +417,7 @@
#define E1000_ICR_RXSEQ 0x00000008 /* Rx sequence error */
#define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. threshold (0) */
#define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */
+#define E1000_ICR_ECCER 0x00400000 /* Uncorrectable ECC Error */
#define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */
#define E1000_ICR_RXQ0 0x00100000 /* Rx Queue 0 Interrupt */
#define E1000_ICR_RXQ1 0x00200000 /* Rx Queue 1 Interrupt */
@@ -446,6 +454,7 @@
#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */
#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */
#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* Rx timer intr */
+#define E1000_IMS_ECCER E1000_ICR_ECCER /* Uncorrectable ECC Error */
#define E1000_IMS_RXQ0 E1000_ICR_RXQ0 /* Rx Queue 0 Interrupt */
#define E1000_IMS_RXQ1 E1000_ICR_RXQ1 /* Rx Queue 1 Interrupt */
#define E1000_IMS_TXQ0 E1000_ICR_TXQ0 /* Tx Queue 0 Interrupt */
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index fa47b85..63a51ae 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -314,6 +314,8 @@ struct e1000_adapter {

struct napi_struct napi;

+ unsigned int uncorr_errors; /* uncorrectable ECC errors */
+ unsigned int corr_errors; /* correctable ECC errors */
unsigned int restart_queue;
u32 txd_cmd;

diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 905e214..3d3f3bb 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -108,6 +108,8 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
E1000_STAT("dropped_smbus", stats.mgpdc),
E1000_STAT("rx_dma_failed", rx_dma_failed),
E1000_STAT("tx_dma_failed", tx_dma_failed),
+ E1000_STAT("uncorr_ecc_errors", uncorr_errors),
+ E1000_STAT("corr_ecc_errors", corr_errors),
};

#define E1000_GLOBAL_STATS_LEN ARRAY_SIZE(e1000_gstrings_stats)
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index d37bfd9..a64f22e 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -77,6 +77,7 @@ enum e1e_registers {
#define E1000_POEMB E1000_PHY_CTRL /* PHY OEM Bits */
E1000_PBA = 0x01000, /* Packet Buffer Allocation - RW */
E1000_PBS = 0x01008, /* Packet Buffer Size */
+ E1000_PBECCSTS = 0x0100C, /* Packet Buffer ECC Status - RW */
E1000_EEMNGCTL = 0x01010, /* MNG EEprom Control */
E1000_EEWR = 0x0102C, /* EEPROM Write Register - RW */
E1000_FLOP = 0x0103C, /* FLASH Opcode Register */
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index e3a7b07..abc9770 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -3690,6 +3690,17 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
if (hw->mac.type == e1000_ich8lan)
reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
ew32(RFCTL, reg);
+
+ /* Enable ECC on Lynxpoint */
+ if (hw->mac.type == e1000_pch_lpt) {
+ reg = er32(PBECCSTS);
+ reg |= E1000_PBECCSTS_ECC_ENABLE;
+ ew32(PBECCSTS, reg);
+
+ reg = er32(CTRL);
+ reg |= E1000_CTRL_MEHE;
+ ew32(CTRL, reg);
+ }
}

/**
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 7e750ae..2c78ef0 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -1671,6 +1671,23 @@ static irqreturn_t e1000_intr_msi(int irq, void *data)
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}

+ /* Reset on uncorrectable ECC error */
+ if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) {
+ u32 pbeccsts = er32(PBECCSTS);
+
+ adapter->corr_errors +=
+ pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
+ adapter->uncorr_errors +=
+ (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
+ E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
+
+ /* Do the reset outside of interrupt context */
+ schedule_work(&adapter->reset_task);
+
+ /* return immediately since reset is imminent */
+ return IRQ_HANDLED;
+ }
+
if (napi_schedule_prep(&adapter->napi)) {
adapter->total_tx_bytes = 0;
adapter->total_tx_packets = 0;
@@ -1738,6 +1755,23 @@ static irqreturn_t e1000_intr(int irq, void *data)
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}

+ /* Reset on uncorrectable ECC error */
+ if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) {
+ u32 pbeccsts = er32(PBECCSTS);
+
+ adapter->corr_errors +=
+ pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
+ adapter->uncorr_errors +=
+ (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
+ E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
+
+ /* Do the reset outside of interrupt context */
+ schedule_work(&adapter->reset_task);
+
+ /* return immediately since reset is imminent */
+ return IRQ_HANDLED;
+ }
+
if (napi_schedule_prep(&adapter->napi)) {
adapter->total_tx_bytes = 0;
adapter->total_tx_packets = 0;
@@ -2101,6 +2135,8 @@ static void e1000_irq_enable(struct e1000_adapter *adapter)
if (adapter->msix_entries) {
ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574);
ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC);
+ } else if (hw->mac.type == e1000_pch_lpt) {
+ ew32(IMS, IMS_ENABLE_MASK | E1000_IMS_ECCER);
} else {
ew32(IMS, IMS_ENABLE_MASK);
}
@@ -4249,6 +4285,16 @@ static void e1000e_update_stats(struct e1000_adapter *adapter)
adapter->stats.mgptc += er32(MGTPTC);
adapter->stats.mgprc += er32(MGTPRC);
adapter->stats.mgpdc += er32(MGTPDC);
+
+ /* Correctable ECC Errors */
+ if (hw->mac.type == e1000_pch_lpt) {
+ u32 pbeccsts = er32(PBECCSTS);
+ adapter->corr_errors +=
+ pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
+ adapter->uncorr_errors +=
+ (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
+ E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
+ }
}

/**
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 019d856..8ba03c9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -633,10 +633,15 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
ring->tx_csum++;
}

- /* Copy dst mac address to wqe */
- ethh = (struct ethhdr *)skb->data;
- tx_desc->ctrl.srcrb_flags16[0] = get_unaligned((__be16 *)ethh->h_dest);
- tx_desc->ctrl.imm = get_unaligned((__be32 *)(ethh->h_dest + 2));
+ if (mlx4_is_mfunc(mdev->dev) || priv->validate_loopback) {
+ /* Copy dst mac address to wqe. This allows loopback in eSwitch,
+ * so that VFs and PF can communicate with each other
+ */
+ ethh = (struct ethhdr *)skb->data;
+ tx_desc->ctrl.srcrb_flags16[0] = get_unaligned((__be16 *)ethh->h_dest);
+ tx_desc->ctrl.imm = get_unaligned((__be32 *)(ethh->h_dest + 2));
+ }
+
/* Handle LSO (TSO) packets */
if (lso_header_size) {
/* Mark opcode as LSO */
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index a0313de..c749b82 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -1545,15 +1545,8 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
int i;

if (msi_x) {
- /* In multifunction mode each function gets 2 msi-X vectors
- * one for data path completions anf the other for asynch events
- * or command completions */
- if (mlx4_is_mfunc(dev)) {
- nreq = 2;
- } else {
- nreq = min_t(int, dev->caps.num_eqs -
- dev->caps.reserved_eqs, nreq);
- }
+ nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
+ nreq);

entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
if (!entries)
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
index 8694124..fdddfcc 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
@@ -144,7 +144,7 @@ void netxen_release_tx_buffers(struct netxen_adapter *adapter)
buffrag->length, PCI_DMA_TODEVICE);
buffrag->dma = 0ULL;
}
- for (j = 0; j < cmd_buf->frag_count; j++) {
+ for (j = 1; j < cmd_buf->frag_count; j++) {
buffrag++;
if (buffrag->dma) {
pci_unmap_page(adapter->pdev, buffrag->dma,
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index a77c558..d6a8218 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -1963,10 +1963,12 @@ unwind:
while (--i >= 0) {
nf = &pbuf->frag_array[i+1];
pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
+ nf->dma = 0ULL;
}

nf = &pbuf->frag_array[0];
pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
+ nf->dma = 0ULL;

out_err:
return -ENOMEM;
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 0e09bb8..67f73ae 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -5812,13 +5812,6 @@ process_pkt:
tp->rx_stats.bytes += pkt_size;
u64_stats_update_end(&tp->rx_stats.syncp);
}
-
- /* Work around for AMD plateform. */
- if ((desc->opts2 & cpu_to_le32(0xfffe000)) &&
- (tp->mac_version == RTL_GIGA_MAC_VER_05)) {
- desc->opts2 = 0;
- cur_rx++;
- }
}

count = cur_rx - tp->cur_rx;
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index 0459c09..046526e0 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -1802,7 +1802,7 @@ static void rhine_tx(struct net_device *dev)
rp->tx_skbuff[entry]->len,
PCI_DMA_TODEVICE);
}
- dev_kfree_skb_irq(rp->tx_skbuff[entry]);
+ dev_kfree_skb(rp->tx_skbuff[entry]);
rp->tx_skbuff[entry] = NULL;
entry = (++rp->dirty_tx) % TX_RING_SIZE;
}
@@ -2011,11 +2011,7 @@ static void rhine_slow_event_task(struct work_struct *work)
if (intr_status & IntrPCIErr)
netif_warn(rp, hw, dev, "PCI error\n");

- napi_disable(&rp->napi);
- rhine_irq_disable(rp);
- /* Slow and safe. Consider __napi_schedule as a replacement ? */
- napi_enable(&rp->napi);
- napi_schedule(&rp->napi);
+ iowrite16(RHINE_EVENT & 0xffff, rp->base + IntrEnable);

out_unlock:
mutex_unlock(&rp->task_lock);
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 32eb94e..a3d4707 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -77,6 +77,11 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb,

skb_orphan(skb);

+ /* Before queueing this packet to netif_rx(),
+ * make sure dst is refcounted.
+ */
+ skb_dst_force(skb);
+
skb->protocol = eth_type_trans(skb, dev);

/* it's OK to use per_cpu_ptr() because BHs are off */
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 66a9bfe..62ce7b8 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -822,7 +822,10 @@ static int macvlan_changelink(struct net_device *dev,

static size_t macvlan_get_size(const struct net_device *dev)
{
- return nla_total_size(4);
+ return (0
+ + nla_total_size(4) /* IFLA_MACVLAN_MODE */
+ + nla_total_size(2) /* IFLA_MACVLAN_FLAGS */
+ );
}

static int macvlan_fill_info(struct sk_buff *skb,
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index 74f0457..9ca1b9c 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -1553,7 +1553,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
dev_err(adapter->dev, "SCAN_RESP: too many AP returned (%d)\n",
scan_rsp->number_of_sets);
ret = -1;
- goto done;
+ goto check_next_scan;
}

bytes_left = le16_to_cpu(scan_rsp->bss_descript_size);
@@ -1624,7 +1624,8 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
if (!beacon_size || beacon_size > bytes_left) {
bss_info += bytes_left;
bytes_left = 0;
- return -1;
+ ret = -1;
+ goto check_next_scan;
}

/* Initialize the current working beacon pointer for this BSS
@@ -1680,7 +1681,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
dev_err(priv->adapter->dev,
"%s: bytes left < IE length\n",
__func__);
- goto done;
+ goto check_next_scan;
}
if (element_id == WLAN_EID_DS_PARAMS) {
channel = *(u8 *) (current_ptr +
@@ -1744,6 +1745,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
}
}

+check_next_scan:
spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
if (list_empty(&adapter->scan_pending_q)) {
spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
@@ -1782,7 +1784,6 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, true);
}

-done:
return ret;
}

diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index f4c852c..4110a6d 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -980,7 +980,8 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
is_tx ? "Tx" : "Rx");

if (is_tx) {
- rtl_lps_leave(hw);
+ schedule_work(&rtlpriv->
+ works.lps_leave_work);
ppsc->last_delaylps_stamp_jiffies =
jiffies;
}
@@ -990,7 +991,7 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
}
} else if (ETH_P_ARP == ether_type) {
if (is_tx) {
- rtl_lps_leave(hw);
+ schedule_work(&rtlpriv->works.lps_leave_work);
ppsc->last_delaylps_stamp_jiffies = jiffies;
}

@@ -1000,7 +1001,7 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
"802.1X %s EAPOL pkt!!\n", is_tx ? "Tx" : "Rx");

if (is_tx) {
- rtl_lps_leave(hw);
+ schedule_work(&rtlpriv->works.lps_leave_work);
ppsc->last_delaylps_stamp_jiffies = jiffies;
}

diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index 8fa144f..17cd028 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -542,8 +542,8 @@ static void _rtl_rx_pre_process(struct ieee80211_hw *hw, struct sk_buff *skb)
WARN_ON(skb_queue_empty(&rx_queue));
while (!skb_queue_empty(&rx_queue)) {
_skb = skb_dequeue(&rx_queue);
- _rtl_usb_rx_process_agg(hw, skb);
- ieee80211_rx_irqsafe(hw, skb);
+ _rtl_usb_rx_process_agg(hw, _skb);
+ ieee80211_rx_irqsafe(hw, _skb);
}
}

diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 94b79c3..9d7f172 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -151,6 +151,9 @@ void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb);
/* Notify xenvif that ring now has space to send an skb to the frontend */
void xenvif_notify_tx_completion(struct xenvif *vif);

+/* Prevent the device from generating any further traffic. */
+void xenvif_carrier_off(struct xenvif *vif);
+
/* Returns number of ring slots required to send an skb to the frontend */
unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb);

diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index b7d41f8..b8c5193 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -343,17 +343,22 @@ err:
return err;
}

-void xenvif_disconnect(struct xenvif *vif)
+void xenvif_carrier_off(struct xenvif *vif)
{
struct net_device *dev = vif->dev;
- if (netif_carrier_ok(dev)) {
- rtnl_lock();
- netif_carrier_off(dev); /* discard queued packets */
- if (netif_running(dev))
- xenvif_down(vif);
- rtnl_unlock();
- xenvif_put(vif);
- }
+
+ rtnl_lock();
+ netif_carrier_off(dev); /* discard queued packets */
+ if (netif_running(dev))
+ xenvif_down(vif);
+ rtnl_unlock();
+ xenvif_put(vif);
+}
+
+void xenvif_disconnect(struct xenvif *vif)
+{
+ if (netif_carrier_ok(vif->dev))
+ xenvif_carrier_off(vif);

atomic_dec(&vif->refcnt);
wait_event(vif->waiting_to_free, atomic_read(&vif->refcnt) == 0);
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index f4a6fca..f3c3a68 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -146,7 +146,8 @@ void xen_netbk_remove_xenvif(struct xenvif *vif)
atomic_dec(&netbk->netfront_count);
}

-static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx);
+static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
+ u8 status);
static void make_tx_response(struct xenvif *vif,
struct xen_netif_tx_request *txp,
s8 st);
@@ -850,7 +851,7 @@ static void netbk_tx_err(struct xenvif *vif,

do {
make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
- if (cons >= end)
+ if (cons == end)
break;
txp = RING_GET_REQUEST(&vif->tx, cons++);
} while (1);
@@ -859,6 +860,13 @@ static void netbk_tx_err(struct xenvif *vif,
xenvif_put(vif);
}

+static void netbk_fatal_tx_err(struct xenvif *vif)
+{
+ netdev_err(vif->dev, "fatal error; disabling device\n");
+ xenvif_carrier_off(vif);
+ xenvif_put(vif);
+}
+
static int netbk_count_requests(struct xenvif *vif,
struct xen_netif_tx_request *first,
struct xen_netif_tx_request *txp,
@@ -872,19 +880,22 @@ static int netbk_count_requests(struct xenvif *vif,

do {
if (frags >= work_to_do) {
- netdev_dbg(vif->dev, "Need more frags\n");
+ netdev_err(vif->dev, "Need more frags\n");
+ netbk_fatal_tx_err(vif);
return -frags;
}

if (unlikely(frags >= MAX_SKB_FRAGS)) {
- netdev_dbg(vif->dev, "Too many frags\n");
+ netdev_err(vif->dev, "Too many frags\n");
+ netbk_fatal_tx_err(vif);
return -frags;
}

memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + frags),
sizeof(*txp));
if (txp->size > first->size) {
- netdev_dbg(vif->dev, "Frags galore\n");
+ netdev_err(vif->dev, "Frag is bigger than frame.\n");
+ netbk_fatal_tx_err(vif);
return -frags;
}

@@ -892,8 +903,9 @@ static int netbk_count_requests(struct xenvif *vif,
frags++;

if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
- netdev_dbg(vif->dev, "txp->offset: %x, size: %u\n",
+ netdev_err(vif->dev, "txp->offset: %x, size: %u\n",
txp->offset, txp->size);
+ netbk_fatal_tx_err(vif);
return -frags;
}
} while ((txp++)->flags & XEN_NETTXF_more_data);
@@ -937,7 +949,7 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
pending_idx = netbk->pending_ring[index];
page = xen_netbk_alloc_page(netbk, skb, pending_idx);
if (!page)
- return NULL;
+ goto err;

gop->source.u.ref = txp->gref;
gop->source.domid = vif->domid;
@@ -959,6 +971,17 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
}

return gop;
+err:
+ /* Unwind, freeing all pages and sending error responses. */
+ while (i-- > start) {
+ xen_netbk_idx_release(netbk, frag_get_pending_idx(&frags[i]),
+ XEN_NETIF_RSP_ERROR);
+ }
+ /* The head too, if necessary. */
+ if (start)
+ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
+
+ return NULL;
}

static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
@@ -967,30 +990,20 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
{
struct gnttab_copy *gop = *gopp;
u16 pending_idx = *((u16 *)skb->data);
- struct pending_tx_info *pending_tx_info = netbk->pending_tx_info;
- struct xenvif *vif = pending_tx_info[pending_idx].vif;
- struct xen_netif_tx_request *txp;
struct skb_shared_info *shinfo = skb_shinfo(skb);
int nr_frags = shinfo->nr_frags;
int i, err, start;

/* Check status of header. */
err = gop->status;
- if (unlikely(err)) {
- pending_ring_idx_t index;
- index = pending_index(netbk->pending_prod++);
- txp = &pending_tx_info[pending_idx].req;
- make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
- netbk->pending_ring[index] = pending_idx;
- xenvif_put(vif);
- }
+ if (unlikely(err))
+ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);

/* Skip first skb fragment if it is on same page as header fragment. */
start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);

for (i = start; i < nr_frags; i++) {
int j, newerr;
- pending_ring_idx_t index;

pending_idx = frag_get_pending_idx(&shinfo->frags[i]);

@@ -999,16 +1012,12 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
if (likely(!newerr)) {
/* Had a previous error? Invalidate this fragment. */
if (unlikely(err))
- xen_netbk_idx_release(netbk, pending_idx);
+ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
continue;
}

/* Error on this fragment: respond to client with an error. */
- txp = &netbk->pending_tx_info[pending_idx].req;
- make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
- index = pending_index(netbk->pending_prod++);
- netbk->pending_ring[index] = pending_idx;
- xenvif_put(vif);
+ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);

/* Not the first error? Preceding frags already invalidated. */
if (err)
@@ -1016,10 +1025,10 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,

/* First error: invalidate header and preceding fragments. */
pending_idx = *((u16 *)skb->data);
- xen_netbk_idx_release(netbk, pending_idx);
+ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
for (j = start; j < i; j++) {
pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
- xen_netbk_idx_release(netbk, pending_idx);
+ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
}

/* Remember the error: invalidate all subsequent fragments. */
@@ -1053,7 +1062,7 @@ static void xen_netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb)

/* Take an extra reference to offset xen_netbk_idx_release */
get_page(netbk->mmap_pages[pending_idx]);
- xen_netbk_idx_release(netbk, pending_idx);
+ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
}
}

@@ -1066,7 +1075,8 @@ static int xen_netbk_get_extras(struct xenvif *vif,

do {
if (unlikely(work_to_do-- <= 0)) {
- netdev_dbg(vif->dev, "Missing extra info\n");
+ netdev_err(vif->dev, "Missing extra info\n");
+ netbk_fatal_tx_err(vif);
return -EBADR;
}

@@ -1075,8 +1085,9 @@ static int xen_netbk_get_extras(struct xenvif *vif,
if (unlikely(!extra.type ||
extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
vif->tx.req_cons = ++cons;
- netdev_dbg(vif->dev,
+ netdev_err(vif->dev,
"Invalid extra type: %d\n", extra.type);
+ netbk_fatal_tx_err(vif);
return -EINVAL;
}

@@ -1092,13 +1103,15 @@ static int netbk_set_skb_gso(struct xenvif *vif,
struct xen_netif_extra_info *gso)
{
if (!gso->u.gso.size) {
- netdev_dbg(vif->dev, "GSO size must not be zero.\n");
+ netdev_err(vif->dev, "GSO size must not be zero.\n");
+ netbk_fatal_tx_err(vif);
return -EINVAL;
}

/* Currently only TCPv4 S.O. is supported. */
if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
- netdev_dbg(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
+ netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
+ netbk_fatal_tx_err(vif);
return -EINVAL;
}

@@ -1235,9 +1248,25 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)

/* Get a netif from the list with work to do. */
vif = poll_net_schedule_list(netbk);
+ /* This can sometimes happen because the test of
+ * list_empty(net_schedule_list) at the top of the
+ * loop is unlocked. Just go back and have another
+ * look.
+ */
if (!vif)
continue;

+ if (vif->tx.sring->req_prod - vif->tx.req_cons >
+ XEN_NETIF_TX_RING_SIZE) {
+ netdev_err(vif->dev,
+ "Impossible number of requests. "
+ "req_prod %d, req_cons %d, size %ld\n",
+ vif->tx.sring->req_prod, vif->tx.req_cons,
+ XEN_NETIF_TX_RING_SIZE);
+ netbk_fatal_tx_err(vif);
+ continue;
+ }
+
RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do);
if (!work_to_do) {
xenvif_put(vif);
@@ -1265,17 +1294,14 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
work_to_do = xen_netbk_get_extras(vif, extras,
work_to_do);
idx = vif->tx.req_cons;
- if (unlikely(work_to_do < 0)) {
- netbk_tx_err(vif, &txreq, idx);
+ if (unlikely(work_to_do < 0))
continue;
- }
}

ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do);
- if (unlikely(ret < 0)) {
- netbk_tx_err(vif, &txreq, idx - ret);
+ if (unlikely(ret < 0))
continue;
- }
+
idx += ret;

if (unlikely(txreq.size < ETH_HLEN)) {
@@ -1287,11 +1313,11 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)

/* No crossing a page as the payload mustn't fragment. */
if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
- netdev_dbg(vif->dev,
+ netdev_err(vif->dev,
"txreq.offset: %x, size: %u, end: %lu\n",
txreq.offset, txreq.size,
(txreq.offset&~PAGE_MASK) + txreq.size);
- netbk_tx_err(vif, &txreq, idx);
+ netbk_fatal_tx_err(vif);
continue;
}

@@ -1319,8 +1345,8 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

if (netbk_set_skb_gso(vif, skb, gso)) {
+ /* Failure in netbk_set_skb_gso is fatal. */
kfree_skb(skb);
- netbk_tx_err(vif, &txreq, idx);
continue;
}
}
@@ -1419,7 +1445,7 @@ static void xen_netbk_tx_submit(struct xen_netbk *netbk)
txp->size -= data_len;
} else {
/* Schedule a response immediately. */
- xen_netbk_idx_release(netbk, pending_idx);
+ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
}

if (txp->flags & XEN_NETTXF_csum_blank)
@@ -1474,7 +1500,8 @@ static void xen_netbk_tx_action(struct xen_netbk *netbk)

}

-static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx)
+static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
+ u8 status)
{
struct xenvif *vif;
struct pending_tx_info *pending_tx_info;
@@ -1488,7 +1515,7 @@ static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx)

vif = pending_tx_info->vif;

- make_tx_response(vif, &pending_tx_info->req, XEN_NETIF_RSP_OKAY);
+ make_tx_response(vif, &pending_tx_info->req, status);

index = pending_index(netbk->pending_prod++);
netbk->pending_ring[index] = pending_idx;
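
The new sanity check in the xen-netback hunks above leans on unsigned wrap-around arithmetic: req_prod and req_cons are free-running ring indices, so their difference is the number of outstanding requests even after the counters wrap, and a well-behaved frontend can never have more than a ring's worth outstanding. A minimal userspace sketch of that check (the constant and values below are made-up stand-ins, not the real xen-netback types):

#include <stdio.h>
#include <stdint.h>

#define RING_SIZE 256u	/* stand-in for XEN_NETIF_TX_RING_SIZE */

/* Free-running producer/consumer indices, as on a Xen shared ring. */
static int ring_request_count_ok(uint32_t req_prod, uint32_t req_cons)
{
	/* Unsigned subtraction gives the outstanding count even if the
	 * counters have wrapped past UINT32_MAX. */
	return (req_prod - req_cons) <= RING_SIZE;
}

int main(void)
{
	/* Normal case: 3 requests outstanding. */
	printf("%d\n", ring_request_count_ok(103, 100));	/* 1 */
	/* Wrapped counters: still only 8 outstanding. */
	printf("%d\n", ring_request_count_ok(3, 0xfffffffbu));	/* 1 */
	/* Buggy or malicious frontend claiming 1000 outstanding. */
	printf("%d\n", ring_request_count_ok(1100, 100));	/* 0 */
	return 0;
}

When the check fails, the patched code calls netbk_fatal_tx_err() and tears the interface down instead of silently dropping the request, which is the same policy the other netdev_err()/netbk_fatal_tx_err() conversions above apply to malformed requests.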
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index fd77e2b..eae55c7 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -19,6 +19,8 @@ static void pci_free_resources(struct pci_dev *dev)

static void pci_stop_dev(struct pci_dev *dev)
{
+ pci_pme_active(dev, false);
+
if (dev->is_added) {
pci_proc_detach_device(dev);
pci_remove_sysfs_dev_files(dev);
diff --git a/drivers/regulator/max8998.c b/drivers/regulator/max8998.c
index b3b2984..b03c22d 100644
--- a/drivers/regulator/max8998.c
+++ b/drivers/regulator/max8998.c
@@ -65,7 +65,7 @@ static const struct voltage_map_desc ldo9_voltage_map_desc = {
.min = 2800000, .step = 100000, .max = 3100000,
};
static const struct voltage_map_desc ldo10_voltage_map_desc = {
- .min = 95000, .step = 50000, .max = 1300000,
+ .min = 950000, .step = 50000, .max = 1300000,
};
static const struct voltage_map_desc ldo1213_voltage_map_desc = {
.min = 800000, .step = 100000, .max = 3300000,
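
The max8998 fix above is a units bug: the voltage_map_desc fields are in microvolts, so 95000 meant 0.095 V where LDO10 really starts at 0.95 V. A quick sanity check of the corrected table (illustrative arithmetic only; the regulator core performs this selector-to-voltage mapping itself):

#include <stdio.h>

/* Corrected LDO10 map from the hunk above, in microvolts. */
#define LDO10_MIN_UV	950000
#define LDO10_STEP_UV	 50000
#define LDO10_MAX_UV	1300000

int main(void)
{
	int sel, n_sel = (LDO10_MAX_UV - LDO10_MIN_UV) / LDO10_STEP_UV + 1;

	/* 0.95 V .. 1.30 V in 50 mV steps -> 8 selectable voltages. */
	for (sel = 0; sel < n_sel; sel++)
		printf("selector %d -> %d uV\n",
		       sel, LDO10_MIN_UV + sel * LDO10_STEP_UV);
	return 0;
}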
diff --git a/drivers/rtc/rtc-isl1208.c b/drivers/rtc/rtc-isl1208.c
index dd2aeee..8f8c8ae 100644
--- a/drivers/rtc/rtc-isl1208.c
+++ b/drivers/rtc/rtc-isl1208.c
@@ -494,6 +494,7 @@ isl1208_rtc_interrupt(int irq, void *data)
{
unsigned long timeout = jiffies + msecs_to_jiffies(1000);
struct i2c_client *client = data;
+ struct rtc_device *rtc = i2c_get_clientdata(client);
int handled = 0, sr, err;

/*
@@ -516,6 +517,8 @@ isl1208_rtc_interrupt(int irq, void *data)
if (sr & ISL1208_REG_SR_ALM) {
dev_dbg(&client->dev, "alarm!\n");

+ rtc_update_irq(rtc, 1, RTC_IRQF | RTC_AF);
+
/* Clear the alarm */
sr &= ~ISL1208_REG_SR_ALM;
sr = i2c_smbus_write_byte_data(client, ISL1208_REG_SR, sr);
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
index cc05339..b232996 100644
--- a/drivers/rtc/rtc-pl031.c
+++ b/drivers/rtc/rtc-pl031.c
@@ -44,6 +44,7 @@
#define RTC_YMR 0x34 /* Year match register */
#define RTC_YLR 0x38 /* Year data load register */

+#define RTC_CR_EN (1 << 0) /* counter enable bit */
#define RTC_CR_CWEN (1 << 26) /* Clockwatch enable bit */

#define RTC_TCR_EN (1 << 1) /* Periodic timer enable bit */
@@ -304,7 +305,7 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
int ret;
struct pl031_local *ldata;
struct rtc_class_ops *ops = id->data;
- unsigned long time;
+ unsigned long time, data;

ret = amba_request_regions(adev, NULL);
if (ret)
@@ -331,10 +332,13 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
dev_dbg(&adev->dev, "designer ID = 0x%02x\n", ldata->hw_designer);
dev_dbg(&adev->dev, "revision = 0x%01x\n", ldata->hw_revision);

+ data = readl(ldata->base + RTC_CR);
/* Enable the clockwatch on ST Variants */
if (ldata->hw_designer == AMBA_VENDOR_ST)
- writel(readl(ldata->base + RTC_CR) | RTC_CR_CWEN,
- ldata->base + RTC_CR);
+ data |= RTC_CR_CWEN;
+ else
+ data |= RTC_CR_EN;
+ writel(data, ldata->base + RTC_CR);

/*
* On ST PL031 variants, the RTC reset value does not provide correct
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 8542099..0208baa 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -1167,6 +1167,8 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)

int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
{
+ int block_size = dev->se_sub_dev->se_dev_attrib.block_size;
+
if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
pr_err("dev[%p]: Unable to change SE Device"
" fabric_max_sectors while dev_export_obj: %d count exists\n",
@@ -1204,8 +1206,12 @@ int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
/*
* Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
*/
+ if (!block_size) {
+ block_size = 512;
+ pr_warn("Defaulting to 512 for zero block_size\n");
+ }
fabric_max_sectors = se_dev_align_max_sectors(fabric_max_sectors,
- dev->se_sub_dev->se_dev_attrib.block_size);
+ block_size);

dev->se_sub_dev->se_dev_attrib.fabric_max_sectors = fabric_max_sectors;
pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 92c0229..1e8b3bd 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -2488,11 +2488,6 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
return 0;
}
} else {
- if (!(portstatus & USB_PORT_STAT_CONNECTION) ||
- hub_port_warm_reset_required(hub,
- portstatus))
- return -ENOTCONN;
-
return 0;
}

@@ -2685,6 +2680,23 @@ static int check_port_resume_type(struct usb_device *udev,
}

#ifdef CONFIG_USB_SUSPEND
+/*
+ * usb_disable_function_remotewakeup - disable usb3.0
+ * device's function remote wakeup
+ * @udev: target device
+ *
+ * Assume there's only one function on the USB 3.0
+ * device and disable remote wake for the first
+ * interface. FIXME if the interface association
+ * descriptor shows there's more than one function.
+ */
+static int usb_disable_function_remotewakeup(struct usb_device *udev)
+{
+ return usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+ USB_REQ_CLEAR_FEATURE, USB_RECIP_INTERFACE,
+ USB_INTRF_FUNC_SUSPEND, 0, NULL, 0,
+ USB_CTRL_SET_TIMEOUT);
+}

/*
* usb_port_suspend - suspend a usb device's upstream port
@@ -2797,12 +2809,19 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
dev_dbg(hub->intfdev, "can't suspend port %d, status %d\n",
port1, status);
/* paranoia: "should not happen" */
- if (udev->do_remote_wakeup)
- (void) usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
- USB_REQ_CLEAR_FEATURE, USB_RECIP_DEVICE,
- USB_DEVICE_REMOTE_WAKEUP, 0,
- NULL, 0,
- USB_CTRL_SET_TIMEOUT);
+ if (udev->do_remote_wakeup) {
+ if (!hub_is_superspeed(hub->hdev)) {
+ (void) usb_control_msg(udev,
+ usb_sndctrlpipe(udev, 0),
+ USB_REQ_CLEAR_FEATURE,
+ USB_RECIP_DEVICE,
+ USB_DEVICE_REMOTE_WAKEUP, 0,
+ NULL, 0,
+ USB_CTRL_SET_TIMEOUT);
+ } else
+ (void) usb_disable_function_remotewakeup(udev);
+
+ }

/* Try to enable USB2 hardware LPM again */
if (udev->usb2_hw_lpm_capable == 1)
@@ -2892,20 +2911,30 @@ static int finish_port_resume(struct usb_device *udev)
* udev->reset_resume
*/
} else if (udev->actconfig && !udev->reset_resume) {
- le16_to_cpus(&devstatus);
- if (devstatus & (1 << USB_DEVICE_REMOTE_WAKEUP)) {
- status = usb_control_msg(udev,
- usb_sndctrlpipe(udev, 0),
- USB_REQ_CLEAR_FEATURE,
+ if (!hub_is_superspeed(udev->parent)) {
+ le16_to_cpus(&devstatus);
+ if (devstatus & (1 << USB_DEVICE_REMOTE_WAKEUP))
+ status = usb_control_msg(udev,
+ usb_sndctrlpipe(udev, 0),
+ USB_REQ_CLEAR_FEATURE,
USB_RECIP_DEVICE,
- USB_DEVICE_REMOTE_WAKEUP, 0,
- NULL, 0,
- USB_CTRL_SET_TIMEOUT);
- if (status)
- dev_dbg(&udev->dev,
- "disable remote wakeup, status %d\n",
- status);
+ USB_DEVICE_REMOTE_WAKEUP, 0,
+ NULL, 0,
+ USB_CTRL_SET_TIMEOUT);
+ } else {
+ status = usb_get_status(udev, USB_RECIP_INTERFACE, 0,
+ &devstatus);
+ le16_to_cpus(&devstatus);
+ if (!status && devstatus & (USB_INTRF_STAT_FUNC_RW_CAP
+ | USB_INTRF_STAT_FUNC_RW))
+ status =
+ usb_disable_function_remotewakeup(udev);
}
+
+ if (status)
+ dev_dbg(&udev->dev,
+ "disable remote wakeup, status %d\n",
+ status);
status = 0;
}
return status;
@@ -4503,14 +4532,9 @@ static void hub_events(void)
* SS.Inactive state.
*/
if (hub_port_warm_reset_required(hub, portstatus)) {
- int status;
-
dev_dbg(hub_dev, "warm reset port %d\n", i);
- status = hub_port_reset(hub, i, NULL,
+ hub_port_reset(hub, i, NULL,
HUB_BH_RESET_TIME, true);
- if (status < 0)
- hub_port_disable(hub, i, 1);
- connect_change = 0;
}

if (connect_change)
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index fc9e7cc..349d3fe 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -613,7 +613,11 @@ ehci_hub_status_data (struct usb_hcd *hcd, char *buf)
status = STS_PCD;
}
}
- /* FIXME autosuspend idle root hubs */
+
+ /* If a resume is in progress, make sure it can finish */
+ if (ehci->resuming_ports)
+ mod_timer(&hcd->rh_timer, jiffies + msecs_to_jiffies(25));
+
spin_unlock_irqrestore (&ehci->lock, flags);
return status ? retval : 0;
}
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index 33182c6..ccc9f70 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -236,7 +236,7 @@ static inline unsigned char tt_start_uframe(struct ehci_hcd *ehci, __hc32 mask)
}

static const unsigned char
-max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 30, 0 };
+max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 125, 25 };

/* carryover low/fullspeed bandwidth that crosses uframe boundries */
static inline void carryover_tt_bandwidth(unsigned short tt_usecs[8])
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index eb5563a..7893351 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -780,6 +780,7 @@ void usb_enable_xhci_ports(struct pci_dev *xhci_pdev)
"defaulting to EHCI.\n");
dev_warn(&xhci_pdev->dev,
"USB 3.0 devices will work at USB 2.0 speeds.\n");
+ usb_disable_xhci_ports(xhci_pdev);
return;
}

diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index c3c174d..6f3043f 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1698,7 +1698,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
faked_port_index + 1);
if (slot_id && xhci->devs[slot_id])
xhci_ring_device(xhci, slot_id);
- if (bus_state->port_remote_wakeup && (1 << faked_port_index)) {
+ if (bus_state->port_remote_wakeup & (1 << faked_port_index)) {
bus_state->port_remote_wakeup &=
~(1 << faked_port_index);
xhci_test_and_clear_bit(xhci, port_array,
@@ -2587,6 +2587,8 @@ cleanup:
(trb_comp_code != COMP_STALL &&
trb_comp_code != COMP_BABBLE))
xhci_urb_free_priv(xhci, urb_priv);
+ else
+ kfree(urb_priv);

usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
if ((urb->actual_length != urb->transfer_buffer_length &&
@@ -3106,7 +3108,7 @@ static u32 xhci_v1_0_td_remainder(int running_total, int trb_buff_len,
* running_total.
*/
packets_transferred = (running_total + trb_buff_len) /
- usb_endpoint_maxp(&urb->ep->desc);
+ GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc));

if ((total_packet_count - packets_transferred) > 31)
return 31 << 17;
@@ -3640,7 +3642,8 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
td_len = urb->iso_frame_desc[i].length;
td_remain_len = td_len;
total_packet_count = DIV_ROUND_UP(td_len,
- usb_endpoint_maxp(&urb->ep->desc));
+ GET_MAX_PACKET(
+ usb_endpoint_maxp(&urb->ep->desc)));
/* A zero-length transfer still involves at least one packet. */
if (total_packet_count == 0)
total_packet_count++;
@@ -3662,9 +3665,11 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
td = urb_priv->td[i];
for (j = 0; j < trbs_per_td; j++) {
u32 remainder = 0;
- field = TRB_TBC(burst_count) | TRB_TLBPC(residue);
+ field = 0;

if (first_trb) {
+ field = TRB_TBC(burst_count) |
+ TRB_TLBPC(residue);
/* Queue the isoc TRB */
field |= TRB_TYPE(TRB_ISOC);
/* Assume URB_ISO_ASAP is set */
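
The xhci-ring.c change from '&&' to '&' above is a classic bitmask slip: port_remote_wakeup holds one bit per port, and the logical operator made the test true for every port whenever any bit was set. A tiny demonstration with made-up values:

#include <stdio.h>

int main(void)
{
	unsigned int port_remote_wakeup = 1u << 3;	/* only port 3 armed */
	unsigned int port;

	for (port = 0; port < 4; port++) {
		int logical = port_remote_wakeup && (1u << port);	/* old: 1 for all */
		int bitwise = (port_remote_wakeup & (1u << port)) != 0;	/* new */
		printf("port %u: && -> %d, & -> %d\n", port, logical, bitwise);
	}
	return 0;
}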
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 653139b..4c1fa24 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -590,6 +590,7 @@ static struct usb_device_id id_table_combined [] = {
/*
* ELV devices:
*/
+ { USB_DEVICE(FTDI_ELV_VID, FTDI_ELV_WS300_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_USR_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_MSM1_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_KL100_PID) },
@@ -676,6 +677,7 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(FTDI_VID, XSENS_CONVERTER_5_PID) },
{ USB_DEVICE(FTDI_VID, XSENS_CONVERTER_6_PID) },
{ USB_DEVICE(FTDI_VID, XSENS_CONVERTER_7_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_OMNI1509) },
{ USB_DEVICE(MOBILITY_VID, MOBILITY_USB_SERIAL_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ACTIVE_ROBOTS_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_MHAM_KW_PID) },
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index fa5d560..9d359e18 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -147,6 +147,11 @@
#define XSENS_CONVERTER_6_PID 0xD38E
#define XSENS_CONVERTER_7_PID 0xD38F

+/**
+ * Zolix (www.zolix.com.cb) product ids
+ */
+#define FTDI_OMNI1509 0xD491 /* Omni1509 embedded USB-serial */
+
/*
* NDI (www.ndigital.com) product ids
*/
@@ -204,7 +209,7 @@

/*
* ELV USB devices submitted by Christian Abt of ELV (www.elv.de).
- * All of these devices use FTDI's vendor ID (0x0403).
+ * Almost all of these devices use FTDI's vendor ID (0x0403).
* Further IDs taken from ELV Windows .inf file.
*
* The previously included PID for the UO 100 module was incorrect.
@@ -212,6 +217,8 @@
*
* Armin Laeuger originally sent the PID for the UM 100 module.
*/
+#define FTDI_ELV_VID 0x1B1F /* ELV AG */
+#define FTDI_ELV_WS300_PID 0xC006 /* eQ3 WS 300 PC II */
#define FTDI_ELV_USR_PID 0xE000 /* ELV Universal-Sound-Recorder */
#define FTDI_ELV_MSM1_PID 0xE001 /* ELV Mini-Sound-Modul */
#define FTDI_ELV_KL100_PID 0xE002 /* ELV Kfz-Leistungsmesser KL 100 */
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index bafe6b9..5f4ce8e 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -242,6 +242,7 @@ static void option_instat_callback(struct urb *urb);
#define TELIT_PRODUCT_CC864_DUAL 0x1005
#define TELIT_PRODUCT_CC864_SINGLE 0x1006
#define TELIT_PRODUCT_DE910_DUAL 0x1010
+#define TELIT_PRODUCT_LE920 0x1200

/* ZTE PRODUCTS */
#define ZTE_VENDOR_ID 0x19d2
@@ -453,6 +454,10 @@ static void option_instat_callback(struct urb *urb);
#define TPLINK_VENDOR_ID 0x2357
#define TPLINK_PRODUCT_MA180 0x0201

+/* Changhong products */
+#define CHANGHONG_VENDOR_ID 0x2077
+#define CHANGHONG_PRODUCT_CH690 0x7001
+
/* some devices interfaces need special handling due to a number of reasons */
enum option_blacklist_reason {
OPTION_BLACKLIST_NONE = 0,
@@ -534,6 +539,11 @@ static const struct option_blacklist_info zte_1255_blacklist = {
.reserved = BIT(3) | BIT(4),
};

+static const struct option_blacklist_info telit_le920_blacklist = {
+ .sendsetup = BIT(0),
+ .reserved = BIT(1) | BIT(5),
+};
+
static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
@@ -784,6 +794,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_DUAL) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_SINGLE) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
+ .driver_info = (kernel_ulong_t)&telit_le920_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&net_intf1_blacklist },
@@ -1318,6 +1330,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T) },
{ USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180),
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE(CHANGHONG_VENDOR_ID, CHANGHONG_PRODUCT_CH690) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, option_ids);
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 996015c..4302733 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -55,6 +55,7 @@ static const struct usb_device_id id_table[] = {
{DEVICE_G1K(0x05c6, 0x9221)}, /* Generic Gobi QDL device */
{DEVICE_G1K(0x05c6, 0x9231)}, /* Generic Gobi QDL device */
{DEVICE_G1K(0x1f45, 0x0001)}, /* Unknown Gobi QDL device */
+ {DEVICE_G1K(0x1bc7, 0x900e)}, /* Telit Gobi QDL device */

/* Gobi 2000 devices */
{USB_DEVICE(0x1410, 0xa010)}, /* Novatel Gobi 2000 QDL device */
diff --git a/drivers/usb/storage/initializers.c b/drivers/usb/storage/initializers.c
index 105d900..16b0bf0 100644
--- a/drivers/usb/storage/initializers.c
+++ b/drivers/usb/storage/initializers.c
@@ -92,8 +92,8 @@ int usb_stor_ucr61s2b_init(struct us_data *us)
return 0;
}

-/* This places the HUAWEI E220 devices in multi-port mode */
-int usb_stor_huawei_e220_init(struct us_data *us)
+/* This places the HUAWEI usb dongles in multi-port mode */
+static int usb_stor_huawei_feature_init(struct us_data *us)
{
int result;

@@ -104,3 +104,75 @@ int usb_stor_huawei_e220_init(struct us_data *us)
US_DEBUGP("Huawei mode set result is %d\n", result);
return 0;
}
+
+/*
+ * It sends a SCSI switch command called 'rewind' to the Huawei dongle.
+ * When the dongle receives this command for the first time,
+ * it reboots immediately. After rebooting, it ignores this command,
+ * so it is unnecessary to read its response.
+ */
+static int usb_stor_huawei_scsi_init(struct us_data *us)
+{
+ int result = 0;
+ int act_len = 0;
+ struct bulk_cb_wrap *bcbw = (struct bulk_cb_wrap *) us->iobuf;
+ char rewind_cmd[] = {0x11, 0x06, 0x20, 0x00, 0x00, 0x01, 0x01, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+
+ bcbw->Signature = cpu_to_le32(US_BULK_CB_SIGN);
+ bcbw->Tag = 0;
+ bcbw->DataTransferLength = 0;
+ bcbw->Flags = bcbw->Lun = 0;
+ bcbw->Length = sizeof(rewind_cmd);
+ memset(bcbw->CDB, 0, sizeof(bcbw->CDB));
+ memcpy(bcbw->CDB, rewind_cmd, sizeof(rewind_cmd));
+
+ result = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe, bcbw,
+ US_BULK_CB_WRAP_LEN, &act_len);
+ US_DEBUGP("transfer actual length=%d, result=%d\n", act_len, result);
+ return result;
+}
+
+/*
+ * Check whether this is a supported Huawei USB dongle.
+ * Huawei assigns the following product IDs to all of its
+ * mobile broadband dongles, including new dongles
+ * released in the future.
+ * If a product ID is not in this list, the device is not
+ * a Huawei mobile broadband dongle.
+ */
+static int usb_stor_huawei_dongles_pid(struct us_data *us)
+{
+ struct usb_interface_descriptor *idesc;
+ int idProduct;
+
+ idesc = &us->pusb_intf->cur_altsetting->desc;
+ idProduct = us->pusb_dev->descriptor.idProduct;
+ /* If the first interface is the CD-ROM one,
+ * the dongle is in single-port mode
+ * and a switch command needs to be sent. */
+ if (idesc && idesc->bInterfaceNumber == 0) {
+ if ((idProduct == 0x1001)
+ || (idProduct == 0x1003)
+ || (idProduct == 0x1004)
+ || (idProduct >= 0x1401 && idProduct <= 0x1500)
+ || (idProduct >= 0x1505 && idProduct <= 0x1600)
+ || (idProduct >= 0x1c02 && idProduct <= 0x2202)) {
+ return 1;
+ }
+ }
+ return 0;
+}
+
+int usb_stor_huawei_init(struct us_data *us)
+{
+ int result = 0;
+
+ if (usb_stor_huawei_dongles_pid(us)) {
+ if (us->pusb_dev->descriptor.idProduct >= 0x1446)
+ result = usb_stor_huawei_scsi_init(us);
+ else
+ result = usb_stor_huawei_feature_init(us);
+ }
+ return result;
+}
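
For the Huawei mode switch above, usb_stor_huawei_init() only acts when the device enumerates with its CD-ROM interface first and the product ID falls in one of the known ranges, with PIDs from 0x1446 upward taking the newer SCSI 'rewind' path. A stand-alone mirror of that classification (hypothetical helper for illustration, not the driver code):

#include <stdio.h>

/* Mirror of the PID ranges checked in usb_stor_huawei_dongles_pid(). */
static int is_huawei_dongle_pid(int pid)
{
	return pid == 0x1001 || pid == 0x1003 || pid == 0x1004 ||
	       (pid >= 0x1401 && pid <= 0x1500) ||
	       (pid >= 0x1505 && pid <= 0x1600) ||
	       (pid >= 0x1c02 && pid <= 0x2202);
}

int main(void)
{
	int pids[] = { 0x1001, 0x1446, 0x1505, 0x2203 };
	int i;

	for (i = 0; i < 4; i++) {
		int pid = pids[i];

		if (!is_huawei_dongle_pid(pid))
			printf("0x%04x: not a known Huawei dongle\n", pid);
		else if (pid >= 0x1446)
			printf("0x%04x: SCSI 'rewind' init\n", pid);
		else
			printf("0x%04x: feature (E220-style) init\n", pid);
	}
	return 0;
}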
diff --git a/drivers/usb/storage/initializers.h b/drivers/usb/storage/initializers.h
index 529327f..5376d4f 100644
--- a/drivers/usb/storage/initializers.h
+++ b/drivers/usb/storage/initializers.h
@@ -46,5 +46,5 @@ int usb_stor_euscsi_init(struct us_data *us);
* flash reader */
int usb_stor_ucr61s2b_init(struct us_data *us);

-/* This places the HUAWEI E220 devices in multi-port mode */
-int usb_stor_huawei_e220_init(struct us_data *us);
+/* This places the HUAWEI usb dongles in multi-port mode */
+int usb_stor_huawei_init(struct us_data *us);
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index dd2c64f..fff5d10 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1515,335 +1515,10 @@ UNUSUAL_DEV( 0x1210, 0x0003, 0x0100, 0x0100,
/* Reported by fangxiaozhi <huananhu@xxxxxxxxxx>
* This brings the HUAWEI data card devices into multi-port mode
*/
-UNUSUAL_DEV( 0x12d1, 0x1001, 0x0000, 0x0000,
+UNUSUAL_VENDOR_INTF(0x12d1, 0x08, 0x06, 0x50,
"HUAWEI MOBILE",
"Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1003, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1004, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1401, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1402, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1403, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1404, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1405, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1406, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1407, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1408, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1409, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x140A, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x140B, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x140C, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x140D, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x140E, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x140F, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1410, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1411, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1412, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1413, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1414, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1415, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1416, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1417, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1418, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1419, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x141A, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x141B, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x141C, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x141D, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x141E, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x141F, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1420, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1421, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1422, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1423, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1424, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1425, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1426, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1427, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1428, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1429, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x142A, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x142B, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x142C, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x142D, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x142E, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x142F, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1430, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1431, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1432, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1433, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1434, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1435, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1436, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1437, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1438, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x1439, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x143A, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x143B, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x143C, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x143D, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x143E, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
- 0),
-UNUSUAL_DEV( 0x12d1, 0x143F, 0x0000, 0x0000,
- "HUAWEI MOBILE",
- "Mass Storage",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_init,
0),

/* Reported by Vilius Bilinkevicius <vilisas AT xxx DOT lt) */
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index e23c30a..2f232a2 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -120,6 +120,17 @@ MODULE_PARM_DESC(quirks, "supplemental list of device IDs and their quirks");
.useTransport = use_transport, \
}

+#define UNUSUAL_VENDOR_INTF(idVendor, cl, sc, pr, \
+ vendor_name, product_name, use_protocol, use_transport, \
+ init_function, Flags) \
+{ \
+ .vendorName = vendor_name, \
+ .productName = product_name, \
+ .useProtocol = use_protocol, \
+ .useTransport = use_transport, \
+ .initFunction = init_function, \
+}
+
static struct us_unusual_dev us_unusual_dev_list[] = {
# include "unusual_devs.h"
{ } /* Terminating entry */
@@ -131,6 +142,7 @@ static struct us_unusual_dev for_dynamic_ids =
#undef UNUSUAL_DEV
#undef COMPLIANT_DEV
#undef USUAL_DEV
+#undef UNUSUAL_VENDOR_INTF

#ifdef CONFIG_LOCKDEP

diff --git a/drivers/usb/storage/usual-tables.c b/drivers/usb/storage/usual-tables.c
index b969279..a9b5f2e 100644
--- a/drivers/usb/storage/usual-tables.c
+++ b/drivers/usb/storage/usual-tables.c
@@ -46,6 +46,20 @@
{ USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, useProto, useTrans), \
.driver_info = ((useType)<<24) }

+/* Match a device by vendor ID and interface descriptors */
+#define UNUSUAL_VENDOR_INTF(id_vendor, cl, sc, pr, \
+ vendorName, productName, useProtocol, useTransport, \
+ initFunction, flags) \
+{ \
+ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO \
+ | USB_DEVICE_ID_MATCH_VENDOR, \
+ .idVendor = (id_vendor), \
+ .bInterfaceClass = (cl), \
+ .bInterfaceSubClass = (sc), \
+ .bInterfaceProtocol = (pr), \
+ .driver_info = (flags) \
+}
+
struct usb_device_id usb_storage_usb_ids[] = {
# include "unusual_devs.h"
{ } /* Terminating entry */
@@ -57,6 +71,7 @@ MODULE_DEVICE_TABLE(usb, usb_storage_usb_ids);
#undef UNUSUAL_DEV
#undef COMPLIANT_DEV
#undef USUAL_DEV
+#undef UNUSUAL_VENDOR_INTF


/*
diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
index 0b6387c..29990c9 100644
--- a/fs/nilfs2/ioctl.c
+++ b/fs/nilfs2/ioctl.c
@@ -666,8 +666,11 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
if (ret < 0)
printk(KERN_ERR "NILFS: GC failed during preparation: "
"cannot read source blocks: err=%d\n", ret);
- else
+ else {
+ if (nilfs_sb_need_update(nilfs))
+ set_nilfs_discontinued(nilfs);
ret = nilfs_clean_segments(inode->i_sb, argv, kbufs);
+ }

nilfs_remove_all_gcinodes(nilfs);
clear_nilfs_gc_running(nilfs);
diff --git a/include/linux/usb/ch9.h b/include/linux/usb/ch9.h
index d1d732c..17de73a 100644
--- a/include/linux/usb/ch9.h
+++ b/include/linux/usb/ch9.h
@@ -152,6 +152,12 @@
#define USB_INTRF_FUNC_SUSPEND_LP (1 << (8 + 0))
#define USB_INTRF_FUNC_SUSPEND_RW (1 << (8 + 1))

+/*
+ * Interface status, Figure 9-5 USB 3.0 spec
+ */
+#define USB_INTRF_STAT_FUNC_RW_CAP 1
+#define USB_INTRF_STAT_FUNC_RW 2
+
#define USB_ENDPOINT_HALT 0 /* IN/OUT will STALL */

/* Bit array elements as returned by the USB_REQ_GET_STATUS request. */
diff --git a/kernel/resource.c b/kernel/resource.c
index e1d2b8e..7de982e 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -758,6 +758,7 @@ static void __init __reserve_region_with_split(struct resource *root,
struct resource *parent = root;
struct resource *conflict;
struct resource *res = kzalloc(sizeof(*res), GFP_ATOMIC);
+ struct resource *next_res = NULL;

if (!res)
return;
@@ -767,21 +768,46 @@ static void __init __reserve_region_with_split(struct resource *root,
res->end = end;
res->flags = IORESOURCE_BUSY;

- conflict = __request_resource(parent, res);
- if (!conflict)
- return;
+ while (1) {

- /* failed, split and try again */
- kfree(res);
+ conflict = __request_resource(parent, res);
+ if (!conflict) {
+ if (!next_res)
+ break;
+ res = next_res;
+ next_res = NULL;
+ continue;
+ }

- /* conflict covered whole area */
- if (conflict->start <= start && conflict->end >= end)
- return;
+ /* conflict covered whole area */
+ if (conflict->start <= res->start &&
+ conflict->end >= res->end) {
+ kfree(res);
+ WARN_ON(next_res);
+ break;
+ }
+
+ /* failed, split and try again */
+ if (conflict->start > res->start) {
+ end = res->end;
+ res->end = conflict->start - 1;
+ if (conflict->end < end) {
+ next_res = kzalloc(sizeof(*next_res),
+ GFP_ATOMIC);
+ if (!next_res) {
+ kfree(res);
+ break;
+ }
+ next_res->name = name;
+ next_res->start = conflict->end + 1;
+ next_res->end = end;
+ next_res->flags = IORESOURCE_BUSY;
+ }
+ } else {
+ res->start = conflict->end + 1;
+ }
+ }

- if (conflict->start > start)
- __reserve_region_with_split(root, start, conflict->start-1, name);
- if (conflict->end < end)
- __reserve_region_with_split(root, conflict->end+1, end, name);
}

void __init reserve_region_with_split(struct resource *root,
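
The __reserve_region_with_split() rewrite above trades recursion for a loop that keeps at most one deferred right-hand interval (next_res), so a long run of conflicts can no longer chew through the early-boot stack. A simplified userspace model of the same splitting loop (assumed flat conflict list instead of the kernel's resource tree):

#include <stdio.h>

/* Simplified model: ranges that are already reserved, non-overlapping. */
struct range { long start, end; };

static struct range reserved[] = { {10, 19}, {40, 49} };
#define NRESERVED (sizeof(reserved) / sizeof(reserved[0]))

/* Return the first reserved range overlapping [start, end], or NULL. */
static struct range *find_conflict(long start, long end)
{
	size_t i;

	for (i = 0; i < NRESERVED; i++)
		if (reserved[i].start <= end && reserved[i].end >= start)
			return &reserved[i];
	return NULL;
}

/* Iterative split in the shape of the patched function: at most one
 * deferred right-hand piece is kept, and there is no recursive call. */
static void reserve_with_split(long start, long end)
{
	long next_start = 0, next_end = 0;
	int have_next = 0;

	while (1) {
		struct range *conflict = find_conflict(start, end);

		if (!conflict) {
			printf("reserved [%ld, %ld]\n", start, end);
			if (!have_next)
				break;
			start = next_start;
			end = next_end;
			have_next = 0;
			continue;
		}

		/* conflict covers the whole area: nothing left to do */
		if (conflict->start <= start && conflict->end >= end)
			break;

		if (conflict->start > start) {
			/* reserve the left piece now, defer the right one */
			if (conflict->end < end) {
				next_start = conflict->end + 1;
				next_end = end;
				have_next = 1;
			}
			end = conflict->start - 1;
		} else {
			start = conflict->end + 1;
		}
	}
}

int main(void)
{
	reserve_with_split(0, 60);	/* -> [0, 9], [20, 39], [50, 60] */
	return 0;
}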
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 573e1ca..2a05e56 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -566,7 +566,7 @@ static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
static int do_balance_runtime(struct rt_rq *rt_rq)
{
struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
- struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
+ struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
int i, weight, more = 0;
u64 rt_period;

diff --git a/lib/digsig.c b/lib/digsig.c
index 8c0e629..dc2be7e 100644
--- a/lib/digsig.c
+++ b/lib/digsig.c
@@ -162,6 +162,8 @@ static int digsig_verify_rsa(struct key *key,
memset(out1, 0, head);
memcpy(out1 + head, p, l);

+ kfree(p);
+
err = pkcs_1_v1_5_decode_emsa(out1, len, mblen, out2, &len);
if (err)
goto err;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 9c34eb5..33d8b38 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2972,6 +2972,7 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
if (!huge_pte_none(huge_ptep_get(ptep))) {
pte = huge_ptep_get_and_clear(mm, address, ptep);
pte = pte_mkhuge(pte_modify(pte, newprot));
+ pte = arch_make_huge_pte(pte, vma, NULL, 0);
set_huge_pte_at(mm, address, ptep, pte);
}
}
diff --git a/mm/migrate.c b/mm/migrate.c
index be26d5c..2e03986 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -142,8 +142,10 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
if (is_write_migration_entry(entry))
pte = pte_mkwrite(pte);
#ifdef CONFIG_HUGETLB_PAGE
- if (PageHuge(new))
+ if (PageHuge(new)) {
pte = pte_mkhuge(pte);
+ pte = arch_make_huge_pte(pte, vma, new, 0);
+ }
#endif
flush_cache_page(vma, addr, pte_pfn(pte));
set_pte_at(mm, addr, ptep, pte);
diff --git a/mm/mlock.c b/mm/mlock.c
index ef726e8..3283272 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -524,11 +524,11 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
static int do_mlockall(int flags)
{
struct vm_area_struct * vma, * prev = NULL;
- unsigned int def_flags = 0;

if (flags & MCL_FUTURE)
- def_flags = VM_LOCKED;
- current->mm->def_flags = def_flags;
+ current->mm->def_flags |= VM_LOCKED;
+ else
+ current->mm->def_flags &= ~VM_LOCKED;
if (flags == MCL_FUTURE)
goto out;

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 007bf3b..0ed96c7 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4272,10 +4272,11 @@ static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
* round what is now in bits to nearest long in bits, then return it in
* bytes.
*/
-static unsigned long __init usemap_size(unsigned long zonesize)
+static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
{
unsigned long usemapsize;

+ zonesize += zone_start_pfn & (pageblock_nr_pages-1);
usemapsize = roundup(zonesize, pageblock_nr_pages);
usemapsize = usemapsize >> pageblock_order;
usemapsize *= NR_PAGEBLOCK_BITS;
@@ -4285,17 +4286,19 @@ static unsigned long __init usemap_size(unsigned long zonesize)
}

static void __init setup_usemap(struct pglist_data *pgdat,
- struct zone *zone, unsigned long zonesize)
+ struct zone *zone,
+ unsigned long zone_start_pfn,
+ unsigned long zonesize)
{
- unsigned long usemapsize = usemap_size(zonesize);
+ unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize);
zone->pageblock_flags = NULL;
if (usemapsize)
zone->pageblock_flags = alloc_bootmem_node_nopanic(pgdat,
usemapsize);
}
#else
-static inline void setup_usemap(struct pglist_data *pgdat,
- struct zone *zone, unsigned long zonesize) {}
+static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone,
+ unsigned long zone_start_pfn, unsigned long zonesize) {}
#endif /* CONFIG_SPARSEMEM */

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
@@ -4414,7 +4417,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
continue;

set_pageblock_order();
- setup_usemap(pgdat, zone, size);
+ setup_usemap(pgdat, zone, zone_start_pfn, size);
ret = init_currently_empty_zone(zone, zone_start_pfn,
size, MEMMAP_EARLY);
BUG_ON(ret);
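
The page_alloc change above matters when a zone does not start on a pageblock boundary: the pageblock bitmap is indexed from the block containing zone_start_pfn, so the leading partial block has to be counted too. A small worked example (pageblock_nr_pages of 512 is just an assumed value; the real usemap_size() goes on to multiply by NR_PAGEBLOCK_BITS and round up to whole longs):

#include <stdio.h>

#define PAGEBLOCK_NR_PAGES 512UL	/* assumed example value */

/* Old: number of pageblocks the bitmap covers, from the zone length only. */
static unsigned long blocks_old(unsigned long zonesize)
{
	return (zonesize + PAGEBLOCK_NR_PAGES - 1) / PAGEBLOCK_NR_PAGES;
}

/* New: also count the partial pageblock in front of an unaligned start. */
static unsigned long blocks_new(unsigned long zone_start_pfn,
				unsigned long zonesize)
{
	zonesize += zone_start_pfn & (PAGEBLOCK_NR_PAGES - 1);
	return blocks_old(zonesize);
}

int main(void)
{
	/* A 1024-page zone starting 256 pages into a pageblock really
	 * spans 3 pageblocks; the old math sized the bitmap for 2. */
	printf("old: %lu pageblocks\n", blocks_old(1024));
	printf("new: %lu pageblocks\n", blocks_new(256, 1024));
	return 0;
}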
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 22fe004..9c97f7a 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -855,6 +855,19 @@ int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)

skb_pull(skb, sizeof(code));

+ /*
+ * The SMP context must be initialized for all other PDUs except
+ * pairing and security requests. If we get any other PDU when
+ * not initialized simply disconnect (done if this function
+ * returns an error).
+ */
+ if (code != SMP_CMD_PAIRING_REQ && code != SMP_CMD_SECURITY_REQ &&
+ !conn->smp_chan) {
+ BT_ERR("Unexpected SMP command 0x%02x. Disconnecting.", code);
+ kfree_skb(skb);
+ return -ENOTSUPP;
+ }
+
switch (code) {
case SMP_CMD_PAIRING_REQ:
reason = smp_cmd_pairing_req(conn, skb);
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index e41456b..ab52468 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -256,6 +256,9 @@ static int br_parse_ip_options(struct sk_buff *skb)
struct net_device *dev = skb->dev;
u32 len;

+ if (!pskb_may_pull(skb, sizeof(struct iphdr)))
+ goto inhdr_error;
+
iph = ip_hdr(skb);
opt = &(IPCB(skb)->opt);

diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index aa278cd..2a42802 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -1797,10 +1797,13 @@ static ssize_t pktgen_thread_write(struct file *file,
return -EFAULT;
i += len;
mutex_lock(&pktgen_thread_lock);
- pktgen_add_device(t, f);
+ ret = pktgen_add_device(t, f);
mutex_unlock(&pktgen_thread_lock);
- ret = count;
- sprintf(pg_result, "OK: add_device=%s", f);
+ if (!ret) {
+ ret = count;
+ sprintf(pg_result, "OK: add_device=%s", f);
+ } else
+ sprintf(pg_result, "ERROR: can not add device %s", f);
goto out;
}

diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 8285f00..aabeb7b 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -589,7 +589,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
case IP_TTL:
if (optlen < 1)
goto e_inval;
- if (val != -1 && (val < 0 || val > 255))
+ if (val != -1 && (val < 1 || val > 255))
goto e_inval;
inet->uc_ttl = val;
break;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 19c430c..3b14d81 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3715,6 +3715,11 @@ static bool tcp_process_frto(struct sock *sk, int flag)
}
} else {
if (!(flag & FLAG_DATA_ACKED) && (tp->frto_counter == 1)) {
+ if (!tcp_packets_in_flight(tp)) {
+ tcp_enter_frto_loss(sk, 2, flag);
+ return true;
+ }
+
/* Prevent sending of new data. */
tp->snd_cwnd = min(tp->snd_cwnd,
tcp_packets_in_flight(tp));
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 0808ad5..fc9ac78 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1741,7 +1741,7 @@ static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
continue;
if ((rt->rt6i_flags & flags) != flags)
continue;
- if ((noflags != 0) && ((rt->rt6i_flags & flags) != 0))
+ if ((rt->rt6i_flags & noflags) != 0)
continue;
dst_hold(&rt->dst);
break;
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index decc21d1..4703c70 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1293,10 +1293,10 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
cork->length = 0;
sk->sk_sndmsg_page = NULL;
sk->sk_sndmsg_off = 0;
- exthdrlen = (opt ? opt->opt_flen : 0) - rt->rt6i_nfheader_len;
+ exthdrlen = (opt ? opt->opt_flen : 0);
length += exthdrlen;
transhdrlen += exthdrlen;
- dst_exthdrlen = rt->dst.header_len;
+ dst_exthdrlen = rt->dst.header_len - rt->rt6i_nfheader_len;
} else {
rt = (struct rt6_info *)cork->dst;
fl6 = &inet->cork.fl.u.ip6;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index fd44184..08c149c 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -846,7 +846,8 @@ restart:
dst_hold(&rt->dst);
read_unlock_bh(&table->tb6_lock);

- if (!dst_get_neighbour_noref_raw(&rt->dst) && !(rt->rt6i_flags & RTF_NONEXTHOP))
+ if (!dst_get_neighbour_noref_raw(&rt->dst) &&
+ !(rt->rt6i_flags & (RTF_NONEXTHOP | RTF_LOCAL)))
nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr);
else if (!(rt->dst.flags & DST_HOST))
nrt = rt6_alloc_clone(rt, &fl6->daddr);
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 901cffd..02b1ef8 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2450,13 +2450,15 @@ static int packet_release(struct socket *sock)

packet_flush_mclist(sk);

- memset(&req_u, 0, sizeof(req_u));
-
- if (po->rx_ring.pg_vec)
+ if (po->rx_ring.pg_vec) {
+ memset(&req_u, 0, sizeof(req_u));
packet_set_ring(sk, &req_u, 1, 0);
+ }

- if (po->tx_ring.pg_vec)
+ if (po->tx_ring.pg_vec) {
+ memset(&req_u, 0, sizeof(req_u));
packet_set_ring(sk, &req_u, 1, 1);
+ }

fanout_release(sk);

diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 68a385d..58cd035 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -248,6 +248,8 @@ void sctp_endpoint_free(struct sctp_endpoint *ep)
/* Final destructor for endpoint. */
static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
{
+ int i;
+
SCTP_ASSERT(ep->base.dead, "Endpoint is not dead", return);

/* Free up the HMAC transform. */
@@ -270,6 +272,9 @@ static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
sctp_inq_free(&ep->base.inqueue);
sctp_bind_addr_free(&ep->base.bind_addr);

+ for (i = 0; i < SCTP_HOW_MANY_SECRETS; ++i)
+ memset(&ep->secret_key[i], 0, SCTP_SECRET_SIZE);
+
/* Remove and free the port */
if (sctp_sk(ep->base.sk)->bind_hash)
sctp_put_port(ep->base.sk);
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index a0fa19f..0716290 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -223,7 +223,7 @@ void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)

/* Free the outqueue structure and any related pending chunks.
*/
-void sctp_outq_teardown(struct sctp_outq *q)
+static void __sctp_outq_teardown(struct sctp_outq *q)
{
struct sctp_transport *transport;
struct list_head *lchunk, *temp;
@@ -276,8 +276,6 @@ void sctp_outq_teardown(struct sctp_outq *q)
sctp_chunk_free(chunk);
}

- q->error = 0;
-
/* Throw away any leftover control chunks. */
list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) {
list_del_init(&chunk->list);
@@ -285,11 +283,17 @@ void sctp_outq_teardown(struct sctp_outq *q)
}
}

+void sctp_outq_teardown(struct sctp_outq *q)
+{
+ __sctp_outq_teardown(q);
+ sctp_outq_init(q->asoc, q);
+}
+
/* Free the outqueue structure and any related pending chunks. */
void sctp_outq_free(struct sctp_outq *q)
{
/* Throw away leftover chunks. */
- sctp_outq_teardown(q);
+ __sctp_outq_teardown(q);

/* If we were kmalloc()'d, free the memory. */
if (q->malloced)
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 944cfce..957bb6e 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -3375,7 +3375,7 @@ static int sctp_setsockopt_auth_key(struct sock *sk,

ret = sctp_auth_set_key(sctp_sk(sk)->ep, asoc, authkey);
out:
- kfree(authkey);
+ kzfree(authkey);
return ret;
}

--