Re: Linux 2.6.34.1

From: Greg KH
Date: Mon Jul 05 2010 - 14:44:56 EST


diff --git a/Documentation/hwmon/ltc4245 b/Documentation/hwmon/ltc4245
index 02838a4..86b5880 100644
--- a/Documentation/hwmon/ltc4245
+++ b/Documentation/hwmon/ltc4245
@@ -72,9 +72,7 @@ in6_min_alarm 5v output undervoltage alarm
in7_min_alarm 3v output undervoltage alarm
in8_min_alarm Vee (-12v) output undervoltage alarm

-in9_input GPIO #1 voltage data
-in10_input GPIO #2 voltage data
-in11_input GPIO #3 voltage data
+in9_input GPIO voltage data

power1_input 12v power usage (mW)
power2_input 5v power usage (mW)
diff --git a/Makefile b/Makefile
index ebc8225..4f2223f 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 34
-EXTRAVERSION =
+EXTRAVERSION = .1
NAME = Sheep on Meth

# *DOCUMENTATION*
diff --git a/arch/arm/common/sa1111.c b/arch/arm/common/sa1111.c
index a52a27c..6f80665 100644
--- a/arch/arm/common/sa1111.c
+++ b/arch/arm/common/sa1111.c
@@ -951,8 +951,6 @@ static int sa1111_resume(struct platform_device *dev)
if (!save)
return 0;

- spin_lock_irqsave(&sachip->lock, flags);
-
/*
* Ensure that the SA1111 is still here.
* FIXME: shouldn't do this here.
@@ -969,6 +967,13 @@ static int sa1111_resume(struct platform_device *dev)
* First of all, wake up the chip.
*/
sa1111_wake(sachip);
+
+ /*
+ * Only lock for write ops. Also, sa1111_wake must be called with
+ * the spinlock released!
+ */
+ spin_lock_irqsave(&sachip->lock, flags);
+
sa1111_writel(0, sachip->base + SA1111_INTC + SA1111_INTEN0);
sa1111_writel(0, sachip->base + SA1111_INTC + SA1111_INTEN1);

diff --git a/arch/arm/mach-mx2/devices.c b/arch/arm/mach-mx2/devices.c
index b91e412..04f36d8 100644
--- a/arch/arm/mach-mx2/devices.c
+++ b/arch/arm/mach-mx2/devices.c
@@ -483,8 +483,8 @@ int __init mxc_register_gpios(void)
#ifdef CONFIG_MACH_MX21
static struct resource mx21_usbhc_resources[] = {
{
- .start = MX21_BASE_ADDR,
- .end = MX21_BASE_ADDR + 0x1FFF,
+ .start = MX21_USBOTG_BASE_ADDR,
+ .end = MX21_USBOTG_BASE_ADDR + SZ_8K - 1,
.flags = IORESOURCE_MEM,
},
{
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index 06a90dc..37c8157 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -91,7 +91,11 @@ ENTRY(v7_flush_kern_cache_all)
THUMB( stmfd sp!, {r4-r7, r9-r11, lr} )
bl v7_flush_dcache_all
mov r0, #0
+#ifdef CONFIG_SMP
+ mcr p15, 0, r0, c7, c1, 0 @ invalidate I-cache inner shareable
+#else
mcr p15, 0, r0, c7, c5, 0 @ I+BTB cache invalidate
+#endif
ARM( ldmfd sp!, {r4-r5, r7, r9-r11, lr} )
THUMB( ldmfd sp!, {r4-r7, r9-r11, lr} )
mov pc, lr
diff --git a/arch/arm/mm/copypage-feroceon.c b/arch/arm/mm/copypage-feroceon.c
index 5eb4fd9..ac163de 100644
--- a/arch/arm/mm/copypage-feroceon.c
+++ b/arch/arm/mm/copypage-feroceon.c
@@ -18,7 +18,7 @@ feroceon_copy_user_page(void *kto, const void *kfrom)
{
asm("\
stmfd sp!, {r4-r9, lr} \n\
- mov ip, %0 \n\
+ mov ip, %2 \n\
1: mov lr, r1 \n\
ldmia r1!, {r2 - r9} \n\
pld [lr, #32] \n\
@@ -64,7 +64,7 @@ feroceon_copy_user_page(void *kto, const void *kfrom)
mcr p15, 0, ip, c7, c10, 4 @ drain WB\n\
ldmfd sp!, {r4-r9, pc}"
:
- : "I" (PAGE_SIZE));
+ : "r" (kto), "r" (kfrom), "I" (PAGE_SIZE));
}

void feroceon_copy_user_highpage(struct page *to, struct page *from,
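The four copy_user_page fixes in this series (feroceon, v4wb, v4wt, xsc3) all hinge on the same point: GCC extended asm numbers its operands outputs first, then inputs, in declaration order. Once kto and kfrom are passed as explicit "r" inputs, the page-size constant becomes operand %2 rather than %0, and naming the pointers tells the compiler those values are genuinely consumed instead of assuming they still sit untouched in r0/r1. A minimal sketch of the numbering rule (function name and constants are made up for illustration, not taken from the patch):

/*
 * Inputs only: kto is %0, kfrom is %1, the "I" immediate is %2.
 * Copies the words at offsets 0 and 4 from kfrom to kto.
 */
static void copy_first_words(void *kto, const void *kfrom)
{
	asm volatile("ldr	r3, [%1]\n\t"
		     "str	r3, [%0]\n\t"
		     "ldr	r3, [%1, %2]\n\t"
		     "str	r3, [%0, %2]"
		     :
		     : "r" (kto), "r" (kfrom), "I" (4)
		     : "r3", "memory");
}
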
diff --git a/arch/arm/mm/copypage-v4wb.c b/arch/arm/mm/copypage-v4wb.c
index 7c2eb55..cb589cb 100644
--- a/arch/arm/mm/copypage-v4wb.c
+++ b/arch/arm/mm/copypage-v4wb.c
@@ -27,7 +27,7 @@ v4wb_copy_user_page(void *kto, const void *kfrom)
{
asm("\
stmfd sp!, {r4, lr} @ 2\n\
- mov r2, %0 @ 1\n\
+ mov r2, %2 @ 1\n\
ldmia r1!, {r3, r4, ip, lr} @ 4\n\
1: mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line\n\
stmia r0!, {r3, r4, ip, lr} @ 4\n\
@@ -44,7 +44,7 @@ v4wb_copy_user_page(void *kto, const void *kfrom)
mcr p15, 0, r1, c7, c10, 4 @ 1 drain WB\n\
ldmfd sp!, {r4, pc} @ 3"
:
- : "I" (PAGE_SIZE / 64));
+ : "r" (kto), "r" (kfrom), "I" (PAGE_SIZE / 64));
}

void v4wb_copy_user_highpage(struct page *to, struct page *from,
diff --git a/arch/arm/mm/copypage-v4wt.c b/arch/arm/mm/copypage-v4wt.c
index 172e6a5..30c7d04 100644
--- a/arch/arm/mm/copypage-v4wt.c
+++ b/arch/arm/mm/copypage-v4wt.c
@@ -25,7 +25,7 @@ v4wt_copy_user_page(void *kto, const void *kfrom)
{
asm("\
stmfd sp!, {r4, lr} @ 2\n\
- mov r2, %0 @ 1\n\
+ mov r2, %2 @ 1\n\
ldmia r1!, {r3, r4, ip, lr} @ 4\n\
1: stmia r0!, {r3, r4, ip, lr} @ 4\n\
ldmia r1!, {r3, r4, ip, lr} @ 4+1\n\
@@ -40,7 +40,7 @@ v4wt_copy_user_page(void *kto, const void *kfrom)
mcr p15, 0, r2, c7, c7, 0 @ flush ID cache\n\
ldmfd sp!, {r4, pc} @ 3"
:
- : "I" (PAGE_SIZE / 64));
+ : "r" (kto), "r" (kfrom), "I" (PAGE_SIZE / 64));
}

void v4wt_copy_user_highpage(struct page *to, struct page *from,
diff --git a/arch/arm/mm/copypage-xsc3.c b/arch/arm/mm/copypage-xsc3.c
index 747ad41..f9cde07 100644
--- a/arch/arm/mm/copypage-xsc3.c
+++ b/arch/arm/mm/copypage-xsc3.c
@@ -34,7 +34,7 @@ xsc3_mc_copy_user_page(void *kto, const void *kfrom)
{
asm("\
stmfd sp!, {r4, r5, lr} \n\
- mov lr, %0 \n\
+ mov lr, %2 \n\
\n\
pld [r1, #0] \n\
pld [r1, #32] \n\
@@ -67,7 +67,7 @@ xsc3_mc_copy_user_page(void *kto, const void *kfrom)
\n\
ldmfd sp!, {r4, r5, pc}"
:
- : "I" (PAGE_SIZE / 64 - 1));
+ : "r" (kto), "r" (kfrom), "I" (PAGE_SIZE / 64 - 1));
}

void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 9d40c34..8ad75e9 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -393,6 +393,9 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
if (addr < TASK_SIZE)
return do_page_fault(addr, fsr, regs);

+ if (user_mode(regs))
+ goto bad_area;
+
index = pgd_index(addr);

/*
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 0ed29bf..55d07c8 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -712,10 +712,10 @@ void __init mem_init(void)
void free_initmem(void)
{
#ifdef CONFIG_HAVE_TCM
- extern char *__tcm_start, *__tcm_end;
+ extern char __tcm_start, __tcm_end;

- totalram_pages += free_area(__phys_to_pfn(__pa(__tcm_start)),
- __phys_to_pfn(__pa(__tcm_end)),
+ totalram_pages += free_area(__phys_to_pfn(__pa(&__tcm_start)),
+ __phys_to_pfn(__pa(&__tcm_end)),
"TCM link");
#endif

diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S
index 66dc2d0..d66cead 100644
--- a/arch/arm/vfp/vfphw.S
+++ b/arch/arm/vfp/vfphw.S
@@ -277,7 +277,7 @@ ENTRY(vfp_put_double)
#ifdef CONFIG_VFPv3
@ d16 - d31 registers
.irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
-1: mcrr p11, 3, r1, r2, c\dr @ fmdrr r1, r2, d\dr
+1: mcrr p11, 3, r0, r1, c\dr @ fmdrr r0, r1, d\dr
mov pc, lr
.org 1b + 8
.endr
diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
index 8542bc3..93f6c63 100644
--- a/arch/blackfin/include/asm/cache.h
+++ b/arch/blackfin/include/asm/cache.h
@@ -15,6 +15,8 @@
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
#define SMP_CACHE_BYTES L1_CACHE_BYTES

+#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
+
#ifdef CONFIG_SMP
#define __cacheline_aligned
#else
diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
index 2797163..7dc0f0f 100644
--- a/arch/frv/include/asm/cache.h
+++ b/arch/frv/include/asm/cache.h
@@ -17,6 +17,8 @@
#define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)

+#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
+
#define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
#define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))

diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
index fed3fd3..ecafbe1 100644
--- a/arch/m68k/include/asm/cache.h
+++ b/arch/m68k/include/asm/cache.h
@@ -8,4 +8,6 @@
#define L1_CACHE_SHIFT 4
#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)

+#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
+
#endif
diff --git a/arch/mn10300/include/asm/cache.h b/arch/mn10300/include/asm/cache.h
index e03cfa2..6e2fe28 100644
--- a/arch/mn10300/include/asm/cache.h
+++ b/arch/mn10300/include/asm/cache.h
@@ -21,6 +21,8 @@
#define L1_CACHE_DISPARITY L1_CACHE_NENTRIES * L1_CACHE_BYTES
#endif

+#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
+
/* data cache purge registers
* - read from the register to unconditionally purge that cache line
* - write address & 0xffffff00 to conditionally purge that cache line
diff --git a/arch/parisc/math-emu/decode_exc.c b/arch/parisc/math-emu/decode_exc.c
index 3ca1c61..27a7492 100644
--- a/arch/parisc/math-emu/decode_exc.c
+++ b/arch/parisc/math-emu/decode_exc.c
@@ -342,6 +342,7 @@ decode_fpu(unsigned int Fpu_register[], unsigned int trap_counts[])
return SIGNALCODE(SIGFPE, FPE_FLTINV);
case DIVISIONBYZEROEXCEPTION:
update_trap_counts(Fpu_register, aflags, bflags, trap_counts);
+ Clear_excp_register(exception_index);
return SIGNALCODE(SIGFPE, FPE_FLTDIV);
case INEXACTEXCEPTION:
update_trap_counts(Fpu_register, aflags, bflags, trap_counts);
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index c09138d..b894721 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -447,6 +447,14 @@ int main(void)
DEFINE(PGD_T_LOG2, PGD_T_LOG2);
DEFINE(PTE_T_LOG2, PTE_T_LOG2);
#endif
+#ifdef CONFIG_FSL_BOOKE
+ DEFINE(TLBCAM_SIZE, sizeof(struct tlbcam));
+ DEFINE(TLBCAM_MAS0, offsetof(struct tlbcam, MAS0));
+ DEFINE(TLBCAM_MAS1, offsetof(struct tlbcam, MAS1));
+ DEFINE(TLBCAM_MAS2, offsetof(struct tlbcam, MAS2));
+ DEFINE(TLBCAM_MAS3, offsetof(struct tlbcam, MAS3));
+ DEFINE(TLBCAM_MAS7, offsetof(struct tlbcam, MAS7));
+#endif

#ifdef CONFIG_KVM_EXIT_TIMING
DEFINE(VCPU_TIMING_EXIT_TBU, offsetof(struct kvm_vcpu,
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 7255265..edd4a57 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -639,6 +639,13 @@ interrupt_base:
rlwinm r12,r12,0,16,1
mtspr SPRN_MAS1,r12

+ /* Make up the required permissions for kernel code */
+#ifdef CONFIG_PTE_64BIT
+ li r13,_PAGE_PRESENT | _PAGE_BAP_SX
+ oris r13,r13,_PAGE_ACCESSED@h
+#else
+ li r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
+#endif
b 4f

/* Get the PGD for the current thread */
@@ -646,15 +653,15 @@ interrupt_base:
mfspr r11,SPRN_SPRG_THREAD
lwz r11,PGDIR(r11)

-4:
- /* Make up the required permissions */
+ /* Make up the required permissions for user code */
#ifdef CONFIG_PTE_64BIT
- li r13,_PAGE_PRESENT | _PAGE_EXEC
+ li r13,_PAGE_PRESENT | _PAGE_BAP_UX
oris r13,r13,_PAGE_ACCESSED@h
#else
li r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
#endif

+4:
FIND_PTE
andc. r13,r13,r11 /* Check permission */

diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 604af29..ecb532b 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -922,6 +922,8 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
int i;

+ vcpu_load(vcpu);
+
sregs->pvr = vcpu->arch.pvr;

sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
@@ -940,6 +942,9 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
}
}
+
+ vcpu_put(vcpu);
+
return 0;
}

@@ -949,6 +954,8 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
int i;

+ vcpu_load(vcpu);
+
kvmppc_set_pvr(vcpu, sregs->pvr);

vcpu3s->sdr1 = sregs->u.s.sdr1;
@@ -975,6 +982,9 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,

/* Flush the MMU after messing with the segments */
kvmppc_mmu_pte_flush(vcpu, 0, 0);
+
+ vcpu_put(vcpu);
+
return 0;
}

diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 2a3a195..df0182a 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -479,6 +479,8 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
int i;

+ vcpu_load(vcpu);
+
regs->pc = vcpu->arch.pc;
regs->cr = kvmppc_get_cr(vcpu);
regs->ctr = vcpu->arch.ctr;
@@ -499,6 +501,8 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

+ vcpu_put(vcpu);
+
return 0;
}

@@ -506,6 +510,8 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
int i;

+ vcpu_load(vcpu);
+
vcpu->arch.pc = regs->pc;
kvmppc_set_cr(vcpu, regs->cr);
vcpu->arch.ctr = regs->ctr;
@@ -525,6 +531,8 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

+ vcpu_put(vcpu);
+
return 0;
}

@@ -553,7 +561,12 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
struct kvm_translation *tr)
{
- return kvmppc_core_vcpu_translate(vcpu, tr);
+ int r;
+
+ vcpu_load(vcpu);
+ r = kvmppc_core_vcpu_translate(vcpu, tr);
+ vcpu_put(vcpu);
+ return r;
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 297fcd2..bf36a9d 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -193,7 +193,8 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
struct kvm_vcpu *vcpu;
vcpu = kvmppc_core_vcpu_create(kvm, id);
- kvmppc_create_vcpu_debugfs(vcpu, id);
+ if (!IS_ERR(vcpu))
+ kvmppc_create_vcpu_debugfs(vcpu, id);
return vcpu;
}

diff --git a/arch/powerpc/lib/string.S b/arch/powerpc/lib/string.S
index 64e2e49..3ac0cd3 100644
--- a/arch/powerpc/lib/string.S
+++ b/arch/powerpc/lib/string.S
@@ -71,7 +71,7 @@ _GLOBAL(strcmp)

_GLOBAL(strncmp)
PPC_LCMPI r5,0
- beqlr
+ ble- 2f
mtctr r5
addi r5,r3,-1
addi r4,r4,-1
@@ -82,6 +82,8 @@ _GLOBAL(strncmp)
beqlr 1
bdnzt eq,1b
blr
+2: li r3,0
+ blr

_GLOBAL(strlen)
addi r4,r3,-1
diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c
index 1ed6b52..cdc7526 100644
--- a/arch/powerpc/mm/fsl_booke_mmu.c
+++ b/arch/powerpc/mm/fsl_booke_mmu.c
@@ -2,7 +2,7 @@
* Modifications by Kumar Gala (galak@xxxxxxxxxxxxxxxxxxx) to support
* E500 Book E processors.
*
- * Copyright 2004 Freescale Semiconductor, Inc
+ * Copyright 2004,2010 Freescale Semiconductor, Inc.
*
* This file contains the routines for initializing the MMU
* on the 4xx series of chips.
@@ -56,19 +56,13 @@

unsigned int tlbcam_index;

-#define NUM_TLBCAMS (64)

#if defined(CONFIG_LOWMEM_CAM_NUM_BOOL) && (CONFIG_LOWMEM_CAM_NUM >= NUM_TLBCAMS)
#error "LOWMEM_CAM_NUM must be less than NUM_TLBCAMS"
#endif

-struct tlbcam {
- u32 MAS0;
- u32 MAS1;
- unsigned long MAS2;
- u32 MAS3;
- u32 MAS7;
-} TLBCAM[NUM_TLBCAMS];
+#define NUM_TLBCAMS (64)
+struct tlbcam TLBCAM[NUM_TLBCAMS];

struct tlbcamrange {
unsigned long start;
@@ -109,19 +103,6 @@ unsigned long p_mapped_by_tlbcam(phys_addr_t pa)
return 0;
}

-void loadcam_entry(int idx)
-{
- mtspr(SPRN_MAS0, TLBCAM[idx].MAS0);
- mtspr(SPRN_MAS1, TLBCAM[idx].MAS1);
- mtspr(SPRN_MAS2, TLBCAM[idx].MAS2);
- mtspr(SPRN_MAS3, TLBCAM[idx].MAS3);
-
- if (mmu_has_feature(MMU_FTR_BIG_PHYS))
- mtspr(SPRN_MAS7, TLBCAM[idx].MAS7);
-
- asm volatile("isync;tlbwe;isync" : : : "memory");
-}
-
/*
* Set up one of the I/D BAT (block address translation) register pairs.
* The parameters are not checked; in particular size must be a power
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index d49a775..0591f25 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -149,7 +149,15 @@ extern unsigned long mmu_mapin_ram(unsigned long top);
extern void MMU_init_hw(void);
extern unsigned long mmu_mapin_ram(unsigned long top);
extern void adjust_total_lowmem(void);
-
+extern void loadcam_entry(unsigned int index);
+
+struct tlbcam {
+ u32 MAS0;
+ u32 MAS1;
+ unsigned long MAS2;
+ u32 MAS3;
+ u32 MAS7;
+};
#elif defined(CONFIG_PPC32)
/* anything 32-bit except 4xx or 8xx */
extern void MMU_init_hw(void);
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index b9243e7..767b0cf 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -146,6 +146,14 @@ ioremap_flags(phys_addr_t addr, unsigned long size, unsigned long flags)
/* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
flags &= ~(_PAGE_USER | _PAGE_EXEC);

+#ifdef _PAGE_BAP_SR
+ /* _PAGE_USER contains _PAGE_BAP_SR on BookE using the new PTE format
+ * which means that we just cleared supervisor access... oops ;-) This
+ * restores it
+ */
+ flags |= _PAGE_BAP_SR;
+#endif
+
return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_flags);
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index d95679a..d050fc8 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -265,6 +265,14 @@ void __iomem * ioremap_flags(phys_addr_t addr, unsigned long size,
/* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
flags &= ~(_PAGE_USER | _PAGE_EXEC);

+#ifdef _PAGE_BAP_SR
+ /* _PAGE_USER contains _PAGE_BAP_SR on BookE using the new PTE format
+ * which means that we just cleared supervisor access... oops ;-) This
+ * restores it
+ */
+ flags |= _PAGE_BAP_SR;
+#endif
+
if (ppc_md.ioremap)
return ppc_md.ioremap(addr, size, flags, caller);
return __ioremap_caller(addr, size, flags, caller);
diff --git a/arch/powerpc/mm/tlb_nohash_low.S b/arch/powerpc/mm/tlb_nohash_low.S
index bbdc5b5..8656ecf 100644
--- a/arch/powerpc/mm/tlb_nohash_low.S
+++ b/arch/powerpc/mm/tlb_nohash_low.S
@@ -271,3 +271,31 @@ _GLOBAL(set_context)
#else
#error Unsupported processor type !
#endif
+
+#if defined(CONFIG_FSL_BOOKE)
+/*
+ * extern void loadcam_entry(unsigned int index)
+ *
+ * Load TLBCAM[index] entry into the L2 CAM MMU
+ */
+_GLOBAL(loadcam_entry)
+ LOAD_REG_ADDR(r4, TLBCAM)
+ mulli r5,r3,TLBCAM_SIZE
+ add r3,r5,r4
+ lwz r4,TLBCAM_MAS0(r3)
+ mtspr SPRN_MAS0,r4
+ lwz r4,TLBCAM_MAS1(r3)
+ mtspr SPRN_MAS1,r4
+ PPC_LL r4,TLBCAM_MAS2(r3)
+ mtspr SPRN_MAS2,r4
+ lwz r4,TLBCAM_MAS3(r3)
+ mtspr SPRN_MAS3,r4
+BEGIN_MMU_FTR_SECTION
+ lwz r4,TLBCAM_MAS7(r3)
+ mtspr SPRN_MAS7,r4
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_BIG_PHYS)
+ isync
+ tlbwe
+ isync
+ blr
+#endif
diff --git a/arch/powerpc/oprofile/op_model_cell.c b/arch/powerpc/oprofile/op_model_cell.c
index 2c9e522..7fd90d0 100644
--- a/arch/powerpc/oprofile/op_model_cell.c
+++ b/arch/powerpc/oprofile/op_model_cell.c
@@ -1077,7 +1077,7 @@ static int calculate_lfsr(int n)
index = ENTRIES-1;

/* make sure index is valid */
- if ((index > ENTRIES) || (index < 0))
+ if ((index >= ENTRIES) || (index < 0))
index = ENTRIES-1;

return initial_lfsr[index];
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
index a8e1d5d..b0760d7 100644
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -154,30 +154,6 @@ static void pseries_mach_cpu_die(void)
for(;;);
}

-static int qcss_tok; /* query-cpu-stopped-state token */
-
-/* Get state of physical CPU.
- * Return codes:
- * 0 - The processor is in the RTAS stopped state
- * 1 - stop-self is in progress
- * 2 - The processor is not in the RTAS stopped state
- * -1 - Hardware Error
- * -2 - Hardware Busy, Try again later.
- */
-static int query_cpu_stopped(unsigned int pcpu)
-{
- int cpu_status, status;
-
- status = rtas_call(qcss_tok, 1, 2, &cpu_status, pcpu);
- if (status != 0) {
- printk(KERN_ERR
- "RTAS query-cpu-stopped-state failed: %i\n", status);
- return status;
- }
-
- return cpu_status;
-}
-
static int pseries_cpu_disable(void)
{
int cpu = smp_processor_id();
@@ -224,8 +200,9 @@ static void pseries_cpu_die(unsigned int cpu)
} else if (get_preferred_offline_state(cpu) == CPU_STATE_OFFLINE) {

for (tries = 0; tries < 25; tries++) {
- cpu_status = query_cpu_stopped(pcpu);
- if (cpu_status == 0 || cpu_status == -1)
+ cpu_status = smp_query_cpu_stopped(pcpu);
+ if (cpu_status == QCSS_STOPPED ||
+ cpu_status == QCSS_HARDWARE_ERROR)
break;
cpu_relax();
}
@@ -388,6 +365,7 @@ static int __init pseries_cpu_hotplug_init(void)
struct device_node *np;
const char *typep;
int cpu;
+ int qcss_tok;

for_each_node_by_name(np, "interrupt-controller") {
typep = of_get_property(np, "compatible", NULL);
diff --git a/arch/powerpc/platforms/pseries/plpar_wrappers.h b/arch/powerpc/platforms/pseries/plpar_wrappers.h
index a05f8d4..6c4fd2c 100644
--- a/arch/powerpc/platforms/pseries/plpar_wrappers.h
+++ b/arch/powerpc/platforms/pseries/plpar_wrappers.h
@@ -4,6 +4,14 @@
#include <asm/hvcall.h>
#include <asm/page.h>

+/* Get state of physical CPU from query_cpu_stopped */
+int smp_query_cpu_stopped(unsigned int pcpu);
+#define QCSS_STOPPED 0
+#define QCSS_STOPPING 1
+#define QCSS_NOT_STOPPED 2
+#define QCSS_HARDWARE_ERROR -1
+#define QCSS_HARDWARE_BUSY -2
+
static inline long poll_pending(void)
{
return plpar_hcall_norets(H_POLL_PENDING);
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index 4e7f89a..8979982 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -57,6 +57,28 @@
*/
static cpumask_t of_spin_map;

+/* Query where a cpu is now. Return codes #defined in plpar_wrappers.h */
+int smp_query_cpu_stopped(unsigned int pcpu)
+{
+ int cpu_status, status;
+ int qcss_tok = rtas_token("query-cpu-stopped-state");
+
+ if (qcss_tok == RTAS_UNKNOWN_SERVICE) {
+ printk(KERN_INFO "Firmware doesn't support "
+ "query-cpu-stopped-state\n");
+ return QCSS_HARDWARE_ERROR;
+ }
+
+ status = rtas_call(qcss_tok, 1, 2, &cpu_status, pcpu);
+ if (status != 0) {
+ printk(KERN_ERR
+ "RTAS query-cpu-stopped-state failed: %i\n", status);
+ return status;
+ }
+
+ return cpu_status;
+}
+
/**
* smp_startup_cpu() - start the given cpu
*
@@ -82,6 +104,12 @@ static inline int __devinit smp_startup_cpu(unsigned int lcpu)

pcpu = get_hard_smp_processor_id(lcpu);

+ /* Check to see if the CPU is already out of FW for kexec */
+ if (smp_query_cpu_stopped(pcpu) == QCSS_NOT_STOPPED) {
+ cpu_set(lcpu, of_spin_map);
+ return 1;
+ }
+
/* Fixup atomic count: it exited inside IRQ handler. */
task_thread_info(paca[lcpu].__current)->preempt_count = 0;

diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 4929286..ee7c713 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -341,11 +341,13 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,

rc = kvm_vcpu_init(vcpu, kvm, id);
if (rc)
- goto out_free_cpu;
+ goto out_free_sie_block;
VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
vcpu->arch.sie_block);

return vcpu;
+out_free_sie_block:
+ free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
kfree(vcpu);
out_nomem:
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 06d9e79..63400e6 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -180,6 +180,7 @@ union kvm_mmu_page_role {
unsigned invalid:1;
unsigned cr4_pge:1;
unsigned nxe:1;
+ unsigned cr0_wp:1;
};
};

@@ -541,6 +542,8 @@ struct kvm_x86_ops {
int (*get_lpage_level)(void);
bool (*rdtscp_supported)(void);

+ void (*set_supported_cpuid)(u32 func, struct kvm_cpuid_entry2 *entry);
+
const struct trace_print_flags *exit_reasons_str;
};

diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 4604e6a..d86da72 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -199,8 +199,9 @@
#define MSR_IA32_EBL_CR_POWERON 0x0000002a
#define MSR_IA32_FEATURE_CONTROL 0x0000003a

-#define FEATURE_CONTROL_LOCKED (1<<0)
-#define FEATURE_CONTROL_VMXON_ENABLED (1<<2)
+#define FEATURE_CONTROL_LOCKED (1<<0)
+#define FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX (1<<1)
+#define FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX (1<<2)

#define MSR_IA32_APICBASE 0x0000001b
#define MSR_IA32_APICBASE_BSP (1<<8)
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index f854d89..29e5e6e 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -1420,6 +1420,7 @@ static int __attach_device(struct device *dev,
struct protection_domain *domain)
{
struct iommu_dev_data *dev_data, *alias_data;
+ int ret;

dev_data = get_dev_data(dev);
alias_data = get_dev_data(dev_data->alias);
@@ -1431,13 +1432,14 @@ static int __attach_device(struct device *dev,
spin_lock(&domain->lock);

/* Some sanity checks */
+ ret = -EBUSY;
if (alias_data->domain != NULL &&
alias_data->domain != domain)
- return -EBUSY;
+ goto out_unlock;

if (dev_data->domain != NULL &&
dev_data->domain != domain)
- return -EBUSY;
+ goto out_unlock;

/* Do real assignment */
if (dev_data->alias != dev) {
@@ -1453,10 +1455,14 @@ static int __attach_device(struct device *dev,

atomic_inc(&dev_data->bind);

+ ret = 0;
+
+out_unlock:
+
/* ready */
spin_unlock(&domain->lock);

- return 0;
+ return ret;
}

/*
@@ -2257,10 +2263,6 @@ int __init amd_iommu_init_dma_ops(void)

iommu_detected = 1;
swiotlb = 0;
-#ifdef CONFIG_GART_IOMMU
- gart_iommu_aperture_disabled = 1;
- gart_iommu_aperture = 0;
-#endif

/* Make the driver finally visible to the drivers */
dma_ops = &amd_iommu_dma_ops;
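For context, the __attach_device() change above adopts the usual single-exit locking idiom: pre-load the error code, send every failure to one label, and only drop domain->lock there, rather than returning -EBUSY with the lock still held. A hedged sketch of the idiom (names and includes are illustrative, not from the driver):

#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/types.h>

static int attach_sketch(spinlock_t *lock, bool already_bound)
{
	int ret;

	spin_lock(lock);

	/* every sanity-check failure takes the same unlock path */
	ret = -EBUSY;
	if (already_bound)
		goto out_unlock;

	/* ... the real assignment work would go here ... */
	ret = 0;

out_unlock:
	spin_unlock(lock);
	return ret;
}
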
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index 6360abf..6f8ce75 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -286,8 +286,12 @@ static u8 * __init iommu_map_mmio_space(u64 address)
{
u8 *ret;

- if (!request_mem_region(address, MMIO_REGION_LENGTH, "amd_iommu"))
+ if (!request_mem_region(address, MMIO_REGION_LENGTH, "amd_iommu")) {
+ pr_err("AMD-Vi: Can not reserve memory region %llx for mmio\n",
+ address);
+ pr_err("AMD-Vi: This is a BIOS bug. Please contact your hardware vendor\n");
return NULL;
+ }

ret = ioremap_nocache(address, MMIO_REGION_LENGTH);
if (ret != NULL)
@@ -1313,7 +1317,7 @@ static int __init amd_iommu_init(void)
ret = amd_iommu_init_dma_ops();

if (ret)
- goto free;
+ goto free_disable;

amd_iommu_init_api();

@@ -1331,9 +1335,10 @@ static int __init amd_iommu_init(void)
out:
return ret;

-free:
+free_disable:
disable_iommus();

+free:
amd_iommu_uninit_devices();

free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
@@ -1352,6 +1357,15 @@ free:

free_unity_maps();

+#ifdef CONFIG_GART_IOMMU
+ /*
+ * We failed to initialize the AMD IOMMU - try fallback to GART
+ * if possible.
+ */
+ gart_iommu_init();
+
+#endif
+
goto out;
}

diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index db5bdc8..c5e8b53 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -460,8 +460,11 @@ static int __hw_perf_event_init(struct perf_event *event)
if (atomic_read(&active_events) == 0) {
if (!reserve_pmc_hardware())
err = -EBUSY;
- else
+ else {
err = reserve_bts_hardware();
+ if (err)
+ release_pmc_hardware();
+ }
}
if (!err)
atomic_inc(&active_events);
diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
index 03801f2..dfdfe46 100644
--- a/arch/x86/kernel/pvclock.c
+++ b/arch/x86/kernel/pvclock.c
@@ -109,11 +109,14 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
return pv_tsc_khz;
}

+static atomic64_t last_value = ATOMIC64_INIT(0);
+
cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
{
struct pvclock_shadow_time shadow;
unsigned version;
cycle_t ret, offset;
+ u64 last;

do {
version = pvclock_get_time_values(&shadow, src);
@@ -123,6 +126,27 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
barrier();
} while (version != src->version);

+ /*
+ * The assumption here is that last_value, a global accumulator, always goes
+ * forward. If we are less than that, we should not be much smaller.
+ * We assume there is an error margin we're inside, and then the correction
+ * does not sacrifice accuracy.
+ *
+ * For reads: global may have changed between test and return,
+ * but this means someone else poked the clock at a later time.
+ * We just need to make sure we are not seeing a backwards event.
+ *
+ * For updates: last_value = ret is not enough, since two vcpus could be
+ * updating at the same time, and one of them could be slightly behind,
+ * making the assumption that last_value always goes forward fail to hold.
+ */
+ last = atomic64_read(&last_value);
+ do {
+ if (ret < last)
+ return last;
+ last = atomic64_cmpxchg(&last_value, last, ret);
+ } while (unlikely(last != ret));
+
return ret;
}

diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index c4851ef..47ae912 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -676,6 +676,17 @@ static struct dmi_system_id __initdata bad_bios_dmi_table[] = {
DMI_MATCH(DMI_BOARD_NAME, "DG45FC"),
},
},
+ /*
+ * The Dell Inspiron Mini 1012 has DMI_BIOS_VENDOR = "Dell Inc.", so
+ * match on the product name.
+ */
+ {
+ .callback = dmi_low_memory_corruption,
+ .ident = "Phoenix BIOS",
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1012"),
+ },
+ },
#endif
{}
};
diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
index 86c9f91..46b8277 100644
--- a/arch/x86/kernel/tboot.c
+++ b/arch/x86/kernel/tboot.c
@@ -46,6 +46,7 @@

/* Global pointer to shared data; NULL means no measured launch. */
struct tboot *tboot __read_mostly;
+EXPORT_SYMBOL(tboot);

/* timeout for APs (in secs) to enter wait-for-SIPI state during shutdown */
#define AP_WAIT_TIMEOUT 1
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 19a8906..62fd8e6 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -223,7 +223,7 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);

-static int is_write_protection(struct kvm_vcpu *vcpu)
+static bool is_write_protection(struct kvm_vcpu *vcpu)
{
return kvm_read_cr0_bits(vcpu, X86_CR0_WP);
}
@@ -2085,11 +2085,13 @@ static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
direct = 1;
if (mmu_check_root(vcpu, root_gfn))
return 1;
+ spin_lock(&vcpu->kvm->mmu_lock);
sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
PT64_ROOT_LEVEL, direct,
ACC_ALL, NULL);
root = __pa(sp->spt);
++sp->root_count;
+ spin_unlock(&vcpu->kvm->mmu_lock);
vcpu->arch.mmu.root_hpa = root;
return 0;
}
@@ -2111,11 +2113,14 @@ static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
root_gfn = 0;
if (mmu_check_root(vcpu, root_gfn))
return 1;
+ spin_lock(&vcpu->kvm->mmu_lock);
sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
PT32_ROOT_LEVEL, direct,
ACC_ALL, NULL);
root = __pa(sp->spt);
++sp->root_count;
+ spin_unlock(&vcpu->kvm->mmu_lock);
+
vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
}
vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
@@ -2439,6 +2444,7 @@ static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
r = paging32_init_context(vcpu);

vcpu->arch.mmu.base_role.glevels = vcpu->arch.mmu.root_level;
+ vcpu->arch.mmu.base_role.cr0_wp = is_write_protection(vcpu);

return r;
}
@@ -2478,7 +2484,9 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
goto out;
spin_lock(&vcpu->kvm->mmu_lock);
kvm_mmu_free_some_pages(vcpu);
+ spin_unlock(&vcpu->kvm->mmu_lock);
r = mmu_alloc_roots(vcpu);
+ spin_lock(&vcpu->kvm->mmu_lock);
mmu_sync_roots(vcpu);
spin_unlock(&vcpu->kvm->mmu_lock);
if (r)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 737361f..1185b55 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -129,6 +129,7 @@ static void svm_flush_tlb(struct kvm_vcpu *vcpu);
static void svm_complete_interrupts(struct vcpu_svm *svm);

static int nested_svm_exit_handled(struct vcpu_svm *svm);
+static int nested_svm_intercept(struct vcpu_svm *svm);
static int nested_svm_vmexit(struct vcpu_svm *svm);
static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
bool has_error_code, u32 error_code);
@@ -1384,6 +1385,8 @@ static int nested_svm_check_permissions(struct vcpu_svm *svm)
static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
bool has_error_code, u32 error_code)
{
+ int vmexit;
+
if (!is_nested(svm))
return 0;

@@ -1392,19 +1395,24 @@ static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
svm->vmcb->control.exit_info_1 = error_code;
svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;

- return nested_svm_exit_handled(svm);
+ vmexit = nested_svm_intercept(svm);
+ if (vmexit == NESTED_EXIT_DONE)
+ svm->nested.exit_required = true;
+
+ return vmexit;
}

-static inline int nested_svm_intr(struct vcpu_svm *svm)
+/* This function returns true if it is safe to enable the irq window */
+static inline bool nested_svm_intr(struct vcpu_svm *svm)
{
if (!is_nested(svm))
- return 0;
+ return true;

if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
- return 0;
+ return true;

if (!(svm->vcpu.arch.hflags & HF_HIF_MASK))
- return 0;
+ return false;

svm->vmcb->control.exit_code = SVM_EXIT_INTR;

@@ -1417,13 +1425,13 @@ static inline int nested_svm_intr(struct vcpu_svm *svm)
*/
svm->nested.exit_required = true;
trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
- return 1;
+ return false;
}

- return 0;
+ return true;
}

-static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, enum km_type idx)
+static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, struct page **_page)
{
struct page *page;

@@ -1431,7 +1439,9 @@ static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, enum km_type idx)
if (is_error_page(page))
goto error;

- return kmap_atomic(page, idx);
+ *_page = page;
+
+ return kmap(page);

error:
kvm_release_page_clean(page);
@@ -1440,16 +1450,9 @@ error:
return NULL;
}

-static void nested_svm_unmap(void *addr, enum km_type idx)
+static void nested_svm_unmap(struct page *page)
{
- struct page *page;
-
- if (!addr)
- return;
-
- page = kmap_atomic_to_page(addr);
-
- kunmap_atomic(addr, idx);
+ kunmap(page);
kvm_release_page_dirty(page);
}

@@ -1459,16 +1462,11 @@ static bool nested_svm_exit_handled_msr(struct vcpu_svm *svm)
u32 msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
bool ret = false;
u32 t0, t1;
- u8 *msrpm;
+ u8 val;

if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
return false;

- msrpm = nested_svm_map(svm, svm->nested.vmcb_msrpm, KM_USER0);
-
- if (!msrpm)
- goto out;
-
switch (msr) {
case 0 ... 0x1fff:
t0 = (msr * 2) % 8;
@@ -1489,11 +1487,10 @@ static bool nested_svm_exit_handled_msr(struct vcpu_svm *svm)
goto out;
}

- ret = msrpm[t1] & ((1 << param) << t0);
+ if (!kvm_read_guest(svm->vcpu.kvm, svm->nested.vmcb_msrpm + t1, &val, 1))
+ ret = val & ((1 << param) << t0);

out:
- nested_svm_unmap(msrpm, KM_USER0);
-
return ret;
}

@@ -1525,7 +1522,7 @@ static int nested_svm_exit_special(struct vcpu_svm *svm)
/*
* If this function returns true, this #vmexit was already handled
*/
-static int nested_svm_exit_handled(struct vcpu_svm *svm)
+static int nested_svm_intercept(struct vcpu_svm *svm)
{
u32 exit_code = svm->vmcb->control.exit_code;
int vmexit = NESTED_EXIT_HOST;
@@ -1571,9 +1568,17 @@ static int nested_svm_exit_handled(struct vcpu_svm *svm)
}
}

- if (vmexit == NESTED_EXIT_DONE) {
+ return vmexit;
+}
+
+static int nested_svm_exit_handled(struct vcpu_svm *svm)
+{
+ int vmexit;
+
+ vmexit = nested_svm_intercept(svm);
+
+ if (vmexit == NESTED_EXIT_DONE)
nested_svm_vmexit(svm);
- }

return vmexit;
}
@@ -1615,6 +1620,7 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
struct vmcb *nested_vmcb;
struct vmcb *hsave = svm->nested.hsave;
struct vmcb *vmcb = svm->vmcb;
+ struct page *page;

trace_kvm_nested_vmexit_inject(vmcb->control.exit_code,
vmcb->control.exit_info_1,
@@ -1622,7 +1628,7 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
vmcb->control.exit_int_info,
vmcb->control.exit_int_info_err);

- nested_vmcb = nested_svm_map(svm, svm->nested.vmcb, KM_USER0);
+ nested_vmcb = nested_svm_map(svm, svm->nested.vmcb, &page);
if (!nested_vmcb)
return 1;

@@ -1635,9 +1641,13 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
nested_vmcb->save.ds = vmcb->save.ds;
nested_vmcb->save.gdtr = vmcb->save.gdtr;
nested_vmcb->save.idtr = vmcb->save.idtr;
+ nested_vmcb->save.cr0 = kvm_read_cr0(&svm->vcpu);
if (npt_enabled)
nested_vmcb->save.cr3 = vmcb->save.cr3;
+ else
+ nested_vmcb->save.cr3 = svm->vcpu.arch.cr3;
nested_vmcb->save.cr2 = vmcb->save.cr2;
+ nested_vmcb->save.cr4 = svm->vcpu.arch.cr4;
nested_vmcb->save.rflags = vmcb->save.rflags;
nested_vmcb->save.rip = vmcb->save.rip;
nested_vmcb->save.rsp = vmcb->save.rsp;
@@ -1712,7 +1722,7 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
/* Exit nested SVM mode */
svm->nested.vmcb = 0;

- nested_svm_unmap(nested_vmcb, KM_USER0);
+ nested_svm_unmap(page);

kvm_mmu_reset_context(&svm->vcpu);
kvm_mmu_load(&svm->vcpu);
@@ -1723,9 +1733,10 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
{
u32 *nested_msrpm;
+ struct page *page;
int i;

- nested_msrpm = nested_svm_map(svm, svm->nested.vmcb_msrpm, KM_USER0);
+ nested_msrpm = nested_svm_map(svm, svm->nested.vmcb_msrpm, &page);
if (!nested_msrpm)
return false;

@@ -1734,7 +1745,7 @@ static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)

svm->vmcb->control.msrpm_base_pa = __pa(svm->nested.msrpm);

- nested_svm_unmap(nested_msrpm, KM_USER0);
+ nested_svm_unmap(page);

return true;
}
@@ -1744,8 +1755,9 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
struct vmcb *nested_vmcb;
struct vmcb *hsave = svm->nested.hsave;
struct vmcb *vmcb = svm->vmcb;
+ struct page *page;

- nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, KM_USER0);
+ nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
if (!nested_vmcb)
return false;

@@ -1819,21 +1831,6 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
svm->vmcb->save.dr6 = nested_vmcb->save.dr6;
svm->vmcb->save.cpl = nested_vmcb->save.cpl;

- /* We don't want a nested guest to be more powerful than the guest,
- so all intercepts are ORed */
- svm->vmcb->control.intercept_cr_read |=
- nested_vmcb->control.intercept_cr_read;
- svm->vmcb->control.intercept_cr_write |=
- nested_vmcb->control.intercept_cr_write;
- svm->vmcb->control.intercept_dr_read |=
- nested_vmcb->control.intercept_dr_read;
- svm->vmcb->control.intercept_dr_write |=
- nested_vmcb->control.intercept_dr_write;
- svm->vmcb->control.intercept_exceptions |=
- nested_vmcb->control.intercept_exceptions;
-
- svm->vmcb->control.intercept |= nested_vmcb->control.intercept;
-
svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa;

/* cache intercepts */
@@ -1851,13 +1848,38 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
else
svm->vcpu.arch.hflags &= ~HF_VINTR_MASK;

+ if (svm->vcpu.arch.hflags & HF_VINTR_MASK) {
+ /* We only want the cr8 intercept bits of the guest */
+ svm->vmcb->control.intercept_cr_read &= ~INTERCEPT_CR8_MASK;
+ svm->vmcb->control.intercept_cr_write &= ~INTERCEPT_CR8_MASK;
+ }
+
+ /* We don't want to see VMMCALLs from a nested guest */
+ svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMMCALL);
+
+ /* We don't want a nested guest to be more powerful than the guest,
+ so all intercepts are ORed */
+ svm->vmcb->control.intercept_cr_read |=
+ nested_vmcb->control.intercept_cr_read;
+ svm->vmcb->control.intercept_cr_write |=
+ nested_vmcb->control.intercept_cr_write;
+ svm->vmcb->control.intercept_dr_read |=
+ nested_vmcb->control.intercept_dr_read;
+ svm->vmcb->control.intercept_dr_write |=
+ nested_vmcb->control.intercept_dr_write;
+ svm->vmcb->control.intercept_exceptions |=
+ nested_vmcb->control.intercept_exceptions;
+
+ svm->vmcb->control.intercept |= nested_vmcb->control.intercept;
+
+ svm->vmcb->control.lbr_ctl = nested_vmcb->control.lbr_ctl;
svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
svm->vmcb->control.int_state = nested_vmcb->control.int_state;
svm->vmcb->control.tsc_offset += nested_vmcb->control.tsc_offset;
svm->vmcb->control.event_inj = nested_vmcb->control.event_inj;
svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err;

- nested_svm_unmap(nested_vmcb, KM_USER0);
+ nested_svm_unmap(page);

enable_gif(svm);

@@ -1883,6 +1905,7 @@ static void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
static int vmload_interception(struct vcpu_svm *svm)
{
struct vmcb *nested_vmcb;
+ struct page *page;

if (nested_svm_check_permissions(svm))
return 1;
@@ -1890,12 +1913,12 @@ static int vmload_interception(struct vcpu_svm *svm)
svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
skip_emulated_instruction(&svm->vcpu);

- nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, KM_USER0);
+ nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
if (!nested_vmcb)
return 1;

nested_svm_vmloadsave(nested_vmcb, svm->vmcb);
- nested_svm_unmap(nested_vmcb, KM_USER0);
+ nested_svm_unmap(page);

return 1;
}
@@ -1903,6 +1926,7 @@ static int vmload_interception(struct vcpu_svm *svm)
static int vmsave_interception(struct vcpu_svm *svm)
{
struct vmcb *nested_vmcb;
+ struct page *page;

if (nested_svm_check_permissions(svm))
return 1;
@@ -1910,12 +1934,12 @@ static int vmsave_interception(struct vcpu_svm *svm)
svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
skip_emulated_instruction(&svm->vcpu);

- nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, KM_USER0);
+ nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
if (!nested_vmcb)
return 1;

nested_svm_vmloadsave(svm->vmcb, nested_vmcb);
- nested_svm_unmap(nested_vmcb, KM_USER0);
+ nested_svm_unmap(page);

return 1;
}
@@ -2511,6 +2535,9 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
{
struct vcpu_svm *svm = to_svm(vcpu);

+ if (is_nested(svm) && (vcpu->arch.hflags & HF_VINTR_MASK))
+ return;
+
if (irr == -1)
return;

@@ -2568,13 +2595,11 @@ static void enable_irq_window(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);

- nested_svm_intr(svm);
-
/* In case GIF=0 we can't rely on the CPU to tell us when
* GIF becomes 1, because that's a separate STGI/VMRUN intercept.
* The next time we get that intercept, this function will be
* called again though and we'll get the vintr intercept. */
- if (gif_set(svm)) {
+ if (gif_set(svm) && nested_svm_intr(svm)) {
svm_set_vintr(svm);
svm_inject_irq(svm, 0x0);
}
@@ -2614,6 +2639,9 @@ static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);

+ if (is_nested(svm) && (vcpu->arch.hflags & HF_VINTR_MASK))
+ return;
+
if (!(svm->vmcb->control.intercept_cr_write & INTERCEPT_CR8_MASK)) {
int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
kvm_set_cr8(vcpu, cr8);
@@ -2625,6 +2653,9 @@ static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
struct vcpu_svm *svm = to_svm(vcpu);
u64 cr8;

+ if (is_nested(svm) && (vcpu->arch.hflags & HF_VINTR_MASK))
+ return;
+
cr8 = kvm_get_cr8(vcpu);
svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
@@ -2879,6 +2910,20 @@ static void svm_cpuid_update(struct kvm_vcpu *vcpu)
{
}

+static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
+{
+ switch (func) {
+ case 0x8000000A:
+ entry->eax = 1; /* SVM revision 1 */
+ entry->ebx = 8; /* Let's support 8 ASIDs in case we add proper
+ ASID emulation to nested SVM */
+ entry->ecx = 0; /* Reserved */
+ entry->edx = 0; /* Do not support any additional features */
+
+ break;
+ }
+}
+
static const struct trace_print_flags svm_exit_reasons_str[] = {
{ SVM_EXIT_READ_CR0, "read_cr0" },
{ SVM_EXIT_READ_CR3, "read_cr3" },
@@ -3023,6 +3068,8 @@ static struct kvm_x86_ops svm_x86_ops = {
.cpuid_update = svm_cpuid_update,

.rdtscp_supported = svm_rdtscp_supported,
+
+ .set_supported_cpuid = svm_set_supported_cpuid,
};

static int __init svm_init(void)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 2f8db0e..3c86c42 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -27,6 +27,7 @@
#include <linux/moduleparam.h>
#include <linux/ftrace_event.h>
#include <linux/slab.h>
+#include <linux/tboot.h>
#include "kvm_cache_regs.h"
#include "x86.h"

@@ -1176,9 +1177,16 @@ static __init int vmx_disabled_by_bios(void)
u64 msr;

rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
- return (msr & (FEATURE_CONTROL_LOCKED |
- FEATURE_CONTROL_VMXON_ENABLED))
- == FEATURE_CONTROL_LOCKED;
+ if (msr & FEATURE_CONTROL_LOCKED) {
+ if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX)
+ && tboot_enabled())
+ return 1;
+ if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX)
+ && !tboot_enabled())
+ return 1;
+ }
+
+ return 0;
/* locked but not enabled */
}

@@ -1186,21 +1194,23 @@ static int hardware_enable(void *garbage)
{
int cpu = raw_smp_processor_id();
u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
- u64 old;
+ u64 old, test_bits;

if (read_cr4() & X86_CR4_VMXE)
return -EBUSY;

INIT_LIST_HEAD(&per_cpu(vcpus_on_cpu, cpu));
rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
- if ((old & (FEATURE_CONTROL_LOCKED |
- FEATURE_CONTROL_VMXON_ENABLED))
- != (FEATURE_CONTROL_LOCKED |
- FEATURE_CONTROL_VMXON_ENABLED))
+
+ test_bits = FEATURE_CONTROL_LOCKED;
+ test_bits |= FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
+ if (tboot_enabled())
+ test_bits |= FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX;
+
+ if ((old & test_bits) != test_bits) {
/* enable and lock */
- wrmsrl(MSR_IA32_FEATURE_CONTROL, old |
- FEATURE_CONTROL_LOCKED |
- FEATURE_CONTROL_VMXON_ENABLED);
+ wrmsrl(MSR_IA32_FEATURE_CONTROL, old | test_bits);
+ }
write_cr4(read_cr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */
asm volatile (ASM_VMX_VMXON_RAX
: : "a"(&phys_addr), "m"(phys_addr)
@@ -4115,6 +4125,10 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
}
}

+static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
+{
+}
+
static struct kvm_x86_ops vmx_x86_ops = {
.cpu_has_kvm_support = cpu_has_kvm_support,
.disabled_by_bios = vmx_disabled_by_bios,
@@ -4186,6 +4200,8 @@ static struct kvm_x86_ops vmx_x86_ops = {
.cpuid_update = vmx_cpuid_update,

.rdtscp_supported = vmx_rdtscp_supported,
+
+ .set_supported_cpuid = vmx_set_supported_cpuid,
};

static int __init vmx_init(void)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index c4f35b5..a6517a2 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -484,7 +484,7 @@ EXPORT_SYMBOL_GPL(kvm_set_cr0);

void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
- kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0ful) | (msw & 0x0f));
+ kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f));
}
EXPORT_SYMBOL_GPL(kvm_lmsw);

@@ -624,48 +624,42 @@ static u32 emulated_msrs[] = {
MSR_IA32_MISC_ENABLE,
};

-static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
+static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
- if (efer & efer_reserved_bits) {
- kvm_inject_gp(vcpu, 0);
- return;
- }
+ if (efer & efer_reserved_bits)
+ return 1;

if (is_paging(vcpu)
- && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) {
- kvm_inject_gp(vcpu, 0);
- return;
- }
+ && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
+ return 1;

if (efer & EFER_FFXSR) {
struct kvm_cpuid_entry2 *feat;

feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
- if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) {
- kvm_inject_gp(vcpu, 0);
- return;
- }
+ if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT)))
+ return 1;
}

if (efer & EFER_SVME) {
struct kvm_cpuid_entry2 *feat;

feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
- if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) {
- kvm_inject_gp(vcpu, 0);
- return;
- }
+ if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM)))
+ return 1;
}

- kvm_x86_ops->set_efer(vcpu, efer);
-
efer &= ~EFER_LMA;
efer |= vcpu->arch.efer & EFER_LMA;

+ kvm_x86_ops->set_efer(vcpu, efer);
+
vcpu->arch.efer = efer;

vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled;
kvm_mmu_reset_context(vcpu);
+
+ return 0;
}

void kvm_enable_efer_bits(u64 mask)
@@ -695,14 +689,22 @@ static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)

static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
{
- static int version;
+ int version;
+ int r;
struct pvclock_wall_clock wc;
struct timespec boot;

if (!wall_clock)
return;

- version++;
+ r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version));
+ if (r)
+ return;
+
+ if (version & 1)
+ ++version; /* first time write, random junk */
+
+ ++version;

kvm_write_guest(kvm, wall_clock, &version, sizeof(version));

@@ -1086,8 +1088,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
switch (msr) {
case MSR_EFER:
- set_efer(vcpu, data);
- break;
+ return set_efer(vcpu, data);
case MSR_K7_HWCR:
data &= ~(u64)0x40; /* ignore flush filter disable */
if (data != 0) {
@@ -1768,6 +1769,7 @@ static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
{
int r;

+ vcpu_load(vcpu);
r = -E2BIG;
if (cpuid->nent < vcpu->arch.cpuid_nent)
goto out;
@@ -1779,6 +1781,7 @@ static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,

out:
cpuid->nent = vcpu->arch.cpuid_nent;
+ vcpu_put(vcpu);
return r;
}

@@ -1917,6 +1920,9 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
entry->ecx &= kvm_supported_word6_x86_features;
break;
}
+
+ kvm_x86_ops->set_supported_cpuid(function, entry);
+
put_cpu();
}

@@ -2031,6 +2037,7 @@ static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
int r;
unsigned bank_num = mcg_cap & 0xff, bank;

+ vcpu_load(vcpu);
r = -EINVAL;
if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS)
goto out;
@@ -2045,6 +2052,7 @@ static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
for (bank = 0; bank < bank_num; bank++)
vcpu->arch.mce_banks[bank*4] = ~(u64)0;
out:
+ vcpu_put(vcpu);
return r;
}

@@ -2312,7 +2320,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
r = -EFAULT;
if (copy_from_user(&mce, argp, sizeof mce))
goto out;
+ vcpu_load(vcpu);
r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce);
+ vcpu_put(vcpu);
break;
}
case KVM_GET_VCPU_EVENTS: {
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index 2c505ee..f1fb411 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -95,7 +95,10 @@ static void nmi_cpu_save_registers(struct op_msrs *msrs)
static void nmi_cpu_start(void *dummy)
{
struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
- model->start(msrs);
+ if (!msrs->controls)
+ WARN_ON_ONCE(1);
+ else
+ model->start(msrs);
}

static int nmi_start(void)
@@ -107,7 +110,10 @@ static int nmi_start(void)
static void nmi_cpu_stop(void *dummy)
{
struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
- model->stop(msrs);
+ if (!msrs->controls)
+ WARN_ON_ONCE(1);
+ else
+ model->stop(msrs);
}

static void nmi_stop(void)
diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c
index 987267f..a9c6611 100644
--- a/arch/x86/xen/suspend.c
+++ b/arch/x86/xen/suspend.c
@@ -60,6 +60,6 @@ static void xen_vcpu_notify_restore(void *data)

void xen_arch_resume(void)
{
- smp_call_function(xen_vcpu_notify_restore,
- (void *)CLOCK_EVT_NOTIFY_RESUME, 1);
+ on_each_cpu(xen_vcpu_notify_restore,
+ (void *)CLOCK_EVT_NOTIFY_RESUME, 1);
}
diff --git a/arch/xtensa/include/asm/cache.h b/arch/xtensa/include/asm/cache.h
index f04c989..ed8cd3c 100644
--- a/arch/xtensa/include/asm/cache.h
+++ b/arch/xtensa/include/asm/cache.h
@@ -29,5 +29,6 @@
# define CACHE_WAY_SIZE ICACHE_WAY_SIZE
#endif

+#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES

#endif /* _XTENSA_CACHE_H */
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 5f127cf..002c0ce 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -2503,15 +2503,10 @@ static void cfq_free_io_context(struct io_context *ioc)
__call_for_each_cic(ioc, cic_free_func);
}

-static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+static void cfq_put_cooperator(struct cfq_queue *cfqq)
{
struct cfq_queue *__cfqq, *next;

- if (unlikely(cfqq == cfqd->active_queue)) {
- __cfq_slice_expired(cfqd, cfqq, 0);
- cfq_schedule_dispatch(cfqd);
- }
-
/*
* If this queue was scheduled to merge with another queue, be
* sure to drop the reference taken on that queue (and others in
@@ -2527,6 +2522,16 @@ static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
cfq_put_queue(__cfqq);
__cfqq = next;
}
+}
+
+static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+ if (unlikely(cfqq == cfqd->active_queue)) {
+ __cfq_slice_expired(cfqd, cfqq, 0);
+ cfq_schedule_dispatch(cfqd);
+ }
+
+ cfq_put_cooperator(cfqq);

cfq_put_queue(cfqq);
}
@@ -3470,6 +3475,9 @@ split_cfqq(struct cfq_io_context *cic, struct cfq_queue *cfqq)
}

cic_set_cfqq(cic, NULL, 1);
+
+ cfq_put_cooperator(cfqq);
+
cfq_put_queue(cfqq);
return NULL;
}
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index fc2f26b..c5fef01 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -250,7 +250,7 @@ static int __init acpi_backlight(char *str)
ACPI_VIDEO_BACKLIGHT_FORCE_VENDOR;
if (!strcmp("video", str))
acpi_video_support |=
- ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VIDEO;
+ ACPI_VIDEO_BACKLIGHT_FORCE_VIDEO;
}
return 1;
}
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 49cffb6..5abab5d 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -160,6 +160,10 @@ int libata_allow_tpm = 0;
module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");

+static int atapi_an;
+module_param(atapi_an, int, 0444);
+MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=off [default], 1=on)");
+
MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
@@ -2572,7 +2576,8 @@ int ata_dev_configure(struct ata_device *dev)
* to enable ATAPI AN to discern between PHY status
* changed notifications and ATAPI ANs.
*/
- if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
+ if (atapi_an &&
+ (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
(!sata_pmp_attached(ap) ||
sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
unsigned int err_mask;
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index e3877b6..4723648 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -894,7 +894,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
do_write);
}

- if (!do_write)
+ if (!do_write && !PageSlab(page))
flush_dcache_page(page);

qc->curbytes += qc->sect_size;
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index 2a98b09..7f3d179 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -1674,7 +1674,6 @@ static void nv_mcp55_freeze(struct ata_port *ap)
mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
mask &= ~(NV_INT_ALL_MCP55 << shift);
writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
- ata_sff_freeze(ap);
}

static void nv_mcp55_thaw(struct ata_port *ap)
@@ -1688,7 +1687,6 @@ static void nv_mcp55_thaw(struct ata_port *ap)
mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
mask |= (NV_INT_MASK_MCP55 << shift);
writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
- ata_sff_thaw(ap);
}

static void nv_adma_error_handler(struct ata_port *ap)
@@ -2479,8 +2477,7 @@ static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}

pci_set_master(pdev);
- return ata_host_activate(host, pdev->irq, ipriv->irq_handler,
- IRQF_SHARED, ipriv->sht);
+ return ata_pci_sff_activate_host(host, ipriv->irq_handler, ipriv->sht);
}

#ifdef CONFIG_PM
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
index 08f6549..0553455 100644
--- a/drivers/ata/sata_via.c
+++ b/drivers/ata/sata_via.c
@@ -575,6 +575,19 @@ static void svia_configure(struct pci_dev *pdev)
tmp8 |= NATIVE_MODE_ALL;
pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8);
}
+
+ /*
+ * vt6421 has problems talking to some drives. The following
+ * is the magic fix from Joseph Chan <JosephChan@xxxxxxxxxx>.
+ * Please add proper documentation if possible.
+ *
+ * https://bugzilla.kernel.org/show_bug.cgi?id=15173
+ */
+ if (pdev->device == 0x3249) {
+ pci_read_config_byte(pdev, 0x52, &tmp8);
+ tmp8 |= 1 << 2;
+ pci_write_config_byte(pdev, 0x52, tmp8);
+ }
}

static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index f35719a..251acea 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -186,7 +186,7 @@ static ssize_t print_cpus_offline(struct sysdev_class *class,
/* display offline cpus < nr_cpu_ids */
if (!alloc_cpumask_var(&offline, GFP_KERNEL))
return -ENOMEM;
- cpumask_complement(offline, cpu_online_mask);
+ cpumask_andnot(offline, cpu_possible_mask, cpu_online_mask);
n = cpulist_scnprintf(buf, len, offline);
free_cpumask_var(offline);

diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 4462b11..9ead05d 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -314,9 +314,14 @@ static void deliver_recv_msg(struct smi_info *smi_info,
{
/* Deliver the message to the upper layer with the lock
released. */
- spin_unlock(&(smi_info->si_lock));
- ipmi_smi_msg_received(smi_info->intf, msg);
- spin_lock(&(smi_info->si_lock));
+
+ if (smi_info->run_to_completion) {
+ ipmi_smi_msg_received(smi_info->intf, msg);
+ } else {
+ spin_unlock(&(smi_info->si_lock));
+ ipmi_smi_msg_received(smi_info->intf, msg);
+ spin_lock(&(smi_info->si_lock));
+ }
}

static void return_hosed_msg(struct smi_info *smi_info, int cCode)
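
The rule applied here is the usual one for handing a message to an upper layer: drop the spinlock around the callback, since the upper layer may call back into the driver and take the same lock, except in run-to-completion (e.g. panic) context where nothing else runs and the lock simply stays held. A loose userspace analogue with a pthread mutex (the names are illustrative, not the driver's API):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t si_lock = PTHREAD_MUTEX_INITIALIZER;
static bool run_to_completion;

static void msg_received(int msg)
{
        /* "upper layer": may re-enter the driver and take si_lock */
        printf("delivered %d\n", msg);
}

static void deliver(int msg)
{
        if (run_to_completion) {
                msg_received(msg);              /* single-threaded: keep the lock */
        } else {
                pthread_mutex_unlock(&si_lock); /* avoid deadlock on re-entry */
                msg_received(msg);
                pthread_mutex_lock(&si_lock);
        }
}

int main(void)
{
        pthread_mutex_lock(&si_lock);
        deliver(42);
        pthread_mutex_unlock(&si_lock);
        return 0;
}
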
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index 744f748..a860ec0 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -413,18 +413,10 @@ static cycle_t sh_cmt_clocksource_read(struct clocksource *cs)
static int sh_cmt_clocksource_enable(struct clocksource *cs)
{
struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
- int ret;

p->total_cycles = 0;

- ret = sh_cmt_start(p, FLAG_CLOCKSOURCE);
- if (ret)
- return ret;
-
- /* TODO: calculate good shift from rate and counter bit width */
- cs->shift = 0;
- cs->mult = clocksource_hz2mult(p->rate, cs->shift);
- return 0;
+ return sh_cmt_start(p, FLAG_CLOCKSOURCE);
}

static void sh_cmt_clocksource_disable(struct clocksource *cs)
@@ -451,7 +443,18 @@ static int sh_cmt_register_clocksource(struct sh_cmt_priv *p,
cs->resume = sh_cmt_clocksource_resume;
cs->mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8);
cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
+
+ /* clk_get_rate() needs an enabled clock */
+ clk_enable(p->clk);
+ p->rate = clk_get_rate(p->clk) / (p->width == 16) ? 512 : 8;
+ clk_disable(p->clk);
+
+ /* TODO: calculate good shift from rate and counter bit width */
+ cs->shift = 10;
+ cs->mult = clocksource_hz2mult(p->rate, cs->shift);
+
pr_info("sh_cmt: %s used as clock source\n", cs->name);
+
clocksource_register(cs);
return 0;
}
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index fc9ff1e..7a24160 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -200,16 +200,8 @@ static cycle_t sh_tmu_clocksource_read(struct clocksource *cs)
static int sh_tmu_clocksource_enable(struct clocksource *cs)
{
struct sh_tmu_priv *p = cs_to_sh_tmu(cs);
- int ret;
-
- ret = sh_tmu_enable(p);
- if (ret)
- return ret;

- /* TODO: calculate good shift from rate and counter bit width */
- cs->shift = 10;
- cs->mult = clocksource_hz2mult(p->rate, cs->shift);
- return 0;
+ return sh_tmu_enable(p);
}

static void sh_tmu_clocksource_disable(struct clocksource *cs)
@@ -229,6 +221,16 @@ static int sh_tmu_register_clocksource(struct sh_tmu_priv *p,
cs->disable = sh_tmu_clocksource_disable;
cs->mask = CLOCKSOURCE_MASK(32);
cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
+
+ /* clk_get_rate() needs an enabled clock */
+ clk_enable(p->clk);
+ /* channel will be configured at parent clock / 4 */
+ p->rate = clk_get_rate(p->clk) / 4;
+ clk_disable(p->clk);
+ /* TODO: calculate good shift from rate and counter bit width */
+ cs->shift = 10;
+ cs->mult = clocksource_hz2mult(p->rate, cs->shift);
+
pr_info("sh_tmu: %s used as clock source\n", cs->name);
clocksource_register(cs);
return 0;
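
Both the sh_cmt and sh_tmu hunks move the mult/shift computation to registration time, where the clock rate can actually be read. The scaling itself is the standard clocksource scheme: choose mult so that ns = (cycles * mult) >> shift. A rough userspace model (the real clocksource_hz2mult() also rounds, and the rate below is only an example value):

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

static uint32_t hz2mult(uint32_t hz, uint32_t shift)
{
        /* pick mult so that ns = (cycles * mult) >> shift */
        return (uint32_t)(((uint64_t)NSEC_PER_SEC << shift) / hz);
}

int main(void)
{
        uint32_t shift  = 10;
        uint32_t rate   = 8250000;      /* e.g. a 33 MHz peripheral clock / 4 */
        uint32_t mult   = hz2mult(rate, shift);
        uint64_t cycles = rate;         /* one second worth of counter ticks */

        printf("mult = %u\n", (unsigned)mult);
        printf("%llu cycles -> %llu ns\n",
               (unsigned long long)cycles,
               (unsigned long long)((cycles * mult) >> shift));
        return 0;
}
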
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
index 5045156..991447b 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -231,7 +231,7 @@ void fw_schedule_bm_work(struct fw_card *card, unsigned long delay)
static void fw_card_bm_work(struct work_struct *work)
{
struct fw_card *card = container_of(work, struct fw_card, work.work);
- struct fw_device *root_device;
+ struct fw_device *root_device, *irm_device;
struct fw_node *root_node;
unsigned long flags;
int root_id, new_root_id, irm_id, local_id;
@@ -239,6 +239,7 @@ static void fw_card_bm_work(struct work_struct *work)
bool do_reset = false;
bool root_device_is_running;
bool root_device_is_cmc;
+ bool irm_is_1394_1995_only;

spin_lock_irqsave(&card->lock, flags);

@@ -248,12 +249,18 @@ static void fw_card_bm_work(struct work_struct *work)
}

generation = card->generation;
+
root_node = card->root_node;
fw_node_get(root_node);
root_device = root_node->data;
root_device_is_running = root_device &&
atomic_read(&root_device->state) == FW_DEVICE_RUNNING;
root_device_is_cmc = root_device && root_device->cmc;
+
+ irm_device = card->irm_node->data;
+ irm_is_1394_1995_only = irm_device && irm_device->config_rom &&
+ (irm_device->config_rom[2] & 0x000000f0) == 0;
+
root_id = root_node->node_id;
irm_id = card->irm_node->node_id;
local_id = card->local_node->node_id;
@@ -276,8 +283,15 @@ static void fw_card_bm_work(struct work_struct *work)

if (!card->irm_node->link_on) {
new_root_id = local_id;
- fw_notify("IRM has link off, making local node (%02x) root.\n",
- new_root_id);
+ fw_notify("%s, making local node (%02x) root.\n",
+ "IRM has link off", new_root_id);
+ goto pick_me;
+ }
+
+ if (irm_is_1394_1995_only) {
+ new_root_id = local_id;
+ fw_notify("%s, making local node (%02x) root.\n",
+ "IRM is not 1394a compliant", new_root_id);
goto pick_me;
}

@@ -316,8 +330,8 @@ static void fw_card_bm_work(struct work_struct *work)
* root, and thus, IRM.
*/
new_root_id = local_id;
- fw_notify("BM lock failed, making local node (%02x) root.\n",
- new_root_id);
+ fw_notify("%s, making local node (%02x) root.\n",
+ "BM lock failed", new_root_id);
goto pick_me;
}
} else if (card->bm_generation != generation) {
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 18f41d7..10348d3 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -335,7 +335,7 @@ static struct drm_display_mode drm_dmt_modes[] = {
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1024x768@85Hz */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 94500, 1024, 1072,
- 1072, 1376, 0, 768, 769, 772, 808, 0,
+ 1168, 1376, 0, 768, 769, 772, 808, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1152x864@75Hz */
{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
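
The fix only touches hsync_end (1072 merely duplicated hsync_start); the rest of the 1024x768@85 entry can be sanity-checked by recomputing the refresh rate from the pixel clock and the totals:

#include <stdio.h>

int main(void)
{
        double clock_khz = 94500.0;     /* pixel clock from the mode line */
        int htotal = 1376, vtotal = 808;

        printf("refresh = %.1f Hz\n", clock_khz * 1000.0 / (htotal * vtotal));
        return 0;
}
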
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index ef3d91d..691701a 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2688,6 +2688,14 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
return -EINVAL;
}

+ /* If the object is bigger than the entire aperture, reject it early
+ * before evicting everything in a vain attempt to find space.
+ */
+ if (obj->size > dev->gtt_total) {
+ DRM_ERROR("Attempting to bind an object larger than the aperture\n");
+ return -E2BIG;
+ }
+
search_free:
free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
obj->size, alignment, 0);
@@ -4231,6 +4239,17 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
int ret;

i915_verify_inactive(dev, __FILE__, __LINE__);
+
+ if (obj_priv->gtt_space != NULL) {
+ if (alignment == 0)
+ alignment = i915_gem_get_gtt_alignment(obj);
+ if (obj_priv->gtt_offset & (alignment - 1)) {
+ ret = i915_gem_object_unbind(obj);
+ if (ret)
+ return ret;
+ }
+ }
+
if (obj_priv->gtt_space == NULL) {
ret = i915_gem_object_bind_to_gtt(obj, alignment);
if (ret)
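
The new check relies on GTT alignments being powers of two, so "offset & (alignment - 1)" is non-zero exactly when the existing binding does not meet the requested alignment and the object must be unbound and rebound. The test in isolation, with made-up values:

#include <stdint.h>
#include <stdio.h>

static int misaligned(uint64_t offset, uint64_t alignment)
{
        /* valid only for power-of-two alignments */
        return (offset & (alignment - 1)) != 0;
}

int main(void)
{
        printf("%d\n", misaligned(0x21000, 0x10000));   /* 1: 64 KiB alignment violated */
        printf("%d\n", misaligned(0x20000, 0x10000));   /* 0: aligned */
        return 0;
}
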
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index c7502b6..70765cf 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -4155,12 +4155,6 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
spin_lock_irqsave(&dev->event_lock, flags);
work = intel_crtc->unpin_work;
if (work == NULL || !work->pending) {
- if (work && !work->pending) {
- obj_priv = to_intel_bo(work->pending_flip_obj);
- DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n",
- obj_priv,
- atomic_read(&obj_priv->pending_flip));
- }
spin_unlock_irqrestore(&dev->event_lock, flags);
return;
}
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 77e40cf..7c28ff1 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1180,16 +1180,6 @@ intel_dp_detect(struct drm_connector *connector)
if (HAS_PCH_SPLIT(dev))
return ironlake_dp_detect(connector);

- temp = I915_READ(PORT_HOTPLUG_EN);
-
- I915_WRITE(PORT_HOTPLUG_EN,
- temp |
- DPB_HOTPLUG_INT_EN |
- DPC_HOTPLUG_INT_EN |
- DPD_HOTPLUG_INT_EN);
-
- POSTING_READ(PORT_HOTPLUG_EN);
-
switch (dp_priv->output_reg) {
case DP_B:
bit = DPB_HOTPLUG_INT_STATUS;
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 034218c..4c7204a 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -566,6 +566,7 @@ typedef int (*radeon_packet3_check_t)(struct radeon_cs_parser *p,
*/
int radeon_agp_init(struct radeon_device *rdev);
void radeon_agp_resume(struct radeon_device *rdev);
+void radeon_agp_suspend(struct radeon_device *rdev);
void radeon_agp_fini(struct radeon_device *rdev);


diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c
index 28e473f..f40dfb7 100644
--- a/drivers/gpu/drm/radeon/radeon_agp.c
+++ b/drivers/gpu/drm/radeon/radeon_agp.c
@@ -270,3 +270,8 @@ void radeon_agp_fini(struct radeon_device *rdev)
}
#endif
}
+
+void radeon_agp_suspend(struct radeon_device *rdev)
+{
+ radeon_agp_fini(rdev);
+}
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 9916d82..1a4fa9b 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -530,6 +530,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
}

/* look up gpio for ddc, hpd */
+ ddc_bus.valid = false;
+ hpd.hpd = RADEON_HPD_NONE;
if ((le16_to_cpu(path->usDeviceTag) &
(ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) == 0) {
for (j = 0; j < con_obj->ucNumberOfObjects; j++) {
@@ -585,9 +587,6 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
break;
}
}
- } else {
- hpd.hpd = RADEON_HPD_NONE;
- ddc_bus.valid = false;
}

/* needed for aux chan transactions */
@@ -1174,7 +1173,7 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
lvds->native_mode.vtotal = lvds->native_mode.vdisplay +
le16_to_cpu(lvds_info->info.sLCDTiming.usVBlanking_Time);
lvds->native_mode.vsync_start = lvds->native_mode.vdisplay +
- le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth);
+ le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncOffset);
lvds->native_mode.vsync_end = lvds->native_mode.vsync_start +
le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth);
lvds->panel_pwr_delay =
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 7b629e3..ed6a724 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -748,6 +748,8 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
/* evict remaining vram memory */
radeon_bo_evict_vram(rdev);

+ radeon_agp_suspend(rdev);
+
pci_save_state(dev->pdev);
if (state.event == PM_EVENT_SUSPEND) {
/* Shut down the device */
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index bb1c122..c2848e0 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -978,8 +978,11 @@ void radeon_update_display_priority(struct radeon_device *rdev)
/* set display priority to high for r3xx, rv515 chips
* this avoids flickering due to underflow to the
* display controllers during heavy acceleration.
+ * Don't force high on rs4xx igp chips as it seems to
+ * affect the sound card. See kernel bug 15982.
*/
- if (ASIC_IS_R300(rdev) || (rdev->family == CHIP_RV515))
+ if ((ASIC_IS_R300(rdev) || (rdev->family == CHIP_RV515)) &&
+ !(rdev->flags & RADEON_IS_IGP))
rdev->disp_priority = 2;
else
rdev->disp_priority = 0;
diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
index cc5316d..b3ba44c 100644
--- a/drivers/gpu/drm/radeon/radeon_state.c
+++ b/drivers/gpu/drm/radeon/radeon_state.c
@@ -900,9 +900,10 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev,
flags |= RADEON_FRONT;
}
if (flags & (RADEON_DEPTH|RADEON_STENCIL)) {
- if (!dev_priv->have_z_offset)
+ if (!dev_priv->have_z_offset) {
printk_once(KERN_ERR "radeon: illegal depth clear request. Buggy mesa detected - please update.\n");
- flags &= ~(RADEON_DEPTH | RADEON_STENCIL);
+ flags &= ~(RADEON_DEPTH | RADEON_STENCIL);
+ }
}

if (flags & (RADEON_FRONT | RADEON_BACK)) {
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 143e788..0010efa 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1305,6 +1305,7 @@ static const struct hid_device_id hid_blacklist[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0012) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) },
diff --git a/drivers/hid/hid-gyration.c b/drivers/hid/hid-gyration.c
index 62416e6..3975e03 100644
--- a/drivers/hid/hid-gyration.c
+++ b/drivers/hid/hid-gyration.c
@@ -73,6 +73,7 @@ static int gyration_event(struct hid_device *hdev, struct hid_field *field,
static const struct hid_device_id gyration_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) },
{ }
};
MODULE_DEVICE_TABLE(hid, gyration_devices);
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 09d2764..b681cbf 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -267,6 +267,7 @@
#define USB_VENDOR_ID_GYRATION 0x0c16
#define USB_DEVICE_ID_GYRATION_REMOTE 0x0002
#define USB_DEVICE_ID_GYRATION_REMOTE_2 0x0003
+#define USB_DEVICE_ID_GYRATION_REMOTE_3 0x0008

#define USB_VENDOR_ID_HAPP 0x078b
#define USB_DEVICE_ID_UGCI_DRIVING 0x0010
diff --git a/drivers/hwmon/ltc4245.c b/drivers/hwmon/ltc4245.c
index 65c232a..21d201b 100644
--- a/drivers/hwmon/ltc4245.c
+++ b/drivers/hwmon/ltc4245.c
@@ -45,9 +45,7 @@ enum ltc4245_cmd {
LTC4245_VEEIN = 0x19,
LTC4245_VEESENSE = 0x1a,
LTC4245_VEEOUT = 0x1b,
- LTC4245_GPIOADC1 = 0x1c,
- LTC4245_GPIOADC2 = 0x1d,
- LTC4245_GPIOADC3 = 0x1e,
+ LTC4245_GPIOADC = 0x1c,
};

struct ltc4245_data {
@@ -61,7 +59,7 @@ struct ltc4245_data {
u8 cregs[0x08];

/* Voltage registers */
- u8 vregs[0x0f];
+ u8 vregs[0x0d];
};

static struct ltc4245_data *ltc4245_update_device(struct device *dev)
@@ -86,7 +84,7 @@ static struct ltc4245_data *ltc4245_update_device(struct device *dev)
data->cregs[i] = val;
}

- /* Read voltage registers -- 0x10 to 0x1f */
+ /* Read voltage registers -- 0x10 to 0x1c */
for (i = 0; i < ARRAY_SIZE(data->vregs); i++) {
val = i2c_smbus_read_byte_data(client, i+0x10);
if (unlikely(val < 0))
@@ -128,9 +126,7 @@ static int ltc4245_get_voltage(struct device *dev, u8 reg)
case LTC4245_VEEOUT:
voltage = regval * -55;
break;
- case LTC4245_GPIOADC1:
- case LTC4245_GPIOADC2:
- case LTC4245_GPIOADC3:
+ case LTC4245_GPIOADC:
voltage = regval * 10;
break;
default:
@@ -297,9 +293,7 @@ LTC4245_ALARM(in7_min_alarm, (1 << 2), LTC4245_FAULT2);
LTC4245_ALARM(in8_min_alarm, (1 << 3), LTC4245_FAULT2);

/* GPIO voltages */
-LTC4245_VOLTAGE(in9_input, LTC4245_GPIOADC1);
-LTC4245_VOLTAGE(in10_input, LTC4245_GPIOADC2);
-LTC4245_VOLTAGE(in11_input, LTC4245_GPIOADC3);
+LTC4245_VOLTAGE(in9_input, LTC4245_GPIOADC);

/* Power Consumption (virtual) */
LTC4245_POWER(power1_input, LTC4245_12VSENSE);
@@ -342,8 +336,6 @@ static struct attribute *ltc4245_attributes[] = {
&sensor_dev_attr_in8_min_alarm.dev_attr.attr,

&sensor_dev_attr_in9_input.dev_attr.attr,
- &sensor_dev_attr_in10_input.dev_attr.attr,
- &sensor_dev_attr_in11_input.dev_attr.attr,

&sensor_dev_attr_power1_input.dev_attr.attr,
&sensor_dev_attr_power2_input.dev_attr.attr,
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 09437e9..0a1042b 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -282,6 +282,7 @@ static int linear_stop (mddev_t *mddev)
rcu_barrier();
blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
kfree(conf);
+ mddev->private = NULL;

return 0;
}
diff --git a/drivers/md/md.c b/drivers/md/md.c
index cefd63d..336792e 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -508,9 +508,36 @@ static inline int mddev_trylock(mddev_t * mddev)
return mutex_trylock(&mddev->reconfig_mutex);
}

-static inline void mddev_unlock(mddev_t * mddev)
-{
- mutex_unlock(&mddev->reconfig_mutex);
+static struct attribute_group md_redundancy_group;
+
+static void mddev_unlock(mddev_t * mddev)
+{
+ if (mddev->to_remove) {
+ /* These cannot be removed under reconfig_mutex as
+ * an access to the files will try to take reconfig_mutex
+ * while holding the file unremovable, which leads to
+ * a deadlock.
+ * So hold open_mutex instead - we are allowed to take
+ * it while holding reconfig_mutex, and md_run can
+ * use it to wait for the remove to complete.
+ */
+ struct attribute_group *to_remove = mddev->to_remove;
+ mddev->to_remove = NULL;
+ mutex_lock(&mddev->open_mutex);
+ mutex_unlock(&mddev->reconfig_mutex);
+
+ if (to_remove != &md_redundancy_group)
+ sysfs_remove_group(&mddev->kobj, to_remove);
+ if (mddev->pers == NULL ||
+ mddev->pers->sync_request == NULL) {
+ sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
+ if (mddev->sysfs_action)
+ sysfs_put(mddev->sysfs_action);
+ mddev->sysfs_action = NULL;
+ }
+ mutex_unlock(&mddev->open_mutex);
+ } else
+ mutex_unlock(&mddev->reconfig_mutex);

md_wakeup_thread(mddev->thread);
}
@@ -2980,6 +3007,23 @@ level_store(mddev_t *mddev, const char *buf, size_t len)
/* Looks like we have a winner */
mddev_suspend(mddev);
mddev->pers->stop(mddev);
+
+ if (mddev->pers->sync_request == NULL &&
+ pers->sync_request != NULL) {
+ /* need to add the md_redundancy_group */
+ if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
+ printk(KERN_WARNING
+ "md: cannot register extra attributes for %s\n",
+ mdname(mddev));
+ mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");
+ }
+ if (mddev->pers->sync_request != NULL &&
+ pers->sync_request == NULL) {
+ /* need to remove the md_redundancy_group */
+ if (mddev->to_remove == NULL)
+ mddev->to_remove = &md_redundancy_group;
+ }
+
module_put(mddev->pers->owner);
/* Invalidate devices that are now superfluous */
list_for_each_entry(rdev, &mddev->disks, same_set)
@@ -4082,15 +4126,6 @@ static void mddev_delayed_delete(struct work_struct *ws)
{
mddev_t *mddev = container_of(ws, mddev_t, del_work);

- if (mddev->private) {
- sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
- if (mddev->private != (void*)1)
- sysfs_remove_group(&mddev->kobj, mddev->private);
- if (mddev->sysfs_action)
- sysfs_put(mddev->sysfs_action);
- mddev->sysfs_action = NULL;
- mddev->private = NULL;
- }
sysfs_remove_group(&mddev->kobj, &md_bitmap_group);
kobject_del(&mddev->kobj);
kobject_put(&mddev->kobj);
@@ -4248,6 +4283,13 @@ static int do_md_run(mddev_t * mddev)
if (mddev->pers)
return -EBUSY;

+ /* These two calls synchronise us with the
+ * sysfs_remove_group calls in mddev_unlock,
+ * so they must have completed.
+ */
+ mutex_lock(&mddev->open_mutex);
+ mutex_unlock(&mddev->open_mutex);
+
/*
* Analyze all RAID superblock(s)
*/
@@ -4536,8 +4578,8 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
mddev->queue->unplug_fn = NULL;
mddev->queue->backing_dev_info.congested_fn = NULL;
module_put(mddev->pers->owner);
- if (mddev->pers->sync_request && mddev->private == NULL)
- mddev->private = (void*)1;
+ if (mddev->pers->sync_request && mddev->to_remove == NULL)
+ mddev->to_remove = &md_redundancy_group;
mddev->pers = NULL;
/* tell userspace to handle 'inactive' */
sysfs_notify_dirent(mddev->sysfs_state);
@@ -5496,6 +5538,7 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
int err = 0;
void __user *argp = (void __user *)arg;
mddev_t *mddev = NULL;
+ int ro;

if (!capable(CAP_SYS_ADMIN))
return -EACCES;
@@ -5631,6 +5674,34 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
err = do_md_stop(mddev, 1, 1);
goto done_unlock;

+ case BLKROSET:
+ if (get_user(ro, (int __user *)(arg))) {
+ err = -EFAULT;
+ goto done_unlock;
+ }
+ err = -EINVAL;
+
+ /* if the bdev is going readonly the value of mddev->ro
+ * does not matter, no writes are coming
+ */
+ if (ro)
+ goto done_unlock;
+
+ /* are we already prepared for writes? */
+ if (mddev->ro != 1)
+ goto done_unlock;
+
+ /* transitioning to readauto need only happen for
+ * arrays that call md_write_start
+ */
+ if (mddev->pers) {
+ err = restart_array(mddev);
+ if (err == 0) {
+ mddev->ro = 2;
+ set_disk_ro(mddev->gendisk, 0);
+ }
+ }
+ goto done_unlock;
}

/*
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 8e4c75c..722f5df 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -305,6 +305,7 @@ struct mddev_s
atomic_t max_corr_read_errors; /* max read retries */
struct list_head all_mddevs;

+ struct attribute_group *to_remove;
/* Generic barrier handling.
* If there is a pending barrier request, all other
* writes are blocked while the devices are flushed.
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index e59b10e..84d3bf0 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -418,7 +418,7 @@ static void raid1_end_write_request(struct bio *bio, int error)
*/
static int read_balance(conf_t *conf, r1bio_t *r1_bio)
{
- const unsigned long this_sector = r1_bio->sector;
+ const sector_t this_sector = r1_bio->sector;
int new_disk = conf->last_used, disk = new_disk;
int wonly_disk = -1;
const int sectors = r1_bio->sectors;
@@ -434,7 +434,7 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
retry:
if (conf->mddev->recovery_cp < MaxSector &&
(this_sector + sectors >= conf->next_resync)) {
- /* Choose the first operation device, for consistancy */
+ /* Choose the first operational device, for consistency */
new_disk = 0;

for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev);
@@ -912,9 +912,10 @@ static int make_request(struct request_queue *q, struct bio * bio)
if (test_bit(Faulty, &rdev->flags)) {
rdev_dec_pending(rdev, mddev);
r1_bio->bios[i] = NULL;
- } else
+ } else {
r1_bio->bios[i] = bio;
- targets++;
+ targets++;
+ }
} else
r1_bio->bios[i] = NULL;
}
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index e2766d8..ad945cc 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -494,7 +494,7 @@ static int raid10_mergeable_bvec(struct request_queue *q,
*/
static int read_balance(conf_t *conf, r10bio_t *r10_bio)
{
- const unsigned long this_sector = r10_bio->sector;
+ const sector_t this_sector = r10_bio->sector;
int disk, slot, nslot;
const int sectors = r10_bio->sectors;
sector_t new_distance, current_distance;
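
Both read_balance() fixes address the same truncation: on 32-bit hosts "unsigned long" is 32 bits, so sectors beyond 2 TiB (2^32 sectors of 512 bytes) wrapped around, while sector_t stays 64-bit. A tiny demonstration of the wrap:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t sector    = 5ULL * 1024 * 1024 * 1024; /* ~2.5 TiB into the array */
        uint32_t truncated = (uint32_t)sector;          /* what a 32-bit long keeps */

        printf("sector_t   : %llu\n", (unsigned long long)sector);
        printf("32-bit long: %u (wrapped)\n", (unsigned)truncated);
        return 0;
}
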
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 15348c3..6af0a6d 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5087,7 +5087,9 @@ static int run(mddev_t *mddev)
}

/* Ok, everything is just fine now */
- if (sysfs_create_group(&mddev->kobj, &raid5_attrs_group))
+ if (mddev->to_remove == &raid5_attrs_group)
+ mddev->to_remove = NULL;
+ else if (sysfs_create_group(&mddev->kobj, &raid5_attrs_group))
printk(KERN_WARNING
"raid5: failed to create sysfs attributes for %s\n",
mdname(mddev));
@@ -5134,7 +5136,8 @@ static int stop(mddev_t *mddev)
mddev->queue->backing_dev_info.congested_fn = NULL;
blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
free_conf(conf);
- mddev->private = &raid5_attrs_group;
+ mddev->private = NULL;
+ mddev->to_remove = &raid5_attrs_group;
return 0;
}

diff --git a/drivers/media/video/uvc/uvc_ctrl.c b/drivers/media/video/uvc/uvc_ctrl.c
index 6d3850b..2194da5 100644
--- a/drivers/media/video/uvc/uvc_ctrl.c
+++ b/drivers/media/video/uvc/uvc_ctrl.c
@@ -1047,6 +1047,8 @@ int uvc_ctrl_set(struct uvc_video_chain *chain,
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_MAX));
step = mapping->get(mapping, UVC_GET_RES,
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_RES));
+ if (step == 0)
+ step = 1;

xctrl->value = min + (xctrl->value - min + step/2) / step * step;
xctrl->value = clamp(xctrl->value, min, max);
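
GET_RES is supposed to report the control's step size, but some cameras return 0, which made the round-to-nearest-step expression above divide by zero; clamping the step to 1 preserves the rounding behaviour. The same arithmetic as a standalone sketch:

#include <stdio.h>

/* round value to the nearest multiple of step above min, clamped to [min, max] */
static int round_to_step(int value, int min, int max, int step)
{
        if (step == 0)          /* the guard added above: a zero GET_RES reply */
                step = 1;
        value = min + (value - min + step / 2) / step * step;
        if (value < min)
                value = min;
        if (value > max)
                value = max;
        return value;
}

int main(void)
{
        printf("%d\n", round_to_step(37, 0, 255, 8));   /* 40 */
        printf("%d\n", round_to_step(37, 0, 255, 0));   /* 37; would have divided by zero */
        return 0;
}
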
diff --git a/drivers/misc/vmware_balloon.c b/drivers/misc/vmware_balloon.c
index e7161c4..ad8fb09 100644
--- a/drivers/misc/vmware_balloon.c
+++ b/drivers/misc/vmware_balloon.c
@@ -45,7 +45,7 @@

MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver");
-MODULE_VERSION("1.2.1.0-K");
+MODULE_VERSION("1.2.1.1-k");
MODULE_ALIAS("dmi:*:svnVMware*:*");
MODULE_ALIAS("vmware_vmmemctl");
MODULE_LICENSE("GPL");
@@ -101,6 +101,8 @@ MODULE_LICENSE("GPL");
/* Maximum number of page allocations without yielding processor */
#define VMW_BALLOON_YIELD_THRESHOLD 1024

+/* Maximum number of refused pages we accumulate during inflation cycle */
+#define VMW_BALLOON_MAX_REFUSED 16

/*
* Hypervisor communication port definitions.
@@ -183,6 +185,7 @@ struct vmballoon {

/* transient list of non-balloonable pages */
struct list_head refused_pages;
+ unsigned int n_refused_pages;

/* balloon size in pages */
unsigned int size;
@@ -428,14 +431,21 @@ static int vmballoon_reserve_page(struct vmballoon *b, bool can_sleep)
/* inform monitor */
locked = vmballoon_send_lock_page(b, page_to_pfn(page));
if (!locked) {
+ STATS_INC(b->stats.refused_alloc);
+
if (b->reset_required) {
__free_page(page);
return -EIO;
}

- /* place on list of non-balloonable pages, retry allocation */
+ /*
+ * Place page on the list of non-balloonable pages
+ * and retry allocation, unless we already accumulated
+ * too many of them, in which case take a breather.
+ */
list_add(&page->lru, &b->refused_pages);
- STATS_INC(b->stats.refused_alloc);
+ if (++b->n_refused_pages >= VMW_BALLOON_MAX_REFUSED)
+ return -EIO;
}
} while (!locked);

@@ -483,6 +493,8 @@ static void vmballoon_release_refused_pages(struct vmballoon *b)
__free_page(page);
STATS_INC(b->stats.refused_free);
}
+
+ b->n_refused_pages = 0;
}

/*
diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
index 2c712af..48a1dbf 100644
--- a/drivers/net/arcnet/com20020-pci.c
+++ b/drivers/net/arcnet/com20020-pci.c
@@ -164,8 +164,8 @@ static DEFINE_PCI_DEVICE_TABLE(com20020pci_id_table) = {
{ 0x1571, 0xa204, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
{ 0x1571, 0xa205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
{ 0x1571, 0xa206, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
- { 0x10B5, 0x9030, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
- { 0x10B5, 0x9050, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
+ { 0x10B5, 0x9030, 0x10B5, 0x2978, 0, 0, ARC_CAN_10MBIT },
+ { 0x10B5, 0x9050, 0x10B5, 0x2273, 0, 0, ARC_CAN_10MBIT },
{ 0x14BA, 0x6000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
{ 0x10B5, 0x2200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
{0,}
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 145b1a7..dd70d0c 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -84,6 +84,20 @@ static struct can_bittiming_const sja1000_bittiming_const = {
.brp_inc = 1,
};

+static void sja1000_write_cmdreg(struct sja1000_priv *priv, u8 val)
+{
+ unsigned long flags;
+
+ /*
+ * The command register needs some locking and time to settle
+ * the write_reg() operation - especially on SMP systems.
+ */
+ spin_lock_irqsave(&priv->cmdreg_lock, flags);
+ priv->write_reg(priv, REG_CMR, val);
+ priv->read_reg(priv, REG_SR);
+ spin_unlock_irqrestore(&priv->cmdreg_lock, flags);
+}
+
static int sja1000_probe_chip(struct net_device *dev)
{
struct sja1000_priv *priv = netdev_priv(dev);
@@ -297,7 +311,7 @@ static netdev_tx_t sja1000_start_xmit(struct sk_buff *skb,

can_put_echo_skb(skb, dev, 0);

- priv->write_reg(priv, REG_CMR, CMD_TR);
+ sja1000_write_cmdreg(priv, CMD_TR);

return NETDEV_TX_OK;
}
@@ -346,7 +360,7 @@ static void sja1000_rx(struct net_device *dev)
cf->can_id = id;

/* release receive buffer */
- priv->write_reg(priv, REG_CMR, CMD_RRB);
+ sja1000_write_cmdreg(priv, CMD_RRB);

netif_rx(skb);

@@ -374,7 +388,7 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
stats->rx_over_errors++;
stats->rx_errors++;
- priv->write_reg(priv, REG_CMR, CMD_CDO); /* clear bit */
+ sja1000_write_cmdreg(priv, CMD_CDO); /* clear bit */
}

if (isrc & IRQ_EI) {
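
The new sja1000_write_cmdreg() helper serializes command-register writes and reads the status register back inside the same critical section, so a command has settled in the chip before another CPU can issue the next one. A loose userspace analogue, with a mutex and plain variables standing in for the spinlock and the chip registers:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t cmd_lock = PTHREAD_MUTEX_INITIALIZER;
static volatile uint8_t cmd_reg;
static volatile uint8_t status_reg = 0x3c;

static uint8_t write_cmd(uint8_t val)
{
        uint8_t sr;

        pthread_mutex_lock(&cmd_lock);
        cmd_reg = val;          /* write_reg(priv, REG_CMR, val) */
        sr = status_reg;        /* read_reg(priv, REG_SR) lets the write settle */
        pthread_mutex_unlock(&cmd_lock);
        return sr;
}

int main(void)
{
        printf("status after command: 0x%02x\n", (unsigned)write_cmd(0x01));
        return 0;
}
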
diff --git a/drivers/net/can/sja1000/sja1000.h b/drivers/net/can/sja1000/sja1000.h
index 97a622b..de8e778 100644
--- a/drivers/net/can/sja1000/sja1000.h
+++ b/drivers/net/can/sja1000/sja1000.h
@@ -167,6 +167,7 @@ struct sja1000_priv {

void __iomem *reg_base; /* ioremap'ed address to registers */
unsigned long irq_flags; /* for request_irq() */
+ spinlock_t cmdreg_lock; /* lock for concurrent cmd register writes */

u16 flags; /* custom mode flags */
u8 ocr; /* output control register */
diff --git a/drivers/net/mlx4/icm.c b/drivers/net/mlx4/icm.c
index 57288ca..ef62f17 100644
--- a/drivers/net/mlx4/icm.c
+++ b/drivers/net/mlx4/icm.c
@@ -175,9 +175,10 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,

if (chunk->nsg <= 0)
goto fail;
+ }

+ if (chunk->npages == MLX4_ICM_CHUNK_LEN)
chunk = NULL;
- }

npages -= 1 << cur_order;
} else {
diff --git a/drivers/net/wireless/ath/ar9170/hw.h b/drivers/net/wireless/ath/ar9170/hw.h
index 0a1d4c2..06f1f3c 100644
--- a/drivers/net/wireless/ath/ar9170/hw.h
+++ b/drivers/net/wireless/ath/ar9170/hw.h
@@ -425,5 +425,6 @@ enum ar9170_txq {

#define AR9170_TXQ_DEPTH 32
#define AR9170_TX_MAX_PENDING 128
+#define AR9170_RX_STREAM_MAX_SIZE 65535

#endif /* __AR9170_HW_H */
diff --git a/drivers/net/wireless/ath/ar9170/main.c b/drivers/net/wireless/ath/ar9170/main.c
index c536929..144db02 100644
--- a/drivers/net/wireless/ath/ar9170/main.c
+++ b/drivers/net/wireless/ath/ar9170/main.c
@@ -2516,7 +2516,7 @@ void *ar9170_alloc(size_t priv_size)
* tends to split the streams into separate rx descriptors.
*/

- skb = __dev_alloc_skb(AR9170_MAX_RX_BUFFER_SIZE, GFP_KERNEL);
+ skb = __dev_alloc_skb(AR9170_RX_STREAM_MAX_SIZE, GFP_KERNEL);
if (!skb)
goto err_nomem;

diff --git a/drivers/net/wireless/ath/ar9170/usb.c b/drivers/net/wireless/ath/ar9170/usb.c
index e1c2fca..7bae7fd 100644
--- a/drivers/net/wireless/ath/ar9170/usb.c
+++ b/drivers/net/wireless/ath/ar9170/usb.c
@@ -67,18 +67,28 @@ static struct usb_device_id ar9170_usb_ids[] = {
{ USB_DEVICE(0x0cf3, 0x1001) },
/* TP-Link TL-WN821N v2 */
{ USB_DEVICE(0x0cf3, 0x1002) },
+ /* 3Com Dual Band 802.11n USB Adapter */
+ { USB_DEVICE(0x0cf3, 0x1010) },
+ /* H3C Dual Band 802.11n USB Adapter */
+ { USB_DEVICE(0x0cf3, 0x1011) },
/* Cace Airpcap NX */
{ USB_DEVICE(0xcace, 0x0300) },
/* D-Link DWA 160 A1 */
{ USB_DEVICE(0x07d1, 0x3c10) },
/* D-Link DWA 160 A2 */
{ USB_DEVICE(0x07d1, 0x3a09) },
+ /* Netgear WNA1000 */
+ { USB_DEVICE(0x0846, 0x9040) },
/* Netgear WNDA3100 */
{ USB_DEVICE(0x0846, 0x9010) },
/* Netgear WN111 v2 */
{ USB_DEVICE(0x0846, 0x9001) },
/* Zydas ZD1221 */
{ USB_DEVICE(0x0ace, 0x1221) },
+ /* Proxim ORiNOCO 802.11n USB */
+ { USB_DEVICE(0x1435, 0x0804) },
+ /* WNC Generic 11n USB Dongle */
+ { USB_DEVICE(0x1435, 0x0326) },
/* ZyXEL NWD271N */
{ USB_DEVICE(0x0586, 0x3417) },
/* Z-Com UB81 BG */
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 3abbe75..ea90997 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -1211,6 +1211,7 @@ ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
struct ath5k_hw *ah = sc->ah;
struct sk_buff *skb = bf->skb;
struct ath5k_desc *ds;
+ int ret;

if (!skb) {
skb = ath5k_rx_skb_alloc(sc, &bf->skbaddr);
@@ -1237,9 +1238,9 @@ ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
ds = bf->desc;
ds->ds_link = bf->daddr; /* link to self */
ds->ds_data = bf->skbaddr;
- ah->ah_setup_rx_desc(ah, ds,
- skb_tailroom(skb), /* buffer size */
- 0);
+ ret = ah->ah_setup_rx_desc(ah, ds, ah->common.rx_bufsize, 0);
+ if (ret)
+ return ret;

if (sc->rxlink != NULL)
*sc->rxlink = bf->daddr;
@@ -2993,13 +2994,15 @@ static void ath5k_configure_filter(struct ieee80211_hw *hw,

if (changed_flags & (FIF_PROMISC_IN_BSS | FIF_OTHER_BSS)) {
if (*new_flags & FIF_PROMISC_IN_BSS) {
- rfilt |= AR5K_RX_FILTER_PROM;
__set_bit(ATH_STAT_PROMISC, sc->status);
} else {
__clear_bit(ATH_STAT_PROMISC, sc->status);
}
}

+ if (test_bit(ATH_STAT_PROMISC, sc->status))
+ rfilt |= AR5K_RX_FILTER_PROM;
+
/* Note, AR5K_RX_FILTER_MCAST is already enabled */
if (*new_flags & FIF_ALLMULTI) {
mfilt[0] = ~0;
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 78b5711..20d5414 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1241,7 +1241,7 @@ void ath9k_hw_deinit(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);

- if (common->state <= ATH_HW_INITIALIZED)
+ if (common->state < ATH_HW_INITIALIZED)
goto free_hw;

if (!AR_SREV_9100(ah))
@@ -1252,8 +1252,6 @@ void ath9k_hw_deinit(struct ath_hw *ah)
free_hw:
if (!AR_SREV_9280_10_OR_LATER(ah))
ath9k_hw_rf_free_ext_banks(ah);
- kfree(ah);
- ah = NULL;
}
EXPORT_SYMBOL(ath9k_hw_deinit);

diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 3d4d897..b78308c 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -760,6 +760,9 @@ static void ath9k_deinit_softc(struct ath_softc *sc)

tasklet_kill(&sc->intr_tq);
tasklet_kill(&sc->bcon_tasklet);
+
+ kfree(sc->sc_ah);
+ sc->sc_ah = NULL;
}

void ath9k_deinit_device(struct ath_softc *sc)
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index 1460116..d3ef2a9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -2077,10 +2077,12 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
}
/* Else we have enough samples; calculate estimate of
* actual average throughput */
-
- /* Sanity-check TPT calculations */
- BUG_ON(window->average_tpt != ((window->success_ratio *
- tbl->expected_tpt[index] + 64) / 128));
+ if (window->average_tpt != ((window->success_ratio *
+ tbl->expected_tpt[index] + 64) / 128)) {
+ IWL_ERR(priv, "expected_tpt should have been calculated by now\n");
+ window->average_tpt = ((window->success_ratio *
+ tbl->expected_tpt[index] + 64) / 128);
+ }

/* If we are searching for better modulation mode, check success. */
if (lq_sta->search_better_tbl &&
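
success_ratio in this code is kept scaled by 128, so the value the driver now recomputes instead of hitting BUG_ON() is a fixed-point average with round-to-nearest (the +64 before the division by 128). With made-up numbers:

#include <stdio.h>

int main(void)
{
        int success_ratio = 96;         /* 75% expressed on a 0..128 scale */
        int expected_tpt  = 40;         /* throughput estimate from the table */
        int average_tpt   = (success_ratio * expected_tpt + 64) / 128;

        printf("average_tpt = %d\n", average_tpt);      /* (3840 + 64) / 128 = 30 */
        return 0;
}
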
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index 741e65e..661e36b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -561,6 +561,11 @@ static void iwl_bg_start_internal_scan(struct work_struct *work)

mutex_lock(&priv->mutex);

+ if (priv->is_internal_short_scan == true) {
+ IWL_DEBUG_SCAN(priv, "Internal scan already in progress\n");
+ goto unlock;
+ }
+
if (!iwl_is_ready_rf(priv)) {
IWL_DEBUG_SCAN(priv, "not ready or exit pending\n");
goto unlock;
@@ -958,17 +963,27 @@ void iwl_bg_scan_completed(struct work_struct *work)
{
struct iwl_priv *priv =
container_of(work, struct iwl_priv, scan_completed);
+ bool internal = false;

IWL_DEBUG_SCAN(priv, "SCAN complete scan\n");

cancel_delayed_work(&priv->scan_check);

- if (!priv->is_internal_short_scan)
- ieee80211_scan_completed(priv->hw, false);
- else {
+ mutex_lock(&priv->mutex);
+ if (priv->is_internal_short_scan) {
priv->is_internal_short_scan = false;
IWL_DEBUG_SCAN(priv, "internal short scan completed\n");
+ internal = true;
}
+ mutex_unlock(&priv->mutex);
+
+ /*
+ * Do not hold mutex here since this will cause mac80211 to call
+ * into driver again into functions that will attempt to take
+ * mutex.
+ */
+ if (!internal)
+ ieee80211_scan_completed(priv->hw, false);

if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index 8dd0c03..c243df7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -1198,6 +1198,7 @@ static void iwl_tx_status(struct iwl_priv *priv, struct sk_buff *skb)
struct ieee80211_sta *sta;
struct iwl_station_priv *sta_priv;

+ rcu_read_lock();
sta = ieee80211_find_sta(priv->vif, hdr->addr1);
if (sta) {
sta_priv = (void *)sta->drv_priv;
@@ -1206,6 +1207,7 @@ static void iwl_tx_status(struct iwl_priv *priv, struct sk_buff *skb)
atomic_dec_return(&sta_priv->pending_frames) == 0)
ieee80211_sta_block_awake(priv->hw, sta, false);
}
+ rcu_read_unlock();

ieee80211_tx_status_irqsafe(priv->hw, skb);
}
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index 743a6c6..186dc71 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -80,6 +80,7 @@ static struct usb_device_id p54u_table[] __devinitdata = {
{USB_DEVICE(0x1413, 0x5400)}, /* Telsey 802.11g USB2.0 Adapter */
{USB_DEVICE(0x1435, 0x0427)}, /* Inventel UR054G */
{USB_DEVICE(0x2001, 0x3704)}, /* DLink DWL-G122 rev A2 */
+ {USB_DEVICE(0x413c, 0x5513)}, /* Dell WLA3310 USB Wireless Adapter */
{USB_DEVICE(0x413c, 0x8102)}, /* Spinnaker DUT */
{USB_DEVICE(0x413c, 0x8104)}, /* Cohiba Proto board */
{}
diff --git a/drivers/net/wireless/rtl818x/rtl8180_dev.c b/drivers/net/wireless/rtl818x/rtl8180_dev.c
index 2131a44..de632ec 100644
--- a/drivers/net/wireless/rtl818x/rtl8180_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180_dev.c
@@ -188,6 +188,7 @@ static void rtl8180_handle_tx(struct ieee80211_hw *dev, unsigned int prio)
info->flags |= IEEE80211_TX_STAT_ACK;

info->status.rates[0].count = (flags & 0xFF) + 1;
+ info->status.rates[1].idx = -1;

ieee80211_tx_status_irqsafe(dev, skb);
if (ring->entries - skb_queue_len(&ring->queue) == 2)
diff --git a/drivers/net/wireless/wl12xx/wl1251_sdio.c b/drivers/net/wireless/wl12xx/wl1251_sdio.c
index 9423f22..d74b89b 100644
--- a/drivers/net/wireless/wl12xx/wl1251_sdio.c
+++ b/drivers/net/wireless/wl12xx/wl1251_sdio.c
@@ -160,6 +160,7 @@ disable:
sdio_disable_func(func);
release:
sdio_release_host(func);
+ wl1251_free_hw(wl);
return ret;
}

diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index 166b67e..de82183 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -30,23 +30,7 @@

#define OP_BUFFER_FLAGS 0

-/*
- * Read and write access is using spin locking. Thus, writing to the
- * buffer by NMI handler (x86) could occur also during critical
- * sections when reading the buffer. To avoid this, there are 2
- * buffers for independent read and write access. Read access is in
- * process context only, write access only in the NMI handler. If the
- * read buffer runs empty, both buffers are swapped atomically. There
- * is potentially a small window during swapping where the buffers are
- * disabled and samples could be lost.
- *
- * Using 2 buffers is a little bit overhead, but the solution is clear
- * and does not require changes in the ring buffer implementation. It
- * can be changed to a single buffer solution when the ring buffer
- * access is implemented as non-locking atomic code.
- */
-static struct ring_buffer *op_ring_buffer_read;
-static struct ring_buffer *op_ring_buffer_write;
+static struct ring_buffer *op_ring_buffer;
DEFINE_PER_CPU(struct oprofile_cpu_buffer, op_cpu_buffer);

static void wq_sync_buffer(struct work_struct *work);
@@ -68,12 +52,9 @@ void oprofile_cpu_buffer_inc_smpl_lost(void)

void free_cpu_buffers(void)
{
- if (op_ring_buffer_read)
- ring_buffer_free(op_ring_buffer_read);
- op_ring_buffer_read = NULL;
- if (op_ring_buffer_write)
- ring_buffer_free(op_ring_buffer_write);
- op_ring_buffer_write = NULL;
+ if (op_ring_buffer)
+ ring_buffer_free(op_ring_buffer);
+ op_ring_buffer = NULL;
}

#define RB_EVENT_HDR_SIZE 4
@@ -86,11 +67,8 @@ int alloc_cpu_buffers(void)
unsigned long byte_size = buffer_size * (sizeof(struct op_sample) +
RB_EVENT_HDR_SIZE);

- op_ring_buffer_read = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS);
- if (!op_ring_buffer_read)
- goto fail;
- op_ring_buffer_write = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS);
- if (!op_ring_buffer_write)
+ op_ring_buffer = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS);
+ if (!op_ring_buffer)
goto fail;

for_each_possible_cpu(i) {
@@ -162,16 +140,11 @@ struct op_sample
*op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size)
{
entry->event = ring_buffer_lock_reserve
- (op_ring_buffer_write, sizeof(struct op_sample) +
+ (op_ring_buffer, sizeof(struct op_sample) +
size * sizeof(entry->sample->data[0]));
- if (entry->event)
- entry->sample = ring_buffer_event_data(entry->event);
- else
- entry->sample = NULL;
-
- if (!entry->sample)
+ if (!entry->event)
return NULL;
-
+ entry->sample = ring_buffer_event_data(entry->event);
entry->size = size;
entry->data = entry->sample->data;

@@ -180,25 +153,16 @@ struct op_sample

int op_cpu_buffer_write_commit(struct op_entry *entry)
{
- return ring_buffer_unlock_commit(op_ring_buffer_write, entry->event);
+ return ring_buffer_unlock_commit(op_ring_buffer, entry->event);
}

struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu)
{
struct ring_buffer_event *e;
- e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
- if (e)
- goto event;
- if (ring_buffer_swap_cpu(op_ring_buffer_read,
- op_ring_buffer_write,
- cpu))
+ e = ring_buffer_consume(op_ring_buffer, cpu, NULL);
+ if (!e)
return NULL;
- e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
- if (e)
- goto event;
- return NULL;

-event:
entry->event = e;
entry->sample = ring_buffer_event_data(e);
entry->size = (ring_buffer_event_length(e) - sizeof(struct op_sample))
@@ -209,8 +173,7 @@ event:

unsigned long op_cpu_buffer_entries(int cpu)
{
- return ring_buffer_entries_cpu(op_ring_buffer_read, cpu)
- + ring_buffer_entries_cpu(op_ring_buffer_write, cpu);
+ return ring_buffer_entries_cpu(op_ring_buffer, cpu);
}

static int
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 27c0e6e..2f2d0ec 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1457,7 +1457,8 @@ static void quirk_jmicron_ata(struct pci_dev *pdev)
conf5 &= ~(1 << 24); /* Clear bit 24 */

switch (pdev->device) {
- case PCI_DEVICE_ID_JMICRON_JMB360:
+ case PCI_DEVICE_ID_JMICRON_JMB360: /* SATA single port */
+ case PCI_DEVICE_ID_JMICRON_JMB362: /* SATA dual ports */
/* The controller should be in single function ahci mode */
conf1 |= 0x0002A100; /* Set 8, 13, 15, 17 */
break;
@@ -1493,12 +1494,14 @@ static void quirk_jmicron_ata(struct pci_dev *pdev)
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB360, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361, quirk_jmicron_ata);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB362, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB360, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361, quirk_jmicron_ata);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB362, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata);
@@ -2127,6 +2130,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x9602, quirk_disable_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ASUSTEK, 0x9602, quirk_disable_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AI, 0x9602, quirk_disable_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, 0xa238, quirk_disable_msi);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x5a3f, quirk_disable_msi);

/* Go through the list of Hypertransport capabilities and
* return 1 if a HT MSI capability is found and enabled */
@@ -2218,15 +2222,16 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SERVERWORKS,
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE,
ht_enable_msi_mapping);

-/* The P5N32-SLI Premium motherboard from Asus has a problem with msi
+/* The P5N32-SLI motherboards from Asus have a problem with msi
* for the MCP55 NIC. It is not yet determined whether the msi problem
* also affects other devices. As for now, turn off msi for this device.
*/
static void __devinit nvenet_msi_disable(struct pci_dev *dev)
{
- if (dmi_name_in_vendors("P5N32-SLI PREMIUM")) {
+ if (dmi_name_in_vendors("P5N32-SLI PREMIUM") ||
+ dmi_name_in_vendors("P5N32-E SLI")) {
dev_info(&dev->dev,
- "Disabling msi for MCP55 NIC on P5N32-SLI Premium\n");
+ "Disabling msi for MCP55 NIC on P5N32-SLI\n");
dev->no_msi = 1;
}
}
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
index 041eee4..6df5dff 100644
--- a/drivers/pcmcia/ds.c
+++ b/drivers/pcmcia/ds.c
@@ -682,6 +682,7 @@ static void pcmcia_requery(struct pcmcia_socket *s)
if (old_funcs != new_funcs) {
/* we need to re-start */
pcmcia_card_remove(s, NULL);
+ s->functions = 0;
pcmcia_card_add(s);
}
}
diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c
index 83ace27..6bb6cb9 100644
--- a/drivers/pcmcia/yenta_socket.c
+++ b/drivers/pcmcia/yenta_socket.c
@@ -975,7 +975,7 @@ static irqreturn_t yenta_probe_handler(int irq, void *dev_id)
/* probes the PCI interrupt, use only on override functions */
static int yenta_probe_cb_irq(struct yenta_socket *socket)
{
- u8 reg;
+ u8 reg = 0;

if (!socket->cb_irq)
return -1;
@@ -989,7 +989,8 @@ static int yenta_probe_cb_irq(struct yenta_socket *socket)
}

/* generate interrupt, wait */
- reg = exca_readb(socket, I365_CSCINT);
+ if (!socket->dev->irq)
+ reg = exca_readb(socket, I365_CSCINT);
exca_writeb(socket, I365_CSCINT, reg | I365_CSC_STSCHG);
cb_writel(socket, CB_SOCKET_EVENT, -1);
cb_writel(socket, CB_SOCKET_MASK, CB_CSTSMASK);
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 6c3320d..50601d9 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -390,6 +390,7 @@ config EEEPC_WMI
depends on ACPI_WMI
depends on INPUT
depends on EXPERIMENTAL
+ depends on BACKLIGHT_CLASS_DEVICE
select INPUT_SPARSEKMAP
---help---
Say Y here if you want to support WMI-based hotkeys on Eee PC laptops.
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index e9aa814..aa13875 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -719,6 +719,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
}
}

+ cmos_rtc.dev = dev;
+ dev_set_drvdata(dev, &cmos_rtc);
+
cmos_rtc.rtc = rtc_device_register(driver_name, dev,
&cmos_rtc_ops, THIS_MODULE);
if (IS_ERR(cmos_rtc.rtc)) {
@@ -726,8 +729,6 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
goto cleanup0;
}

- cmos_rtc.dev = dev;
- dev_set_drvdata(dev, &cmos_rtc);
rename_region(ports, dev_name(&cmos_rtc.rtc->dev));

spin_lock_irq(&rtc_lock);
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index 4969b60..3793ea6 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -457,8 +457,6 @@ static int __devinit s3c_rtc_probe(struct platform_device *pdev)
pr_debug("s3c2410_rtc: RTCCON=%02x\n",
readb(s3c_rtc_base + S3C2410_RTCCON));

- s3c_rtc_setfreq(&pdev->dev, 1);
-
device_init_wakeup(&pdev->dev, 1);

/* register RTC and exit */
@@ -475,6 +473,9 @@ static int __devinit s3c_rtc_probe(struct platform_device *pdev)
rtc->max_user_freq = 128;

platform_set_drvdata(pdev, rtc);
+
+ s3c_rtc_setfreq(&pdev->dev, 1);
+
return 0;

err_nortc:
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 88f7446..8c496b5 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -395,12 +395,13 @@ int sas_ata_init_host_and_port(struct domain_device *found_dev,
void sas_ata_task_abort(struct sas_task *task)
{
struct ata_queued_cmd *qc = task->uldd_task;
- struct request_queue *q = qc->scsicmd->device->request_queue;
struct completion *waiting;
- unsigned long flags;

/* Bounce SCSI-initiated commands to the SCSI EH */
if (qc->scsicmd) {
+ struct request_queue *q = qc->scsicmd->device->request_queue;
+ unsigned long flags;
+
spin_lock_irqsave(q->queue_lock, flags);
blk_abort_request(qc->scsicmd->request);
spin_unlock_irqrestore(q->queue_lock, flags);
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 8228350..53849f2 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -1030,8 +1030,6 @@ int __sas_task_abort(struct sas_task *task)
void sas_task_abort(struct sas_task *task)
{
struct scsi_cmnd *sc = task->uldd_task;
- struct request_queue *q = sc->device->request_queue;
- unsigned long flags;

/* Escape for libsas internal commands */
if (!sc) {
@@ -1043,13 +1041,15 @@ void sas_task_abort(struct sas_task *task)

if (dev_is_sata(task->dev)) {
sas_ata_task_abort(task);
- return;
- }
+ } else {
+ struct request_queue *q = sc->device->request_queue;
+ unsigned long flags;

- spin_lock_irqsave(q->queue_lock, flags);
- blk_abort_request(sc->request);
- spin_unlock_irqrestore(q->queue_lock, flags);
- scsi_schedule_eh(sc->device->host);
+ spin_lock_irqsave(q->queue_lock, flags);
+ blk_abort_request(sc->request);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+ scsi_schedule_eh(sc->device->host);
+ }
}

int sas_slave_alloc(struct scsi_device *scsi_dev)
diff --git a/drivers/serial/68328serial.c b/drivers/serial/68328serial.c
index 78ed24b..3046386 100644
--- a/drivers/serial/68328serial.c
+++ b/drivers/serial/68328serial.c
@@ -1437,7 +1437,7 @@ int m68328_console_setup(struct console *cp, char *arg)
for (i = 0; i < ARRAY_SIZE(baud_table); i++)
if (baud_table[i] == n)
break;
- if (i < BAUD_TABLE_SIZE) {
+ if (i < ARRAY_SIZE(baud_table)) {
m68328_console_baud = n;
m68328_console_cbaud = 0;
if (i > 15) {
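
ARRAY_SIZE() is derived from the table itself, so the bound used in the check can no longer disagree with the loop above it the way a hand-maintained BAUD_TABLE_SIZE constant could. The idiom in isolation:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const int baud_table[] = { 9600, 19200, 38400, 57600, 115200 };

int main(void)
{
        printf("%zu entries\n", ARRAY_SIZE(baud_table)); /* tracks the table automatically */
        return 0;
}
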
diff --git a/drivers/staging/batman-adv/proc.c b/drivers/staging/batman-adv/proc.c
index 7de60e8..c9366bc 100644
--- a/drivers/staging/batman-adv/proc.c
+++ b/drivers/staging/batman-adv/proc.c
@@ -41,7 +41,7 @@ static int proc_interfaces_read(struct seq_file *seq, void *offset)

rcu_read_lock();
list_for_each_entry_rcu(batman_if, &if_list, list) {
- seq_printf(seq, "[%8s] %s %s \n",
+ seq_printf(seq, "[%8s] %s %s\n",
(batman_if->if_active == IF_ACTIVE ?
"active" : "inactive"),
batman_if->dev,
@@ -188,18 +188,18 @@ static int proc_originators_read(struct seq_file *seq, void *offset)
rcu_read_lock();
if (list_empty(&if_list)) {
rcu_read_unlock();
- seq_printf(seq, "BATMAN disabled - please specify interfaces to enable it \n");
+ seq_printf(seq, "BATMAN disabled - please specify interfaces to enable it\n");
goto end;
}

if (((struct batman_if *)if_list.next)->if_active != IF_ACTIVE) {
rcu_read_unlock();
- seq_printf(seq, "BATMAN disabled - primary interface not active \n");
+ seq_printf(seq, "BATMAN disabled - primary interface not active\n");
goto end;
}

seq_printf(seq,
- " %-14s (%s/%i) %17s [%10s]: %20s ... [B.A.T.M.A.N. adv %s%s, MainIF/MAC: %s/%s] \n",
+ " %-14s (%s/%i) %17s [%10s]: %20s ... [B.A.T.M.A.N. adv %s%s, MainIF/MAC: %s/%s]\n",
"Originator", "#", TQ_MAX_VALUE, "Nexthop", "outgoingIF",
"Potential nexthops", SOURCE_VERSION, REVISION_VERSION_STR,
((struct batman_if *)if_list.next)->dev,
@@ -240,7 +240,7 @@ static int proc_originators_read(struct seq_file *seq, void *offset)
spin_unlock_irqrestore(&orig_hash_lock, flags);

if (batman_count == 0)
- seq_printf(seq, "No batman nodes in range ... \n");
+ seq_printf(seq, "No batman nodes in range ...\n");

end:
return 0;
@@ -262,7 +262,7 @@ static int proc_transt_local_read(struct seq_file *seq, void *offset)
rcu_read_lock();
if (list_empty(&if_list)) {
rcu_read_unlock();
- seq_printf(seq, "BATMAN disabled - please specify interfaces to enable it \n");
+ seq_printf(seq, "BATMAN disabled - please specify interfaces to enable it\n");
goto end;
}

@@ -294,7 +294,7 @@ static int proc_transt_global_read(struct seq_file *seq, void *offset)
rcu_read_lock();
if (list_empty(&if_list)) {
rcu_read_unlock();
- seq_printf(seq, "BATMAN disabled - please specify interfaces to enable it \n");
+ seq_printf(seq, "BATMAN disabled - please specify interfaces to enable it\n");
goto end;
}
rcu_read_unlock();
@@ -350,9 +350,9 @@ static int proc_vis_srv_read(struct seq_file *seq, void *offset)
{
int vis_server = atomic_read(&vis_mode);

- seq_printf(seq, "[%c] client mode (server disabled) \n",
+ seq_printf(seq, "[%c] client mode (server disabled)\n",
(vis_server == VIS_TYPE_CLIENT_UPDATE) ? 'x' : ' ');
- seq_printf(seq, "[%c] server mode (server enabled) \n",
+ seq_printf(seq, "[%c] server mode (server enabled)\n",
(vis_server == VIS_TYPE_SERVER_SYNC) ? 'x' : ' ');

return 0;
@@ -369,6 +369,8 @@ static int proc_vis_data_read(struct seq_file *seq, void *offset)
struct vis_info *info;
struct vis_info_entry *entries;
HLIST_HEAD(vis_if_list);
+ struct if_list_entry *entry;
+ struct hlist_node *pos, *n;
int i;
char tmp_addr_str[ETH_STR_LEN];
unsigned long flags;
@@ -387,17 +389,34 @@ static int proc_vis_data_read(struct seq_file *seq, void *offset)
info = hashit.bucket->data;
entries = (struct vis_info_entry *)
((char *)info + sizeof(struct vis_info));
- addr_to_string(tmp_addr_str, info->packet.vis_orig);
- seq_printf(seq, "%s,", tmp_addr_str);

for (i = 0; i < info->packet.entries; i++) {
- proc_vis_read_entry(seq, &entries[i], &vis_if_list,
- info->packet.vis_orig);
+ if (entries[i].quality == 0)
+ continue;
+ proc_vis_insert_interface(entries[i].src, &vis_if_list,
+ compare_orig(entries[i].src,
+ info->packet.vis_orig));
}

- /* add primary/secondary records */
- proc_vis_read_prim_sec(seq, &vis_if_list);
- seq_printf(seq, "\n");
+ hlist_for_each_entry(entry, pos, &vis_if_list, list) {
+ addr_to_string(tmp_addr_str, entry->addr);
+ seq_printf(seq, "%s,", tmp_addr_str);
+
+ for (i = 0; i < info->packet.entries; i++)
+ proc_vis_read_entry(seq, &entries[i],
+ entry->addr, entry->primary);
+
+ /* add primary/secondary records */
+ if (compare_orig(entry->addr, info->packet.vis_orig))
+ proc_vis_read_prim_sec(seq, &vis_if_list);
+
+ seq_printf(seq, "\n");
+ }
+
+ hlist_for_each_entry_safe(entry, pos, n, &vis_if_list, list) {
+ hlist_del(&entry->list);
+ kfree(entry);
+ }
}
spin_unlock_irqrestore(&vis_hash_lock, flags);

diff --git a/drivers/staging/batman-adv/vis.c b/drivers/staging/batman-adv/vis.c
index fedec1b..28eac7e 100644
--- a/drivers/staging/batman-adv/vis.c
+++ b/drivers/staging/batman-adv/vis.c
@@ -27,24 +27,44 @@
#include "hard-interface.h"
#include "hash.h"

+/* Returns the smallest signed integer in two's complement with the sizeof x */
+#define smallest_signed_int(x) (1u << (7u + 8u * (sizeof(x) - 1u)))
+
+/* Checks if a sequence number x is a predecessor/successor of y.
+ They handle overflows/underflows and can correctly check for a
+ predecessor/successor unless the variable sequence number has grown by
+ more than 2**(bitwidth(x)-1)-1.
+ This means that for a uint8_t with the maximum value 255, it would think:
+ * when adding nothing - it is neither a predecessor nor a successor
+ * before adding more than 127 to the starting value - it is a predecessor,
+ * when adding 128 - it is neither a predecessor nor a successor,
+ * after adding more than 127 to the starting value - it is a successor */
+#define seq_before(x, y) ({typeof(x) _dummy = (x - y); \
+ _dummy > smallest_signed_int(_dummy); })
+#define seq_after(x, y) seq_before(y, x)
+
struct hashtable_t *vis_hash;
DEFINE_SPINLOCK(vis_hash_lock);
+static DEFINE_SPINLOCK(recv_list_lock);
static struct vis_info *my_vis_info;
static struct list_head send_list; /* always locked with vis_hash_lock */

static void start_vis_timer(void);

/* free the info */
-static void free_info(void *data)
+static void free_info(struct kref *ref)
{
- struct vis_info *info = data;
+ struct vis_info *info = container_of(ref, struct vis_info, refcount);
struct recvlist_node *entry, *tmp;
+ unsigned long flags;

list_del_init(&info->send_list);
+ spin_lock_irqsave(&recv_list_lock, flags);
list_for_each_entry_safe(entry, tmp, &info->recv_list, list) {
list_del(&entry->list);
kfree(entry);
}
+ spin_unlock_irqrestore(&recv_list_lock, flags);
kfree(info);
}

@@ -82,7 +102,7 @@ static int vis_info_choose(void *data, int size)

/* insert interface to the list of interfaces of one originator, if it
* does not already exist in the list */
-static void proc_vis_insert_interface(const uint8_t *interface,
+void proc_vis_insert_interface(const uint8_t *interface,
struct hlist_head *if_list,
bool primary)
{
@@ -107,38 +127,51 @@ void proc_vis_read_prim_sec(struct seq_file *seq,
struct hlist_head *if_list)
{
struct if_list_entry *entry;
- struct hlist_node *pos, *n;
+ struct hlist_node *pos;
char tmp_addr_str[ETH_STR_LEN];

- hlist_for_each_entry_safe(entry, pos, n, if_list, list) {
- if (entry->primary) {
+ hlist_for_each_entry(entry, pos, if_list, list) {
+ if (entry->primary)
seq_printf(seq, "PRIMARY, ");
- } else {
+ else {
addr_to_string(tmp_addr_str, entry->addr);
seq_printf(seq, "SEC %s, ", tmp_addr_str);
}
-
- hlist_del(&entry->list);
- kfree(entry);
}
}

/* read an entry */
void proc_vis_read_entry(struct seq_file *seq,
struct vis_info_entry *entry,
- struct hlist_head *if_list,
- uint8_t *vis_orig)
+ uint8_t *src,
+ bool primary)
{
char to[40];

addr_to_string(to, entry->dest);
- if (entry->quality == 0) {
- proc_vis_insert_interface(vis_orig, if_list, true);
+ if (primary && entry->quality == 0)
seq_printf(seq, "HNA %s, ", to);
- } else {
- proc_vis_insert_interface(entry->src, if_list,
- compare_orig(entry->src, vis_orig));
+ else if (compare_orig(entry->src, src))
seq_printf(seq, "TQ %s %d, ", to, entry->quality);
+}
+
+/* add the info packet to the send list, if it was not
+ * already linked in. */
+static void send_list_add(struct vis_info *info)
+{
+ if (list_empty(&info->send_list)) {
+ kref_get(&info->refcount);
+ list_add_tail(&info->send_list, &send_list);
+ }
+}
+
+/* delete the info packet from the send list, if it was
+ * linked in. */
+static void send_list_del(struct vis_info *info)
+{
+ if (!list_empty(&info->send_list)) {
+ list_del_init(&info->send_list);
+ kref_put(&info->refcount, free_info);
}
}

@@ -146,32 +179,41 @@ void proc_vis_read_entry(struct seq_file *seq,
static void recv_list_add(struct list_head *recv_list, char *mac)
{
struct recvlist_node *entry;
+ unsigned long flags;
+
entry = kmalloc(sizeof(struct recvlist_node), GFP_ATOMIC);
if (!entry)
return;

memcpy(entry->mac, mac, ETH_ALEN);
+ spin_lock_irqsave(&recv_list_lock, flags);
list_add_tail(&entry->list, recv_list);
+ spin_unlock_irqrestore(&recv_list_lock, flags);
}

/* returns 1 if this mac is in the recv_list */
static int recv_list_is_in(struct list_head *recv_list, char *mac)
{
struct recvlist_node *entry;
+ unsigned long flags;

+ spin_lock_irqsave(&recv_list_lock, flags);
list_for_each_entry(entry, recv_list, list) {
- if (memcmp(entry->mac, mac, ETH_ALEN) == 0)
+ if (memcmp(entry->mac, mac, ETH_ALEN) == 0) {
+ spin_unlock_irqrestore(&recv_list_lock, flags);
return 1;
+ }
}
-
+ spin_unlock_irqrestore(&recv_list_lock, flags);
return 0;
}

/* try to add the packet to the vis_hash. return NULL if invalid (e.g. too old,
- * broken.. ). vis hash must be locked outside. is_new is set when the packet
+ * broken.. ). vis hash must be locked outside. is_new is set when the packet
* is newer than old entries in the hash. */
static struct vis_info *add_packet(struct vis_packet *vis_packet,
- int vis_info_len, int *is_new)
+ int vis_info_len, int *is_new,
+ int make_broadcast)
{
struct vis_info *info, *old_info;
struct vis_info search_elem;
@@ -186,7 +228,7 @@ static struct vis_info *add_packet(struct vis_packet *vis_packet,
old_info = hash_find(vis_hash, &search_elem);

if (old_info != NULL) {
- if (vis_packet->seqno - old_info->packet.seqno <= 0) {
+ if (!seq_after(vis_packet->seqno, old_info->packet.seqno)) {
if (old_info->packet.seqno == vis_packet->seqno) {
recv_list_add(&old_info->recv_list,
vis_packet->sender_orig);
@@ -198,13 +240,15 @@ static struct vis_info *add_packet(struct vis_packet *vis_packet,
}
/* remove old entry */
hash_remove(vis_hash, old_info);
- free_info(old_info);
+ send_list_del(old_info);
+ kref_put(&old_info->refcount, free_info);
}

info = kmalloc(sizeof(struct vis_info) + vis_info_len, GFP_ATOMIC);
if (info == NULL)
return NULL;

+ kref_init(&info->refcount);
INIT_LIST_HEAD(&info->send_list);
INIT_LIST_HEAD(&info->recv_list);
info->first_seen = jiffies;
@@ -214,16 +258,21 @@ static struct vis_info *add_packet(struct vis_packet *vis_packet,
/* initialize and add new packet. */
*is_new = 1;

+ /* Make it a broadcast packet, if required */
+ if (make_broadcast)
+ memcpy(info->packet.target_orig, broadcastAddr, ETH_ALEN);
+
/* repair if entries is longer than packet. */
if (info->packet.entries * sizeof(struct vis_info_entry) > vis_info_len)
- info->packet.entries = vis_info_len / sizeof(struct vis_info_entry);
+ info->packet.entries = vis_info_len /
+ sizeof(struct vis_info_entry);

recv_list_add(&info->recv_list, info->packet.sender_orig);

/* try to add it */
if (hash_add(vis_hash, info) < 0) {
/* did not work (for some reason) */
- free_info(info);
+ kref_put(&old_info->refcount, free_info);
info = NULL;
}

@@ -234,22 +283,21 @@ static struct vis_info *add_packet(struct vis_packet *vis_packet,
void receive_server_sync_packet(struct vis_packet *vis_packet, int vis_info_len)
{
struct vis_info *info;
- int is_new;
+ int is_new, make_broadcast;
unsigned long flags;
int vis_server = atomic_read(&vis_mode);

+ make_broadcast = (vis_server == VIS_TYPE_SERVER_SYNC);
+
spin_lock_irqsave(&vis_hash_lock, flags);
- info = add_packet(vis_packet, vis_info_len, &is_new);
+ info = add_packet(vis_packet, vis_info_len, &is_new, make_broadcast);
if (info == NULL)
goto end;

/* only if we are server ourselves and packet is newer than the one in
* hash.*/
- if (vis_server == VIS_TYPE_SERVER_SYNC && is_new) {
- memcpy(info->packet.target_orig, broadcastAddr, ETH_ALEN);
- if (list_empty(&info->send_list))
- list_add_tail(&info->send_list, &send_list);
- }
+ if (vis_server == VIS_TYPE_SERVER_SYNC && is_new)
+ send_list_add(info);
end:
spin_unlock_irqrestore(&vis_hash_lock, flags);
}
@@ -262,31 +310,32 @@ void receive_client_update_packet(struct vis_packet *vis_packet,
int is_new;
unsigned long flags;
int vis_server = atomic_read(&vis_mode);
+ int are_target = 0;

/* clients shall not broadcast. */
if (is_bcast(vis_packet->target_orig))
return;

+ /* Are we the target for this VIS packet? */
+ if (vis_server == VIS_TYPE_SERVER_SYNC &&
+ is_my_mac(vis_packet->target_orig))
+ are_target = 1;
+
spin_lock_irqsave(&vis_hash_lock, flags);
- info = add_packet(vis_packet, vis_info_len, &is_new);
+ info = add_packet(vis_packet, vis_info_len, &is_new, are_target);
if (info == NULL)
goto end;
/* note that outdated packets will be dropped at this point. */


/* send only if we're the target server or ... */
- if (vis_server == VIS_TYPE_SERVER_SYNC &&
- is_my_mac(info->packet.target_orig) &&
- is_new) {
+ if (are_target && is_new) {
info->packet.vis_type = VIS_TYPE_SERVER_SYNC; /* upgrade! */
- memcpy(info->packet.target_orig, broadcastAddr, ETH_ALEN);
- if (list_empty(&info->send_list))
- list_add_tail(&info->send_list, &send_list);
+ send_list_add(info);

/* ... we're not the recipient (and thus need to forward). */
} else if (!is_my_mac(info->packet.target_orig)) {
- if (list_empty(&info->send_list))
- list_add_tail(&info->send_list, &send_list);
+ send_list_add(info);
}
end:
spin_unlock_irqrestore(&vis_hash_lock, flags);
@@ -361,14 +410,17 @@ static int generate_vis_packet(void)
while (hash_iterate(orig_hash, &hashit_global)) {
orig_node = hashit_global.bucket->data;
if (orig_node->router != NULL
- && compare_orig(orig_node->router->addr, orig_node->orig)
+ && compare_orig(orig_node->router->addr,
+ orig_node->orig)
&& orig_node->batman_if
&& (orig_node->batman_if->if_active == IF_ACTIVE)
&& orig_node->router->tq_avg > 0) {

/* fill one entry into buffer. */
entry = &entry_array[info->packet.entries];
- memcpy(entry->src, orig_node->batman_if->net_dev->dev_addr, ETH_ALEN);
+ memcpy(entry->src,
+ orig_node->batman_if->net_dev->dev_addr,
+ ETH_ALEN);
memcpy(entry->dest, orig_node->orig, ETH_ALEN);
entry->quality = orig_node->router->tq_avg;
info->packet.entries++;
@@ -400,6 +452,8 @@ static int generate_vis_packet(void)
return 0;
}

+/* free old vis packets. Must be called with the vis_hash_lock
+ * held */
static void purge_vis_packets(void)
{
HASHIT(hashit);
@@ -412,7 +466,8 @@ static void purge_vis_packets(void)
if (time_after(jiffies,
info->first_seen + (VIS_TIMEOUT*HZ)/1000)) {
hash_remove_bucket(vis_hash, &hashit);
- free_info(info);
+ send_list_del(info);
+ kref_put(&info->refcount, free_info);
}
}
}
@@ -422,6 +477,8 @@ static void broadcast_vis_packet(struct vis_info *info, int packet_length)
HASHIT(hashit);
struct orig_node *orig_node;
unsigned long flags;
+ struct batman_if *batman_if;
+ uint8_t dstaddr[ETH_ALEN];

spin_lock_irqsave(&orig_hash_lock, flags);

@@ -430,45 +487,56 @@ static void broadcast_vis_packet(struct vis_info *info, int packet_length)
orig_node = hashit.bucket->data;

/* if it's a vis server and reachable, send it. */
- if (orig_node &&
- (orig_node->flags & VIS_SERVER) &&
- orig_node->batman_if &&
- orig_node->router) {
+ if ((!orig_node) || (!orig_node->batman_if) ||
+ (!orig_node->router))
+ continue;
+ if (!(orig_node->flags & VIS_SERVER))
+ continue;
+ /* don't send it if we already received the packet from
+ * this node. */
+ if (recv_list_is_in(&info->recv_list, orig_node->orig))
+ continue;

- /* don't send it if we already received the packet from
- * this node. */
- if (recv_list_is_in(&info->recv_list, orig_node->orig))
- continue;
+ memcpy(info->packet.target_orig, orig_node->orig, ETH_ALEN);
+ batman_if = orig_node->batman_if;
+ memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
+ spin_unlock_irqrestore(&orig_hash_lock, flags);

- memcpy(info->packet.target_orig,
- orig_node->orig, ETH_ALEN);
+ send_raw_packet((unsigned char *)&info->packet,
+ packet_length, batman_if, dstaddr);
+
+ spin_lock_irqsave(&orig_hash_lock, flags);

- send_raw_packet((unsigned char *) &info->packet,
- packet_length,
- orig_node->batman_if,
- orig_node->router->addr);
- }
}
- memcpy(info->packet.target_orig, broadcastAddr, ETH_ALEN);
spin_unlock_irqrestore(&orig_hash_lock, flags);
+ memcpy(info->packet.target_orig, broadcastAddr, ETH_ALEN);
}

static void unicast_vis_packet(struct vis_info *info, int packet_length)
{
struct orig_node *orig_node;
unsigned long flags;
+ struct batman_if *batman_if;
+ uint8_t dstaddr[ETH_ALEN];

spin_lock_irqsave(&orig_hash_lock, flags);
orig_node = ((struct orig_node *)
hash_find(orig_hash, info->packet.target_orig));

- if ((orig_node != NULL) &&
- (orig_node->batman_if != NULL) &&
- (orig_node->router != NULL)) {
- send_raw_packet((unsigned char *) &info->packet, packet_length,
- orig_node->batman_if,
- orig_node->router->addr);
- }
+ if ((!orig_node) || (!orig_node->batman_if) || (!orig_node->router))
+ goto out;
+
+ /* don't lock while sending the packets ... we therefore
+ * copy the required data before sending */
+ batman_if = orig_node->batman_if;
+ memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
+ spin_unlock_irqrestore(&orig_hash_lock, flags);
+
+ send_raw_packet((unsigned char *)&info->packet,
+ packet_length, batman_if, dstaddr);
+ return;
+
+out:
spin_unlock_irqrestore(&orig_hash_lock, flags);
}
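[Editor's note: both broadcast_vis_packet() and unicast_vis_packet() now copy the outgoing interface pointer and the next-hop address into locals and release orig_hash_lock before calling send_raw_packet(), instead of transmitting with the hash lock held. A simplified sketch of that idiom, using the driver's names but omitting the lookup and the broadcast loop's re-locking:]

	static void send_to_node(struct orig_node *orig_node, struct vis_info *info,
				 int packet_length)
	{
		struct batman_if *out_if;
		uint8_t nexthop[ETH_ALEN];
		unsigned long flags;

		spin_lock_irqsave(&orig_hash_lock, flags);
		/* copy everything the transmit path needs ... */
		out_if = orig_node->batman_if;
		memcpy(nexthop, orig_node->router->addr, ETH_ALEN);
		/* ... so the hash lock is not held across the actual send */
		spin_unlock_irqrestore(&orig_hash_lock, flags);

		send_raw_packet((unsigned char *)&info->packet, packet_length,
				out_if, nexthop);
	}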

@@ -502,15 +570,24 @@ static void send_vis_packets(struct work_struct *work)
unsigned long flags;

spin_lock_irqsave(&vis_hash_lock, flags);
+
purge_vis_packets();

- if (generate_vis_packet() == 0)
+ if (generate_vis_packet() == 0) {
/* schedule if generation was successful */
- list_add_tail(&my_vis_info->send_list, &send_list);
+ send_list_add(my_vis_info);
+ }

list_for_each_entry_safe(info, temp, &send_list, send_list) {
- list_del_init(&info->send_list);
+
+ kref_get(&info->refcount);
+ spin_unlock_irqrestore(&vis_hash_lock, flags);
+
send_vis_packet(info);
+
+ spin_lock_irqsave(&vis_hash_lock, flags);
+ send_list_del(info);
+ kref_put(&info->refcount, free_info);
}
spin_unlock_irqrestore(&vis_hash_lock, flags);
start_vis_timer();
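[Editor's note: send_vis_packets() now walks the send list with a per-item reference bump: it takes a reference, drops vis_hash_lock while the packet is transmitted, then re-takes the lock, unlinks the entry and drops both the list's and the temporary reference. A compact sketch of that pattern with the hypothetical names from the kref example above; it assumes, as the vis timer does, that no other context removes entries from the queue concurrently:]

	list_for_each_entry_safe(it, tmp, &queue, node) {
		kref_get(&it->refcount);		/* keep it alive while unlocked */
		spin_unlock_irqrestore(&queue_lock, flags);

		do_blocking_send(it);			/* may sleep, take other locks */

		spin_lock_irqsave(&queue_lock, flags);
		unqueue_item(it);			/* drops the list's reference */
		kref_put(&it->refcount, item_release);	/* drop the temporary one */
	}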
@@ -543,6 +620,7 @@ int vis_init(void)
my_vis_info->first_seen = jiffies - atomic_read(&vis_interval);
INIT_LIST_HEAD(&my_vis_info->recv_list);
INIT_LIST_HEAD(&my_vis_info->send_list);
+ kref_init(&my_vis_info->refcount);
my_vis_info->packet.version = COMPAT_VERSION;
my_vis_info->packet.packet_type = BAT_VIS;
my_vis_info->packet.ttl = TTL;
@@ -556,9 +634,9 @@ int vis_init(void)

if (hash_add(vis_hash, my_vis_info) < 0) {
printk(KERN_ERR
- "batman-adv:Can't add own vis packet into hash\n");
- free_info(my_vis_info); /* not in hash, need to remove it
- * manually. */
+ "batman-adv:Can't add own vis packet into hash\n");
+ /* not in hash, need to remove it manually. */
+ kref_put(&my_vis_info->refcount, free_info);
goto err;
}

@@ -572,6 +650,15 @@ err:
return 0;
}

+/* Decrease the reference count on a hash item info */
+static void free_info_ref(void *data)
+{
+ struct vis_info *info = data;
+
+ send_list_del(info);
+ kref_put(&info->refcount, free_info);
+}
+
/* shutdown vis-server */
void vis_quit(void)
{
@@ -583,7 +670,7 @@ void vis_quit(void)

spin_lock_irqsave(&vis_hash_lock, flags);
/* properly remove, kill timers ... */
- hash_delete(vis_hash, free_info);
+ hash_delete(vis_hash, free_info_ref);
vis_hash = NULL;
my_vis_info = NULL;
spin_unlock_irqrestore(&vis_hash_lock, flags);
diff --git a/drivers/staging/batman-adv/vis.h b/drivers/staging/batman-adv/vis.h
index 0cdafde..a1f92a4 100644
--- a/drivers/staging/batman-adv/vis.h
+++ b/drivers/staging/batman-adv/vis.h
@@ -29,6 +29,7 @@ struct vis_info {
/* list of server-neighbors we received a vis-packet
* from. we should not reply to them. */
struct list_head send_list;
+ struct kref refcount;
/* this packet might be part of the vis send queue. */
struct vis_packet packet;
/* vis_info may follow here*/
@@ -48,10 +49,13 @@ struct recvlist_node {
extern struct hashtable_t *vis_hash;
extern spinlock_t vis_hash_lock;

+void proc_vis_insert_interface(const uint8_t *interface,
+ struct hlist_head *if_list,
+ bool primary);
void proc_vis_read_entry(struct seq_file *seq,
struct vis_info_entry *entry,
- struct hlist_head *if_list,
- uint8_t *vis_orig);
+ uint8_t *src,
+ bool primary);
void proc_vis_read_prim_sec(struct seq_file *seq,
struct hlist_head *if_list);
void receive_server_sync_packet(struct vis_packet *vis_packet,
diff --git a/drivers/staging/comedi/drivers/ni_mio_cs.c b/drivers/staging/comedi/drivers/ni_mio_cs.c
index dc4849a..9855608 100644
--- a/drivers/staging/comedi/drivers/ni_mio_cs.c
+++ b/drivers/staging/comedi/drivers/ni_mio_cs.c
@@ -123,7 +123,7 @@ static const struct ni_board_struct ni_boards[] = {
.adbits = 12,
.ai_fifo_depth = 1024,
.alwaysdither = 0,
- .gainlkup = ai_gain_16,
+ .gainlkup = ai_gain_4,
.ai_speed = 5000,
.n_aochan = 2,
.aobits = 12,
diff --git a/drivers/staging/rt2860/usb_main_dev.c b/drivers/staging/rt2860/usb_main_dev.c
index 740db0c..2ffd0fe 100644
--- a/drivers/staging/rt2860/usb_main_dev.c
+++ b/drivers/staging/rt2860/usb_main_dev.c
@@ -98,6 +98,7 @@ struct usb_device_id rtusb_usb_id[] = {
{USB_DEVICE(0x5A57, 0x0282)}, /* Zinwell */
{USB_DEVICE(0x7392, 0x7718)},
{USB_DEVICE(0x7392, 0x7717)},
+ {USB_DEVICE(0x0411, 0x016f)}, /* MelCo.,Inc. WLI-UC-G301N */
{USB_DEVICE(0x1737, 0x0070)}, /* Linksys WUSB100 */
{USB_DEVICE(0x1737, 0x0071)}, /* Linksys WUSB600N */
{USB_DEVICE(0x0411, 0x00e8)}, /* Buffalo WLI-UC-G300N */
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index e40a2e9..fea0e99 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -1090,11 +1090,13 @@ device_found1(struct pci_dev *pcid, const struct pci_device_id *ent)
}
//2008-07-21-01<Add>by MikeLiu
//register wpadev
+#if 0
if(wpa_set_wpadev(pDevice, 1)!=0) {
printk("Fail to Register WPADEV?\n");
unregister_netdev(pDevice->dev);
free_netdev(dev);
}
+#endif
device_print_info(pDevice);
pci_set_drvdata(pcid, pDevice);
return 0;
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 5e1a253..3c73add 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1201,7 +1201,7 @@ made_compressed_probe:
if (rcv->urb == NULL) {
dev_dbg(&intf->dev,
"out of memory (read urbs usb_alloc_urb)\n");
- goto alloc_fail7;
+ goto alloc_fail6;
}

rcv->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
@@ -1225,7 +1225,7 @@ made_compressed_probe:
if (snd->urb == NULL) {
dev_dbg(&intf->dev,
"out of memory (write urbs usb_alloc_urb)");
- goto alloc_fail7;
+ goto alloc_fail8;
}

if (usb_endpoint_xfer_int(epwrite))
@@ -1264,6 +1264,7 @@ made_compressed_probe:
i = device_create_file(&intf->dev,
&dev_attr_iCountryCodeRelDate);
if (i < 0) {
+ device_remove_file(&intf->dev, &dev_attr_wCountryCodes);
kfree(acm->country_codes);
goto skip_countries;
}
@@ -1300,6 +1301,7 @@ alloc_fail8:
usb_free_urb(acm->wb[i].urb);
alloc_fail7:
acm_read_buffers_free(acm);
+alloc_fail6:
for (i = 0; i < num_rx_buf; i++)
usb_free_urb(acm->ru[i].urb);
usb_free_urb(acm->ctrlurb);
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 2f3dc4c..9d02dc6 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -1322,6 +1322,7 @@ int usb_resume(struct device *dev, pm_message_t msg)

/* For all other calls, take the device back to full power and
* tell the PM core in case it was autosuspended previously.
+ * Unbind the interfaces that will need rebinding later.
*/
} else {
status = usb_resume_both(udev, msg);
@@ -1330,6 +1331,7 @@ int usb_resume(struct device *dev, pm_message_t msg)
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
udev->last_busy = jiffies;
+ do_unbind_rebind(udev, DO_REBIND);
}
}

diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 2f8cedd..b60a14d 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1261,6 +1261,51 @@ static void hcd_free_coherent(struct usb_bus *bus, dma_addr_t *dma_handle,
*dma_handle = 0;
}

+static void unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
+{
+ enum dma_data_direction dir;
+
+ if (urb->transfer_flags & URB_SETUP_MAP_SINGLE)
+ dma_unmap_single(hcd->self.controller,
+ urb->setup_dma,
+ sizeof(struct usb_ctrlrequest),
+ DMA_TO_DEVICE);
+ else if (urb->transfer_flags & URB_SETUP_MAP_LOCAL)
+ hcd_free_coherent(urb->dev->bus,
+ &urb->setup_dma,
+ (void **) &urb->setup_packet,
+ sizeof(struct usb_ctrlrequest),
+ DMA_TO_DEVICE);
+
+ dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+ if (urb->transfer_flags & URB_DMA_MAP_SG)
+ dma_unmap_sg(hcd->self.controller,
+ urb->sg->sg,
+ urb->num_sgs,
+ dir);
+ else if (urb->transfer_flags & URB_DMA_MAP_PAGE)
+ dma_unmap_page(hcd->self.controller,
+ urb->transfer_dma,
+ urb->transfer_buffer_length,
+ dir);
+ else if (urb->transfer_flags & URB_DMA_MAP_SINGLE)
+ dma_unmap_single(hcd->self.controller,
+ urb->transfer_dma,
+ urb->transfer_buffer_length,
+ dir);
+ else if (urb->transfer_flags & URB_MAP_LOCAL)
+ hcd_free_coherent(urb->dev->bus,
+ &urb->transfer_dma,
+ &urb->transfer_buffer,
+ urb->transfer_buffer_length,
+ dir);
+
+ /* Make it safe to call this routine more than once */
+ urb->transfer_flags &= ~(URB_SETUP_MAP_SINGLE | URB_SETUP_MAP_LOCAL |
+ URB_DMA_MAP_SG | URB_DMA_MAP_PAGE |
+ URB_DMA_MAP_SINGLE | URB_MAP_LOCAL);
+}
+
static int map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
gfp_t mem_flags)
{
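[Editor's note: the new unmap_urb_for_dma() relies on map_urb_for_dma() recording in urb->transfer_flags exactly which kind of mapping was created (URB_SETUP_MAP_SINGLE, URB_DMA_MAP_SG, URB_DMA_MAP_PAGE, URB_DMA_MAP_SINGLE, or the coherent variants), and it clears those bits when done, so the routine is safe to call repeatedly and on partially mapped URBs. The same record-what-you-did idiom in miniature, with generic flag names and a hypothetical context struct rather than the USB core's types:]

	#define DID_MAP_BUF	0x1
	#define DID_MAP_SETUP	0x2

	static void undo_mappings(struct ctx *c)	/* struct ctx is illustrative */
	{
		if (c->flags & DID_MAP_BUF)
			dma_unmap_single(c->dev, c->buf_dma, c->buf_len, c->dir);
		if (c->flags & DID_MAP_SETUP)
			dma_unmap_single(c->dev, c->setup_dma, 8, DMA_TO_DEVICE);

		/* clear the record so a second call is a no-op */
		c->flags &= ~(DID_MAP_BUF | DID_MAP_SETUP);
	}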
@@ -1272,8 +1317,6 @@ static int map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
* unless it uses pio or talks to another transport,
* or uses the provided scatter gather list for bulk.
*/
- if (is_root_hub(urb->dev))
- return 0;

if (usb_endpoint_xfer_control(&urb->ep->desc)
&& !(urb->transfer_flags & URB_NO_SETUP_DMA_MAP)) {
@@ -1286,6 +1329,7 @@ static int map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
if (dma_mapping_error(hcd->self.controller,
urb->setup_dma))
return -EAGAIN;
+ urb->transfer_flags |= URB_SETUP_MAP_SINGLE;
} else if (hcd->driver->flags & HCD_LOCAL_MEM)
ret = hcd_alloc_coherent(
urb->dev->bus, mem_flags,
@@ -1293,20 +1337,57 @@ static int map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
(void **)&urb->setup_packet,
sizeof(struct usb_ctrlrequest),
DMA_TO_DEVICE);
+ if (ret)
+ return ret;
+ urb->transfer_flags |= URB_SETUP_MAP_LOCAL;
}

dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
- if (ret == 0 && urb->transfer_buffer_length != 0
+ if (urb->transfer_buffer_length != 0
&& !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) {
if (hcd->self.uses_dma) {
- urb->transfer_dma = dma_map_single (
- hcd->self.controller,
- urb->transfer_buffer,
- urb->transfer_buffer_length,
- dir);
- if (dma_mapping_error(hcd->self.controller,
+ if (urb->num_sgs) {
+ int n = dma_map_sg(
+ hcd->self.controller,
+ urb->sg->sg,
+ urb->num_sgs,
+ dir);
+ if (n <= 0)
+ ret = -EAGAIN;
+ else
+ urb->transfer_flags |= URB_DMA_MAP_SG;
+ if (n != urb->num_sgs) {
+ urb->num_sgs = n;
+ urb->transfer_flags |=
+ URB_DMA_SG_COMBINED;
+ }
+ } else if (urb->sg) {
+ struct scatterlist *sg;
+
+ sg = (struct scatterlist *) urb->sg;
+ urb->transfer_dma = dma_map_page(
+ hcd->self.controller,
+ sg_page(sg),
+ sg->offset,
+ urb->transfer_buffer_length,
+ dir);
+ if (dma_mapping_error(hcd->self.controller,
urb->transfer_dma))
- return -EAGAIN;
+ ret = -EAGAIN;
+ else
+ urb->transfer_flags |= URB_DMA_MAP_PAGE;
+ } else {
+ urb->transfer_dma = dma_map_single(
+ hcd->self.controller,
+ urb->transfer_buffer,
+ urb->transfer_buffer_length,
+ dir);
+ if (dma_mapping_error(hcd->self.controller,
+ urb->transfer_dma))
+ ret = -EAGAIN;
+ else
+ urb->transfer_flags |= URB_DMA_MAP_SINGLE;
+ }
} else if (hcd->driver->flags & HCD_LOCAL_MEM) {
ret = hcd_alloc_coherent(
urb->dev->bus, mem_flags,
@@ -1314,55 +1395,16 @@ static int map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
&urb->transfer_buffer,
urb->transfer_buffer_length,
dir);
-
- if (ret && usb_endpoint_xfer_control(&urb->ep->desc)
- && !(urb->transfer_flags & URB_NO_SETUP_DMA_MAP))
- hcd_free_coherent(urb->dev->bus,
- &urb->setup_dma,
- (void **)&urb->setup_packet,
- sizeof(struct usb_ctrlrequest),
- DMA_TO_DEVICE);
+ if (ret == 0)
+ urb->transfer_flags |= URB_MAP_LOCAL;
}
+ if (ret && (urb->transfer_flags & (URB_SETUP_MAP_SINGLE |
+ URB_SETUP_MAP_LOCAL)))
+ unmap_urb_for_dma(hcd, urb);
}
return ret;
}

-static void unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
-{
- enum dma_data_direction dir;
-
- if (is_root_hub(urb->dev))
- return;
-
- if (usb_endpoint_xfer_control(&urb->ep->desc)
- && !(urb->transfer_flags & URB_NO_SETUP_DMA_MAP)) {
- if (hcd->self.uses_dma)
- dma_unmap_single(hcd->self.controller, urb->setup_dma,
- sizeof(struct usb_ctrlrequest),
- DMA_TO_DEVICE);
- else if (hcd->driver->flags & HCD_LOCAL_MEM)
- hcd_free_coherent(urb->dev->bus, &urb->setup_dma,
- (void **)&urb->setup_packet,
- sizeof(struct usb_ctrlrequest),
- DMA_TO_DEVICE);
- }
-
- dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
- if (urb->transfer_buffer_length != 0
- && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) {
- if (hcd->self.uses_dma)
- dma_unmap_single(hcd->self.controller,
- urb->transfer_dma,
- urb->transfer_buffer_length,
- dir);
- else if (hcd->driver->flags & HCD_LOCAL_MEM)
- hcd_free_coherent(urb->dev->bus, &urb->transfer_dma,
- &urb->transfer_buffer,
- urb->transfer_buffer_length,
- dir);
- }
-}
-
/*-------------------------------------------------------------------------*/

/* may be called in any context with a valid urb->dev usecount
@@ -1391,21 +1433,20 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
* URBs must be submitted in process context with interrupts
* enabled.
*/
- status = map_urb_for_dma(hcd, urb, mem_flags);
- if (unlikely(status)) {
- usbmon_urb_submit_error(&hcd->self, urb, status);
- goto error;
- }

- if (is_root_hub(urb->dev))
+ if (is_root_hub(urb->dev)) {
status = rh_urb_enqueue(hcd, urb);
- else
- status = hcd->driver->urb_enqueue(hcd, urb, mem_flags);
+ } else {
+ status = map_urb_for_dma(hcd, urb, mem_flags);
+ if (likely(status == 0)) {
+ status = hcd->driver->urb_enqueue(hcd, urb, mem_flags);
+ if (unlikely(status))
+ unmap_urb_for_dma(hcd, urb);
+ }
+ }

if (unlikely(status)) {
usbmon_urb_submit_error(&hcd->self, urb, status);
- unmap_urb_for_dma(hcd, urb);
- error:
urb->hcpriv = NULL;
INIT_LIST_HEAD(&urb->urb_list);
atomic_dec(&urb->use_count);
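[Editor's note: usb_hcd_submit_urb() now maps only non-root-hub URBs and, if the HCD's urb_enqueue() fails, undoes the mapping before falling into the common error path; root-hub URBs go straight to rh_urb_enqueue() without ever being mapped. The new control flow reduced to its skeleton, as a reading aid only:]

	if (is_root_hub(urb->dev)) {
		status = rh_urb_enqueue(hcd, urb);	/* never DMA-mapped */
	} else {
		status = map_urb_for_dma(hcd, urb, mem_flags);
		if (status == 0) {
			status = hcd->driver->urb_enqueue(hcd, urb, mem_flags);
			if (status)
				unmap_urb_for_dma(hcd, urb);	/* undo on failure */
		}
	}
	/* on any error: usbmon error event, reset hcpriv, drop usecounts */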
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index cd22027..794dca2 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -259,9 +259,6 @@ static void sg_clean(struct usb_sg_request *io)
kfree(io->urbs);
io->urbs = NULL;
}
- if (io->dev->dev.dma_mask != NULL)
- usb_buffer_unmap_sg(io->dev, usb_pipein(io->pipe),
- io->sg, io->nents);
io->dev = NULL;
}

@@ -364,7 +361,6 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev,
{
int i;
int urb_flags;
- int dma;
int use_sg;

if (!io || !dev || !sg
@@ -378,21 +374,9 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev,
io->pipe = pipe;
io->sg = sg;
io->nents = nents;
-
- /* not all host controllers use DMA (like the mainstream pci ones);
- * they can use PIO (sl811) or be software over another transport.
- */
- dma = (dev->dev.dma_mask != NULL);
- if (dma)
- io->entries = usb_buffer_map_sg(dev, usb_pipein(pipe),
- sg, nents);
- else
- io->entries = nents;
+ io->entries = nents;

/* initialize all the urbs we'll use */
- if (io->entries <= 0)
- return io->entries;
-
if (dev->bus->sg_tablesize > 0) {
io->urbs = kmalloc(sizeof *io->urbs, mem_flags);
use_sg = true;
@@ -404,8 +388,6 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev,
goto nomem;

urb_flags = 0;
- if (dma)
- urb_flags |= URB_NO_TRANSFER_DMA_MAP;
if (usb_pipein(pipe))
urb_flags |= URB_SHORT_NOT_OK;

@@ -423,12 +405,13 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev,

io->urbs[0]->complete = sg_complete;
io->urbs[0]->context = io;
+
/* A length of zero means transfer the whole sg list */
io->urbs[0]->transfer_buffer_length = length;
if (length == 0) {
for_each_sg(sg, sg, io->entries, i) {
io->urbs[0]->transfer_buffer_length +=
- sg_dma_len(sg);
+ sg->length;
}
}
io->urbs[0]->sg = io;
@@ -454,26 +437,16 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev,
io->urbs[i]->context = io;

/*
- * Some systems need to revert to PIO when DMA is temporarily
- * unavailable. For their sakes, both transfer_buffer and
- * transfer_dma are set when possible.
- *
- * Note that if IOMMU coalescing occurred, we cannot
- * trust sg_page anymore, so check if S/G list shrunk.
+ * Some systems can't use DMA; they use PIO instead.
+ * For their sakes, transfer_buffer is set whenever
+ * possible.
*/
- if (io->nents == io->entries && !PageHighMem(sg_page(sg)))
+ if (!PageHighMem(sg_page(sg)))
io->urbs[i]->transfer_buffer = sg_virt(sg);
else
io->urbs[i]->transfer_buffer = NULL;

- if (dma) {
- io->urbs[i]->transfer_dma = sg_dma_address(sg);
- len = sg_dma_len(sg);
- } else {
- /* hc may use _only_ transfer_buffer */
- len = sg->length;
- }
-
+ len = sg->length;
if (length) {
len = min_t(unsigned, len, length);
length -= len;
@@ -481,6 +454,8 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev,
io->entries = i + 1;
}
io->urbs[i]->transfer_buffer_length = len;
+
+ io->urbs[i]->sg = (struct usb_sg_request *) sg;
}
io->urbs[--i]->transfer_flags &= ~URB_NO_INTERRUPT;
}
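[Editor's note: with the map_sg/unmap_sg calls removed from usb_sg_init(), the scatterlist is handed to the HCD unmapped and each URB merely records it; when a zero length is passed, the total transfer length is just the sum of the element lengths. A worked illustration of that summing loop, with illustrative names and assuming a populated scatterlist:]

	unsigned int total = 0;
	struct scatterlist *s;
	int i;

	/* length == 0 means "the whole scatterlist": add up every element */
	for_each_sg(sgl, s, nents, i)
		total += s->length;		/* CPU lengths, not DMA lengths */

	urb->transfer_buffer_length = total;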
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index 45a32da..fec46d0 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -333,9 +333,12 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
is_out = usb_endpoint_dir_out(&ep->desc);
}

- /* Cache the direction for later use */
- urb->transfer_flags = (urb->transfer_flags & ~URB_DIR_MASK) |
- (is_out ? URB_DIR_OUT : URB_DIR_IN);
+ /* Clear the internal flags and cache the direction for later use */
+ urb->transfer_flags &= ~(URB_DIR_MASK | URB_DMA_MAP_SINGLE |
+ URB_DMA_MAP_PAGE | URB_DMA_MAP_SG | URB_MAP_LOCAL |
+ URB_SETUP_MAP_SINGLE | URB_SETUP_MAP_LOCAL |
+ URB_DMA_SG_COMBINED);
+ urb->transfer_flags |= (is_out ? URB_DIR_OUT : URB_DIR_IN);

if (xfertype != USB_ENDPOINT_XFER_CONTROL &&
dev->state < USB_STATE_CONFIGURED)
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 0561430..956108e 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -893,6 +893,7 @@ void usb_buffer_unmap(struct urb *urb)
EXPORT_SYMBOL_GPL(usb_buffer_unmap);
#endif /* 0 */

+#if 0
/**
* usb_buffer_map_sg - create scatterlist DMA mapping(s) for an endpoint
* @dev: device to which the scatterlist will be mapped
@@ -936,6 +937,7 @@ int usb_buffer_map_sg(const struct usb_device *dev, int is_in,
is_in ? DMA_FROM_DEVICE : DMA_TO_DEVICE) ? : -ENOMEM;
}
EXPORT_SYMBOL_GPL(usb_buffer_map_sg);
+#endif

/* XXX DISABLED, no users currently. If you wish to re-enable this
* XXX please determine whether the sync is to transfer ownership of
@@ -972,6 +974,7 @@ void usb_buffer_dmasync_sg(const struct usb_device *dev, int is_in,
EXPORT_SYMBOL_GPL(usb_buffer_dmasync_sg);
#endif

+#if 0
/**
* usb_buffer_unmap_sg - free DMA mapping(s) for a scatterlist
* @dev: device to which the scatterlist will be mapped
@@ -997,6 +1000,7 @@ void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
is_in ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
EXPORT_SYMBOL_GPL(usb_buffer_unmap_sg);
+#endif

/* To disable USB, kernel command line is 'nousb' not 'usbcore.nousb' */
#ifdef MODULE
diff --git a/drivers/usb/gadget/fsl_udc_core.c b/drivers/usb/gadget/fsl_udc_core.c
index fa3d142..08a9a62 100644
--- a/drivers/usb/gadget/fsl_udc_core.c
+++ b/drivers/usb/gadget/fsl_udc_core.c
@@ -489,7 +489,7 @@ static int fsl_ep_enable(struct usb_ep *_ep,
case USB_ENDPOINT_XFER_ISOC:
/* Calculate transactions needed for high bandwidth iso */
mult = (unsigned char)(1 + ((max >> 11) & 0x03));
- max = max & 0x8ff; /* bit 0~10 */
+ max = max & 0x7ff; /* bit 0~10 */
/* 3 transactions at most */
if (mult > 3)
goto en_done;
diff --git a/drivers/usb/host/ehci-au1xxx.c b/drivers/usb/host/ehci-au1xxx.c
index e3a74e7..a422a1b 100644
--- a/drivers/usb/host/ehci-au1xxx.c
+++ b/drivers/usb/host/ehci-au1xxx.c
@@ -215,26 +215,17 @@ static int ehci_hcd_au1xxx_drv_suspend(struct device *dev)
msleep(10);

/* Root hub was already suspended. Disable irq emission and
- * mark HW unaccessible, bail out if RH has been resumed. Use
- * the spinlock to properly synchronize with possible pending
- * RH suspend or resume activity.
- *
- * This is still racy as hcd->state is manipulated outside of
- * any locks =P But that will be a different fix.
+ * mark HW inaccessible. The PM and USB cores make sure that
+ * the root hub is either suspended or stopped.
*/
spin_lock_irqsave(&ehci->lock, flags);
- if (hcd->state != HC_STATE_SUSPENDED) {
- rc = -EINVAL;
- goto bail;
- }
+ ehci_prepare_ports_for_controller_suspend(ehci);
ehci_writel(ehci, 0, &ehci->regs->intr_enable);
(void)ehci_readl(ehci, &ehci->regs->intr_enable);

clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);

au1xxx_stop_ehc();
-
-bail:
spin_unlock_irqrestore(&ehci->lock, flags);

// could save FLADJ in case of Vaux power loss
@@ -264,6 +255,7 @@ static int ehci_hcd_au1xxx_drv_resume(struct device *dev)
if (ehci_readl(ehci, &ehci->regs->configured_flag) == FLAG_CF) {
int mask = INTR_MASK;

+ ehci_prepare_ports_for_controller_resume(ehci);
if (!hcd->self.root_hub->do_remote_wakeup)
mask &= ~STS_PCD;
ehci_writel(ehci, mask, &ehci->regs->intr_enable);
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index 0e26aa1..5cd967d 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -313,6 +313,7 @@ static int ehci_fsl_drv_suspend(struct device *dev)
struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd);
void __iomem *non_ehci = hcd->regs;

+ ehci_prepare_ports_for_controller_suspend(hcd_to_ehci(hcd));
if (!fsl_deep_sleep())
return 0;

@@ -327,6 +328,7 @@ static int ehci_fsl_drv_resume(struct device *dev)
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
void __iomem *non_ehci = hcd->regs;

+ ehci_prepare_ports_for_controller_resume(ehci);
if (!fsl_deep_sleep())
return 0;

diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index c7178bc..1b2af4d 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -106,12 +106,75 @@ static void ehci_handover_companion_ports(struct ehci_hcd *ehci)
ehci->owned_ports = 0;
}

+static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci,
+ bool suspending)
+{
+ int port;
+ u32 temp;
+
+ /* If remote wakeup is enabled for the root hub but disabled
+ * for the controller, we must adjust all the port wakeup flags
+ * when the controller is suspended or resumed. In all other
+ * cases they don't need to be changed.
+ */
+ if (!ehci_to_hcd(ehci)->self.root_hub->do_remote_wakeup ||
+ device_may_wakeup(ehci_to_hcd(ehci)->self.controller))
+ return;
+
+ /* clear phy low-power mode before changing wakeup flags */
+ if (ehci->has_hostpc) {
+ port = HCS_N_PORTS(ehci->hcs_params);
+ while (port--) {
+ u32 __iomem *hostpc_reg;
+
+ hostpc_reg = (u32 __iomem *)((u8 *) ehci->regs
+ + HOSTPC0 + 4 * port);
+ temp = ehci_readl(ehci, hostpc_reg);
+ ehci_writel(ehci, temp & ~HOSTPC_PHCD, hostpc_reg);
+ }
+ msleep(5);
+ }
+
+ port = HCS_N_PORTS(ehci->hcs_params);
+ while (port--) {
+ u32 __iomem *reg = &ehci->regs->port_status[port];
+ u32 t1 = ehci_readl(ehci, reg) & ~PORT_RWC_BITS;
+ u32 t2 = t1 & ~PORT_WAKE_BITS;
+
+ /* If we are suspending the controller, clear the flags.
+ * If we are resuming the controller, set the wakeup flags.
+ */
+ if (!suspending) {
+ if (t1 & PORT_CONNECT)
+ t2 |= PORT_WKOC_E | PORT_WKDISC_E;
+ else
+ t2 |= PORT_WKOC_E | PORT_WKCONN_E;
+ }
+ ehci_vdbg(ehci, "port %d, %08x -> %08x\n",
+ port + 1, t1, t2);
+ ehci_writel(ehci, t2, reg);
+ }
+
+ /* enter phy low-power mode again */
+ if (ehci->has_hostpc) {
+ port = HCS_N_PORTS(ehci->hcs_params);
+ while (port--) {
+ u32 __iomem *hostpc_reg;
+
+ hostpc_reg = (u32 __iomem *)((u8 *) ehci->regs
+ + HOSTPC0 + 4 * port);
+ temp = ehci_readl(ehci, hostpc_reg);
+ ehci_writel(ehci, temp | HOSTPC_PHCD, hostpc_reg);
+ }
+ }
+}
+
static int ehci_bus_suspend (struct usb_hcd *hcd)
{
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
int port;
int mask;
- u32 __iomem *hostpc_reg = NULL;
+ int changed;

ehci_dbg(ehci, "suspend root hub\n");
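[Editor's note: ehci_adjust_port_wakeup_flags() only has work to do in the asymmetric case where the root hub is allowed to issue remote wakeup but the controller itself is not a wakeup source; in every other combination the PORTSC wake bits are left alone, and any HOSTPC low-power (PHCD) state is exited before the bits are rewritten. The guard condition spelled out as a sketch, assuming the usual struct usb_hcd accessors:]

	struct usb_hcd *hcd = ehci_to_hcd(ehci);

	/* root hub may wake us, but the controller itself may not: only then
	 * do the port wake bits need clearing on suspend / restoring on resume */
	if (hcd->self.root_hub->do_remote_wakeup &&
	    !device_may_wakeup(hcd->self.controller)) {
		/* leave PHCD low-power mode first, then rewrite PORT_WK* bits */
	}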

@@ -155,15 +218,13 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
*/
ehci->bus_suspended = 0;
ehci->owned_ports = 0;
+ changed = 0;
port = HCS_N_PORTS(ehci->hcs_params);
while (port--) {
u32 __iomem *reg = &ehci->regs->port_status [port];
u32 t1 = ehci_readl(ehci, reg) & ~PORT_RWC_BITS;
- u32 t2 = t1;
+ u32 t2 = t1 & ~PORT_WAKE_BITS;

- if (ehci->has_hostpc)
- hostpc_reg = (u32 __iomem *)((u8 *)ehci->regs
- + HOSTPC0 + 4 * (port & 0xff));
/* keep track of which ports we suspend */
if (t1 & PORT_OWNER)
set_bit(port, &ehci->owned_ports);
@@ -172,40 +233,45 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
set_bit(port, &ehci->bus_suspended);
}

- /* enable remote wakeup on all ports */
+ /* enable remote wakeup on all ports, if told to do so */
if (hcd->self.root_hub->do_remote_wakeup) {
/* only enable appropriate wake bits, otherwise the
* hardware can not go phy low power mode. If a race
* condition happens here(connection change during bits
* set), the port change detection will finally fix it.
*/
- if (t1 & PORT_CONNECT) {
+ if (t1 & PORT_CONNECT)
t2 |= PORT_WKOC_E | PORT_WKDISC_E;
- t2 &= ~PORT_WKCONN_E;
- } else {
+ else
t2 |= PORT_WKOC_E | PORT_WKCONN_E;
- t2 &= ~PORT_WKDISC_E;
- }
- } else
- t2 &= ~PORT_WAKE_BITS;
+ }

if (t1 != t2) {
ehci_vdbg (ehci, "port %d, %08x -> %08x\n",
port + 1, t1, t2);
ehci_writel(ehci, t2, reg);
- if (hostpc_reg) {
- u32 t3;
+ changed = 1;
+ }
+ }

- spin_unlock_irq(&ehci->lock);
- msleep(5);/* 5ms for HCD enter low pwr mode */
- spin_lock_irq(&ehci->lock);
- t3 = ehci_readl(ehci, hostpc_reg);
- ehci_writel(ehci, t3 | HOSTPC_PHCD, hostpc_reg);
- t3 = ehci_readl(ehci, hostpc_reg);
- ehci_dbg(ehci, "Port%d phy low pwr mode %s\n",
+ if (changed && ehci->has_hostpc) {
+ spin_unlock_irq(&ehci->lock);
+ msleep(5); /* 5 ms for HCD to enter low-power mode */
+ spin_lock_irq(&ehci->lock);
+
+ port = HCS_N_PORTS(ehci->hcs_params);
+ while (port--) {
+ u32 __iomem *hostpc_reg;
+ u32 t3;
+
+ hostpc_reg = (u32 __iomem *)((u8 *) ehci->regs
+ + HOSTPC0 + 4 * port);
+ t3 = ehci_readl(ehci, hostpc_reg);
+ ehci_writel(ehci, t3 | HOSTPC_PHCD, hostpc_reg);
+ t3 = ehci_readl(ehci, hostpc_reg);
+ ehci_dbg(ehci, "Port %d phy low-power mode %s\n",
port, (t3 & HOSTPC_PHCD) ?
"succeeded" : "failed");
- }
}
}

@@ -291,6 +357,25 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
msleep(8);
spin_lock_irq(&ehci->lock);

+ /* clear phy low-power mode before resume */
+ if (ehci->bus_suspended && ehci->has_hostpc) {
+ i = HCS_N_PORTS(ehci->hcs_params);
+ while (i--) {
+ if (test_bit(i, &ehci->bus_suspended)) {
+ u32 __iomem *hostpc_reg;
+
+ hostpc_reg = (u32 __iomem *)((u8 *) ehci->regs
+ + HOSTPC0 + 4 * i);
+ temp = ehci_readl(ehci, hostpc_reg);
+ ehci_writel(ehci, temp & ~HOSTPC_PHCD,
+ hostpc_reg);
+ }
+ }
+ spin_unlock_irq(&ehci->lock);
+ msleep(5);
+ spin_lock_irq(&ehci->lock);
+ }
+
/* manually resume the ports we suspended during bus_suspend() */
i = HCS_N_PORTS (ehci->hcs_params);
while (i--) {
@@ -675,16 +760,25 @@ static int ehci_hub_control (
goto error;
if (ehci->no_selective_suspend)
break;
- if (temp & PORT_SUSPEND) {
- if ((temp & PORT_PE) == 0)
- goto error;
- /* resume signaling for 20 msec */
- temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS);
- ehci_writel(ehci, temp | PORT_RESUME,
- status_reg);
- ehci->reset_done [wIndex] = jiffies
- + msecs_to_jiffies (20);
+ if (!(temp & PORT_SUSPEND))
+ break;
+ if ((temp & PORT_PE) == 0)
+ goto error;
+
+ /* clear phy low-power mode before resume */
+ if (hostpc_reg) {
+ temp1 = ehci_readl(ehci, hostpc_reg);
+ ehci_writel(ehci, temp1 & ~HOSTPC_PHCD,
+ hostpc_reg);
+ spin_unlock_irqrestore(&ehci->lock, flags);
+ msleep(5);/* wait to leave low-power mode */
+ spin_lock_irqsave(&ehci->lock, flags);
}
+ /* resume signaling for 20 msec */
+ temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS);
+ ehci_writel(ehci, temp | PORT_RESUME, status_reg);
+ ehci->reset_done[wIndex] = jiffies
+ + msecs_to_jiffies(20);
break;
case USB_PORT_FEAT_C_SUSPEND:
clear_bit(wIndex, &ehci->port_c_suspend);
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index ead5f4f..c5f662d 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -284,23 +284,15 @@ static int ehci_pci_suspend(struct usb_hcd *hcd)
msleep(10);

/* Root hub was already suspended. Disable irq emission and
- * mark HW unaccessible, bail out if RH has been resumed. Use
- * the spinlock to properly synchronize with possible pending
- * RH suspend or resume activity.
- *
- * This is still racy as hcd->state is manipulated outside of
- * any locks =P But that will be a different fix.
+ * mark HW inaccessible. The PM and USB cores make sure that
+ * the root hub is either suspended or stopped.
*/
spin_lock_irqsave (&ehci->lock, flags);
- if (hcd->state != HC_STATE_SUSPENDED) {
- rc = -EINVAL;
- goto bail;
- }
+ ehci_prepare_ports_for_controller_suspend(ehci);
ehci_writel(ehci, 0, &ehci->regs->intr_enable);
(void)ehci_readl(ehci, &ehci->regs->intr_enable);

clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
- bail:
spin_unlock_irqrestore (&ehci->lock, flags);

// could save FLADJ in case of Vaux power loss
@@ -330,6 +322,7 @@ static int ehci_pci_resume(struct usb_hcd *hcd, bool hibernated)
!hibernated) {
int mask = INTR_MASK;

+ ehci_prepare_ports_for_controller_resume(ehci);
if (!hcd->self.root_hub->do_remote_wakeup)
mask &= ~STS_PCD;
ehci_writel(ehci, mask, &ehci->regs->intr_enable);
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index 556c0b4..ddf61c3 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -536,6 +536,16 @@ struct ehci_fstn {

/*-------------------------------------------------------------------------*/

+/* Prepare the PORTSC wakeup flags during controller suspend/resume */
+
+#define ehci_prepare_ports_for_controller_suspend(ehci) \
+ ehci_adjust_port_wakeup_flags(ehci, true);
+
+#define ehci_prepare_ports_for_controller_resume(ehci) \
+ ehci_adjust_port_wakeup_flags(ehci, false);
+
+/*-------------------------------------------------------------------------*/
+
#ifdef CONFIG_USB_EHCI_ROOT_HUB_TT

/*
diff --git a/drivers/usb/host/fhci.h b/drivers/usb/host/fhci.h
index 72dae1c..3b6e864 100644
--- a/drivers/usb/host/fhci.h
+++ b/drivers/usb/host/fhci.h
@@ -20,6 +20,7 @@

#include <linux/kernel.h>
#include <linux/types.h>
+#include <linux/bug.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/kfifo.h>
@@ -515,9 +516,13 @@ static inline int cq_put(struct kfifo *kfifo, void *p)

static inline void *cq_get(struct kfifo *kfifo)
{
- void *p = NULL;
+ unsigned int sz;
+ void *p;
+
+ sz = kfifo_out(kfifo, (void *)&p, sizeof(p));
+ if (sz != sizeof(p))
+ return NULL;

- kfifo_out(kfifo, (void *)&p, sizeof(p));
return p;
}
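[Editor's note: the cq_get() fix checks the value returned by kfifo_out(): it reports how many bytes were actually copied, and anything short of sizeof(p) means the pointer was not fully read and must not be used. The same defensive pattern in isolation, illustrative and assuming <linux/kfifo.h>:]

	static inline void *fifo_pop_ptr(struct kfifo *fifo)
	{
		void *p;

		/* kfifo_out() returns the number of bytes copied; a short read
		 * means the fifo did not contain a complete pointer */
		if (kfifo_out(fifo, (void *)&p, sizeof(p)) != sizeof(p))
			return NULL;

		return p;
	}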

diff --git a/drivers/usb/host/whci/qset.c b/drivers/usb/host/whci/qset.c
index 141d049..b388dd1 100644
--- a/drivers/usb/host/whci/qset.c
+++ b/drivers/usb/host/whci/qset.c
@@ -646,7 +646,7 @@ int qset_add_urb(struct whc *whc, struct whc_qset *qset, struct urb *urb,
wurb->urb = urb;
INIT_WORK(&wurb->dequeue_work, urb_dequeue_work);

- if (urb->sg) {
+ if (urb->num_sgs) {
ret = qset_add_urb_sg(whc, qset, urb, mem_flags);
if (ret == -EINVAL) {
qset_free_stds(qset, urb);
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 417d37a..98a73cd 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -54,7 +54,7 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
int retval;

- hcd->self.sg_tablesize = TRBS_PER_SEGMENT - 1;
+ hcd->self.sg_tablesize = TRBS_PER_SEGMENT - 2;

xhci->cap_regs = hcd->regs;
xhci->op_regs = hcd->regs +
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 85d7e8f..40cba25 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -242,10 +242,27 @@ static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
int i;
union xhci_trb *enq = ring->enqueue;
struct xhci_segment *enq_seg = ring->enq_seg;
+ struct xhci_segment *cur_seg;
+ unsigned int left_on_ring;

/* Check if ring is empty */
- if (enq == ring->dequeue)
+ if (enq == ring->dequeue) {
+ /* Can't use link trbs */
+ left_on_ring = TRBS_PER_SEGMENT - 1;
+ for (cur_seg = enq_seg->next; cur_seg != enq_seg;
+ cur_seg = cur_seg->next)
+ left_on_ring += TRBS_PER_SEGMENT - 1;
+
+ /* Always need one TRB free in the ring. */
+ left_on_ring -= 1;
+ if (num_trbs > left_on_ring) {
+ xhci_warn(xhci, "Not enough room on ring; "
+ "need %u TRBs, %u TRBs left\n",
+ num_trbs, left_on_ring);
+ return 0;
+ }
return 1;
+ }
/* Make sure there's an extra empty TRB available */
for (i = 0; i <= num_trbs; ++i) {
if (enq == ring->dequeue)
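[Editor's note: when the enqueue and dequeue pointers coincide, the ring is empty and the fix counts the usable space explicitly: each segment contributes TRBS_PER_SEGMENT - 1 TRBs (the last one is a link TRB), and one more TRB must always stay free so that full and empty remain distinguishable. A quick arithmetic check of that formula in plain C; the segment count and TRBS_PER_SEGMENT value are only examples:]

	#include <stdio.h>

	#define TRBS_PER_SEGMENT 64

	int main(void)
	{
		unsigned int segments = 2;	/* example ring size */
		unsigned int left_on_ring;

		/* one link TRB per segment is unusable ... */
		left_on_ring = segments * (TRBS_PER_SEGMENT - 1);
		/* ... and one TRB must always remain free */
		left_on_ring -= 1;

		printf("usable TRBs on an empty %u-segment ring: %u\n",
		       segments, left_on_ring);	/* prints 125 */
		return 0;
	}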
@@ -334,7 +351,8 @@ static struct xhci_segment *find_trb_seg(
while (cur_seg->trbs > trb ||
&cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) {
generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic;
- if (TRB_TYPE(generic_trb->field[3]) == TRB_LINK &&
+ if ((generic_trb->field[3] & TRB_TYPE_BITMASK) ==
+ TRB_TYPE(TRB_LINK) &&
(generic_trb->field[3] & LINK_TOGGLE))
*cycle_state = ~(*cycle_state) & 0x1;
cur_seg = cur_seg->next;
@@ -390,7 +408,7 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
BUG();

trb = &state->new_deq_ptr->generic;
- if (TRB_TYPE(trb->field[3]) == TRB_LINK &&
+ if ((trb->field[3] & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK) &&
(trb->field[3] & LINK_TOGGLE))
state->new_cycle_state = ~(state->new_cycle_state) & 0x1;
next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);
@@ -578,6 +596,8 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
/* Otherwise just ring the doorbell to restart the ring */
ring_ep_doorbell(xhci, slot_id, ep_index);
}
+ ep->stopped_td = NULL;
+ ep->stopped_trb = NULL;

/*
* Drop the lock and complete the URBs in the cancelled TD list.
@@ -1061,8 +1081,13 @@ static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
ep->ep_state |= EP_HALTED;
ep->stopped_td = td;
ep->stopped_trb = event_trb;
+
xhci_queue_reset_ep(xhci, slot_id, ep_index);
xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);
+
+ ep->stopped_td = NULL;
+ ep->stopped_trb = NULL;
+
xhci_ring_cmd_db(xhci);
}

@@ -1390,8 +1415,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
cur_trb != event_trb;
next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
- if (TRB_TYPE(cur_trb->generic.field[3]) != TRB_TR_NOOP &&
- TRB_TYPE(cur_trb->generic.field[3]) != TRB_LINK)
+ if ((cur_trb->generic.field[3] &
+ TRB_TYPE_BITMASK) != TRB_TYPE(TRB_TR_NOOP) &&
+ (cur_trb->generic.field[3] &
+ TRB_TYPE_BITMASK) != TRB_TYPE(TRB_LINK))
td->urb->actual_length +=
TRB_LEN(cur_trb->generic.field[2]);
}
@@ -1938,7 +1965,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
int running_total, trb_buff_len, ret;
u64 addr;

- if (urb->sg)
+ if (urb->num_sgs)
return queue_bulk_sg_tx(xhci, mem_flags, urb, slot_id, ep_index);

ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 7e42772..5a752d6 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -105,6 +105,33 @@ int xhci_halt(struct xhci_hcd *xhci)
}

/*
+ * Set the run bit and wait for the host to be running.
+ */
+int xhci_start(struct xhci_hcd *xhci)
+{
+ u32 temp;
+ int ret;
+
+ temp = xhci_readl(xhci, &xhci->op_regs->command);
+ temp |= (CMD_RUN);
+ xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n",
+ temp);
+ xhci_writel(xhci, temp, &xhci->op_regs->command);
+
+ /*
+ * Wait for the HCHalted Status bit to be 0 to indicate the host is
+ * running.
+ */
+ ret = handshake(xhci, &xhci->op_regs->status,
+ STS_HALT, 0, XHCI_MAX_HALT_USEC);
+ if (ret == -ETIMEDOUT)
+ xhci_err(xhci, "Host took too long to start, "
+ "waited %u microseconds.\n",
+ XHCI_MAX_HALT_USEC);
+ return ret;
+}
+
+/*
* Reset a halted HC, and set the internal HC state to HC_STATE_HALT.
*
* This resets pipelines, timers, counters, state machines, etc.
@@ -115,6 +142,7 @@ int xhci_reset(struct xhci_hcd *xhci)
{
u32 command;
u32 state;
+ int ret;

state = xhci_readl(xhci, &xhci->op_regs->status);
if ((state & STS_HALT) == 0) {
@@ -129,7 +157,17 @@ int xhci_reset(struct xhci_hcd *xhci)
/* XXX: Why does EHCI set this here? Shouldn't other code do this? */
xhci_to_hcd(xhci)->state = HC_STATE_HALT;

- return handshake(xhci, &xhci->op_regs->command, CMD_RESET, 0, 250 * 1000);
+ ret = handshake(xhci, &xhci->op_regs->command,
+ CMD_RESET, 0, 250 * 1000);
+ if (ret)
+ return ret;
+
+ xhci_dbg(xhci, "Wait for controller to be ready for doorbell rings\n");
+ /*
+ * xHCI cannot write to any doorbells or operational registers other
+ * than status until the "Controller Not Ready" flag is cleared.
+ */
+ return handshake(xhci, &xhci->op_regs->status, STS_CNR, 0, 250 * 1000);
}
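[Editor's note: both xhci_start() and the extended xhci_reset() are built on handshake(), which polls a register until the masked bits take the expected value or a timeout expires; xhci_reset() now additionally waits for STS_CNR (Controller Not Ready) to clear before anything touches doorbells or other operational registers. A generic version of such a poll loop, as a sketch only, assuming the usual io/delay/errno headers; the real handshake() takes xHCI-specific arguments and also detects a dead controller:]

	/* Poll *reg until (value & mask) == done, giving up after usec
	 * microseconds.  Returns 0 on success, -ETIMEDOUT otherwise. */
	static int poll_reg(void __iomem *reg, u32 mask, u32 done, int usec)
	{
		u32 val;

		do {
			val = readl(reg);
			if ((val & mask) == done)
				return 0;
			udelay(1);
			usec--;
		} while (usec > 0);

		return -ETIMEDOUT;
	}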


@@ -452,13 +490,11 @@ int xhci_run(struct usb_hcd *hcd)
if (NUM_TEST_NOOPS > 0)
doorbell = xhci_setup_one_noop(xhci);

- temp = xhci_readl(xhci, &xhci->op_regs->command);
- temp |= (CMD_RUN);
- xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n",
- temp);
- xhci_writel(xhci, temp, &xhci->op_regs->command);
- /* Flush PCI posted writes */
- temp = xhci_readl(xhci, &xhci->op_regs->command);
+ if (xhci_start(xhci)) {
+ xhci_halt(xhci);
+ return -ENODEV;
+ }
+
xhci_dbg(xhci, "// @%p = 0x%x\n", &xhci->op_regs->command, temp);
if (doorbell)
(*doorbell)(xhci);
@@ -1438,6 +1474,8 @@ void xhci_endpoint_reset(struct usb_hcd *hcd,
kfree(virt_ep->stopped_td);
xhci_ring_cmd_db(xhci);
}
+ virt_ep->stopped_td = NULL;
+ virt_ep->stopped_trb = NULL;
spin_unlock_irqrestore(&xhci->lock, flags);

if (ret)
diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c
index ddf7f9a..8a7968d 100644
--- a/drivers/usb/mon/mon_bin.c
+++ b/drivers/usb/mon/mon_bin.c
@@ -416,7 +416,7 @@ static unsigned int mon_bin_get_data(const struct mon_reader_bin *rp,

} else {
/* If IOMMU coalescing occurred, we cannot trust sg_page */
- if (urb->sg->nents != urb->num_sgs) {
+ if (urb->transfer_flags & URB_DMA_SG_COMBINED) {
*flag = 'D';
return length;
}
diff --git a/drivers/usb/mon/mon_text.c b/drivers/usb/mon/mon_text.c
index 4d0be13..d562602 100644
--- a/drivers/usb/mon/mon_text.c
+++ b/drivers/usb/mon/mon_text.c
@@ -161,9 +161,7 @@ static inline char mon_text_get_data(struct mon_event_text *ep, struct urb *urb,
} else {
struct scatterlist *sg = urb->sg->sg;

- /* If IOMMU coalescing occurred, we cannot trust sg_page */
- if (urb->sg->nents != urb->num_sgs ||
- PageHighMem(sg_page(sg)))
+ if (PageHighMem(sg_page(sg)))
return 'D';

/* For the text interface we copy only the first sg buffer */
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index ec9b044..009e26c 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -61,6 +61,8 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x0745, 0x1000) }, /* CipherLab USB CCD Barcode Scanner 1000 */
{ USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
{ USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */
+ { USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */
+ { USB_DEVICE(0x0BED, 0x1101) }, /* MEI series 2000 Combo Acceptor */
{ USB_DEVICE(0x0FCF, 0x1003) }, /* Dynastream ANT development board */
{ USB_DEVICE(0x0FCF, 0x1004) }, /* Dynastream ANT2USB */
{ USB_DEVICE(0x0FCF, 0x1006) }, /* Dynastream ANT development board */
@@ -72,9 +74,12 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x1601) }, /* Arkham Technology DS101 Adapter */
{ USB_DEVICE(0x10C4, 0x800A) }, /* SPORTident BSM7-D-USB main station */
{ USB_DEVICE(0x10C4, 0x803B) }, /* Pololu USB-serial converter */
+ { USB_DEVICE(0x10C4, 0x8044) }, /* Cygnal Debug Adapter */
+ { USB_DEVICE(0x10C4, 0x804E) }, /* Software Bisque Paramount ME built-in converter */
{ USB_DEVICE(0x10C4, 0x8053) }, /* Enfora EDG1228 */
{ USB_DEVICE(0x10C4, 0x8054) }, /* Enfora GSM2228 */
{ USB_DEVICE(0x10C4, 0x8066) }, /* Argussoft In-System Programmer */
+ { USB_DEVICE(0x10C4, 0x806F) }, /* IMS USB to RS422 Converter Cable */
{ USB_DEVICE(0x10C4, 0x807A) }, /* Crumb128 board */
{ USB_DEVICE(0x10C4, 0x80CA) }, /* Degree Controls Inc */
{ USB_DEVICE(0x10C4, 0x80DD) }, /* Tracient RFID */
@@ -82,12 +87,15 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x8115) }, /* Arygon NFC/Mifare Reader */
{ USB_DEVICE(0x10C4, 0x813D) }, /* Burnside Telecom Deskmobile */
{ USB_DEVICE(0x10C4, 0x813F) }, /* Tams Master Easy Control */
+ { USB_DEVICE(0x10C4, 0x8149) }, /* West Mountain Radio Computerized Battery Analyzer */
{ USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */
{ USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */
{ USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */
+ { USB_DEVICE(0x10C4, 0x818B) }, /* AVIT Research USB to TTL */
{ USB_DEVICE(0x10C4, 0x819F) }, /* MJS USB Toslink Switcher */
{ USB_DEVICE(0x10C4, 0x81A6) }, /* ThinkOptics WavIt */
{ USB_DEVICE(0x10C4, 0x81AC) }, /* MSD Dash Hawk */
+ { USB_DEVICE(0x10C4, 0x81AD) }, /* INSYS USB Modem */
{ USB_DEVICE(0x10C4, 0x81C8) }, /* Lipowsky Industrie Elektronik GmbH, Baby-JTAG */
{ USB_DEVICE(0x10C4, 0x81E2) }, /* Lipowsky Industrie Elektronik GmbH, Baby-LIN */
{ USB_DEVICE(0x10C4, 0x81E7) }, /* Aerocomm Radio */
@@ -105,6 +113,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */
{ USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
{ USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
+ { USB_DEVICE(0x10C4, 0xEA71) }, /* Infinity GPS-MIC-1 Radio Monophone */
{ USB_DEVICE(0x10C4, 0xF001) }, /* Elan Digital Systems USBscope50 */
{ USB_DEVICE(0x10C4, 0xF002) }, /* Elan Digital Systems USBwave12 */
{ USB_DEVICE(0x10C4, 0xF003) }, /* Elan Digital Systems USBpulse100 */
@@ -115,6 +124,8 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */
{ USB_DEVICE(0x166A, 0x0303) }, /* Clipsal 5500PCU C-Bus USB interface */
{ USB_DEVICE(0x16D6, 0x0001) }, /* Jablotron serial interface */
+ { USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */
+ { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
{ USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
{ USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */
{ } /* Terminating Entry */
diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
index e23c779..582e832 100644
--- a/drivers/usb/serial/cypress_m8.c
+++ b/drivers/usb/serial/cypress_m8.c
@@ -1309,7 +1309,7 @@ static void cypress_read_int_callback(struct urb *urb)
/* process read if there is data other than line status */
if (tty && bytes > i) {
tty_insert_flip_string_fixed_flag(tty, data + i,
- bytes - i, tty_flag);
+ tty_flag, bytes - i);
tty_flip_buffer_push(tty);
}

diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index 68b0aa5..3edda3e 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -1703,8 +1703,8 @@ static int digi_read_inb_callback(struct urb *urb)
/* data length is len-1 (one byte of len is port_status) */
--len;
if (len > 0) {
- tty_insert_flip_string_fixed_flag(tty, data, len,
- flag);
+ tty_insert_flip_string_fixed_flag(tty, data, flag,
+ len);
tty_flip_buffer_push(tty);
}
}
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 1d7c4fa..3f5676e 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -2289,6 +2289,8 @@ static void ftdi_set_termios(struct tty_struct *tty,
"urb failed to set to rts/cts flow control\n");
}

+ /* raise DTR/RTS */
+ set_mctrl(port, TIOCM_DTR | TIOCM_RTS);
} else {
/*
* Xon/Xoff code
@@ -2336,6 +2338,8 @@ static void ftdi_set_termios(struct tty_struct *tty,
}
}

+ /* lower DTR/RTS */
+ clear_mctrl(port, TIOCM_DTR | TIOCM_RTS);
}
return;
}
diff --git a/drivers/usb/serial/ir-usb.c b/drivers/usb/serial/ir-usb.c
index 4a0f519..71bdbe0 100644
--- a/drivers/usb/serial/ir-usb.c
+++ b/drivers/usb/serial/ir-usb.c
@@ -312,6 +312,7 @@ static int ir_open(struct tty_struct *tty, struct usb_serial_port *port)
kfree(port->read_urb->transfer_buffer);
port->read_urb->transfer_buffer = buffer;
port->read_urb->transfer_buffer_length = buffer_size;
+ port->bulk_in_buffer = buffer;

buffer = kmalloc(buffer_size, GFP_KERNEL);
if (!buffer) {
@@ -321,6 +322,7 @@ static int ir_open(struct tty_struct *tty, struct usb_serial_port *port)
kfree(port->write_urb->transfer_buffer);
port->write_urb->transfer_buffer = buffer;
port->write_urb->transfer_buffer_length = buffer_size;
+ port->bulk_out_buffer = buffer;
port->bulk_out_size = buffer_size;
}

diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c
index 8eef91b..cc0ba38 100644
--- a/drivers/usb/serial/kl5kusb105.c
+++ b/drivers/usb/serial/kl5kusb105.c
@@ -321,6 +321,7 @@ err_cleanup:
usb_free_urb(priv->write_urb_pool[j]);
}
}
+ kfree(priv);
usb_set_serial_port_data(serial->port[i], NULL);
}
return -ENOMEM;
diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c
index c113a2a..bd5bd85 100644
--- a/drivers/usb/serial/kobil_sct.c
+++ b/drivers/usb/serial/kobil_sct.c
@@ -345,7 +345,8 @@ static void kobil_close(struct usb_serial_port *port)

/* FIXME: Add rts/dtr methods */
if (port->write_urb) {
- usb_kill_urb(port->write_urb);
+ usb_poison_urb(port->write_urb);
+ kfree(port->write_urb->transfer_buffer);
usb_free_urb(port->write_urb);
port->write_urb = NULL;
}
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 2fda1c0..68f6a1d 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -731,7 +731,6 @@ static void mos7840_bulk_in_callback(struct urb *urb)
mos7840_port = urb->context;
if (!mos7840_port) {
dbg("%s", "NULL mos7840_port pointer");
- mos7840_port->read_urb_busy = false;
return;
}

diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 84d0eda..8b2e612 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -380,6 +380,10 @@ static int option_resume(struct usb_serial *serial);

#define CINTERION_VENDOR_ID 0x0681

+/* Olivetti products */
+#define OLIVETTI_VENDOR_ID 0x0b3c
+#define OLIVETTI_PRODUCT_OLICARD100 0xc000
+
/* some devices interfaces need special handling due to a number of reasons */
enum option_blacklist_reason {
OPTION_BLACKLIST_NONE = 0,
@@ -675,6 +679,180 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0160, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0161, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0162, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1057, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1058, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1059, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1060, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1061, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1062, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1063, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1064, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1065, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1066, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1067, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1068, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1069, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1070, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1071, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1072, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1073, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1074, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1075, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1076, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1077, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1078, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1079, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1080, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1081, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1082, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1083, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1084, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1085, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1086, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1087, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1088, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1089, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1090, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1091, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1092, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1093, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1094, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1095, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1096, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1097, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1098, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1099, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1100, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1101, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1102, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1103, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1104, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1105, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1106, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1107, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1108, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1109, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1110, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1111, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1112, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1113, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1114, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1115, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1116, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1117, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1118, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1119, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1120, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1121, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1122, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1123, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1124, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1125, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1126, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1127, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1128, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1129, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1130, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1131, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1132, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1133, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1134, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1135, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1136, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1137, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1138, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1139, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1140, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1141, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1142, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1143, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1144, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1145, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1146, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1147, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1148, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1149, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1150, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1151, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1152, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1153, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1154, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1155, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1156, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1157, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1158, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1159, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1160, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1161, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1162, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1163, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1164, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1165, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1166, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1167, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1168, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1169, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1170, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1244, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1245, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1246, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1247, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1248, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1249, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1250, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1251, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1252, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1253, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1254, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1255, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1256, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1257, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1258, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1259, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1260, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1261, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1262, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1263, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1264, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1265, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1266, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1267, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1268, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1269, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1270, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1271, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1272, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1273, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1274, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1275, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1276, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1277, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1278, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1279, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1280, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1281, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1282, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1283, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1284, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1285, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1286, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1287, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1288, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1289, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1290, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1291, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1292, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1293, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1294, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1295, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1296, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1297, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1298, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1299, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1300, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0014, 0xff, 0xff, 0xff) }, /* ZTE CDMA products */
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0027, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0059, 0xff, 0xff, 0xff) },
@@ -726,6 +904,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1012)},

{ USB_DEVICE(CINTERION_VENDOR_ID, 0x0047) },
+
+ { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, option_ids);
diff --git a/drivers/usb/serial/qcaux.c b/drivers/usb/serial/qcaux.c
index 7e3bea2..214a3e5 100644
--- a/drivers/usb/serial/qcaux.c
+++ b/drivers/usb/serial/qcaux.c
@@ -50,6 +50,10 @@
#define SANYO_VENDOR_ID 0x0474
#define SANYO_PRODUCT_KATANA_LX 0x0754 /* SCP-3800 (Katana LX) */

+/* Samsung devices */
+#define SAMSUNG_VENDOR_ID 0x04e8
+#define SAMSUNG_PRODUCT_U520 0x6640 /* SCH-U520 */
+
static struct usb_device_id id_table[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_PC5740, 0xff, 0x00, 0x00) },
{ USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_PC5750, 0xff, 0x00, 0x00) },
@@ -61,6 +65,7 @@ static struct usb_device_id id_table[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDX650, 0xff, 0xff, 0x00) },
{ USB_DEVICE_AND_INTERFACE_INFO(LG_VENDOR_ID, LG_PRODUCT_VX4400_6000, 0xff, 0xff, 0x00) },
{ USB_DEVICE_AND_INTERFACE_INFO(SANYO_VENDOR_ID, SANYO_PRODUCT_KATANA_LX, 0xff, 0xff, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_U520, 0xff, 0x00, 0x00) },
{ },
};
MODULE_DEVICE_TABLE(usb, id_table);
diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c
index 5d39191..2ea32c5 100644
--- a/drivers/usb/serial/spcp8x5.c
+++ b/drivers/usb/serial/spcp8x5.c
@@ -726,8 +726,8 @@ static void spcp8x5_read_bulk_callback(struct urb *urb)
/* overrun is special, not associated with a char */
if (status & UART_OVERRUN_ERROR)
tty_insert_flip_char(tty, 0, TTY_OVERRUN);
- tty_insert_flip_string_fixed_flag(tty, data,
- urb->actual_length, tty_flag);
+ tty_insert_flip_string_fixed_flag(tty, data, tty_flag,
+ urb->actual_length);
tty_flip_buffer_push(tty);
}
tty_kref_put(tty);
diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
index 0949427..fb7fc40 100644
--- a/drivers/usb/serial/visor.c
+++ b/drivers/usb/serial/visor.c
@@ -249,6 +249,7 @@ static struct usb_serial_driver clie_3_5_device = {
.throttle = visor_throttle,
.unthrottle = visor_unthrottle,
.attach = clie_3_5_startup,
+ .release = visor_release,
.write = visor_write,
.write_room = visor_write_room,
.write_bulk_callback = visor_write_bulk_callback,
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index ccf1dbb..55b3cd1 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1853,6 +1853,21 @@ UNUSUAL_DEV( 0x1652, 0x6600, 0x0201, 0x0201,
US_SC_DEVICE, US_PR_DEVICE, NULL,
US_FL_IGNORE_RESIDUE ),

+/* Reported by Hans de Goede <hdegoede@xxxxxxxxxx>
+ * These Appotech controllers are found in Picture Frames, they provide a
+ * (buggy) emulation of a cdrom drive which contains the windows software
+ * Uploading of pictures happens over the corresponding /dev/sg device. */
+UNUSUAL_DEV( 0x1908, 0x1315, 0x0000, 0x0000,
+ "BUILDWIN",
+ "Photo Frame",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ US_FL_BAD_SENSE ),
+UNUSUAL_DEV( 0x1908, 0x1320, 0x0000, 0x0000,
+ "BUILDWIN",
+ "Photo Frame",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ US_FL_BAD_SENSE ),
+
UNUSUAL_DEV( 0x2116, 0x0320, 0x0001, 0x0001,
"ST",
"2A",
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 9777583..c9d0c79 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -637,7 +637,7 @@ const static struct file_operations vhost_net_fops = {
};

static struct miscdevice vhost_net_misc = {
- VHOST_NET_MINOR,
+ MISC_DYNAMIC_MINOR,
"vhost-net",
&vhost_net_fops,
};
diff --git a/drivers/video/arcfb.c b/drivers/video/arcfb.c
index 8d406fb..f3d7440 100644
--- a/drivers/video/arcfb.c
+++ b/drivers/video/arcfb.c
@@ -80,7 +80,7 @@ struct arcfb_par {
spinlock_t lock;
};

-static struct fb_fix_screeninfo arcfb_fix __initdata = {
+static struct fb_fix_screeninfo arcfb_fix __devinitdata = {
.id = "arcfb",
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_MONO01,
@@ -90,7 +90,7 @@ static struct fb_fix_screeninfo arcfb_fix __initdata = {
.accel = FB_ACCEL_NONE,
};

-static struct fb_var_screeninfo arcfb_var __initdata = {
+static struct fb_var_screeninfo arcfb_var __devinitdata = {
.xres = 128,
.yres = 64,
.xres_virtual = 128,
@@ -588,7 +588,7 @@ err:
return retval;
}

-static int arcfb_remove(struct platform_device *dev)
+static int __devexit arcfb_remove(struct platform_device *dev)
{
struct fb_info *info = platform_get_drvdata(dev);

@@ -602,7 +602,7 @@ static int arcfb_remove(struct platform_device *dev)

static struct platform_driver arcfb_driver = {
.probe = arcfb_probe,
- .remove = arcfb_remove,
+ .remove = __devexit_p(arcfb_remove),
.driver = {
.name = "arcfb",
},
diff --git a/drivers/video/hgafb.c b/drivers/video/hgafb.c
index 8bbf251..af8f0f2 100644
--- a/drivers/video/hgafb.c
+++ b/drivers/video/hgafb.c
@@ -106,7 +106,7 @@ static DEFINE_SPINLOCK(hga_reg_lock);

/* Framebuffer driver structures */

-static struct fb_var_screeninfo __initdata hga_default_var = {
+static struct fb_var_screeninfo hga_default_var __devinitdata = {
.xres = 720,
.yres = 348,
.xres_virtual = 720,
@@ -120,7 +120,7 @@ static struct fb_var_screeninfo __initdata hga_default_var = {
.width = -1,
};

-static struct fb_fix_screeninfo __initdata hga_fix = {
+static struct fb_fix_screeninfo hga_fix __devinitdata = {
.id = "HGA",
.type = FB_TYPE_PACKED_PIXELS, /* (not sure) */
.visual = FB_VISUAL_MONO10,
@@ -276,7 +276,7 @@ static void hga_blank(int blank_mode)
spin_unlock_irqrestore(&hga_reg_lock, flags);
}

-static int __init hga_card_detect(void)
+static int __devinit hga_card_detect(void)
{
int count = 0;
void __iomem *p, *q;
@@ -596,7 +596,7 @@ static int __devinit hgafb_probe(struct platform_device *pdev)
return 0;
}

-static int hgafb_remove(struct platform_device *pdev)
+static int __devexit hgafb_remove(struct platform_device *pdev)
{
struct fb_info *info = platform_get_drvdata(pdev);

@@ -621,7 +621,7 @@ static int hgafb_remove(struct platform_device *pdev)

static struct platform_driver hgafb_driver = {
.probe = hgafb_probe,
- .remove = hgafb_remove,
+ .remove = __devexit_p(hgafb_remove),
.driver = {
.name = "hgafb",
},
diff --git a/drivers/video/vfb.c b/drivers/video/vfb.c
index 9b5532b..bc67251 100644
--- a/drivers/video/vfb.c
+++ b/drivers/video/vfb.c
@@ -78,7 +78,7 @@ static void rvfree(void *mem, unsigned long size)
vfree(mem);
}

-static struct fb_var_screeninfo vfb_default __initdata = {
+static struct fb_var_screeninfo vfb_default __devinitdata = {
.xres = 640,
.yres = 480,
.xres_virtual = 640,
@@ -100,7 +100,7 @@ static struct fb_var_screeninfo vfb_default __initdata = {
.vmode = FB_VMODE_NONINTERLACED,
};

-static struct fb_fix_screeninfo vfb_fix __initdata = {
+static struct fb_fix_screeninfo vfb_fix __devinitdata = {
.id = "Virtual FB",
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_PSEUDOCOLOR,
diff --git a/drivers/video/vga16fb.c b/drivers/video/vga16fb.c
index bf638a4..2ab3cc7 100644
--- a/drivers/video/vga16fb.c
+++ b/drivers/video/vga16fb.c
@@ -65,7 +65,7 @@ struct vga16fb_par {

/* --------------------------------------------------------------------- */

-static struct fb_var_screeninfo vga16fb_defined __initdata = {
+static struct fb_var_screeninfo vga16fb_defined __devinitdata = {
.xres = 640,
.yres = 480,
.xres_virtual = 640,
@@ -85,7 +85,7 @@ static struct fb_var_screeninfo vga16fb_defined __initdata = {
};

/* name should not depend on EGA/VGA */
-static struct fb_fix_screeninfo vga16fb_fix __initdata = {
+static struct fb_fix_screeninfo vga16fb_fix __devinitdata = {
.id = "VGA16 VGA",
.smem_start = VGA_FB_PHYS,
.smem_len = VGA_FB_PHYS_LEN,
@@ -1278,7 +1278,7 @@ static struct fb_ops vga16fb_ops = {
};

#ifndef MODULE
-static int vga16fb_setup(char *options)
+static int __init vga16fb_setup(char *options)
{
char *this_opt;

@@ -1376,7 +1376,7 @@ static int __devinit vga16fb_probe(struct platform_device *dev)
return ret;
}

-static int vga16fb_remove(struct platform_device *dev)
+static int __devexit vga16fb_remove(struct platform_device *dev)
{
struct fb_info *info = platform_get_drvdata(dev);

@@ -1393,7 +1393,7 @@ static int vga16fb_remove(struct platform_device *dev)

static struct platform_driver vga16fb_driver = {
.probe = vga16fb_probe,
- .remove = vga16fb_remove,
+ .remove = __devexit_p(vga16fb_remove),
.driver = {
.name = "vga16fb",
},
diff --git a/drivers/video/w100fb.c b/drivers/video/w100fb.c
index 31b0e17..e66b8b1 100644
--- a/drivers/video/w100fb.c
+++ b/drivers/video/w100fb.c
@@ -53,7 +53,7 @@ static void w100_update_enable(void);
static void w100_update_disable(void);
static void calc_hsync(struct w100fb_par *par);
static void w100_init_graphic_engine(struct w100fb_par *par);
-struct w100_pll_info *w100_get_xtal_table(unsigned int freq);
+struct w100_pll_info *w100_get_xtal_table(unsigned int freq) __devinit;

/* Pseudo palette size */
#define MAX_PALETTES 16
@@ -782,7 +782,7 @@ out:
}


-static int w100fb_remove(struct platform_device *pdev)
+static int __devexit w100fb_remove(struct platform_device *pdev)
{
struct fb_info *info = platform_get_drvdata(pdev);
struct w100fb_par *par=info->par;
@@ -1020,7 +1020,7 @@ static struct pll_entries {
{ 0 },
};

-struct w100_pll_info *w100_get_xtal_table(unsigned int freq)
+struct w100_pll_info __devinit *w100_get_xtal_table(unsigned int freq)
{
struct pll_entries *pll_entry = w100_pll_tables;

@@ -1611,7 +1611,7 @@ static void w100_vsync(void)

static struct platform_driver w100fb_driver = {
.probe = w100fb_probe,
- .remove = w100fb_remove,
+ .remove = __devexit_p(w100fb_remove),
.suspend = w100fb_suspend,
.resume = w100fb_resume,
.driver = {
@@ -1619,7 +1619,7 @@ static struct platform_driver w100fb_driver = {
},
};

-int __devinit w100fb_init(void)
+int __init w100fb_init(void)
{
return platform_driver_register(&w100fb_driver);
}
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
index eab33f1..7b547f5 100644
--- a/drivers/xen/xenbus/xenbus_xs.c
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -499,7 +499,7 @@ int xenbus_printf(struct xenbus_transaction t,
#define PRINTF_BUFFER_SIZE 4096
char *printf_buffer;

- printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
+ printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_NOIO | __GFP_HIGH);
if (printf_buffer == NULL)
return -ENOMEM;

diff --git a/fs/aio.c b/fs/aio.c
index 1cf12b3..48fdeeb 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -36,6 +36,7 @@
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/hash.h>
+#include <linux/compat.h>

#include <asm/kmap_types.h>
#include <asm/uaccess.h>
@@ -1384,13 +1385,22 @@ static ssize_t aio_fsync(struct kiocb *iocb)
return ret;
}

-static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb)
+static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
{
ssize_t ret;

- ret = rw_copy_check_uvector(type, (struct iovec __user *)kiocb->ki_buf,
- kiocb->ki_nbytes, 1,
- &kiocb->ki_inline_vec, &kiocb->ki_iovec);
+#ifdef CONFIG_COMPAT
+ if (compat)
+ ret = compat_rw_copy_check_uvector(type,
+ (struct compat_iovec __user *)kiocb->ki_buf,
+ kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
+ &kiocb->ki_iovec);
+ else
+#endif
+ ret = rw_copy_check_uvector(type,
+ (struct iovec __user *)kiocb->ki_buf,
+ kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
+ &kiocb->ki_iovec);
if (ret < 0)
goto out;

@@ -1420,7 +1430,7 @@ static ssize_t aio_setup_single_vector(struct kiocb *kiocb)
* Performs the initial checks and aio retry method
* setup for the kiocb at the time of io submission.
*/
-static ssize_t aio_setup_iocb(struct kiocb *kiocb)
+static ssize_t aio_setup_iocb(struct kiocb *kiocb, bool compat)
{
struct file *file = kiocb->ki_filp;
ssize_t ret = 0;
@@ -1469,7 +1479,7 @@ static ssize_t aio_setup_iocb(struct kiocb *kiocb)
ret = security_file_permission(file, MAY_READ);
if (unlikely(ret))
break;
- ret = aio_setup_vectored_rw(READ, kiocb);
+ ret = aio_setup_vectored_rw(READ, kiocb, compat);
if (ret)
break;
ret = -EINVAL;
@@ -1483,7 +1493,7 @@ static ssize_t aio_setup_iocb(struct kiocb *kiocb)
ret = security_file_permission(file, MAY_WRITE);
if (unlikely(ret))
break;
- ret = aio_setup_vectored_rw(WRITE, kiocb);
+ ret = aio_setup_vectored_rw(WRITE, kiocb, compat);
if (ret)
break;
ret = -EINVAL;
@@ -1548,7 +1558,8 @@ static void aio_batch_free(struct hlist_head *batch_hash)
}

static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
- struct iocb *iocb, struct hlist_head *batch_hash)
+ struct iocb *iocb, struct hlist_head *batch_hash,
+ bool compat)
{
struct kiocb *req;
struct file *file;
@@ -1609,7 +1620,7 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
req->ki_left = req->ki_nbytes = iocb->aio_nbytes;
req->ki_opcode = iocb->aio_lio_opcode;

- ret = aio_setup_iocb(req);
+ ret = aio_setup_iocb(req, compat);

if (ret)
goto out_put_req;
@@ -1637,20 +1648,8 @@ out_put_req:
return ret;
}

-/* sys_io_submit:
- * Queue the nr iocbs pointed to by iocbpp for processing. Returns
- * the number of iocbs queued. May return -EINVAL if the aio_context
- * specified by ctx_id is invalid, if nr is < 0, if the iocb at
- * *iocbpp[0] is not properly initialized, if the operation specified
- * is invalid for the file descriptor in the iocb. May fail with
- * -EFAULT if any of the data structures point to invalid data. May
- * fail with -EBADF if the file descriptor specified in the first
- * iocb is invalid. May fail with -EAGAIN if insufficient resources
- * are available to queue any iocbs. Will return 0 if nr is 0. Will
- * fail with -ENOSYS if not implemented.
- */
-SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
- struct iocb __user * __user *, iocbpp)
+long do_io_submit(aio_context_t ctx_id, long nr,
+ struct iocb __user *__user *iocbpp, bool compat)
{
struct kioctx *ctx;
long ret = 0;
@@ -1687,7 +1686,7 @@ SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
break;
}

- ret = io_submit_one(ctx, user_iocb, &tmp, batch_hash);
+ ret = io_submit_one(ctx, user_iocb, &tmp, batch_hash, compat);
if (ret)
break;
}
@@ -1697,6 +1696,24 @@ SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
return i ? i : ret;
}

+/* sys_io_submit:
+ * Queue the nr iocbs pointed to by iocbpp for processing. Returns
+ * the number of iocbs queued. May return -EINVAL if the aio_context
+ * specified by ctx_id is invalid, if nr is < 0, if the iocb at
+ * *iocbpp[0] is not properly initialized, if the operation specified
+ * is invalid for the file descriptor in the iocb. May fail with
+ * -EFAULT if any of the data structures point to invalid data. May
+ * fail with -EBADF if the file descriptor specified in the first
+ * iocb is invalid. May fail with -EAGAIN if insufficient resources
+ * are available to queue any iocbs. Will return 0 if nr is 0. Will
+ * fail with -ENOSYS if not implemented.
+ */
+SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
+ struct iocb __user * __user *, iocbpp)
+{
+ return do_io_submit(ctx_id, nr, iocbpp, 0);
+}
+
/* lookup_kiocb
* Finds a given iocb for cancellation.
*/
diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
index 6ef7b26..6b4d0cc 100644
--- a/fs/btrfs/acl.c
+++ b/fs/btrfs/acl.c
@@ -160,6 +160,9 @@ static int btrfs_xattr_acl_set(struct dentry *dentry, const char *name,
int ret;
struct posix_acl *acl = NULL;

+ if (!is_owner_or_cap(dentry->d_inode))
+ return -EPERM;
+
if (value) {
acl = posix_acl_from_xattr(value, size);
if (acl == NULL) {
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 39e47f4..a6db615 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -95,8 +95,10 @@ extern struct cifsFileInfo *cifs_new_fileinfo(struct inode *newinode,
__u16 fileHandle, struct file *file,
struct vfsmount *mnt, unsigned int oflags);
extern int cifs_posix_open(char *full_path, struct inode **pinode,
- struct vfsmount *mnt, int mode, int oflags,
- __u32 *poplock, __u16 *pnetfid, int xid);
+ struct vfsmount *mnt,
+ struct super_block *sb,
+ int mode, int oflags,
+ __u32 *poplock, __u16 *pnetfid, int xid);
extern void cifs_unix_basic_to_fattr(struct cifs_fattr *fattr,
FILE_UNIX_BASIC_INFO *info,
struct cifs_sb_info *cifs_sb);
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index e9f7ecc..ff3d891 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -183,13 +183,14 @@ cifs_new_fileinfo(struct inode *newinode, __u16 fileHandle,
}

int cifs_posix_open(char *full_path, struct inode **pinode,
- struct vfsmount *mnt, int mode, int oflags,
- __u32 *poplock, __u16 *pnetfid, int xid)
+ struct vfsmount *mnt, struct super_block *sb,
+ int mode, int oflags,
+ __u32 *poplock, __u16 *pnetfid, int xid)
{
int rc;
FILE_UNIX_BASIC_INFO *presp_data;
__u32 posix_flags = 0;
- struct cifs_sb_info *cifs_sb = CIFS_SB(mnt->mnt_sb);
+ struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
struct cifs_fattr fattr;

cFYI(1, ("posix open %s", full_path));
@@ -242,7 +243,7 @@ int cifs_posix_open(char *full_path, struct inode **pinode,

/* get new inode and set it up */
if (*pinode == NULL) {
- *pinode = cifs_iget(mnt->mnt_sb, &fattr);
+ *pinode = cifs_iget(sb, &fattr);
if (!*pinode) {
rc = -ENOMEM;
goto posix_open_ret;
@@ -251,7 +252,8 @@ int cifs_posix_open(char *full_path, struct inode **pinode,
cifs_fattr_to_inode(*pinode, &fattr);
}

- cifs_new_fileinfo(*pinode, *pnetfid, NULL, mnt, oflags);
+ if (mnt)
+ cifs_new_fileinfo(*pinode, *pnetfid, NULL, mnt, oflags);

posix_open_ret:
kfree(presp_data);
@@ -315,13 +317,14 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
if (nd && (nd->flags & LOOKUP_OPEN))
oflags = nd->intent.open.flags;
else
- oflags = FMODE_READ;
+ oflags = FMODE_READ | SMB_O_CREAT;

if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) &&
(CIFS_UNIX_POSIX_PATH_OPS_CAP &
le64_to_cpu(tcon->fsUnixInfo.Capability))) {
- rc = cifs_posix_open(full_path, &newinode, nd->path.mnt,
- mode, oflags, &oplock, &fileHandle, xid);
+ rc = cifs_posix_open(full_path, &newinode,
+ nd ? nd->path.mnt : NULL,
+ inode->i_sb, mode, oflags, &oplock, &fileHandle, xid);
/* EIO could indicate that (posix open) operation is not
supported, despite what server claimed in capability
negotation. EREMOTE indicates DFS junction, which is not
@@ -678,6 +681,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
(nd->flags & LOOKUP_OPEN) && !pTcon->broken_posix_open &&
(nd->intent.open.flags & O_CREAT)) {
rc = cifs_posix_open(full_path, &newInode, nd->path.mnt,
+ parent_dir_inode->i_sb,
nd->intent.open.create_mode,
nd->intent.open.flags, &oplock,
&fileHandle, xid);
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 9b11a8f..4cbdb20 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -298,10 +298,12 @@ int cifs_open(struct inode *inode, struct file *file)
(CIFS_UNIX_POSIX_PATH_OPS_CAP &
le64_to_cpu(tcon->fsUnixInfo.Capability))) {
int oflags = (int) cifs_posix_convert_flags(file->f_flags);
+ oflags |= SMB_O_CREAT;
/* can not refresh inode info since size could be stale */
rc = cifs_posix_open(full_path, &inode, file->f_path.mnt,
- cifs_sb->mnt_file_mode /* ignored */,
- oflags, &oplock, &netfid, xid);
+ inode->i_sb,
+ cifs_sb->mnt_file_mode /* ignored */,
+ oflags, &oplock, &netfid, xid);
if (rc == 0) {
cFYI(1, ("posix open succeeded"));
/* no need for special case handling of setting mode
@@ -513,8 +515,9 @@ reopen_error_exit:
int oflags = (int) cifs_posix_convert_flags(file->f_flags);
/* can not refresh inode info since size could be stale */
rc = cifs_posix_open(full_path, NULL, file->f_path.mnt,
- cifs_sb->mnt_file_mode /* ignored */,
- oflags, &oplock, &netfid, xid);
+ inode->i_sb,
+ cifs_sb->mnt_file_mode /* ignored */,
+ oflags, &oplock, &netfid, xid);
if (rc == 0) {
cFYI(1, ("posix reopen succeeded"));
goto reopen_success;
diff --git a/fs/compat.c b/fs/compat.c
index 0544873..6490d21 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -568,6 +568,79 @@ out:
return ret;
}

+/* A write operation does a read from user space and vice versa */
+#define vrfy_dir(type) ((type) == READ ? VERIFY_WRITE : VERIFY_READ)
+
+ssize_t compat_rw_copy_check_uvector(int type,
+ const struct compat_iovec __user *uvector, unsigned long nr_segs,
+ unsigned long fast_segs, struct iovec *fast_pointer,
+ struct iovec **ret_pointer)
+{
+ compat_ssize_t tot_len;
+ struct iovec *iov = *ret_pointer = fast_pointer;
+ ssize_t ret = 0;
+ int seg;
+
+ /*
+ * SuS says "The readv() function *may* fail if the iovcnt argument
+ * was less than or equal to 0, or greater than {IOV_MAX}. Linux has
+ * traditionally returned zero for zero segments, so...
+ */
+ if (nr_segs == 0)
+ goto out;
+
+ ret = -EINVAL;
+ if (nr_segs > UIO_MAXIOV || nr_segs < 0)
+ goto out;
+ if (nr_segs > fast_segs) {
+ ret = -ENOMEM;
+ iov = kmalloc(nr_segs*sizeof(struct iovec), GFP_KERNEL);
+ if (iov == NULL) {
+ *ret_pointer = fast_pointer;
+ goto out;
+ }
+ }
+ *ret_pointer = iov;
+
+ /*
+ * Single unix specification:
+ * We should -EINVAL if an element length is not >= 0 and fitting an
+ * ssize_t. The total length is fitting an ssize_t
+ *
+ * Be careful here because iov_len is a size_t not an ssize_t
+ */
+ tot_len = 0;
+ ret = -EINVAL;
+ for (seg = 0; seg < nr_segs; seg++) {
+ compat_ssize_t tmp = tot_len;
+ compat_uptr_t buf;
+ compat_ssize_t len;
+
+ if (__get_user(len, &uvector->iov_len) ||
+ __get_user(buf, &uvector->iov_base)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ if (len < 0) /* size_t not fitting in compat_ssize_t .. */
+ goto out;
+ tot_len += len;
+ if (tot_len < tmp) /* maths overflow on the compat_ssize_t */
+ goto out;
+ if (!access_ok(vrfy_dir(type), compat_ptr(buf), len)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ iov->iov_base = compat_ptr(buf);
+ iov->iov_len = (compat_size_t) len;
+ uvector++;
+ iov++;
+ }
+ ret = tot_len;
+
+out:
+ return ret;
+}
+
static inline long
copy_iocb(long nr, u32 __user *ptr32, struct iocb __user * __user *ptr64)
{
@@ -600,7 +673,7 @@ compat_sys_io_submit(aio_context_t ctx_id, int nr, u32 __user *iocb)
iocb64 = compat_alloc_user_space(nr * sizeof(*iocb64));
ret = copy_iocb(nr, iocb, iocb64);
if (!ret)
- ret = sys_io_submit(ctx_id, nr, iocb64);
+ ret = do_io_submit(ctx_id, nr, iocb64, 1);
return ret;
}

@@ -1077,70 +1150,21 @@ static ssize_t compat_do_readv_writev(int type, struct file *file,
{
compat_ssize_t tot_len;
struct iovec iovstack[UIO_FASTIOV];
- struct iovec *iov=iovstack, *vector;
+ struct iovec *iov;
ssize_t ret;
- int seg;
io_fn_t fn;
iov_fn_t fnv;

- /*
- * SuS says "The readv() function *may* fail if the iovcnt argument
- * was less than or equal to 0, or greater than {IOV_MAX}. Linux has
- * traditionally returned zero for zero segments, so...
- */
- ret = 0;
- if (nr_segs == 0)
- goto out;
-
- /*
- * First get the "struct iovec" from user memory and
- * verify all the pointers
- */
ret = -EINVAL;
- if ((nr_segs > UIO_MAXIOV) || (nr_segs <= 0))
- goto out;
if (!file->f_op)
goto out;
- if (nr_segs > UIO_FASTIOV) {
- ret = -ENOMEM;
- iov = kmalloc(nr_segs*sizeof(struct iovec), GFP_KERNEL);
- if (!iov)
- goto out;
- }
+
ret = -EFAULT;
if (!access_ok(VERIFY_READ, uvector, nr_segs*sizeof(*uvector)))
goto out;

- /*
- * Single unix specification:
- * We should -EINVAL if an element length is not >= 0 and fitting an
- * ssize_t. The total length is fitting an ssize_t
- *
- * Be careful here because iov_len is a size_t not an ssize_t
- */
- tot_len = 0;
- vector = iov;
- ret = -EINVAL;
- for (seg = 0 ; seg < nr_segs; seg++) {
- compat_ssize_t tmp = tot_len;
- compat_ssize_t len;
- compat_uptr_t buf;
-
- if (__get_user(len, &uvector->iov_len) ||
- __get_user(buf, &uvector->iov_base)) {
- ret = -EFAULT;
- goto out;
- }
- if (len < 0) /* size_t not fitting an compat_ssize_t .. */
- goto out;
- tot_len += len;
- if (tot_len < tmp) /* maths overflow on the compat_ssize_t */
- goto out;
- vector->iov_base = compat_ptr(buf);
- vector->iov_len = (compat_size_t) len;
- uvector++;
- vector++;
- }
+ tot_len = compat_rw_copy_check_uvector(type, uvector, nr_segs,
+ UIO_FASTIOV, iovstack, &iov);
if (tot_len == 0) {
ret = 0;
goto out;
diff --git a/fs/dcache.c b/fs/dcache.c
index f1358e5..2b6f09a 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1529,6 +1529,7 @@ void d_delete(struct dentry * dentry)
spin_lock(&dentry->d_lock);
isdir = S_ISDIR(dentry->d_inode->i_mode);
if (atomic_read(&dentry->d_count) == 1) {
+ dentry->d_flags &= ~DCACHE_CANT_MOUNT;
dentry_iput(dentry);
fsnotify_nameremove(dentry, isdir);
return;
diff --git a/fs/exofs/dir.c b/fs/exofs/dir.c
index 4cfab1c..d91e9d8 100644
--- a/fs/exofs/dir.c
+++ b/fs/exofs/dir.c
@@ -608,7 +608,7 @@ int exofs_make_empty(struct inode *inode, struct inode *parent)
de->inode_no = cpu_to_le64(parent->i_ino);
memcpy(de->name, PARENT_DIR, sizeof(PARENT_DIR));
exofs_set_de_type(de, inode);
- kunmap_atomic(page, KM_USER0);
+ kunmap_atomic(kaddr, KM_USER0);
err = exofs_commit_chunk(page, 0, chunk_size);
fail:
page_cache_release(page);
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index d1fc662..d3d6a64 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -959,6 +959,9 @@ mext_check_arguments(struct inode *orig_inode,
return -EINVAL;
}

+ if (IS_IMMUTABLE(donor_inode) || IS_APPEND(donor_inode))
+ return -EPERM;
+
/* Ext4 move extent does not support swapfile */
if (IS_SWAPFILE(orig_inode) || IS_SWAPFILE(donor_inode)) {
ext4_debug("ext4 move extent: The argument files should "
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index 5692c48..6df797e 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -911,7 +911,8 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
percpu_counter_add(&sbi->s_freeinodes_counter,
EXT4_INODES_PER_GROUP(sb));

- if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) {
+ if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG) &&
+ sbi->s_log_groups_per_flex) {
ext4_group_t flex_group;
flex_group = ext4_flex_group(sbi, input->group);
atomic_add(input->free_blocks_count,
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 4b37f7c..760dc8d 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -852,6 +852,12 @@ static long wb_check_old_data_flush(struct bdi_writeback *wb)
unsigned long expired;
long nr_pages;

+ /*
+ * When set to zero, disable periodic writeback
+ */
+ if (!dirty_writeback_interval)
+ return 0;
+
expired = wb->last_old_flush +
msecs_to_jiffies(dirty_writeback_interval * 10);
if (time_before(jiffies, expired))
@@ -947,8 +953,12 @@ int bdi_writeback_task(struct bdi_writeback *wb)
break;
}

- wait_jiffies = msecs_to_jiffies(dirty_writeback_interval * 10);
- schedule_timeout_interruptible(wait_jiffies);
+ if (dirty_writeback_interval) {
+ wait_jiffies = msecs_to_jiffies(dirty_writeback_interval * 10);
+ schedule_timeout_interruptible(wait_jiffies);
+ } else
+ schedule();
+
try_to_freeze();
}

diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index e6dd2ae..b20bfcc 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -218,6 +218,11 @@ static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
if (error)
goto out_drop_write;

+ error = -EACCES;
+ if (!is_owner_or_cap(inode))
+ goto out;
+
+ error = 0;
flags = ip->i_diskflags;
new_flags = (flags & ~mask) | (reqflags & mask);
if ((new_flags ^ flags) == 0)
@@ -275,8 +280,10 @@ static int gfs2_set_flags(struct file *filp, u32 __user *ptr)
{
struct inode *inode = filp->f_path.dentry->d_inode;
u32 fsflags, gfsflags;
+
if (get_user(fsflags, ptr))
return -EFAULT;
+
gfsflags = fsflags_cvt(fsflags_to_gfs2, fsflags);
if (!S_ISDIR(inode->i_mode)) {
if (gfsflags & GFS2_DIF_INHERIT_JDATA)
diff --git a/fs/libfs.c b/fs/libfs.c
index ea9a6cc..b016af9 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -418,7 +418,8 @@ int simple_write_end(struct file *file, struct address_space *mapping,
* unique inode values later for this filesystem, then you must take care
* to pass it an appropriate max_reserved value to avoid collisions.
*/
-int simple_fill_super(struct super_block *s, int magic, struct tree_descr *files)
+int simple_fill_super(struct super_block *s, unsigned long magic,
+ struct tree_descr *files)
{
struct inode *inode;
struct dentry *root;
diff --git a/fs/namei.c b/fs/namei.c
index b86b96f..f6c7fcf 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1620,6 +1620,7 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
case LAST_DOTDOT:
follow_dotdot(nd);
dir = nd->path.dentry;
+ case LAST_DOT:
if (nd->path.mnt->mnt_sb->s_type->fs_flags & FS_REVAL_DOT) {
if (!dir->d_op->d_revalidate(dir, nd)) {
error = -ESTALE;
@@ -1627,7 +1628,6 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
}
}
/* fallthrough */
- case LAST_DOT:
case LAST_ROOT:
if (open_flag & O_CREAT)
goto exit;
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 3aea3ca..91679e2 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1386,7 +1386,7 @@ static int nfs_commit_inode(struct inode *inode, int how)
int res = 0;

if (!nfs_commit_set_lock(NFS_I(inode), may_wait))
- goto out;
+ goto out_mark_dirty;
spin_lock(&inode->i_lock);
res = nfs_scan_commit(inode, &head, 0, 0);
spin_unlock(&inode->i_lock);
@@ -1398,9 +1398,18 @@ static int nfs_commit_inode(struct inode *inode, int how)
wait_on_bit(&NFS_I(inode)->flags, NFS_INO_COMMIT,
nfs_wait_bit_killable,
TASK_KILLABLE);
+ else
+ goto out_mark_dirty;
} else
nfs_commit_clear_lock(NFS_I(inode));
-out:
+ return res;
+ /* Note: If we exit without ensuring that the commit is complete,
+ * we must mark the inode as dirty. Otherwise, future calls to
+ * sync_inode() with the WB_SYNC_ALL flag set will fail to ensure
+ * that the data is on the disk.
+ */
+out_mark_dirty:
+ __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
return res;
}

@@ -1509,14 +1518,17 @@ int nfs_wb_page(struct inode *inode, struct page *page)
};
int ret;

- while(PagePrivate(page)) {
+ for (;;) {
wait_on_page_writeback(page);
if (clear_page_dirty_for_io(page)) {
ret = nfs_writepage_locked(page, &wbc);
if (ret < 0)
goto out_error;
+ continue;
}
- ret = sync_inode(inode, &wbc);
+ if (!PagePrivate(page))
+ break;
+ ret = nfs_commit_inode(inode, FLUSH_SYNC);
if (ret < 0)
goto out_error;
}
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index 171699e..06b2a26 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -120,7 +120,7 @@ u32 nfsd_supported_minorversion;
int nfsd_vers(int vers, enum vers_op change)
{
if (vers < NFSD_MINVERS || vers >= NFSD_NRVERS)
- return -1;
+ return 0;
switch(change) {
case NFSD_SET:
nfsd_versions[vers] = nfsd_version[vers];
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 6dd5f19..4eb9baa 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -443,8 +443,7 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
if (size_change)
put_write_access(inode);
if (!err)
- if (EX_ISSYNC(fhp->fh_export))
- write_inode_now(inode, 1);
+ commit_metadata(fhp);
out:
return err;

@@ -724,7 +723,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
struct inode *inode;
int flags = O_RDONLY|O_LARGEFILE;
__be32 err;
- int host_err;
+ int host_err = 0;

validate_process_creds();

@@ -761,7 +760,8 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
* Check to see if there are any leases on this file.
* This may block while leases are broken.
*/
- host_err = break_lease(inode, O_NONBLOCK | ((access & NFSD_MAY_WRITE) ? O_WRONLY : 0));
+ if (!(access & NFSD_MAY_NOT_BREAK_LEASE))
+ host_err = break_lease(inode, O_NONBLOCK | ((access & NFSD_MAY_WRITE) ? O_WRONLY : 0));
if (host_err == -EWOULDBLOCK)
host_err = -ETIMEDOUT;
if (host_err) /* NOMEM or WOULDBLOCK */
@@ -1169,7 +1169,8 @@ nfsd_commit(struct svc_rqst *rqstp, struct svc_fh *fhp,
goto out;
}

- err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_WRITE, &file);
+ err = nfsd_open(rqstp, fhp, S_IFREG,
+ NFSD_MAY_WRITE|NFSD_MAY_NOT_BREAK_LEASE, &file);
if (err)
goto out;
if (EX_ISSYNC(fhp->fh_export)) {
diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h
index 4b1de0a..217a62c 100644
--- a/fs/nfsd/vfs.h
+++ b/fs/nfsd/vfs.h
@@ -20,6 +20,7 @@
#define NFSD_MAY_OWNER_OVERRIDE 64
#define NFSD_MAY_LOCAL_ACCESS 128 /* IRIX doing local access check on device special file*/
#define NFSD_MAY_BYPASS_GSS_ON_ROOT 256
+#define NFSD_MAY_NOT_BREAK_LEASE 512

#define NFSD_MAY_CREATE (NFSD_MAY_EXEC|NFSD_MAY_WRITE)
#define NFSD_MAY_REMOVE (NFSD_MAY_EXEC|NFSD_MAY_WRITE|NFSD_MAY_TRUNC)
diff --git a/include/linux/aio.h b/include/linux/aio.h
index 811dbb3..7a8db41 100644
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -212,6 +212,8 @@ extern void kick_iocb(struct kiocb *iocb);
extern int aio_complete(struct kiocb *iocb, long res, long res2);
struct mm_struct;
extern void exit_aio(struct mm_struct *mm);
+extern long do_io_submit(aio_context_t ctx_id, long nr,
+ struct iocb __user *__user *iocbpp, bool compat);
#else
static inline ssize_t wait_on_sync_kiocb(struct kiocb *iocb) { return 0; }
static inline int aio_put_req(struct kiocb *iocb) { return 0; }
@@ -219,6 +221,9 @@ static inline void kick_iocb(struct kiocb *iocb) { }
static inline int aio_complete(struct kiocb *iocb, long res, long res2) { return 0; }
struct mm_struct;
static inline void exit_aio(struct mm_struct *mm) { }
+static inline long do_io_submit(aio_context_t ctx_id, long nr,
+ struct iocb __user * __user *iocbpp,
+ bool compat) { return 0; }
#endif /* CONFIG_AIO */

static inline struct kiocb *list_kiocb(struct list_head *h)
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 717c691..168f7da 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -356,5 +356,9 @@ asmlinkage long compat_sys_newfstatat(unsigned int dfd, char __user * filename,
asmlinkage long compat_sys_openat(unsigned int dfd, const char __user *filename,
int flags, int mode);

+extern ssize_t compat_rw_copy_check_uvector(int type,
+ const struct compat_iovec __user *uvector, unsigned long nr_segs,
+ unsigned long fast_segs, struct iovec *fast_pointer,
+ struct iovec **ret_pointer);
#endif /* CONFIG_COMPAT */
#endif /* _LINUX_COMPAT_H */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 44f35ae..801b398 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2356,7 +2356,7 @@ extern const struct file_operations simple_dir_operations;
extern const struct inode_operations simple_dir_inode_operations;
struct tree_descr { char *name; const struct file_operations *ops; int mode; };
struct dentry *d_alloc_name(struct dentry *, const char *);
-extern int simple_fill_super(struct super_block *, int, struct tree_descr *);
+extern int simple_fill_super(struct super_block *, unsigned long, struct tree_descr *);
extern int simple_pin_fs(struct file_system_type *, struct vfsmount **mount, int *count);
extern void simple_release_fs(struct vfsmount **mount, int *count);

diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h
index 8b5f7cc..ce9cd11 100644
--- a/include/linux/miscdevice.h
+++ b/include/linux/miscdevice.h
@@ -3,6 +3,12 @@
#include <linux/module.h>
#include <linux/major.h>

+/*
+ * These allocations are managed by device@xxxxxxxxxxx If you use an
+ * entry that is not in assigned your entry may well be moved and
+ * reassigned, or set dynamic if a fixed value is not justified.
+ */
+
#define PSMOUSE_MINOR 1
#define MS_BUSMOUSE_MINOR 2
#define ATIXL_BUSMOUSE_MINOR 3
@@ -30,7 +36,6 @@
#define HPET_MINOR 228
#define FUSE_MINOR 229
#define KVM_MINOR 232
-#define VHOST_NET_MINOR 233
#define MISC_DYNAMIC_MINOR 255

struct device;
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 9f688d2..c516a33 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2321,6 +2321,7 @@
#define PCI_VENDOR_ID_JMICRON 0x197B
#define PCI_DEVICE_ID_JMICRON_JMB360 0x2360
#define PCI_DEVICE_ID_JMICRON_JMB361 0x2361
+#define PCI_DEVICE_ID_JMICRON_JMB362 0x2362
#define PCI_DEVICE_ID_JMICRON_JMB363 0x2363
#define PCI_DEVICE_ID_JMICRON_JMB365 0x2365
#define PCI_DEVICE_ID_JMICRON_JMB366 0x2366
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index c8e3754..eea9188 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -531,6 +531,7 @@ enum perf_event_active_state {
struct file;

struct perf_mmap_data {
+ atomic_t refcount;
struct rcu_head rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
struct work_struct work;
@@ -538,7 +539,6 @@ struct perf_mmap_data {
int data_order;
int nr_pages; /* nr of data pages */
int writable; /* are we writable */
- int nr_locked; /* nr pages mlocked */

atomic_t poll; /* POLL_ for wakeups */
atomic_t events; /* event_id limit */
@@ -582,7 +582,6 @@ struct perf_event {
int nr_siblings;
int group_flags;
struct perf_event *group_leader;
- struct perf_event *output;
const struct pmu *pmu;

enum perf_event_active_state state;
@@ -643,6 +642,8 @@ struct perf_event {
/* mmap bits */
struct mutex mmap_mutex;
atomic_t mmap_count;
+ int mmap_locked;
+ struct user_struct *mmap_user;
struct perf_mmap_data *data;

/* poll related */
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 0249d41..c3dc7e1 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -75,12 +75,6 @@ struct kmem_cache {
int offset; /* Free pointer offset. */
struct kmem_cache_order_objects oo;

- /*
- * Avoid an extra cache line for UP, SMP and for the node local to
- * struct kmem_cache.
- */
- struct kmem_cache_node local_node;
-
/* Allocation and freeing of slabs */
struct kmem_cache_order_objects max;
struct kmem_cache_order_objects min;
@@ -102,6 +96,9 @@ struct kmem_cache {
*/
int remote_node_defrag_ratio;
struct kmem_cache_node *node[MAX_NUMNODES];
+#else
+ /* Avoid an extra cache line for UP */
+ struct kmem_cache_node local_node;
#endif
};

@@ -132,7 +129,7 @@ struct kmem_cache {
#ifdef CONFIG_ZONE_DMA
#define SLUB_DMA __GFP_DMA
/* Reserve extra caches for potential DMA use */
-#define KMALLOC_CACHES (2 * SLUB_PAGE_SHIFT - 6)
+#define KMALLOC_CACHES (2 * SLUB_PAGE_SHIFT)
#else
/* Disable DMA functionality */
#define SLUB_DMA (__force gfp_t)0
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 1f59d93..d70f424 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -223,21 +223,11 @@ static inline void lru_cache_add_anon(struct page *page)
__lru_cache_add(page, LRU_INACTIVE_ANON);
}

-static inline void lru_cache_add_active_anon(struct page *page)
-{
- __lru_cache_add(page, LRU_ACTIVE_ANON);
-}
-
static inline void lru_cache_add_file(struct page *page)
{
__lru_cache_add(page, LRU_INACTIVE_FILE);
}

-static inline void lru_cache_add_active_file(struct page *page)
-{
- __lru_cache_add(page, LRU_ACTIVE_FILE);
-}
-
/* linux/mm/vmscan.c */
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
gfp_t gfp_mask, nodemask_t *mask);
diff --git a/include/linux/tboot.h b/include/linux/tboot.h
index bf2a0c7..1dba6ee 100644
--- a/include/linux/tboot.h
+++ b/include/linux/tboot.h
@@ -150,6 +150,7 @@ extern int tboot_force_iommu(void);

#else

+#define tboot_enabled() 0
#define tboot_probe() do { } while (0)
#define tboot_shutdown(shutdown_type) do { } while (0)
#define tboot_sleep(sleep_state, pm1a_control, pm1b_control) \
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 739f1fd..9983302 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -965,10 +965,19 @@ extern int usb_disabled(void);
* needed */
#define URB_FREE_BUFFER 0x0100 /* Free transfer buffer with the URB */

+/* The following flags are used internally by usbcore and HCDs */
#define URB_DIR_IN 0x0200 /* Transfer from device to host */
#define URB_DIR_OUT 0
#define URB_DIR_MASK URB_DIR_IN

+#define URB_DMA_MAP_SINGLE 0x00010000 /* Non-scatter-gather mapping */
+#define URB_DMA_MAP_PAGE 0x00020000 /* HCD-unsupported S-G */
+#define URB_DMA_MAP_SG 0x00040000 /* HCD-supported S-G */
+#define URB_MAP_LOCAL 0x00080000 /* HCD-local-memory mapping */
+#define URB_SETUP_MAP_SINGLE 0x00100000 /* Setup packet DMA mapped */
+#define URB_SETUP_MAP_LOCAL 0x00200000 /* HCD-local setup packet */
+#define URB_DMA_SG_COMBINED 0x00400000 /* S-G entries were combined */
+
struct usb_iso_packet_descriptor {
unsigned int offset;
unsigned int length; /* expected length */
diff --git a/include/trace/events/signal.h b/include/trace/events/signal.h
index a510b75..32c0697 100644
--- a/include/trace/events/signal.h
+++ b/include/trace/events/signal.h
@@ -10,7 +10,8 @@

#define TP_STORE_SIGINFO(__entry, info) \
do { \
- if (info == SEND_SIG_NOINFO) { \
+ if (info == SEND_SIG_NOINFO || \
+ info == SEND_SIG_FORCED) { \
__entry->errno = 0; \
__entry->code = SI_USER; \
} else if (info == SEND_SIG_PRIV) { \
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 6d870f2..bf4f78f 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -4599,7 +4599,7 @@ static int alloc_css_id(struct cgroup_subsys *ss, struct cgroup *parent,
parent_css = parent->subsys[subsys_id];
child_css = child->subsys[subsys_id];
parent_id = parent_css->id;
- depth = parent_id->depth;
+ depth = parent_id->depth + 1;

child_id = get_new_cssid(ss, depth);
if (IS_ERR(child_id))
diff --git a/kernel/compat.c b/kernel/compat.c
index 7f40e92..5adab05 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -495,29 +495,26 @@ asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid, unsigned int len,
{
int ret;
cpumask_var_t mask;
- unsigned long *k;
- unsigned int min_length = cpumask_size();
-
- if (nr_cpu_ids <= BITS_PER_COMPAT_LONG)
- min_length = sizeof(compat_ulong_t);

- if (len < min_length)
+ if ((len * BITS_PER_BYTE) < nr_cpu_ids)
+ return -EINVAL;
+ if (len & (sizeof(compat_ulong_t)-1))
return -EINVAL;

if (!alloc_cpumask_var(&mask, GFP_KERNEL))
return -ENOMEM;

ret = sched_getaffinity(pid, mask);
- if (ret < 0)
- goto out;
+ if (ret == 0) {
+ size_t retlen = min_t(size_t, len, cpumask_size());

- k = cpumask_bits(mask);
- ret = compat_put_bitmap(user_mask_ptr, k, min_length * 8);
- if (ret == 0)
- ret = min_length;
-
-out:
+ if (compat_put_bitmap(user_mask_ptr, cpumask_bits(mask), retlen * 8))
+ ret = -EFAULT;
+ else
+ ret = retlen;
+ }
free_cpumask_var(mask);
+
return ret;
}

diff --git a/kernel/mutex.c b/kernel/mutex.c
index 632f04c..4c0b7b3 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -172,6 +172,13 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
struct thread_info *owner;

/*
+ * If we own the BKL, then don't spin. The owner of
+ * the mutex might be waiting on us to release the BKL.
+ */
+ if (unlikely(current->lock_depth >= 0))
+ break;
+
+ /*
* If there's an owner, wait for it to either
* release the lock or go to sleep.
*/
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 3d1552d..a244651 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -262,6 +262,18 @@ static void update_event_times(struct perf_event *event)
event->total_time_running = run_end - event->tstamp_running;
}

+/*
+ * Update total_time_enabled and total_time_running for all events in a group.
+ */
+static void update_group_times(struct perf_event *leader)
+{
+ struct perf_event *event;
+
+ update_event_times(leader);
+ list_for_each_entry(event, &leader->sibling_list, group_entry)
+ update_event_times(event);
+}
+
static struct list_head *
ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
{
@@ -315,8 +327,6 @@ list_add_event(struct perf_event *event, struct perf_event_context *ctx)
static void
list_del_event(struct perf_event *event, struct perf_event_context *ctx)
{
- struct perf_event *sibling, *tmp;
-
if (list_empty(&event->group_entry))
return;
ctx->nr_events--;
@@ -329,7 +339,7 @@ list_del_event(struct perf_event *event, struct perf_event_context *ctx)
if (event->group_leader != event)
event->group_leader->nr_siblings--;

- update_event_times(event);
+ update_group_times(event);

/*
* If event was in error state, then keep it
@@ -340,6 +350,12 @@ list_del_event(struct perf_event *event, struct perf_event_context *ctx)
*/
if (event->state > PERF_EVENT_STATE_OFF)
event->state = PERF_EVENT_STATE_OFF;
+}
+
+static void
+perf_destroy_group(struct perf_event *event, struct perf_event_context *ctx)
+{
+ struct perf_event *sibling, *tmp;

/*
* If this was a group event with sibling events then
@@ -505,18 +521,6 @@ retry:
}

/*
- * Update total_time_enabled and total_time_running for all events in a group.
- */
-static void update_group_times(struct perf_event *leader)
-{
- struct perf_event *event;
-
- update_event_times(leader);
- list_for_each_entry(event, &leader->sibling_list, group_entry)
- update_event_times(event);
-}
-
-/*
* Cross CPU call to disable a performance event
*/
static void __perf_event_disable(void *info)
@@ -1452,6 +1456,9 @@ do { \
divisor = nsec * frequency;
}

+ if (!divisor)
+ return dividend;
+
return div64_u64(dividend, divisor);
}

@@ -1474,7 +1481,7 @@ static int perf_event_start(struct perf_event *event)
static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
{
struct hw_perf_event *hwc = &event->hw;
- u64 period, sample_period;
+ s64 period, sample_period;
s64 delta;

period = perf_calculate_period(event, nsec, count);
@@ -1825,6 +1832,7 @@ static void free_event_rcu(struct rcu_head *head)
}

static void perf_pending_sync(struct perf_event *event);
+static void perf_mmap_data_put(struct perf_mmap_data *data);

static void free_event(struct perf_event *event)
{
@@ -1840,9 +1848,9 @@ static void free_event(struct perf_event *event)
atomic_dec(&nr_task_events);
}

- if (event->output) {
- fput(event->output->filp);
- event->output = NULL;
+ if (event->data) {
+ perf_mmap_data_put(event->data);
+ event->data = NULL;
}

if (event->destroy)
@@ -1856,9 +1864,18 @@ int perf_event_release_kernel(struct perf_event *event)
{
struct perf_event_context *ctx = event->ctx;

+ /*
+ * Remove from the PMU, can't get re-enabled since we got
+ * here because the last ref went.
+ */
+ perf_event_disable(event);
+
WARN_ON_ONCE(ctx->parent_ctx);
mutex_lock(&ctx->mutex);
- perf_event_remove_from_context(event);
+ raw_spin_lock_irq(&ctx->lock);
+ list_del_event(event, ctx);
+ perf_destroy_group(event, ctx);
+ raw_spin_unlock_irq(&ctx->lock);
mutex_unlock(&ctx->mutex);

mutex_lock(&event->owner->perf_event_mutex);
@@ -2138,7 +2155,27 @@ unlock:
return ret;
}

-static int perf_event_set_output(struct perf_event *event, int output_fd);
+static const struct file_operations perf_fops;
+
+static struct perf_event *perf_fget_light(int fd, int *fput_needed)
+{
+ struct file *file;
+
+ file = fget_light(fd, fput_needed);
+ if (!file)
+ return ERR_PTR(-EBADF);
+
+ if (file->f_op != &perf_fops) {
+ fput_light(file, *fput_needed);
+ *fput_needed = 0;
+ return ERR_PTR(-EBADF);
+ }
+
+ return file->private_data;
+}
+
+static int perf_event_set_output(struct perf_event *event,
+ struct perf_event *output_event);
static int perf_event_set_filter(struct perf_event *event, void __user *arg);

static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
@@ -2165,7 +2202,23 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return perf_event_period(event, (u64 __user *)arg);

case PERF_EVENT_IOC_SET_OUTPUT:
- return perf_event_set_output(event, arg);
+ {
+ struct perf_event *output_event = NULL;
+ int fput_needed = 0;
+ int ret;
+
+ if (arg != -1) {
+ output_event = perf_fget_light(arg, &fput_needed);
+ if (IS_ERR(output_event))
+ return PTR_ERR(output_event);
+ }
+
+ ret = perf_event_set_output(event, output_event);
+ if (output_event)
+ fput_light(output_event->filp, fput_needed);
+
+ return ret;
+ }

case PERF_EVENT_IOC_SET_FILTER:
return perf_event_set_filter(event, (void __user *)arg);
@@ -2290,8 +2343,6 @@ perf_mmap_data_alloc(struct perf_event *event, int nr_pages)
unsigned long size;
int i;

- WARN_ON(atomic_read(&event->mmap_count));
-
size = sizeof(struct perf_mmap_data);
size += nr_pages * sizeof(void *);

@@ -2398,8 +2449,6 @@ perf_mmap_data_alloc(struct perf_event *event, int nr_pages)
unsigned long size;
void *all_buf;

- WARN_ON(atomic_read(&event->mmap_count));
-
size = sizeof(struct perf_mmap_data);
size += sizeof(void *);

@@ -2479,7 +2528,7 @@ perf_mmap_data_init(struct perf_event *event, struct perf_mmap_data *data)
if (!data->watermark)
data->watermark = max_size / 2;

-
+ atomic_set(&data->refcount, 1);
rcu_assign_pointer(event->data, data);
}

@@ -2491,13 +2540,26 @@ static void perf_mmap_data_free_rcu(struct rcu_head *rcu_head)
perf_mmap_data_free(data);
}

-static void perf_mmap_data_release(struct perf_event *event)
+static struct perf_mmap_data *perf_mmap_data_get(struct perf_event *event)
{
- struct perf_mmap_data *data = event->data;
+ struct perf_mmap_data *data;
+
+ rcu_read_lock();
+ data = rcu_dereference(event->data);
+ if (data) {
+ if (!atomic_inc_not_zero(&data->refcount))
+ data = NULL;
+ }
+ rcu_read_unlock();

- WARN_ON(atomic_read(&event->mmap_count));
+ return data;
+}
+
+static void perf_mmap_data_put(struct perf_mmap_data *data)
+{
+ if (!atomic_dec_and_test(&data->refcount))
+ return;

- rcu_assign_pointer(event->data, NULL);
call_rcu(&data->rcu_head, perf_mmap_data_free_rcu);
}

@@ -2512,15 +2574,18 @@ static void perf_mmap_close(struct vm_area_struct *vma)
{
struct perf_event *event = vma->vm_file->private_data;

- WARN_ON_ONCE(event->ctx->parent_ctx);
if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) {
unsigned long size = perf_data_size(event->data);
- struct user_struct *user = current_user();
+ struct user_struct *user = event->mmap_user;
+ struct perf_mmap_data *data = event->data;

atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
- vma->vm_mm->locked_vm -= event->data->nr_locked;
- perf_mmap_data_release(event);
+ vma->vm_mm->locked_vm -= event->mmap_locked;
+ rcu_assign_pointer(event->data, NULL);
mutex_unlock(&event->mmap_mutex);
+
+ perf_mmap_data_put(data);
+ free_uid(user);
}
}

@@ -2564,13 +2629,10 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)

WARN_ON_ONCE(event->ctx->parent_ctx);
mutex_lock(&event->mmap_mutex);
- if (event->output) {
- ret = -EINVAL;
- goto unlock;
- }
-
- if (atomic_inc_not_zero(&event->mmap_count)) {
- if (nr_pages != event->data->nr_pages)
+ if (event->data) {
+ if (event->data->nr_pages == nr_pages)
+ atomic_inc(&event->data->refcount);
+ else
ret = -EINVAL;
goto unlock;
}
@@ -2602,21 +2664,23 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
WARN_ON(event->data);

data = perf_mmap_data_alloc(event, nr_pages);
- ret = -ENOMEM;
- if (!data)
+ if (!data) {
+ ret = -ENOMEM;
goto unlock;
+ }

- ret = 0;
perf_mmap_data_init(event, data);
-
- atomic_set(&event->mmap_count, 1);
- atomic_long_add(user_extra, &user->locked_vm);
- vma->vm_mm->locked_vm += extra;
- event->data->nr_locked = extra;
if (vma->vm_flags & VM_WRITE)
event->data->writable = 1;

+ atomic_long_add(user_extra, &user->locked_vm);
+ event->mmap_locked = extra;
+ event->mmap_user = get_current_user();
+ vma->vm_mm->locked_vm += event->mmap_locked;
+
unlock:
+ if (!ret)
+ atomic_inc(&event->mmap_count);
mutex_unlock(&event->mmap_mutex);

vma->vm_flags |= VM_RESERVED;
@@ -2946,7 +3010,6 @@ int perf_output_begin(struct perf_output_handle *handle,
struct perf_event *event, unsigned int size,
int nmi, int sample)
{
- struct perf_event *output_event;
struct perf_mmap_data *data;
unsigned long tail, offset, head;
int have_lost;
@@ -2963,10 +3026,6 @@ int perf_output_begin(struct perf_output_handle *handle,
if (event->parent)
event = event->parent;

- output_event = rcu_dereference(event->output);
- if (output_event)
- event = output_event;
-
data = rcu_dereference(event->data);
if (!data)
goto out;
@@ -4730,54 +4789,41 @@ err_size:
goto out;
}

-static int perf_event_set_output(struct perf_event *event, int output_fd)
+static int
+perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
{
- struct perf_event *output_event = NULL;
- struct file *output_file = NULL;
- struct perf_event *old_output;
- int fput_needed = 0;
+ struct perf_mmap_data *data = NULL, *old_data = NULL;
int ret = -EINVAL;

- if (!output_fd)
+ if (!output_event)
goto set;

- output_file = fget_light(output_fd, &fput_needed);
- if (!output_file)
- return -EBADF;
-
- if (output_file->f_op != &perf_fops)
+ /* don't allow circular references */
+ if (event == output_event)
goto out;

- output_event = output_file->private_data;
-
- /* Don't chain output fds */
- if (output_event->output)
- goto out;
-
- /* Don't set an output fd when we already have an output channel */
- if (event->data)
- goto out;
-
- atomic_long_inc(&output_file->f_count);
-
set:
mutex_lock(&event->mmap_mutex);
- old_output = event->output;
- rcu_assign_pointer(event->output, output_event);
- mutex_unlock(&event->mmap_mutex);
+ /* Can't redirect output if we've got an active mmap() */
+ if (atomic_read(&event->mmap_count))
+ goto unlock;

- if (old_output) {
- /*
- * we need to make sure no existing perf_output_*()
- * is still referencing this event.
- */
- synchronize_rcu();
- fput(old_output->filp);
+ if (output_event) {
+ /* get the buffer we want to redirect to */
+ data = perf_mmap_data_get(output_event);
+ if (!data)
+ goto unlock;
}

+ old_data = event->data;
+ rcu_assign_pointer(event->data, data);
ret = 0;
+unlock:
+ mutex_unlock(&event->mmap_mutex);
+
+ if (old_data)
+ perf_mmap_data_put(old_data);
out:
- fput_light(output_file, fput_needed);
return ret;
}

@@ -4793,13 +4839,13 @@ SYSCALL_DEFINE5(perf_event_open,
struct perf_event_attr __user *, attr_uptr,
pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
{
- struct perf_event *event, *group_leader;
+ struct perf_event *event, *group_leader = NULL, *output_event = NULL;
struct perf_event_attr attr;
struct perf_event_context *ctx;
struct file *event_file = NULL;
struct file *group_file = NULL;
+ int event_fd;
int fput_needed = 0;
- int fput_needed2 = 0;
int err;

/* for future expandability... */
@@ -4820,26 +4866,38 @@ SYSCALL_DEFINE5(perf_event_open,
return -EINVAL;
}

+ event_fd = get_unused_fd_flags(O_RDWR);
+ if (event_fd < 0)
+ return event_fd;
+
+ if (group_fd != -1) {
+ group_leader = perf_fget_light(group_fd, &fput_needed);
+ if (IS_ERR(group_leader)) {
+ err = PTR_ERR(group_leader);
+ goto err_put_context;
+ }
+ group_file = group_leader->filp;
+ if (flags & PERF_FLAG_FD_OUTPUT)
+ output_event = group_leader;
+ if (flags & PERF_FLAG_FD_NO_GROUP)
+ group_leader = NULL;
+ }
+
/*
* Get the target context (task or percpu):
*/
ctx = find_get_context(pid, cpu);
- if (IS_ERR(ctx))
- return PTR_ERR(ctx);
+ if (IS_ERR(ctx)) {
+ err = PTR_ERR(ctx);
+ goto err_fd;
+ }

/*
* Look up the group leader (we will attach this event to it):
*/
- group_leader = NULL;
- if (group_fd != -1 && !(flags & PERF_FLAG_FD_NO_GROUP)) {
+ if (group_leader) {
err = -EINVAL;
- group_file = fget_light(group_fd, &fput_needed);
- if (!group_file)
- goto err_put_context;
- if (group_file->f_op != &perf_fops)
- goto err_put_context;

- group_leader = group_file->private_data;
/*
* Do not allow a recursive hierarchy (this new sibling
* becoming part of another group-sibling):
@@ -4861,22 +4919,21 @@ SYSCALL_DEFINE5(perf_event_open,

event = perf_event_alloc(&attr, cpu, ctx, group_leader,
NULL, NULL, GFP_KERNEL);
- err = PTR_ERR(event);
- if (IS_ERR(event))
+ if (IS_ERR(event)) {
+ err = PTR_ERR(event);
goto err_put_context;
+ }

- err = anon_inode_getfd("[perf_event]", &perf_fops, event, O_RDWR);
- if (err < 0)
- goto err_free_put_context;
+ if (output_event) {
+ err = perf_event_set_output(event, output_event);
+ if (err)
+ goto err_free_put_context;
+ }

- event_file = fget_light(err, &fput_needed2);
- if (!event_file)
+ event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, O_RDWR);
+ if (IS_ERR(event_file)) {
+ err = PTR_ERR(event_file);
goto err_free_put_context;
-
- if (flags & PERF_FLAG_FD_OUTPUT) {
- err = perf_event_set_output(event, group_fd);
- if (err)
- goto err_fput_free_put_context;
}

event->filp = event_file;
@@ -4892,19 +4949,17 @@ SYSCALL_DEFINE5(perf_event_open,
list_add_tail(&event->owner_entry, &current->perf_event_list);
mutex_unlock(&current->perf_event_mutex);

-err_fput_free_put_context:
- fput_light(event_file, fput_needed2);
+ fput_light(group_file, fput_needed);
+ fd_install(event_fd, event_file);
+ return event_fd;

err_free_put_context:
- if (err < 0)
- free_event(event);
-
+ free_event(event);
err_put_context:
- if (err < 0)
- put_ctx(ctx);
-
fput_light(group_file, fput_needed);
-
+ put_ctx(ctx);
+err_fd:
+ put_unused_fd(event_fd);
return err;
}
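
Several of the perf hunks above replace the event->output pointer with a reference-counted ring buffer: lookups pin the buffer only while its count is still non-zero (atomic_inc_not_zero), and the last perf_mmap_data_put() frees it via RCU. The sketch below shows the same get/put shape with C11 atomics in userspace; struct buffer, buffer_get() and buffer_put() are illustrative names, and a plain free() stands in for the kernel's call_rcu() deferral.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct buffer {
	atomic_int refcount;
	/* ring-buffer pages would live here */
};

/* Take a reference only if the buffer has not already dropped to zero. */
static bool buffer_get(struct buffer *buf)
{
	int old = atomic_load(&buf->refcount);

	while (old != 0) {
		if (atomic_compare_exchange_weak(&buf->refcount, &old, old + 1))
			return true;
	}
	return false;	/* already being torn down */
}

/* Drop a reference; the last putter frees the buffer. */
static void buffer_put(struct buffer *buf)
{
	if (atomic_fetch_sub(&buf->refcount, 1) == 1)
		free(buf);	/* the kernel defers this through call_rcu() */
}

int main(void)
{
	struct buffer *buf = calloc(1, sizeof(*buf));

	if (!buf)
		return 1;
	atomic_store(&buf->refcount, 1);	/* reference held by the mmap */
	if (buffer_get(buf))			/* a redirected event pins it */
		buffer_put(buf);		/* ...and later drops it */
	buffer_put(buf);			/* unmap drops the final reference */
	return 0;
}
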

diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index 00d1fda..ad72342 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -559,14 +559,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
new_timer->it_id = (timer_t) new_timer_id;
new_timer->it_clock = which_clock;
new_timer->it_overrun = -1;
- error = CLOCK_DISPATCH(which_clock, timer_create, (new_timer));
- if (error)
- goto out;

- /*
- * return the timer_id now. The next step is hard to
- * back out if there is an error.
- */
if (copy_to_user(created_timer_id,
&new_timer_id, sizeof (new_timer_id))) {
error = -EFAULT;
@@ -597,6 +590,10 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
new_timer->sigq->info.si_tid = new_timer->it_id;
new_timer->sigq->info.si_code = SI_TIMER;

+ error = CLOCK_DISPATCH(which_clock, timer_create, (new_timer));
+ if (error)
+ goto out;
+
spin_lock_irq(&current->sighand->siglock);
new_timer->it_signal = current->signal;
list_add(&new_timer->list, &current->signal->posix_timers);
diff --git a/kernel/signal.c b/kernel/signal.c
index dbd7fe0..48f2130 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -642,7 +642,7 @@ static inline bool si_fromuser(const struct siginfo *info)
static int check_kill_permission(int sig, struct siginfo *info,
struct task_struct *t)
{
- const struct cred *cred = current_cred(), *tcred;
+ const struct cred *cred, *tcred;
struct pid *sid;
int error;

@@ -656,8 +656,10 @@ static int check_kill_permission(int sig, struct siginfo *info,
if (error)
return error;

+ cred = current_cred();
tcred = __task_cred(t);
- if ((cred->euid ^ tcred->suid) &&
+ if (!same_thread_group(current, t) &&
+ (cred->euid ^ tcred->suid) &&
(cred->euid ^ tcred->uid) &&
(cred->uid ^ tcred->suid) &&
(cred->uid ^ tcred->uid) &&
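
The signal.c change above makes two adjustments: current_cred() is only sampled after the security hook has run, and the euid/uid comparison is skipped entirely when sender and target share a thread group. A hedged illustration of the resulting predicate; struct cred_ids and needs_extra_capability() are made-up names, and plain != replaces the kernel's XOR idiom:

#include <stdbool.h>
#include <stdio.h>

struct cred_ids {
	unsigned int uid, euid, suid;
};

/* True when the sender's own IDs are not enough to allow the signal. */
static bool needs_extra_capability(bool same_thread_group,
				   const struct cred_ids *sender,
				   const struct cred_ids *target)
{
	if (same_thread_group)
		return false;	/* threads may always signal each other */

	return sender->euid != target->suid &&
	       sender->euid != target->uid  &&
	       sender->uid  != target->suid &&
	       sender->uid  != target->uid;
}

int main(void)
{
	struct cred_ids s = { .uid = 1000, .euid = 1000, .suid = 1000 };
	struct cred_ids t = { .uid = 0,    .euid = 0,    .suid = 0 };

	printf("%d\n", needs_extra_capability(false, &s, &t));	/* 1 */
	printf("%d\n", needs_extra_capability(true,  &s, &t));	/* 0 */
	return 0;
}
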
diff --git a/lib/idr.c b/lib/idr.c
index 2eb1dca..0d74f6b 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -445,6 +445,7 @@ EXPORT_SYMBOL(idr_remove);
void idr_remove_all(struct idr *idp)
{
int n, id, max;
+ int bt_mask;
struct idr_layer *p;
struct idr_layer *pa[MAX_LEVEL];
struct idr_layer **paa = &pa[0];
@@ -462,8 +463,10 @@ void idr_remove_all(struct idr *idp)
p = p->ary[(id >> n) & IDR_MASK];
}

+ bt_mask = id;
id += 1 << n;
- while (n < fls(id)) {
+ /* Get the highest bit that the above add changed from 0->1. */
+ while (n < fls(id ^ bt_mask)) {
if (p)
free_layer(p);
n += IDR_BITS;
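
The idr fix above remembers the id before the "id += 1 << n" step and frees layers only up to fls(id ^ bt_mask), i.e. up to the highest bit the addition actually flipped. A standalone illustration of the bit trick; fls_approx() is a stand-in for the kernel's fls(), built on GCC's __builtin_clz():

#include <stdio.h>

/* fls(): 1-based index of the highest set bit, 0 for an all-zero word. */
static int fls_approx(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
	unsigned int n = 5;			/* bits covered by one layer */
	unsigned int bt_mask = 0x3f;		/* id before the increment */
	unsigned int id = bt_mask + (1u << n);	/* id after stepping past it */

	/*
	 * id ^ bt_mask keeps exactly the bits the addition flipped, so fls()
	 * of it says how far up the tree the walk really has to back out.
	 * Here the carry reaches bit 6, so fls() reports 7 (1-based).
	 */
	printf("fls(id ^ bt_mask) = %d\n", fls_approx(id ^ bt_mask));
	return 0;
}
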
diff --git a/mm/filemap.c b/mm/filemap.c
index 140ebda..3760bdc 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -441,7 +441,7 @@ int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
/*
* Splice_read and readahead add shmem/tmpfs pages into the page cache
* before shmem_readpage has a chance to mark them as SwapBacked: they
- * need to go on the active_anon lru below, and mem_cgroup_cache_charge
+ * need to go on the anon lru below, and mem_cgroup_cache_charge
* (called in add_to_page_cache) needs to know where they're going too.
*/
if (mapping_cap_swap_backed(mapping))
@@ -452,7 +452,7 @@ int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
if (page_is_file_cache(page))
lru_cache_add_file(page);
else
- lru_cache_add_active_anon(page);
+ lru_cache_add_anon(page);
}
return ret;
}
@@ -1099,6 +1099,12 @@ page_not_up_to_date_locked:
}

readpage:
+ /*
+ * A previous I/O error may have been due to temporary
+ * failures, eg. multipath errors.
+ * PG_error will be set again if readpage fails.
+ */
+ ClearPageError(page);
/* Start the actual read. The read will unlock the page. */
error = mapping->a_ops->readpage(filp, page);

diff --git a/mm/slub.c b/mm/slub.c
index d2a54fe..6cf6be7 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2141,7 +2141,7 @@ static void free_kmem_cache_nodes(struct kmem_cache *s)

for_each_node_state(node, N_NORMAL_MEMORY) {
struct kmem_cache_node *n = s->node[node];
- if (n && n != &s->local_node)
+ if (n)
kmem_cache_free(kmalloc_caches, n);
s->node[node] = NULL;
}
@@ -2150,33 +2150,22 @@ static void free_kmem_cache_nodes(struct kmem_cache *s)
static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
{
int node;
- int local_node;
-
- if (slab_state >= UP && (s < kmalloc_caches ||
- s >= kmalloc_caches + KMALLOC_CACHES))
- local_node = page_to_nid(virt_to_page(s));
- else
- local_node = 0;

for_each_node_state(node, N_NORMAL_MEMORY) {
struct kmem_cache_node *n;

- if (local_node == node)
- n = &s->local_node;
- else {
- if (slab_state == DOWN) {
- early_kmem_cache_node_alloc(gfpflags, node);
- continue;
- }
- n = kmem_cache_alloc_node(kmalloc_caches,
- gfpflags, node);
-
- if (!n) {
- free_kmem_cache_nodes(s);
- return 0;
- }
+ if (slab_state == DOWN) {
+ early_kmem_cache_node_alloc(gfpflags, node);
+ continue;
+ }
+ n = kmem_cache_alloc_node(kmalloc_caches,
+ gfpflags, node);

+ if (!n) {
+ free_kmem_cache_nodes(s);
+ return 0;
}
+
s->node[node] = n;
init_kmem_cache_node(n, s);
}
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index a952b7f..334c359 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -15,8 +15,12 @@ comment "CFG80211 needs to be enabled for MAC80211"

if MAC80211 != n

+config MAC80211_HAS_RC
+ def_bool n
+
config MAC80211_RC_PID
bool "PID controller based rate control algorithm" if EMBEDDED
+ select MAC80211_HAS_RC
---help---
This option enables a TX rate control algorithm for
mac80211 that uses a PID controller to select the TX
@@ -24,12 +28,14 @@ config MAC80211_RC_PID

config MAC80211_RC_MINSTREL
bool "Minstrel" if EMBEDDED
+ select MAC80211_HAS_RC
default y
---help---
This option enables the 'minstrel' TX rate control algorithm

choice
prompt "Default rate control algorithm"
+ depends on MAC80211_HAS_RC
default MAC80211_RC_DEFAULT_MINSTREL
---help---
This option selects the default rate control algorithm
@@ -62,6 +68,9 @@ config MAC80211_RC_DEFAULT

endif

+comment "Some wireless drivers require a rate control algorithm"
+ depends on MAC80211_HAS_RC=n
+
config MAC80211_MESH
bool "Enable mac80211 mesh networking (pre-802.11s) support"
depends on MAC80211 && EXPERIMENTAL
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index edc872e..0d1811b 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -97,9 +97,6 @@ static int ieee80211_change_iface(struct wiphy *wiphy,
params->mesh_id_len,
params->mesh_id);

- if (sdata->vif.type != NL80211_IFTYPE_MONITOR || !flags)
- return 0;
-
if (type == NL80211_IFTYPE_AP_VLAN &&
params && params->use_4addr == 0)
rcu_assign_pointer(sdata->u.vlan.sta, NULL);
@@ -107,7 +104,9 @@ static int ieee80211_change_iface(struct wiphy *wiphy,
params && params->use_4addr >= 0)
sdata->u.mgd.use_4addr = params->use_4addr;

- sdata->u.mntr_flags = *flags;
+ if (sdata->vif.type == NL80211_IFTYPE_MONITOR && flags)
+ sdata->u.mntr_flags = *flags;
+
return 0;
}

diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 875c8de..1349a09 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1530,9 +1530,45 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
mutex_unlock(&ifmgd->mtx);

if (skb->len >= 24 + 2 /* mgmt + deauth reason */ &&
- (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_DEAUTH)
- cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len);
+ (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_DEAUTH) {
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_work *wk;
+
+ mutex_lock(&local->work_mtx);
+ list_for_each_entry(wk, &local->work_list, list) {
+ if (wk->sdata != sdata)
+ continue;
+
+ if (wk->type != IEEE80211_WORK_ASSOC)
+ continue;
+
+ if (memcmp(mgmt->bssid, wk->filter_ta, ETH_ALEN))
+ continue;
+ if (memcmp(mgmt->sa, wk->filter_ta, ETH_ALEN))
+ continue;

+ /*
+ * Printing the message only here means we can't
+ * spuriously print it, but it also means that it
+ * won't be printed when the frame comes in before
+ * we even tried to associate or in similar cases.
+ *
+ * Ultimately, I suspect cfg80211 should print the
+ * messages instead.
+ */
+ printk(KERN_DEBUG
+ "%s: deauthenticated from %pM (Reason: %u)\n",
+ sdata->name, mgmt->bssid,
+ le16_to_cpu(mgmt->u.deauth.reason_code));
+
+ list_del_rcu(&wk->list);
+ free_work(wk);
+ break;
+ }
+ mutex_unlock(&local->work_mtx);
+
+ cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len);
+ }
out:
kfree_skb(skb);
}
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 04ea07f..1946f6b 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1414,7 +1414,8 @@ ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
return res;

if (rx->sta && test_sta_flags(rx->sta, WLAN_STA_MFP)) {
- if (unlikely(ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
+ if (unlikely(!ieee80211_has_protected(fc) &&
+ ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
rx->key))
return -EACCES;
/* BIP does not use Protected field, so need to check MMIE */
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index cfc473e..d0716b9 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -584,7 +584,8 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
struct ieee80211_hdr *hdr = (void *)tx->skb->data;
struct ieee80211_supported_band *sband;
struct ieee80211_rate *rate;
- int i, len;
+ int i;
+ u32 len;
bool inval = false, rts = false, short_preamble = false;
struct ieee80211_tx_rate_control txrc;
u32 sta_flags;
@@ -593,7 +594,7 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)

sband = tx->local->hw.wiphy->bands[tx->channel->band];

- len = min_t(int, tx->skb->len + FCS_LEN,
+ len = min_t(u32, tx->skb->len + FCS_LEN,
tx->local->hw.wiphy->frag_threshold);

/* set up the tx rate control struct we give the RC algo */
diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile
index 186c466..9842611 100644
--- a/scripts/kconfig/Makefile
+++ b/scripts/kconfig/Makefile
@@ -208,7 +208,7 @@ HOSTCFLAGS_zconf.tab.o := -I$(src)
HOSTLOADLIBES_qconf = $(KC_QT_LIBS) -ldl
HOSTCXXFLAGS_qconf.o = $(KC_QT_CFLAGS) -D LKC_DIRECT_LINK

-HOSTLOADLIBES_gconf = `pkg-config --libs gtk+-2.0 gmodule-2.0 libglade-2.0`
+HOSTLOADLIBES_gconf = `pkg-config --libs gtk+-2.0 gmodule-2.0 libglade-2.0` -ldl
HOSTCFLAGS_gconf.o = `pkg-config --cflags gtk+-2.0 gmodule-2.0 libglade-2.0` \
-D LKC_DIRECT_LINK

diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index a2ff861..e9d98be 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -345,7 +345,9 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
new_hw_ptr = hw_base + pos;
}
__delta:
- delta = (new_hw_ptr - old_hw_ptr) % runtime->boundary;
+ delta = new_hw_ptr - old_hw_ptr;
+ if (delta < 0)
+ delta += runtime->boundary;
if (xrun_debug(substream, in_interrupt ?
XRUN_DEBUG_PERIODUPDATE : XRUN_DEBUG_HWPTRUPDATE)) {
char name[16];
@@ -439,8 +441,13 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
snd_pcm_playback_silence(substream, new_hw_ptr);

if (in_interrupt) {
- runtime->hw_ptr_interrupt = new_hw_ptr -
- (new_hw_ptr % runtime->period_size);
+ delta = new_hw_ptr - runtime->hw_ptr_interrupt;
+ if (delta < 0)
+ delta += runtime->boundary;
+ delta -= (snd_pcm_uframes_t)delta % runtime->period_size;
+ runtime->hw_ptr_interrupt += delta;
+ if (runtime->hw_ptr_interrupt >= runtime->boundary)
+ runtime->hw_ptr_interrupt -= runtime->boundary;
}
runtime->hw_ptr_base = hw_base;
runtime->status->hw_ptr = new_hw_ptr;
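
Both pcm_lib.c hunks above replace a modulo with an explicit wrap-around correction: compute the signed difference and, if it went negative because the new pointer wrapped past the boundary, add the boundary back, then (for the interrupt pointer) round down to whole periods. The arithmetic in isolation, with made-up frame counts; frames_t and ring_delta() are illustrative:

#include <stdio.h>

typedef long frames_t;	/* illustrative signed frame count */

/* Distance from old_ptr forward to new_ptr on a ring of size boundary. */
static frames_t ring_delta(frames_t new_ptr, frames_t old_ptr, frames_t boundary)
{
	frames_t delta = new_ptr - old_ptr;

	if (delta < 0)
		delta += boundary;	/* new_ptr already wrapped around */
	return delta;
}

int main(void)
{
	const frames_t boundary = 1024, period = 64;
	frames_t delta = ring_delta(16, 1000, boundary);	/* 40 frames */

	delta -= delta % period;	/* keep whole periods only: 0 here */
	printf("advance by %ld frames\n", delta);
	return 0;
}
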
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 20b5982..d124e95 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -27,7 +27,6 @@
#include <linux/pm_qos_params.h>
#include <linux/uio.h>
#include <linux/dma-mapping.h>
-#include <linux/math64.h>
#include <sound/core.h>
#include <sound/control.h>
#include <sound/info.h>
@@ -370,38 +369,6 @@ static int period_to_usecs(struct snd_pcm_runtime *runtime)
return usecs;
}

-static int calc_boundary(struct snd_pcm_runtime *runtime)
-{
- u_int64_t boundary;
-
- boundary = (u_int64_t)runtime->buffer_size *
- (u_int64_t)runtime->period_size;
-#if BITS_PER_LONG < 64
- /* try to find lowest common multiple for buffer and period */
- if (boundary > LONG_MAX - runtime->buffer_size) {
- u_int32_t remainder = -1;
- u_int32_t divident = runtime->buffer_size;
- u_int32_t divisor = runtime->period_size;
- while (remainder) {
- remainder = divident % divisor;
- if (remainder) {
- divident = divisor;
- divisor = remainder;
- }
- }
- boundary = div_u64(boundary, divisor);
- if (boundary > LONG_MAX - runtime->buffer_size)
- return -ERANGE;
- }
-#endif
- if (boundary == 0)
- return -ERANGE;
- runtime->boundary = boundary;
- while (runtime->boundary * 2 <= LONG_MAX - runtime->buffer_size)
- runtime->boundary *= 2;
- return 0;
-}
-
static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
@@ -477,9 +444,9 @@ static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
runtime->stop_threshold = runtime->buffer_size;
runtime->silence_threshold = 0;
runtime->silence_size = 0;
- err = calc_boundary(runtime);
- if (err < 0)
- goto _error;
+ runtime->boundary = runtime->buffer_size;
+ while (runtime->boundary * 2 <= LONG_MAX - runtime->buffer_size)
+ runtime->boundary *= 2;

snd_pcm_timer_resolution_change(substream);
runtime->status->state = SNDRV_PCM_STATE_SETUP;
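
With calc_boundary() removed, the boundary above is again just the buffer size doubled for as long as doubling still leaves a buffer-size worth of headroom below LONG_MAX. The loop on its own; calc_boundary_sketch() is an illustrative name, not the driver's helper:

#include <limits.h>
#include <stdio.h>

/* Largest repeated doubling of buffer_size that keeps buffer_size headroom. */
static unsigned long calc_boundary_sketch(unsigned long buffer_size)
{
	unsigned long boundary = buffer_size;

	while (boundary * 2 <= LONG_MAX - buffer_size)
		boundary *= 2;
	return boundary;
}

int main(void)
{
	printf("%lu\n", calc_boundary_sketch(4096));	/* a huge multiple of 4096 */
	return 0;
}
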
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index cec6815..0bc24bd 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2263,16 +2263,23 @@ static int azx_dev_free(struct snd_device *device)
* white/black-listing for position_fix
*/
static struct snd_pci_quirk position_fix_list[] __devinitdata = {
+ SND_PCI_QUIRK(0x1025, 0x009f, "Acer Aspire 5110", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1028, 0x01cc, "Dell D820", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1028, 0x01de, "Dell Precision 390", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1028, 0x01f6, "Dell Latitude 131L", POS_FIX_LPIB),
SND_PCI_QUIRK(0x103c, 0x306d, "HP dv3", POS_FIX_LPIB),
- SND_PCI_QUIRK(0x1106, 0x3288, "ASUS M2V-MX SE", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1043, 0x813d, "ASUS P5AD2", POS_FIX_LPIB),
+ SND_PCI_QUIRK(0x1043, 0x81b3, "ASUS", POS_FIX_LPIB),
+ SND_PCI_QUIRK(0x1043, 0x81e7, "ASUS M2V", POS_FIX_LPIB),
+ SND_PCI_QUIRK(0x104d, 0x9069, "Sony VPCS11V9E", POS_FIX_LPIB),
+ SND_PCI_QUIRK(0x1106, 0x3288, "ASUS M2V-MX SE", POS_FIX_LPIB),
+ SND_PCI_QUIRK(0x1179, 0xff10, "Toshiba A100-259", POS_FIX_LPIB),
+ SND_PCI_QUIRK(0x1297, 0x3166, "Shuttle", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1458, 0xa022, "ga-ma770-ud3", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1462, 0x1002, "MSI Wind U115", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1565, 0x820f, "Biostar Microtech", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1565, 0x8218, "Biostar Microtech", POS_FIX_LPIB),
+ SND_PCI_QUIRK(0x1849, 0x0888, "775Dual-VSTA", POS_FIX_LPIB),
SND_PCI_QUIRK(0x8086, 0x2503, "DG965OT AAD63733-203", POS_FIX_LPIB),
SND_PCI_QUIRK(0x8086, 0xd601, "eMachines T5212", POS_FIX_LPIB),
{}
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 886d8e4..3273765 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -9392,6 +9392,7 @@ static struct snd_pci_quirk alc882_ssid_cfg_tbl[] = {
SND_PCI_QUIRK(0x106b, 0x1000, "iMac 24", ALC885_IMAC24),
SND_PCI_QUIRK(0x106b, 0x2800, "AppleTV", ALC885_IMAC24),
SND_PCI_QUIRK(0x106b, 0x2c00, "MacbookPro rev3", ALC885_MBP3),
+ SND_PCI_QUIRK(0x106b, 0x3000, "iMac", ALC889A_MB31),
SND_PCI_QUIRK(0x106b, 0x3600, "Macbook 3,1", ALC889A_MB31),
SND_PCI_QUIRK(0x106b, 0x3800, "MacbookPro 4,1", ALC885_MBP3),
SND_PCI_QUIRK(0x106b, 0x3e00, "iMac 24 Aluminum", ALC885_IMAC24),
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index a0e06d8..f1e7bab 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -2078,12 +2078,12 @@ static struct snd_pci_quirk stac927x_cfg_tbl[] = {
SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_INTEL, 0xff00, 0x2000,
"Intel D965", STAC_D965_3ST),
/* Dell 3 stack systems */
- SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01f7, "Dell XPS M1730", STAC_DELL_3ST),
SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01dd, "Dell Dimension E520", STAC_DELL_3ST),
SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01ed, "Dell ", STAC_DELL_3ST),
SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01f4, "Dell ", STAC_DELL_3ST),
/* Dell 3 stack systems with verb table in BIOS */
SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01f3, "Dell Inspiron 1420", STAC_DELL_BIOS),
+ SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01f7, "Dell XPS M1730", STAC_DELL_BIOS),
SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0227, "Dell Vostro 1400 ", STAC_DELL_BIOS),
SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x022e, "Dell ", STAC_DELL_BIOS),
SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x022f, "Dell Inspiron 1525", STAC_DELL_BIOS),
diff --git a/sound/soc/codecs/wm8350.c b/sound/soc/codecs/wm8350.c
index 2e0772f..1e22141 100644
--- a/sound/soc/codecs/wm8350.c
+++ b/sound/soc/codecs/wm8350.c
@@ -424,8 +424,8 @@ static const struct soc_enum wm8350_enum[] = {
SOC_ENUM_SINGLE(WM8350_INPUT_MIXER_VOLUME, 15, 2, wm8350_lr),
};

-static DECLARE_TLV_DB_LINEAR(pre_amp_tlv, -1200, 3525);
-static DECLARE_TLV_DB_LINEAR(out_pga_tlv, -5700, 600);
+static DECLARE_TLV_DB_SCALE(pre_amp_tlv, -1200, 3525, 0);
+static DECLARE_TLV_DB_SCALE(out_pga_tlv, -5700, 600, 0);
static DECLARE_TLV_DB_SCALE(dac_pcm_tlv, -7163, 36, 1);
static DECLARE_TLV_DB_SCALE(adc_pcm_tlv, -12700, 50, 1);
static DECLARE_TLV_DB_SCALE(out_mix_tlv, -1500, 300, 1);
diff --git a/sound/soc/codecs/wm8400.c b/sound/soc/codecs/wm8400.c
index 6acc885..a9fa46c 100644
--- a/sound/soc/codecs/wm8400.c
+++ b/sound/soc/codecs/wm8400.c
@@ -107,21 +107,21 @@ static void wm8400_codec_reset(struct snd_soc_codec *codec)
wm8400_reset_codec_reg_cache(wm8400->wm8400);
}

-static const DECLARE_TLV_DB_LINEAR(rec_mix_tlv, -1500, 600);
+static const DECLARE_TLV_DB_SCALE(rec_mix_tlv, -1500, 600, 0);

-static const DECLARE_TLV_DB_LINEAR(in_pga_tlv, -1650, 3000);
+static const DECLARE_TLV_DB_SCALE(in_pga_tlv, -1650, 3000, 0);

-static const DECLARE_TLV_DB_LINEAR(out_mix_tlv, -2100, 0);
+static const DECLARE_TLV_DB_SCALE(out_mix_tlv, -2100, 0, 0);

-static const DECLARE_TLV_DB_LINEAR(out_pga_tlv, -7300, 600);
+static const DECLARE_TLV_DB_SCALE(out_pga_tlv, -7300, 600, 0);

-static const DECLARE_TLV_DB_LINEAR(out_omix_tlv, -600, 0);
+static const DECLARE_TLV_DB_SCALE(out_omix_tlv, -600, 0, 0);

-static const DECLARE_TLV_DB_LINEAR(out_dac_tlv, -7163, 0);
+static const DECLARE_TLV_DB_SCALE(out_dac_tlv, -7163, 0, 0);

-static const DECLARE_TLV_DB_LINEAR(in_adc_tlv, -7163, 1763);
+static const DECLARE_TLV_DB_SCALE(in_adc_tlv, -7163, 1763, 0);

-static const DECLARE_TLV_DB_LINEAR(out_sidetone_tlv, -3600, 0);
+static const DECLARE_TLV_DB_SCALE(out_sidetone_tlv, -3600, 0, 0);

static int wm8400_outpga_put_volsw_vu(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
@@ -440,7 +440,7 @@ static int outmixer_event (struct snd_soc_dapm_widget *w,
/* INMIX dB values */
static const unsigned int in_mix_tlv[] = {
TLV_DB_RANGE_HEAD(1),
- 0,7, TLV_DB_LINEAR_ITEM(-1200, 600),
+ 0,7, TLV_DB_SCALE_ITEM(-1200, 600, 0),
};

/* Left In PGA Connections */
diff --git a/sound/soc/codecs/wm8990.c b/sound/soc/codecs/wm8990.c
index 831f473..7c6b00e 100644
--- a/sound/soc/codecs/wm8990.c
+++ b/sound/soc/codecs/wm8990.c
@@ -111,21 +111,21 @@ static const u16 wm8990_reg[] = {

#define wm8990_reset(c) snd_soc_write(c, WM8990_RESET, 0)

-static const DECLARE_TLV_DB_LINEAR(rec_mix_tlv, -1500, 600);
+static const DECLARE_TLV_DB_SCALE(rec_mix_tlv, -1500, 600, 0);

-static const DECLARE_TLV_DB_LINEAR(in_pga_tlv, -1650, 3000);
+static const DECLARE_TLV_DB_SCALE(in_pga_tlv, -1650, 3000, 0);

-static const DECLARE_TLV_DB_LINEAR(out_mix_tlv, 0, -2100);
+static const DECLARE_TLV_DB_SCALE(out_mix_tlv, 0, -2100, 0);

-static const DECLARE_TLV_DB_LINEAR(out_pga_tlv, -7300, 600);
+static const DECLARE_TLV_DB_SCALE(out_pga_tlv, -7300, 600, 0);

-static const DECLARE_TLV_DB_LINEAR(out_omix_tlv, -600, 0);
+static const DECLARE_TLV_DB_SCALE(out_omix_tlv, -600, 0, 0);

-static const DECLARE_TLV_DB_LINEAR(out_dac_tlv, -7163, 0);
+static const DECLARE_TLV_DB_SCALE(out_dac_tlv, -7163, 0, 0);

-static const DECLARE_TLV_DB_LINEAR(in_adc_tlv, -7163, 1763);
+static const DECLARE_TLV_DB_SCALE(in_adc_tlv, -7163, 1763, 0);

-static const DECLARE_TLV_DB_LINEAR(out_sidetone_tlv, -3600, 0);
+static const DECLARE_TLV_DB_SCALE(out_sidetone_tlv, -3600, 0, 0);

static int wm899x_outpga_put_volsw_vu(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
@@ -451,7 +451,7 @@ static int outmixer_event(struct snd_soc_dapm_widget *w,
/* INMIX dB values */
static const unsigned int in_mix_tlv[] = {
TLV_DB_RANGE_HEAD(1),
- 0, 7, TLV_DB_LINEAR_ITEM(-1200, 600),
+ 0, 7, TLV_DB_SCALE_ITEM(-1200, 600, 0),
};

/* Left In PGA Connections */
diff --git a/sound/soc/imx/imx-pcm-dma-mx2.c b/sound/soc/imx/imx-pcm-dma-mx2.c
index 2b31ac6..803aeef 100644
--- a/sound/soc/imx/imx-pcm-dma-mx2.c
+++ b/sound/soc/imx/imx-pcm-dma-mx2.c
@@ -73,7 +73,8 @@ static void snd_imx_dma_err_callback(int channel, void *data, int err)
{
struct snd_pcm_substream *substream = data;
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct imx_pcm_dma_params *dma_params = rtd->dai->cpu_dai->dma_data;
+ struct imx_pcm_dma_params *dma_params =
+ snd_soc_dai_get_dma_data(rtd->dai->cpu_dai, substream);
struct snd_pcm_runtime *runtime = substream->runtime;
struct imx_pcm_runtime_data *iprtd = runtime->private_data;
int ret;
@@ -102,7 +103,7 @@ static int imx_ssi_dma_alloc(struct snd_pcm_substream *substream)
struct imx_pcm_runtime_data *iprtd = runtime->private_data;
int ret;

- dma_params = snd_soc_get_dma_data(rtd->dai->cpu_dai, substream);
+ dma_params = snd_soc_dai_get_dma_data(rtd->dai->cpu_dai, substream);

iprtd->dma = imx_dma_request_by_prio(DRV_NAME, DMA_PRIO_HIGH);
if (iprtd->dma < 0) {
@@ -212,7 +213,7 @@ static int snd_imx_pcm_prepare(struct snd_pcm_substream *substream)
struct imx_pcm_runtime_data *iprtd = runtime->private_data;
int err;

- dma_params = snd_soc_get_dma_data(rtd->dai->cpu_dai, substream);
+ dma_params = snd_soc_dai_get_dma_data(rtd->dai->cpu_dai, substream);

iprtd->substream = substream;
iprtd->buf = (unsigned int *)substream->dma_buffer.area;
diff --git a/tools/perf/bench/mem-memcpy.c b/tools/perf/bench/mem-memcpy.c
index 8977317..5fd55cd 100644
--- a/tools/perf/bench/mem-memcpy.c
+++ b/tools/perf/bench/mem-memcpy.c
@@ -24,7 +24,7 @@

static const char *length_str = "1MB";
static const char *routine = "default";
-static int use_clock = 0;
+static bool use_clock = false;
static int clock_fd;

static const struct option options[] = {
diff --git a/tools/perf/bench/sched-messaging.c b/tools/perf/bench/sched-messaging.c
index 81cee78..da1b2e9 100644
--- a/tools/perf/bench/sched-messaging.c
+++ b/tools/perf/bench/sched-messaging.c
@@ -31,9 +31,9 @@

#define DATASIZE 100

-static int use_pipes = 0;
+static bool use_pipes = false;
static unsigned int loops = 100;
-static unsigned int thread_mode = 0;
+static bool thread_mode = false;
static unsigned int num_groups = 10;

struct sender_context {
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index 6ad7148..94a814c 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -29,11 +29,11 @@

static char const *input_name = "perf.data";

-static int force;
+static bool force;

-static int full_paths;
+static bool full_paths;

-static int print_line;
+static bool print_line;

struct sym_hist {
u64 sum;
@@ -584,7 +584,7 @@ static const struct option options[] = {
OPT_STRING('s', "symbol", &sym_hist_filter, "symbol",
"symbol to annotate"),
OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
- OPT_BOOLEAN('v', "verbose", &verbose,
+ OPT_INCR('v', "verbose", &verbose,
"be more verbose (show symbol address, etc)"),
OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
"dump raw trace in ASCII"),
diff --git a/tools/perf/builtin-buildid-cache.c b/tools/perf/builtin-buildid-cache.c
index 30a05f5..f8e3d18 100644
--- a/tools/perf/builtin-buildid-cache.c
+++ b/tools/perf/builtin-buildid-cache.c
@@ -27,7 +27,7 @@ static const struct option buildid_cache_options[] = {
"file list", "file(s) to add"),
OPT_STRING('r', "remove", &remove_name_list_str, "file list",
"file(s) to remove"),
- OPT_BOOLEAN('v', "verbose", &verbose, "be more verbose"),
+ OPT_INCR('v', "verbose", &verbose, "be more verbose"),
OPT_END()
};

diff --git a/tools/perf/builtin-buildid-list.c b/tools/perf/builtin-buildid-list.c
index d0675c0..af2ad8b 100644
--- a/tools/perf/builtin-buildid-list.c
+++ b/tools/perf/builtin-buildid-list.c
@@ -16,7 +16,7 @@
#include "util/symbol.h"

static char const *input_name = "perf.data";
-static int force;
+static bool force;
static bool with_hits;

static const char * const buildid_list_usage[] = {
@@ -29,7 +29,7 @@ static const struct option options[] = {
OPT_STRING('i', "input", &input_name, "file",
"input file name"),
OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
- OPT_BOOLEAN('v', "verbose", &verbose,
+ OPT_INCR('v', "verbose", &verbose,
"be more verbose"),
OPT_END()
};
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index 1ea15d8..3a1d94d 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -19,7 +19,7 @@
static char const *input_old = "perf.data.old",
*input_new = "perf.data";
static char diff__default_sort_order[] = "dso,symbol";
-static int force;
+static bool force;
static bool show_displacement;

static int perf_session__add_hist_entry(struct perf_session *self,
@@ -188,7 +188,7 @@ static const char * const diff_usage[] = {
};

static const struct option options[] = {
- OPT_BOOLEAN('v', "verbose", &verbose,
+ OPT_INCR('v', "verbose", &verbose,
"be more verbose (show symbol address, etc)"),
OPT_BOOLEAN('m', "displacement", &show_displacement,
"Show position displacement relative to baseline"),
diff --git a/tools/perf/builtin-help.c b/tools/perf/builtin-help.c
index 215b584..81e3ecc 100644
--- a/tools/perf/builtin-help.c
+++ b/tools/perf/builtin-help.c
@@ -29,7 +29,7 @@ enum help_format {
HELP_FORMAT_WEB,
};

-static int show_all = 0;
+static bool show_all = false;
static enum help_format help_format = HELP_FORMAT_MAN;
static struct option builtin_help_options[] = {
OPT_BOOLEAN('a', "all", &show_all, "print all available commands"),
diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
index e12c844..6c38e4f 100644
--- a/tools/perf/builtin-lock.c
+++ b/tools/perf/builtin-lock.c
@@ -744,7 +744,7 @@ static const char * const lock_usage[] = {

static const struct option lock_options[] = {
OPT_STRING('i', "input", &input_name, "file", "input file name"),
- OPT_BOOLEAN('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"),
+ OPT_INCR('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"),
OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, "dump raw trace in ASCII"),
OPT_END()
};
diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c
index 152d6c9..0562c50 100644
--- a/tools/perf/builtin-probe.c
+++ b/tools/perf/builtin-probe.c
@@ -162,7 +162,7 @@ static const char * const probe_usage[] = {
};

static const struct option options[] = {
- OPT_BOOLEAN('v', "verbose", &verbose,
+ OPT_INCR('v', "verbose", &verbose,
"be more verbose (show parsed arguments, etc)"),
#ifndef NO_DWARF_SUPPORT
OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index f1411e9..d2d1b02 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -39,19 +39,19 @@ static int output;
static const char *output_name = "perf.data";
static int group = 0;
static unsigned int realtime_prio = 0;
-static int raw_samples = 0;
-static int system_wide = 0;
+static bool raw_samples = false;
+static bool system_wide = false;
static int profile_cpu = -1;
static pid_t target_pid = -1;
static pid_t child_pid = -1;
-static int inherit = 1;
-static int force = 0;
-static int append_file = 0;
-static int call_graph = 0;
-static int inherit_stat = 0;
-static int no_samples = 0;
-static int sample_address = 0;
-static int multiplex = 0;
+static bool inherit = true;
+static bool force = false;
+static bool append_file = false;
+static bool call_graph = false;
+static bool inherit_stat = false;
+static bool no_samples = false;
+static bool sample_address = false;
+static bool multiplex = false;
static int multiplex_fd = -1;

static long samples = 0;
@@ -451,7 +451,7 @@ static int __cmd_record(int argc, const char **argv)
rename(output_name, oldname);
}
} else {
- append_file = 0;
+ append_file = false;
}

flags = O_CREAT|O_RDWR;
@@ -676,7 +676,7 @@ static const struct option options[] = {
"number of mmap data pages"),
OPT_BOOLEAN('g', "call-graph", &call_graph,
"do call-graph (stack chain/backtrace) recording"),
- OPT_BOOLEAN('v', "verbose", &verbose,
+ OPT_INCR('v', "verbose", &verbose,
"be more verbose (show counter open errors, etc)"),
OPT_BOOLEAN('s', "stat", &inherit_stat,
"per thread counts"),
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index f815de2..63fc64e 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -33,11 +33,11 @@

static char const *input_name = "perf.data";

-static int force;
+static bool force;
static bool hide_unresolved;
static bool dont_use_callchains;

-static int show_threads;
+static bool show_threads;
static struct perf_read_values show_threads_values;

static char default_pretty_printing_style[] = "normal";
@@ -400,7 +400,7 @@ static const char * const report_usage[] = {
static const struct option options[] = {
OPT_STRING('i', "input", &input_name, "file",
"input file name"),
- OPT_BOOLEAN('v', "verbose", &verbose,
+ OPT_INCR('v', "verbose", &verbose,
"be more verbose (show symbol address, etc)"),
OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
"dump raw trace in ASCII"),
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 4f5a03e..682783c 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -1790,7 +1790,7 @@ static const char * const sched_usage[] = {
static const struct option sched_options[] = {
OPT_STRING('i', "input", &input_name, "file",
"input file name"),
- OPT_BOOLEAN('v', "verbose", &verbose,
+ OPT_INCR('v', "verbose", &verbose,
"be more verbose (show symbol address, etc)"),
OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
"dump raw trace in ASCII"),
@@ -1805,7 +1805,7 @@ static const char * const latency_usage[] = {
static const struct option latency_options[] = {
OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
"sort by key(s): runtime, switch, avg, max"),
- OPT_BOOLEAN('v', "verbose", &verbose,
+ OPT_INCR('v', "verbose", &verbose,
"be more verbose (show symbol address, etc)"),
OPT_INTEGER('C', "CPU", &profile_cpu,
"CPU to profile on"),
@@ -1822,7 +1822,7 @@ static const char * const replay_usage[] = {
static const struct option replay_options[] = {
OPT_INTEGER('r', "repeat", &replay_repeat,
"repeat the workload replay N times (-1: infinite)"),
- OPT_BOOLEAN('v', "verbose", &verbose,
+ OPT_INCR('v', "verbose", &verbose,
"be more verbose (show symbol address, etc)"),
OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
"dump raw trace in ASCII"),
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 95db31c..f7f4a88 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -66,16 +66,16 @@ static struct perf_event_attr default_attrs[] = {

};

-static int system_wide = 0;
+static bool system_wide = false;
static unsigned int nr_cpus = 0;
static int run_idx = 0;

static int run_count = 1;
-static int inherit = 1;
-static int scale = 1;
+static bool inherit = true;
+static bool scale = true;
static pid_t target_pid = -1;
static pid_t child_pid = -1;
-static int null_run = 0;
+static bool null_run = false;

static int fd[MAX_NR_CPUS][MAX_COUNTERS];

@@ -494,7 +494,7 @@ static const struct option options[] = {
"system-wide collection from all CPUs"),
OPT_BOOLEAN('c', "scale", &scale,
"scale/normalize counters"),
- OPT_BOOLEAN('v', "verbose", &verbose,
+ OPT_INCR('v', "verbose", &verbose,
"be more verbose (show counter open errors, etc)"),
OPT_INTEGER('r', "repeat", &run_count,
"repeat command and print average + stddev (max: 100)"),
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
index 0d4d8ff..1d7416d 100644
--- a/tools/perf/builtin-timechart.c
+++ b/tools/perf/builtin-timechart.c
@@ -43,7 +43,7 @@ static u64 turbo_frequency;

static u64 first_time, last_time;

-static int power_only;
+static bool power_only;


struct per_pid;
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 1f52932..4f94b10 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -57,7 +57,7 @@

static int fd[MAX_NR_CPUS][MAX_COUNTERS];

-static int system_wide = 0;
+static bool system_wide = false;

static int default_interval = 0;

@@ -65,18 +65,18 @@ static int count_filter = 5;
static int print_entries;

static int target_pid = -1;
-static int inherit = 0;
+static bool inherit = false;
static int profile_cpu = -1;
static int nr_cpus = 0;
static unsigned int realtime_prio = 0;
-static int group = 0;
+static bool group = false;
static unsigned int page_size;
static unsigned int mmap_pages = 16;
static int freq = 1000; /* 1 KHz */

static int delay_secs = 2;
-static int zero = 0;
-static int dump_symtab = 0;
+static bool zero = false;
+static bool dump_symtab = false;

static bool hide_kernel_symbols = false;
static bool hide_user_symbols = false;
@@ -169,7 +169,7 @@ static void sig_winch_handler(int sig __used)
update_print_entries(&winsize);
}

-static void parse_source(struct sym_entry *syme)
+static int parse_source(struct sym_entry *syme)
{
struct symbol *sym;
struct sym_entry_source *source;
@@ -180,12 +180,21 @@ static void parse_source(struct sym_entry *syme)
u64 len;

if (!syme)
- return;
+ return -1;
+
+ sym = sym_entry__symbol(syme);
+ map = syme->map;
+
+ /*
+ * We can't annotate with just /proc/kallsyms
+ */
+ if (map->dso->origin == DSO__ORIG_KERNEL)
+ return -1;

if (syme->src == NULL) {
syme->src = zalloc(sizeof(*source));
if (syme->src == NULL)
- return;
+ return -1;
pthread_mutex_init(&syme->src->lock, NULL);
}

@@ -195,9 +204,6 @@ static void parse_source(struct sym_entry *syme)
pthread_mutex_lock(&source->lock);
goto out_assign;
}
-
- sym = sym_entry__symbol(syme);
- map = syme->map;
path = map->dso->long_name;

len = sym->end - sym->start;
@@ -209,7 +215,7 @@ static void parse_source(struct sym_entry *syme)

file = popen(command, "r");
if (!file)
- return;
+ return -1;

pthread_mutex_lock(&source->lock);
source->lines_tail = &source->lines;
@@ -245,6 +251,7 @@ static void parse_source(struct sym_entry *syme)
out_assign:
sym_filter_entry = syme;
pthread_mutex_unlock(&source->lock);
+ return 0;
}

static void __zero_source_counters(struct sym_entry *syme)
@@ -839,7 +846,7 @@ static void handle_keypress(int c)
display_weighted = ~display_weighted;
break;
case 'z':
- zero = ~zero;
+ zero = !zero;
break;
default:
break;
@@ -990,7 +997,17 @@ static void event__process_sample(const event_t *self,
if (sym_filter_entry_sched) {
sym_filter_entry = sym_filter_entry_sched;
sym_filter_entry_sched = NULL;
- parse_source(sym_filter_entry);
+ if (parse_source(sym_filter_entry) < 0) {
+ struct symbol *sym = sym_entry__symbol(sym_filter_entry);
+
+ pr_err("Can't annotate %s", sym->name);
+ if (sym_filter_entry->map->dso->origin == DSO__ORIG_KERNEL) {
+ pr_err(": No vmlinux file was found in the path:\n");
+ vmlinux_path__fprintf(stderr);
+ } else
+ pr_err(".\n");
+ exit(1);
+ }
}

syme = symbol__priv(al.sym);
@@ -1296,7 +1313,7 @@ static const struct option options[] = {
"display this many functions"),
OPT_BOOLEAN('U', "hide_user_symbols", &hide_user_symbols,
"hide user symbols"),
- OPT_BOOLEAN('v', "verbose", &verbose,
+ OPT_INCR('v', "verbose", &verbose,
"be more verbose (show counter open errors, etc)"),
OPT_END()
};
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 407041d..8fc50d8 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -505,7 +505,7 @@ static const char * const trace_usage[] = {
static const struct option options[] = {
OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
"dump raw trace in ASCII"),
- OPT_BOOLEAN('v', "verbose", &verbose,
+ OPT_INCR('v', "verbose", &verbose,
"be more verbose (show symbol address, etc)"),
OPT_BOOLEAN('L', "Latency", &latency_format,
"show latency attributes (irqs/preemption disabled, etc)"),
diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c
index 0905600..666b1c2 100644
--- a/tools/perf/util/debug.c
+++ b/tools/perf/util/debug.c
@@ -12,7 +12,7 @@
#include "util.h"

int verbose = 0;
-int dump_trace = 0;
+bool dump_trace = false;

int eprintf(int level, const char *fmt, ...)
{
diff --git a/tools/perf/util/debug.h b/tools/perf/util/debug.h
index c6c24c5..0f7af87 100644
--- a/tools/perf/util/debug.h
+++ b/tools/perf/util/debug.h
@@ -2,10 +2,11 @@
#ifndef __PERF_DEBUG_H
#define __PERF_DEBUG_H

+#include <stdbool.h>
#include "event.h"

extern int verbose;
-extern int dump_trace;
+extern bool dump_trace;

int eprintf(int level,
const char *fmt, ...) __attribute__((format(printf, 2, 3)));
diff --git a/tools/perf/util/parse-options.c b/tools/perf/util/parse-options.c
index efebd5b..bbe646b 100644
--- a/tools/perf/util/parse-options.c
+++ b/tools/perf/util/parse-options.c
@@ -49,6 +49,7 @@ static int get_value(struct parse_opt_ctx_t *p,
break;
/* FALLTHROUGH */
case OPTION_BOOLEAN:
+ case OPTION_INCR:
case OPTION_BIT:
case OPTION_SET_INT:
case OPTION_SET_PTR:
@@ -73,6 +74,10 @@ static int get_value(struct parse_opt_ctx_t *p,
return 0;

case OPTION_BOOLEAN:
+ *(bool *)opt->value = unset ? false : true;
+ return 0;
+
+ case OPTION_INCR:
*(int *)opt->value = unset ? 0 : *(int *)opt->value + 1;
return 0;

@@ -478,6 +483,7 @@ int usage_with_options_internal(const char * const *usagestr,
case OPTION_GROUP:
case OPTION_BIT:
case OPTION_BOOLEAN:
+ case OPTION_INCR:
case OPTION_SET_INT:
case OPTION_SET_PTR:
case OPTION_LONG:
diff --git a/tools/perf/util/parse-options.h b/tools/perf/util/parse-options.h
index 948805a..b2da725 100644
--- a/tools/perf/util/parse-options.h
+++ b/tools/perf/util/parse-options.h
@@ -8,7 +8,8 @@ enum parse_opt_type {
OPTION_GROUP,
/* options with no arguments */
OPTION_BIT,
- OPTION_BOOLEAN, /* _INCR would have been a better name */
+ OPTION_BOOLEAN,
+ OPTION_INCR,
OPTION_SET_INT,
OPTION_SET_PTR,
/* options with arguments (usually) */
@@ -95,6 +96,7 @@ struct option {
#define OPT_GROUP(h) { .type = OPTION_GROUP, .help = (h) }
#define OPT_BIT(s, l, v, h, b) { .type = OPTION_BIT, .short_name = (s), .long_name = (l), .value = (v), .help = (h), .defval = (b) }
#define OPT_BOOLEAN(s, l, v, h) { .type = OPTION_BOOLEAN, .short_name = (s), .long_name = (l), .value = (v), .help = (h) }
+#define OPT_INCR(s, l, v, h) { .type = OPTION_INCR, .short_name = (s), .long_name = (l), .value = (v), .help = (h) }
#define OPT_SET_INT(s, l, v, h, i) { .type = OPTION_SET_INT, .short_name = (s), .long_name = (l), .value = (v), .help = (h), .defval = (i) }
#define OPT_SET_PTR(s, l, v, h, p) { .type = OPTION_SET_PTR, .short_name = (s), .long_name = (l), .value = (v), .help = (h), .defval = (p) }
#define OPT_INTEGER(s, l, v, h) { .type = OPTION_INTEGER, .short_name = (s), .long_name = (l), .value = (v), .help = (h) }
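
The parse-options change above splits the old counting behaviour out of OPTION_BOOLEAN: boolean options now write a real bool, and the new OPTION_INCR keeps the increment-per-occurrence semantics that repeated -v flags rely on. A simplified sketch of the two cases, outside perf's option tables; enum opt_type and apply_opt() are invented for the example:

#include <stdbool.h>
#include <stdio.h>

enum opt_type { OPT_BOOL, OPT_INCR };

/* Apply one occurrence of a no-argument option to its target variable. */
static void apply_opt(enum opt_type type, void *value, bool unset)
{
	switch (type) {
	case OPT_BOOL:
		*(bool *)value = !unset;			/* set or clear */
		break;
	case OPT_INCR:
		*(int *)value = unset ? 0 : *(int *)value + 1;	/* count repeats */
		break;
	}
}

int main(void)
{
	bool force = false;
	int verbose = 0;

	apply_opt(OPT_BOOL, &force, false);	/* -f */
	apply_opt(OPT_INCR, &verbose, false);	/* -v */
	apply_opt(OPT_INCR, &verbose, false);	/* -v again: verbose == 2 */
	printf("force=%d verbose=%d\n", force, verbose);
	return 0;
}
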
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index c458c4a..4acb5d7 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -18,18 +18,6 @@
#define NT_GNU_BUILD_ID 3
#endif

-enum dso_origin {
- DSO__ORIG_KERNEL = 0,
- DSO__ORIG_JAVA_JIT,
- DSO__ORIG_BUILD_ID_CACHE,
- DSO__ORIG_FEDORA,
- DSO__ORIG_UBUNTU,
- DSO__ORIG_BUILDID,
- DSO__ORIG_DSO,
- DSO__ORIG_KMODULE,
- DSO__ORIG_NOT_FOUND,
-};
-
static void dsos__add(struct list_head *head, struct dso *dso);
static struct map *map__new2(u64 start, struct dso *dso, enum map_type type);
static int dso__load_kernel_sym(struct dso *self, struct map *map,
@@ -1025,7 +1013,7 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name,
}
curr_map->map_ip = identity__map_ip;
curr_map->unmap_ip = identity__map_ip;
- curr_dso->origin = DSO__ORIG_KERNEL;
+ curr_dso->origin = self->origin;
map_groups__insert(kmap->kmaps, curr_map);
dsos__add(&dsos__kernel, curr_dso);
dso__set_loaded(curr_dso, map->type);
@@ -1895,6 +1883,17 @@ out_fail:
return -1;
}

+size_t vmlinux_path__fprintf(FILE *fp)
+{
+ int i;
+ size_t printed = 0;
+
+ for (i = 0; i < vmlinux_path__nr_entries; ++i)
+ printed += fprintf(fp, "[%d] %s\n", i, vmlinux_path[i]);
+
+ return printed;
+}
+
static int setup_list(struct strlist **list, const char *list_str,
const char *list_name)
{
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index f30a374..044a4bc 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -150,6 +150,19 @@ size_t dsos__fprintf_buildid(FILE *fp, bool with_hits);

size_t dso__fprintf_buildid(struct dso *self, FILE *fp);
size_t dso__fprintf(struct dso *self, enum map_type type, FILE *fp);
+
+enum dso_origin {
+ DSO__ORIG_KERNEL = 0,
+ DSO__ORIG_JAVA_JIT,
+ DSO__ORIG_BUILD_ID_CACHE,
+ DSO__ORIG_FEDORA,
+ DSO__ORIG_UBUNTU,
+ DSO__ORIG_BUILDID,
+ DSO__ORIG_DSO,
+ DSO__ORIG_KMODULE,
+ DSO__ORIG_NOT_FOUND,
+};
+
char dso__symtab_origin(const struct dso *self);
void dso__set_long_name(struct dso *self, char *name);
void dso__set_build_id(struct dso *self, void *build_id);
@@ -169,4 +182,6 @@ int kallsyms__parse(const char *filename, void *arg,
int symbol__init(void);
bool symbol_type__is_a(char symbol_type, enum map_type map_type);

+size_t vmlinux_path__fprintf(FILE *fp);
+
#endif /* __PERF_SYMBOL */
diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c
index 613c9cc..dc78bae 100644
--- a/tools/perf/util/trace-event-parse.c
+++ b/tools/perf/util/trace-event-parse.c
@@ -40,7 +40,7 @@ int header_page_size_size;
int header_page_data_offset;
int header_page_data_size;

-int latency_format;
+bool latency_format;

static char *input_buf;
static unsigned long long input_buf_ptr;
diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h
index c3269b9..81f2fd2 100644
--- a/tools/perf/util/trace-event.h
+++ b/tools/perf/util/trace-event.h
@@ -1,6 +1,7 @@
#ifndef __PERF_TRACE_EVENTS_H
#define __PERF_TRACE_EVENTS_H

+#include <stdbool.h>
#include "parse-events.h"

#define __unused __attribute__((unused))
@@ -241,7 +242,7 @@ extern int header_page_size_size;
extern int header_page_data_offset;
extern int header_page_data_size;

-extern int latency_format;
+extern bool latency_format;

int parse_header_page(char *buf, unsigned long size);
int trace_parse_common_type(void *data);
--