Re: Linux 3.8.13

From: Greg KH
Date: Sat May 11 2013 - 17:46:13 EST


diff --git a/Makefile b/Makefile
index 902974f..183eff3 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 3
PATCHLEVEL = 8
-SUBLEVEL = 12
+SUBLEVEL = 13
EXTRAVERSION =
NAME = Displaced Humerus Anterior

diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index 7a32976..01b20a2 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -237,7 +237,7 @@ static int __init xen_init_events(void)
xen_init_IRQ();

if (request_percpu_irq(xen_events_irq, xen_arm_callback,
- "events", xen_vcpu)) {
+ "events", &xen_vcpu)) {
pr_err("Error requesting IRQ %d\n", xen_events_irq);
return -EINVAL;
}
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index afadae6..0782eaf 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -148,6 +148,7 @@ void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *regs)
#define VM_FAULT_BADACCESS 0x020000

#define ESR_WRITE (1 << 6)
+#define ESR_CM (1 << 8)
#define ESR_LNX_EXEC (1 << 24)

/*
@@ -206,7 +207,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
struct task_struct *tsk;
struct mm_struct *mm;
int fault, sig, code;
- int write = esr & ESR_WRITE;
+ bool write = (esr & ESR_WRITE) && !(esr & ESR_CM);
unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
(write ? FAULT_FLAG_WRITE : 0);
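
The arm64 hunk above stops cache-maintenance operations, which report the write (WnR) bit together with the new CM bit in the fault syndrome, from being counted as write faults. A minimal userspace sketch of the predicate, assuming the same bit positions as the patch:

#include <stdbool.h>
#include <stdio.h>

#define ESR_WRITE (1 << 6)
#define ESR_CM    (1 << 8)

/* A fault only counts as a write when WnR is set and CM is clear. */
static bool esr_is_write(unsigned int esr)
{
        return (esr & ESR_WRITE) && !(esr & ESR_CM);
}

int main(void)
{
        printf("store fault:  %d\n", esr_is_write(ESR_WRITE));          /* 1 */
        printf("cache maint.: %d\n", esr_is_write(ESR_WRITE | ESR_CM)); /* 0 */
        return 0;
}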

diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 51fb00a..4f440a65 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -112,6 +112,10 @@
#define PPC_INST_MFSPR_DSCR_MASK 0xfc1fffff
#define PPC_INST_MTSPR_DSCR 0x7c1103a6
#define PPC_INST_MTSPR_DSCR_MASK 0xfc1fffff
+#define PPC_INST_MFSPR_DSCR_USER 0x7c0302a6
+#define PPC_INST_MFSPR_DSCR_USER_MASK 0xfc1fffff
+#define PPC_INST_MTSPR_DSCR_USER 0x7c0303a6
+#define PPC_INST_MTSPR_DSCR_USER_MASK 0xfc1fffff
#define PPC_INST_SLBFEE 0x7c0007a7

#define PPC_INST_STRING 0x7c00042a
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 3251840..6686794 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -961,7 +961,10 @@ static int emulate_instruction(struct pt_regs *regs)

#ifdef CONFIG_PPC64
/* Emulate the mfspr rD, DSCR. */
- if (((instword & PPC_INST_MFSPR_DSCR_MASK) == PPC_INST_MFSPR_DSCR) &&
+ if ((((instword & PPC_INST_MFSPR_DSCR_USER_MASK) ==
+ PPC_INST_MFSPR_DSCR_USER) ||
+ ((instword & PPC_INST_MFSPR_DSCR_MASK) ==
+ PPC_INST_MFSPR_DSCR)) &&
cpu_has_feature(CPU_FTR_DSCR)) {
PPC_WARN_EMULATED(mfdscr, regs);
rd = (instword >> 21) & 0x1f;
@@ -969,7 +972,10 @@ static int emulate_instruction(struct pt_regs *regs)
return 0;
}
/* Emulate the mtspr DSCR, rD. */
- if (((instword & PPC_INST_MTSPR_DSCR_MASK) == PPC_INST_MTSPR_DSCR) &&
+ if ((((instword & PPC_INST_MTSPR_DSCR_USER_MASK) ==
+ PPC_INST_MTSPR_DSCR_USER) ||
+ ((instword & PPC_INST_MTSPR_DSCR_MASK) ==
+ PPC_INST_MTSPR_DSCR)) &&
cpu_has_feature(CPU_FTR_DSCR)) {
PPC_WARN_EMULATED(mtdscr, regs);
rd = (instword >> 21) & 0x1f;
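
The DSCR is reachable through two SPR numbers (a privileged one and a problem-state mirror), which encode to different instruction words, so the emulation above has to accept either encoding under its mask. A standalone sketch of the mask-and-compare test, using the constants from the patch:

#include <stdbool.h>
#include <stdint.h>

#define PPC_INST_MFSPR_DSCR           0x7c1102a6
#define PPC_INST_MFSPR_DSCR_MASK      0xfc1fffff
#define PPC_INST_MFSPR_DSCR_USER      0x7c0302a6
#define PPC_INST_MFSPR_DSCR_USER_MASK 0xfc1fffff

/* True if the instruction word reads the DSCR via either SPR number;
 * the mask zeroes the rD field so any destination register matches. */
static bool is_mfspr_dscr(uint32_t instword)
{
        return ((instword & PPC_INST_MFSPR_DSCR_USER_MASK) ==
                        PPC_INST_MFSPR_DSCR_USER) ||
               ((instword & PPC_INST_MFSPR_DSCR_MASK) ==
                        PPC_INST_MFSPR_DSCR);
}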
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index bba87ca..6a252c4 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -201,7 +201,7 @@ int __node_distance(int a, int b)
int distance = LOCAL_DISTANCE;

if (!form1_affinity)
- return distance;
+ return ((a == b) ? LOCAL_DISTANCE : REMOTE_DISTANCE);

for (i = 0; i < distance_ref_points_depth; i++) {
if (distance_lookup_table[a][i] == distance_lookup_table[b][i])
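
Without form1 affinity data the lookup table is not valid, but the code can still tell a node apart from itself; that is all the fixed return value does. The fallback in isolation, with the conventional distance values (an assumption in this sketch):

#define LOCAL_DISTANCE  10
#define REMOTE_DISTANCE 20

/* Without affinity data, report only "same node" vs "other node". */
static int node_distance_fallback(int a, int b)
{
        return (a == b) ? LOCAL_DISTANCE : REMOTE_DISTANCE;
}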
diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
index da02e9c..d978353 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
@@ -310,7 +310,7 @@ void intel_pmu_lbr_read(void)
* - in case there is no HW filter
* - in case the HW filter has errata or limitations
*/
-static void intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
+static int intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
{
u64 br_type = event->attr.branch_sample_type;
int mask = 0;
@@ -318,8 +318,11 @@ static void intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
if (br_type & PERF_SAMPLE_BRANCH_USER)
mask |= X86_BR_USER;

- if (br_type & PERF_SAMPLE_BRANCH_KERNEL)
+ if (br_type & PERF_SAMPLE_BRANCH_KERNEL) {
+ if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
+ return -EACCES;
mask |= X86_BR_KERNEL;
+ }

/* we ignore BRANCH_HV here */

@@ -339,6 +342,8 @@ static void intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
* be used by fixup code for some CPU
*/
event->hw.branch_reg.reg = mask;
+
+ return 0;
}

/*
@@ -386,7 +391,9 @@ int intel_pmu_setup_lbr_filter(struct perf_event *event)
/*
* setup SW LBR filter
*/
- intel_pmu_setup_sw_lbr_filter(event);
+ ret = intel_pmu_setup_sw_lbr_filter(event);
+ if (ret)
+ return ret;

/*
* setup HW LBR filter, if any
@@ -442,8 +449,18 @@ static int branch_type(unsigned long from, unsigned long to)
return X86_BR_NONE;

addr = buf;
- } else
- addr = (void *)from;
+ } else {
+ /*
+ * The LBR logs any address in the IP, even if the IP just
+ * faulted. This means userspace can control the from address.
+ * Ensure we don't blindly read any address by validating it is
+ * a known text address.
+ */
+ if (kernel_text_address(from))
+ addr = (void *)from;
+ else
+ return X86_BR_NONE;
+ }

/*
* decoder needs to know the ABI especially
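
The hardened path above refuses to dereference a hardware-supplied branch source unless it points at known text. A hedged userspace model of that rule, where is_known_text() and the segment bounds are placeholders for what kernel_text_address() really knows:

#include <stdbool.h>
#include <stddef.h>

/* Assumed text-segment bounds, purely illustrative. */
static unsigned long text_start = 0x1000, text_end = 0x9000;

static bool is_known_text(unsigned long addr)
{
        return addr >= text_start && addr < text_end;
}

/* Only trust the LBR "from" address after validating it; a NULL
 * return corresponds to X86_BR_NONE in the patch. */
static const void *lbr_from_addr(unsigned long from)
{
        return is_known_text(from) ? (const void *)from : NULL;
}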
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index b43200d..3e091f0 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -2428,7 +2428,7 @@ static void __init uncore_types_exit(struct intel_uncore_type **types)
static int __init uncore_type_init(struct intel_uncore_type *type)
{
struct intel_uncore_pmu *pmus;
- struct attribute_group *events_group;
+ struct attribute_group *attr_group;
struct attribute **attrs;
int i, j;

@@ -2455,19 +2455,19 @@ static int __init uncore_type_init(struct intel_uncore_type *type)
while (type->event_descs[i].attr.attr.name)
i++;

- events_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
- sizeof(*events_group), GFP_KERNEL);
- if (!events_group)
+ attr_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
+ sizeof(*attr_group), GFP_KERNEL);
+ if (!attr_group)
goto fail;

- attrs = (struct attribute **)(events_group + 1);
- events_group->name = "events";
- events_group->attrs = attrs;
+ attrs = (struct attribute **)(attr_group + 1);
+ attr_group->name = "events";
+ attr_group->attrs = attrs;

for (j = 0; j < i; j++)
attrs[j] = &type->event_descs[j].attr.attr;

- type->events_group = events_group;
+ type->events_group = attr_group;
}

type->pmu_group = &uncore_pmu_attr_group;
@@ -2853,6 +2853,7 @@ static int __init uncore_cpu_init(void)
msr_uncores = nhm_msr_uncores;
break;
case 42: /* Sandy Bridge */
+ case 58: /* Ivy Bridge */
if (snb_uncore_cbox.num_boxes > max_cores)
snb_uncore_cbox.num_boxes = max_cores;
msr_uncores = snb_msr_uncores;
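
The rename above is cosmetic, but the allocation idiom it touches deserves a note: the attribute group and its NULL-terminated pointer array live in a single allocation, so one kfree() releases both. A userspace sketch with calloc() standing in for kzalloc():

#include <stdlib.h>

struct attribute { const char *name; };
struct attribute_group {
        const char *name;
        struct attribute **attrs;
};

/* The pointer array starts immediately after the group struct;
 * calloc() zeroes it, which also provides the NULL terminator. */
static struct attribute_group *alloc_events_group(size_t n)
{
        struct attribute_group *grp;

        grp = calloc(1, sizeof(*grp) + (n + 1) * sizeof(struct attribute *));
        if (!grp)
                return NULL;
        grp->name = "events";
        grp->attrs = (struct attribute **)(grp + 1);
        return grp;
}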
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index d7aea41..7d7a36d 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -45,11 +45,15 @@ static void __init find_early_table_space(struct map_range *mr, int nr_range)
int i;
unsigned long puds = 0, pmds = 0, ptes = 0, tables;
unsigned long start = 0, good_end;
+ unsigned long pgd_extra = 0;
phys_addr_t base;

for (i = 0; i < nr_range; i++) {
unsigned long range, extra;

+ if ((mr[i].end >> PGDIR_SHIFT) - (mr[i].start >> PGDIR_SHIFT))
+ pgd_extra++;
+
range = mr[i].end - mr[i].start;
puds += (range + PUD_SIZE - 1) >> PUD_SHIFT;

@@ -74,6 +78,7 @@ static void __init find_early_table_space(struct map_range *mr, int nr_range)
tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);
+ tables += (pgd_extra * PAGE_SIZE);

#ifdef CONFIG_X86_32
/* for fixmap */
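
The accounting fix above reserves one extra page-table page for every range that straddles a PGDIR boundary, since such a range touches two top-level entries. The counting loop in isolation, assuming x86-64 4-level paging for the shift:

#define PGDIR_SHIFT 39  /* x86-64 with 4-level paging; an assumption here */

struct map_range { unsigned long start, end; };

/* A range needs an extra page whenever its start and end fall under
 * different PGD entries. */
static unsigned long count_pgd_extra(const struct map_range *mr, int nr)
{
        unsigned long extra = 0;
        int i;

        for (i = 0; i < nr; i++)
                if ((mr[i].end >> PGDIR_SHIFT) - (mr[i].start >> PGDIR_SHIFT))
                        extra++;
        return extra;
}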
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 0ca1ca7..c9303ed 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -330,17 +330,17 @@ static struct device_attribute *dynamic_csrow_dimm_attr[] = {
};

/* possible dynamic channel ce_count attribute files */
-DEVICE_CHANNEL(ch0_ce_count, S_IRUGO | S_IWUSR,
+DEVICE_CHANNEL(ch0_ce_count, S_IRUGO,
channel_ce_count_show, NULL, 0);
-DEVICE_CHANNEL(ch1_ce_count, S_IRUGO | S_IWUSR,
+DEVICE_CHANNEL(ch1_ce_count, S_IRUGO,
channel_ce_count_show, NULL, 1);
-DEVICE_CHANNEL(ch2_ce_count, S_IRUGO | S_IWUSR,
+DEVICE_CHANNEL(ch2_ce_count, S_IRUGO,
channel_ce_count_show, NULL, 2);
-DEVICE_CHANNEL(ch3_ce_count, S_IRUGO | S_IWUSR,
+DEVICE_CHANNEL(ch3_ce_count, S_IRUGO,
channel_ce_count_show, NULL, 3);
-DEVICE_CHANNEL(ch4_ce_count, S_IRUGO | S_IWUSR,
+DEVICE_CHANNEL(ch4_ce_count, S_IRUGO,
channel_ce_count_show, NULL, 4);
-DEVICE_CHANNEL(ch5_ce_count, S_IRUGO | S_IWUSR,
+DEVICE_CHANNEL(ch5_ce_count, S_IRUGO,
channel_ce_count_show, NULL, 5);

/* Total possible dynamic ce_count attribute file table */
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 5ccf984..cac9c9a 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -239,6 +239,8 @@ struct ast_fbdev {
void *sysram;
int size;
struct ttm_bo_kmap_obj mapping;
+ int x1, y1, x2, y2; /* dirty rect */
+ spinlock_t dirty_lock;
};

#define to_ast_crtc(x) container_of(x, struct ast_crtc, base)
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
index d9ec779..9138678 100644
--- a/drivers/gpu/drm/ast/ast_fb.c
+++ b/drivers/gpu/drm/ast/ast_fb.c
@@ -52,16 +52,52 @@ static void ast_dirty_update(struct ast_fbdev *afbdev,
int bpp = (afbdev->afb.base.bits_per_pixel + 7)/8;
int ret;
bool unmap = false;
+ bool store_for_later = false;
+ int x2, y2;
+ unsigned long flags;

obj = afbdev->afb.obj;
bo = gem_to_ast_bo(obj);

+ /*
+ * try and reserve the BO, if we fail with busy
+ * then the BO is being moved and we should
+ * store up the damage until later.
+ */
ret = ast_bo_reserve(bo, true);
if (ret) {
- DRM_ERROR("failed to reserve fb bo\n");
+ if (ret != -EBUSY)
+ return;
+
+ store_for_later = true;
+ }
+
+ x2 = x + width - 1;
+ y2 = y + height - 1;
+ spin_lock_irqsave(&afbdev->dirty_lock, flags);
+
+ if (afbdev->y1 < y)
+ y = afbdev->y1;
+ if (afbdev->y2 > y2)
+ y2 = afbdev->y2;
+ if (afbdev->x1 < x)
+ x = afbdev->x1;
+ if (afbdev->x2 > x2)
+ x2 = afbdev->x2;
+
+ if (store_for_later) {
+ afbdev->x1 = x;
+ afbdev->x2 = x2;
+ afbdev->y1 = y;
+ afbdev->y2 = y2;
+ spin_unlock_irqrestore(&afbdev->dirty_lock, flags);
return;
}

+ afbdev->x1 = afbdev->y1 = INT_MAX;
+ afbdev->x2 = afbdev->y2 = 0;
+ spin_unlock_irqrestore(&afbdev->dirty_lock, flags);
+
if (!bo->kmap.virtual) {
ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
if (ret) {
@@ -71,10 +107,10 @@ static void ast_dirty_update(struct ast_fbdev *afbdev,
}
unmap = true;
}
- for (i = y; i < y + height; i++) {
+ for (i = y; i <= y2; i++) {
/* assume equal stride for now */
src_offset = dst_offset = i * afbdev->afb.base.pitches[0] + (x * bpp);
- memcpy_toio(bo->kmap.virtual + src_offset, afbdev->sysram + src_offset, width * bpp);
+ memcpy_toio(bo->kmap.virtual + src_offset, afbdev->sysram + src_offset, (x2 - x + 1) * bpp);

}
if (unmap)
@@ -305,6 +341,7 @@ int ast_fbdev_init(struct drm_device *dev)

ast->fbdev = afbdev;
afbdev->helper.funcs = &ast_fb_helper_funcs;
+ spin_lock_init(&afbdev->dirty_lock);
ret = drm_fb_helper_init(dev, &afbdev->helper,
1, 1);
if (ret) {
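
The same dirty-rectangle scheme recurs below for cirrus and mgag200: when the BO is busy the damage is only accumulated, and on a successful reserve the stored rectangle is merged in, flushed, and reset to empty (x1/y1 at INT_MAX, x2/y2 at 0). A minimal sketch of the merge step, with the driver's dirty_lock omitted:

#include <limits.h>
#include <stdbool.h>

struct dirty_rect { int x1, y1, x2, y2; };

/* Merge the stored damage into the incoming rect; either store the
 * union for later (BO busy) or hand it back and reset to empty. */
static bool merge_damage(struct dirty_rect *acc, int *x, int *y,
                         int *x2, int *y2, bool store_for_later)
{
        if (acc->y1 < *y)  *y  = acc->y1;
        if (acc->y2 > *y2) *y2 = acc->y2;
        if (acc->x1 < *x)  *x  = acc->x1;
        if (acc->x2 > *x2) *x2 = acc->x2;

        if (store_for_later) {
                acc->x1 = *x;  acc->x2 = *x2;
                acc->y1 = *y;  acc->y2 = *y2;
                return false;           /* nothing to copy yet */
        }

        acc->x1 = acc->y1 = INT_MAX;
        acc->x2 = acc->y2 = 0;
        return true;                    /* caller copies rows y..y2 */
}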
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index 3602731..09da339 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -316,7 +316,7 @@ int ast_bo_reserve(struct ast_bo *bo, bool no_wait)

ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
if (ret) {
- if (ret != -ERESTARTSYS)
+ if (ret != -ERESTARTSYS && ret != -EBUSY)
DRM_ERROR("reserve failed %p\n", bo);
return ret;
}
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index 6e0cc72..7ca0595 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -154,6 +154,8 @@ struct cirrus_fbdev {
struct list_head fbdev_list;
void *sysram;
int size;
+ int x1, y1, x2, y2; /* dirty rect */
+ spinlock_t dirty_lock;
};

struct cirrus_bo {
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
index 6c6b4c8..1e64d6f 100644
--- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
@@ -26,16 +26,51 @@ static void cirrus_dirty_update(struct cirrus_fbdev *afbdev,
int bpp = (afbdev->gfb.base.bits_per_pixel + 7)/8;
int ret;
bool unmap = false;
+ bool store_for_later = false;
+ int x2, y2;
+ unsigned long flags;

obj = afbdev->gfb.obj;
bo = gem_to_cirrus_bo(obj);

+ /*
+ * try and reserve the BO, if we fail with busy
+ * then the BO is being moved and we should
+ * store up the damage until later.
+ */
ret = cirrus_bo_reserve(bo, true);
if (ret) {
- DRM_ERROR("failed to reserve fb bo\n");
+ if (ret != -EBUSY)
+ return;
+ store_for_later = true;
+ }
+
+ x2 = x + width - 1;
+ y2 = y + height - 1;
+ spin_lock_irqsave(&afbdev->dirty_lock, flags);
+
+ if (afbdev->y1 < y)
+ y = afbdev->y1;
+ if (afbdev->y2 > y2)
+ y2 = afbdev->y2;
+ if (afbdev->x1 < x)
+ x = afbdev->x1;
+ if (afbdev->x2 > x2)
+ x2 = afbdev->x2;
+
+ if (store_for_later) {
+ afbdev->x1 = x;
+ afbdev->x2 = x2;
+ afbdev->y1 = y;
+ afbdev->y2 = y2;
+ spin_unlock_irqrestore(&afbdev->dirty_lock, flags);
return;
}

+ afbdev->x1 = afbdev->y1 = INT_MAX;
+ afbdev->x2 = afbdev->y2 = 0;
+ spin_unlock_irqrestore(&afbdev->dirty_lock, flags);
+
if (!bo->kmap.virtual) {
ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
if (ret) {
@@ -282,6 +317,7 @@ int cirrus_fbdev_init(struct cirrus_device *cdev)

cdev->mode_info.gfbdev = gfbdev;
gfbdev->helper.funcs = &cirrus_fb_helper_funcs;
+ spin_lock_init(&gfbdev->dirty_lock);

ret = drm_fb_helper_init(cdev->dev, &gfbdev->helper,
cdev->num_crtc, CIRRUSFB_CONN_LIMIT);
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index 1413a26..2ed8cfc 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -321,7 +321,7 @@ int cirrus_bo_reserve(struct cirrus_bo *bo, bool no_wait)

ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
if (ret) {
- if (ret != -ERESTARTSYS)
+ if (ret != -ERESTARTSYS && ret != -EBUSY)
DRM_ERROR("reserve failed %p\n", bo);
return ret;
}
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 24efae4..539bae9 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -205,11 +205,11 @@ static void
drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
{
if (obj->import_attach) {
- drm_prime_remove_imported_buf_handle(&filp->prime,
+ drm_prime_remove_buf_handle(&filp->prime,
obj->import_attach->dmabuf);
}
if (obj->export_dma_buf) {
- drm_prime_remove_imported_buf_handle(&filp->prime,
+ drm_prime_remove_buf_handle(&filp->prime,
obj->export_dma_buf);
}
}
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 7f12573..4f6439d 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -61,6 +61,7 @@ struct drm_prime_member {
struct dma_buf *dma_buf;
uint32_t handle;
};
+static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle);

int drm_gem_prime_handle_to_fd(struct drm_device *dev,
struct drm_file *file_priv, uint32_t handle, uint32_t flags,
@@ -68,7 +69,8 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev,
{
struct drm_gem_object *obj;
void *buf;
- int ret;
+ int ret = 0;
+ struct dma_buf *dmabuf;

obj = drm_gem_object_lookup(dev, file_priv, handle);
if (!obj)
@@ -77,43 +79,44 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev,
mutex_lock(&file_priv->prime.lock);
/* re-export the original imported object */
if (obj->import_attach) {
- get_dma_buf(obj->import_attach->dmabuf);
- *prime_fd = dma_buf_fd(obj->import_attach->dmabuf, flags);
- drm_gem_object_unreference_unlocked(obj);
- mutex_unlock(&file_priv->prime.lock);
- return 0;
+ dmabuf = obj->import_attach->dmabuf;
+ goto out_have_obj;
}

if (obj->export_dma_buf) {
- get_dma_buf(obj->export_dma_buf);
- *prime_fd = dma_buf_fd(obj->export_dma_buf, flags);
- drm_gem_object_unreference_unlocked(obj);
- } else {
- buf = dev->driver->gem_prime_export(dev, obj, flags);
- if (IS_ERR(buf)) {
- /* normally the created dma-buf takes ownership of the ref,
- * but if that fails then drop the ref
- */
- drm_gem_object_unreference_unlocked(obj);
- mutex_unlock(&file_priv->prime.lock);
- return PTR_ERR(buf);
- }
- obj->export_dma_buf = buf;
- *prime_fd = dma_buf_fd(buf, flags);
+ dmabuf = obj->export_dma_buf;
+ goto out_have_obj;
}
+
+ buf = dev->driver->gem_prime_export(dev, obj, flags);
+ if (IS_ERR(buf)) {
+ /* normally the created dma-buf takes ownership of the ref,
+ * but if that fails then drop the ref
+ */
+ ret = PTR_ERR(buf);
+ goto out;
+ }
+ obj->export_dma_buf = buf;
+
/* if we've exported this buffer then cheat and add it to the import list
* so we get the correct handle back
*/
- ret = drm_prime_add_imported_buf_handle(&file_priv->prime,
- obj->export_dma_buf, handle);
- if (ret) {
- drm_gem_object_unreference_unlocked(obj);
- mutex_unlock(&file_priv->prime.lock);
- return ret;
- }
+ ret = drm_prime_add_buf_handle(&file_priv->prime,
+ obj->export_dma_buf, handle);
+ if (ret)
+ goto out;

+ *prime_fd = dma_buf_fd(buf, flags);
mutex_unlock(&file_priv->prime.lock);
return 0;
+
+out_have_obj:
+ get_dma_buf(dmabuf);
+ *prime_fd = dma_buf_fd(dmabuf, flags);
+out:
+ drm_gem_object_unreference_unlocked(obj);
+ mutex_unlock(&file_priv->prime.lock);
+ return ret;
}
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);

@@ -130,7 +133,7 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev,

mutex_lock(&file_priv->prime.lock);

- ret = drm_prime_lookup_imported_buf_handle(&file_priv->prime,
+ ret = drm_prime_lookup_buf_handle(&file_priv->prime,
dma_buf, handle);
if (!ret) {
ret = 0;
@@ -149,7 +152,7 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev,
if (ret)
goto out_put;

- ret = drm_prime_add_imported_buf_handle(&file_priv->prime,
+ ret = drm_prime_add_buf_handle(&file_priv->prime,
dma_buf, *handle);
if (ret)
goto fail;
@@ -307,7 +310,7 @@ void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
}
EXPORT_SYMBOL(drm_prime_destroy_file_private);

-int drm_prime_add_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle)
+static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle)
{
struct drm_prime_member *member;

@@ -315,14 +318,14 @@ int drm_prime_add_imported_buf_handle(struct drm_prime_file_private *prime_fpriv
if (!member)
return -ENOMEM;

+ get_dma_buf(dma_buf);
member->dma_buf = dma_buf;
member->handle = handle;
list_add(&member->entry, &prime_fpriv->head);
return 0;
}
-EXPORT_SYMBOL(drm_prime_add_imported_buf_handle);

-int drm_prime_lookup_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle)
+int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle)
{
struct drm_prime_member *member;

@@ -334,19 +337,20 @@ int drm_prime_lookup_imported_buf_handle(struct drm_prime_file_private *prime_fp
}
return -ENOENT;
}
-EXPORT_SYMBOL(drm_prime_lookup_imported_buf_handle);
+EXPORT_SYMBOL(drm_prime_lookup_buf_handle);

-void drm_prime_remove_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf)
+void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf)
{
struct drm_prime_member *member, *safe;

mutex_lock(&prime_fpriv->lock);
list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) {
if (member->dma_buf == dma_buf) {
+ dma_buf_put(dma_buf);
list_del(&member->entry);
kfree(member);
}
}
mutex_unlock(&prime_fpriv->lock);
}
-EXPORT_SYMBOL(drm_prime_remove_imported_buf_handle);
+EXPORT_SYMBOL(drm_prime_remove_buf_handle);
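
Besides the renames, the rework makes the handle cache own a dma-buf reference: get_dma_buf() on add, dma_buf_put() on remove. A self-contained model of that pairing with a singly linked list (get/put reduced to a plain counter for the sketch):

#include <stdlib.h>

struct dma_buf { int refcount; };

static void get_dma_buf(struct dma_buf *b) { b->refcount++; }
static void dma_buf_put(struct dma_buf *b) { b->refcount--; }

struct prime_member {
        struct prime_member *next;
        struct dma_buf *dma_buf;
        unsigned int handle;
};

/* The cache takes its own reference on add... */
static int prime_add(struct prime_member **head,
                     struct dma_buf *buf, unsigned int handle)
{
        struct prime_member *m = calloc(1, sizeof(*m));

        if (!m)
                return -1;
        get_dma_buf(buf);
        m->dma_buf = buf;
        m->handle = handle;
        m->next = *head;
        *head = m;
        return 0;
}

/* ...and drops it on remove, keeping the count balanced. */
static void prime_remove(struct prime_member **head, struct dma_buf *buf)
{
        struct prime_member **pp = head, *m;

        while ((m = *pp) != NULL) {
                if (m->dma_buf == buf) {
                        dma_buf_put(buf);
                        *pp = m->next;
                        free(m);
                } else {
                        pp = &m->next;
                }
        }
}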
diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
index 8652cdf..029eccf 100644
--- a/drivers/gpu/drm/gma500/psb_irq.c
+++ b/drivers/gpu/drm/gma500/psb_irq.c
@@ -211,7 +211,7 @@ irqreturn_t psb_irq_handler(DRM_IRQ_ARGS)

vdc_stat = PSB_RVDC32(PSB_INT_IDENTITY_R);

- if (vdc_stat & _PSB_PIPE_EVENT_FLAG)
+ if (vdc_stat & (_PSB_PIPE_EVENT_FLAG|_PSB_IRQ_ASLE))
dsp_int = 1;

/* FIXME: Handle Medfield
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 7339a4b..e78419f 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -711,6 +711,7 @@ typedef struct drm_i915_private {
unsigned int int_crt_support:1;
unsigned int lvds_use_ssc:1;
unsigned int display_clock_mode:1;
+ unsigned int fdi_rx_polarity_inverted:1;
int lvds_ssc_freq;
unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
unsigned int lvds_val; /* used for checking LVDS channel mode */
@@ -774,6 +775,7 @@ typedef struct drm_i915_private {
unsigned long gtt_start;
unsigned long gtt_mappable_end;
unsigned long gtt_end;
+ unsigned long stolen_base; /* limited to low memory (32-bit) */

struct io_mapping *gtt_mapping;
phys_addr_t gtt_base_addr;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index de45b60..3b9d18b 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2662,17 +2662,35 @@ static inline int fence_number(struct drm_i915_private *dev_priv,
return fence - dev_priv->fence_regs;
}

+static void i915_gem_write_fence__ipi(void *data)
+{
+ wbinvd();
+}
+
static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
struct drm_i915_fence_reg *fence,
bool enable)
{
- struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
- int reg = fence_number(dev_priv, fence);
-
- i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int fence_reg = fence_number(dev_priv, fence);
+
+ /* In order to fully serialize access to the fenced region and
+ * the update to the fence register we need to take extreme
+ * measures on SNB+. In theory, the write to the fence register
+ * flushes all memory transactions before, and coupled with the
+ * mb() placed around the register write we serialise all memory
+ * operations with respect to the changes in the tiler. Yet, on
+ * SNB+ we need to take a step further and emit an explicit wbinvd()
+ * on each processor in order to manually flush all memory
+ * transactions before updating the fence register.
+ */
+ if (HAS_LLC(obj->base.dev))
+ on_each_cpu(i915_gem_write_fence__ipi, NULL, 1);
+ i915_gem_write_fence(dev, fence_reg, enable ? obj : NULL);

if (enable) {
- obj->fence_reg = reg;
+ obj->fence_reg = fence_reg;
fence->obj = obj;
list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
} else {
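
The key property of on_each_cpu(fn, NULL, 1) used above is the wait flag: the fence register write does not proceed until every CPU has finished flushing. A rough userspace model with threads standing in for CPUs (compile with -pthread; the printf stands in for wbinvd()):

#include <pthread.h>
#include <stdio.h>

#define NCPU 4

static void *flush_model(void *arg)
{
        printf("flush on cpu %ld\n", (long)arg);        /* wbinvd() stand-in */
        return NULL;
}

/* Run fn "on every CPU" and only return once all have finished,
 * mirroring on_each_cpu(..., 1). */
static void on_each_cpu_model(void *(*fn)(void *))
{
        pthread_t t[NCPU];
        long i;

        for (i = 0; i < NCPU; i++)
                pthread_create(&t[i], NULL, fn, (void *)i);
        for (i = 0; i < NCPU; i++)
                pthread_join(&t[i], NULL);
}

int main(void)
{
        on_each_cpu_model(flush_model);
        /* only now would the fence register be rewritten */
        return 0;
}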
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index a3f06bc..d8ac0a3 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -157,6 +157,13 @@ create_hw_context(struct drm_device *dev,
return ERR_PTR(-ENOMEM);
}

+ if (INTEL_INFO(dev)->gen >= 7) {
+ ret = i915_gem_object_set_cache_level(ctx->obj,
+ I915_CACHE_LLC_MLC);
+ if (ret)
+ goto err_out;
+ }
+
/* The ring associated with the context object is handled by the normal
* object tracking code. We give an initial ring value simply to pass an
* assertion in the context switch code.
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 8e91083..be24312 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -42,56 +42,50 @@
* for is a boon.
*/

-#define PTE_ADDRESS_MASK 0xfffff000
-#define PTE_ADDRESS_MASK_HIGH 0x000000f0 /* i915+ */
-#define PTE_MAPPING_TYPE_UNCACHED (0 << 1)
-#define PTE_MAPPING_TYPE_DCACHE (1 << 1) /* i830 only */
-#define PTE_MAPPING_TYPE_CACHED (3 << 1)
-#define PTE_MAPPING_TYPE_MASK (3 << 1)
-#define PTE_VALID (1 << 0)
-
-/**
- * i915_stolen_to_phys - take an offset into stolen memory and turn it into
- * a physical one
- * @dev: drm device
- * @offset: address to translate
- *
- * Some chip functions require allocations from stolen space and need the
- * physical address of the memory in question.
- */
-static unsigned long i915_stolen_to_phys(struct drm_device *dev, u32 offset)
+static unsigned long i915_stolen_to_physical(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct pci_dev *pdev = dev_priv->bridge_dev;
u32 base;

-#if 0
/* On the machines I have tested the Graphics Base of Stolen Memory
- * is unreliable, so compute the base by subtracting the stolen memory
- * from the Top of Low Usable DRAM which is where the BIOS places
- * the graphics stolen memory.
+ * is unreliable, so on those compute the base by subtracting the
+ * stolen memory from the Top of Low Usable DRAM which is where the
+ * BIOS places the graphics stolen memory.
+ *
+ * On gen2, the layout is slightly different with the Graphics Segment
+ * immediately following Top of Memory (or Top of Usable DRAM). Note
+ * it appears that TOUD is only reported by 865g, so we just use the
+ * top of memory as determined by the e820 probe.
+ *
+ * XXX gen2 requires an unavailable symbol and 945gm fails with
+ * its value of TOLUD.
*/
- if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
- /* top 32bits are reserved = 0 */
+ base = 0;
+ if (INTEL_INFO(dev)->gen >= 6) {
+ /* Read Base Data of Stolen Memory Register (BDSM) directly.
+ * Note that there is also an MCHBAR mirror at 0x1080c0 or
+ * we could use device 2:0x5c instead.
+ */
+ pci_read_config_dword(pdev, 0xB0, &base);
+ base &= ~4095; /* lower bits used for locking register */
+ } else if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
+ /* Read Graphics Base of Stolen Memory directly */
pci_read_config_dword(pdev, 0xA4, &base);
- } else {
- /* XXX presume 8xx is the same as i915 */
- pci_bus_read_config_dword(pdev->bus, 2, 0x5C, &base);
- }
-#else
- if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
- u16 val;
- pci_read_config_word(pdev, 0xb0, &val);
- base = val >> 4 << 20;
- } else {
+#if 0
+ } else if (IS_GEN3(dev)) {
u8 val;
+ /* Stolen is immediately below Top of Low Usable DRAM */
pci_read_config_byte(pdev, 0x9c, &val);
base = val >> 3 << 27;
- }
- base -= dev_priv->mm.gtt->stolen_size;
+ base -= dev_priv->mm.gtt->stolen_size;
+ } else {
+ /* Stolen is immediately above Top of Memory */
+ base = max_low_pfn_mapped << PAGE_SHIFT;
#endif
+ }

- return base + offset;
+ return base;
}

static void i915_warn_stolen(struct drm_device *dev)
@@ -116,7 +110,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
if (!compressed_fb)
goto err;

- cfb_base = i915_stolen_to_phys(dev, compressed_fb->start);
+ cfb_base = dev_priv->mm.stolen_base + compressed_fb->start;
if (!cfb_base)
goto err_fb;

@@ -129,7 +123,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
if (!compressed_llb)
goto err_fb;

- ll_base = i915_stolen_to_phys(dev, compressed_llb->start);
+ ll_base = dev_priv->mm.stolen_base + compressed_llb->start;
if (!ll_base)
goto err_llb;
}
@@ -148,7 +142,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
}

DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n",
- cfb_base, ll_base, size >> 20);
+ (long)cfb_base, (long)ll_base, size >> 20);
return;

err_llb:
@@ -180,6 +174,13 @@ int i915_gem_init_stolen(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long prealloc_size = dev_priv->mm.gtt->stolen_size;

+ dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
+ if (dev_priv->mm.stolen_base == 0)
+ return 0;
+
+ DRM_DEBUG_KMS("found %d bytes of stolen memory at %08lx\n",
+ dev_priv->mm.gtt->stolen_size, dev_priv->mm.stolen_base);
+
/* Basic memrange allocator for stolen space */
drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);

diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 2bfd05a..ce70f0a 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -3839,7 +3839,7 @@
#define _TRANSB_CHICKEN2 0xf1064
#define TRANS_CHICKEN2(pipe) _PIPE(pipe, _TRANSA_CHICKEN2, _TRANSB_CHICKEN2)
#define TRANS_CHICKEN2_TIMING_OVERRIDE (1<<31)
-
+#define TRANS_CHICKEN2_FDI_POLARITY_REVERSED (1<<29)

#define SOUTH_CHICKEN1 0xc2000
#define FDIA_PHASE_SYNC_SHIFT_OVR 19
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 55ffba1..bd83391 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -351,12 +351,14 @@ parse_general_features(struct drm_i915_private *dev_priv,
dev_priv->lvds_ssc_freq =
intel_bios_ssc_frequency(dev, general->ssc_freq);
dev_priv->display_clock_mode = general->display_clock_mode;
- DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d\n",
+ dev_priv->fdi_rx_polarity_inverted = general->fdi_rx_polarity_inverted;
+ DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d fdi_rx_polarity_inverted %d\n",
dev_priv->int_tv_support,
dev_priv->int_crt_support,
dev_priv->lvds_use_ssc,
dev_priv->lvds_ssc_freq,
- dev_priv->display_clock_mode);
+ dev_priv->display_clock_mode,
+ dev_priv->fdi_rx_polarity_inverted);
}
}

diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index 36e57f9..e088d6f 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -127,7 +127,9 @@ struct bdb_general_features {
/* bits 3 */
u8 disable_smooth_vision:1;
u8 single_dvi:1;
- u8 rsvd9:6; /* finish byte */
+ u8 rsvd9:1;
+ u8 fdi_rx_polarity_inverted:1;
+ u8 rsvd10:4; /* finish byte */

/* bits 4 */
u8 legacy_monitor_detect;
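
The VBT change carves the new FDI RX polarity flag out of what used to be six reserved bits, leaving the byte exactly eight bits wide. A compile-time check of that layout (bit-field packing is compiler-dependent, so treat this as a sanity sketch):

#include <stdint.h>
#include <stdio.h>

struct bdb_bits3 {
        uint8_t disable_smooth_vision:1;
        uint8_t single_dvi:1;
        uint8_t rsvd9:1;
        uint8_t fdi_rx_polarity_inverted:1;
        uint8_t rsvd10:4;       /* finish byte */
};

int main(void)
{
        printf("sizeof = %zu\n", sizeof(struct bdb_bits3));     /* expect 1 */
        return 0;
}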
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index d3f834a..faeaebc 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -7732,22 +7732,25 @@ intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes,
if (crtc->enabled)
*prepare_pipes |= 1 << intel_crtc->pipe;

- /* We only support modeset on one single crtc, hence we need to do that
- * only for the passed in crtc iff we change anything else than just
- * disable crtcs.
- *
- * This is actually not true, to be fully compatible with the old crtc
- * helper we automatically disable _any_ output (i.e. doesn't need to be
- * connected to the crtc we're modesetting on) if it's disconnected.
- * Which is a rather nutty api (since changed the output configuration
- * without userspace's explicit request can lead to confusion), but
- * alas. Hence we currently need to modeset on all pipes we prepare. */
+ /*
+ * For simplicity do a full modeset on any pipe where the output routing
+ * changed. We could be more clever, but that would require us to be
+ * more careful with calling the relevant encoder->mode_set functions.
+ */
if (*prepare_pipes)
*modeset_pipes = *prepare_pipes;

/* ... and mask these out. */
*modeset_pipes &= ~(*disable_pipes);
*prepare_pipes &= ~(*disable_pipes);
+
+ /*
+ * HACK: We don't (yet) fully support global modesets. intel_set_config
+ * obeys this rule, but the modeset restore mode of
+ * intel_modeset_setup_hw_state does not.
+ */
+ *modeset_pipes &= 1 << intel_crtc->pipe;
+ *prepare_pipes &= 1 << intel_crtc->pipe;
}

static bool intel_crtc_in_use(struct drm_crtc *crtc)
@@ -9388,6 +9391,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
/* flush any delayed tasks or pending work */
flush_scheduled_work();

+ /* destroy backlight, if any, before the connectors */
+ intel_panel_destroy_backlight(dev);
+
drm_mode_config_cleanup(dev);
}

diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 73ce6e9..cbe1ec3 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -2467,17 +2467,14 @@ done:
static void
intel_dp_destroy(struct drm_connector *connector)
{
- struct drm_device *dev = connector->dev;
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct intel_connector *intel_connector = to_intel_connector(connector);

if (!IS_ERR_OR_NULL(intel_connector->edid))
kfree(intel_connector->edid);

- if (is_edp(intel_dp)) {
- intel_panel_destroy_backlight(dev);
+ if (is_edp(intel_dp))
intel_panel_fini(&intel_connector->panel);
- }

drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 15da995..ba96e04 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -449,6 +449,7 @@ void intel_dvo_init(struct drm_device *dev)
const struct intel_dvo_device *dvo = &intel_dvo_devices[i];
struct i2c_adapter *i2c;
int gpio;
+ bool dvoinit;

/* Allow the I2C driver info to specify the GPIO to be used in
* special cases, but otherwise default to what's defined
@@ -468,7 +469,17 @@ void intel_dvo_init(struct drm_device *dev)
i2c = intel_gmbus_get_adapter(dev_priv, gpio);

intel_dvo->dev = *dvo;
- if (!dvo->dev_ops->init(&intel_dvo->dev, i2c))
+
+ /* GMBUS NAK handling seems to be unstable, hence let the
+ * transmitter detection run in bit banging mode for now.
+ */
+ intel_gmbus_force_bit(i2c, true);
+
+ dvoinit = dvo->dev_ops->init(&intel_dvo->dev, i2c);
+
+ intel_gmbus_force_bit(i2c, false);
+
+ if (!dvoinit)
continue;

intel_encoder->type = INTEL_OUTPUT_DVO;
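
The detection change above is a scoped-override pattern: force the bus into bit-banging only for the duration of the probe, then put GMBUS back regardless of the result. The shape in isolation, with bus_force_bit() as a stand-in for intel_gmbus_force_bit():

#include <stdbool.h>

static bool probe_with_bitbang(bool (*probe)(void),
                               void (*bus_force_bit)(bool))
{
        bool ok;

        bus_force_bit(true);    /* avoid flaky GMBUS NAK handling */
        ok = probe();
        bus_force_bit(false);   /* restore normal operation */
        return ok;
}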
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 17aee74..8b383a6 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -556,7 +556,6 @@ static void intel_lvds_destroy(struct drm_connector *connector)
if (!IS_ERR_OR_NULL(lvds_connector->base.edid))
kfree(lvds_connector->base.edid);

- intel_panel_destroy_backlight(connector->dev);
intel_panel_fini(&lvds_connector->base.panel);

drm_sysfs_connector_remove(connector);
@@ -790,6 +789,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "X7SPA-H"),
},
},
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Fujitsu Esprimo Q900",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO Q900"),
+ },
+ },

{ } /* terminating entry */
};
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index bee8cb6..94d895b 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -422,6 +422,9 @@ int intel_panel_setup_backlight(struct drm_connector *connector)

intel_panel_init_backlight(dev);

+ if (WARN_ON(dev_priv->backlight))
+ return -ENODEV;
+
memset(&props, 0, sizeof(props));
props.type = BACKLIGHT_RAW;
props.max_brightness = _intel_panel_get_max_backlight(dev);
@@ -447,8 +450,10 @@ int intel_panel_setup_backlight(struct drm_connector *connector)
void intel_panel_destroy_backlight(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (dev_priv->backlight)
+ if (dev_priv->backlight) {
backlight_device_unregister(dev_priv->backlight);
+ dev_priv->backlight = NULL;
+ }
}
#else
int intel_panel_setup_backlight(struct drm_connector *connector)
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index dde0ded..253bcf3 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3560,6 +3560,7 @@ static void cpt_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
+ uint32_t val;

/*
* On Ibex Peak and Cougar Point, we need to disable clock
@@ -3572,8 +3573,12 @@ static void cpt_init_clock_gating(struct drm_device *dev)
/* The below fixes the weird display corruption, a few pixels shifted
* downward, on (only) LVDS of some HP laptops with IVY.
*/
- for_each_pipe(pipe)
- I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_CHICKEN2_TIMING_OVERRIDE);
+ for_each_pipe(pipe) {
+ val = TRANS_CHICKEN2_TIMING_OVERRIDE;
+ if (dev_priv->fdi_rx_polarity_inverted)
+ val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
+ I915_WRITE(TRANS_CHICKEN2(pipe), val);
+ }
/* WADP0ClockGatingDisable */
for_each_pipe(pipe) {
I915_WRITE(TRANS_CHICKEN1(pipe),
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index c275bf0..506c331 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1213,11 +1213,13 @@ static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder,
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
+ u16 active_outputs;
u32 tmp;

tmp = I915_READ(intel_sdvo->sdvo_reg);
+ intel_sdvo_get_active_outputs(intel_sdvo, &active_outputs);

- if (!(tmp & SDVO_ENABLE))
+ if (!(tmp & SDVO_ENABLE) && (active_outputs == 0))
return false;

if (HAS_PCH_CPT(dev))
@@ -2704,7 +2706,6 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
struct intel_sdvo *intel_sdvo;
u32 hotplug_mask;
int i;
-
intel_sdvo = kzalloc(sizeof(struct intel_sdvo), GFP_KERNEL);
if (!intel_sdvo)
return false;
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index 5ea5033..a657709 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -116,6 +116,8 @@ struct mga_fbdev {
void *sysram;
int size;
struct ttm_bo_kmap_obj mapping;
+ int x1, y1, x2, y2; /* dirty rect */
+ spinlock_t dirty_lock;
};

struct mga_crtc {
diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c
index 2f48648..41eefc4 100644
--- a/drivers/gpu/drm/mgag200/mgag200_fb.c
+++ b/drivers/gpu/drm/mgag200/mgag200_fb.c
@@ -28,16 +28,52 @@ static void mga_dirty_update(struct mga_fbdev *mfbdev,
int bpp = (mfbdev->mfb.base.bits_per_pixel + 7)/8;
int ret;
bool unmap = false;
+ bool store_for_later = false;
+ int x2, y2;
+ unsigned long flags;

obj = mfbdev->mfb.obj;
bo = gem_to_mga_bo(obj);

+ /*
+ * try and reserve the BO, if we fail with busy
+ * then the BO is being moved and we should
+ * store up the damage until later.
+ */
ret = mgag200_bo_reserve(bo, true);
if (ret) {
- DRM_ERROR("failed to reserve fb bo\n");
+ if (ret != -EBUSY)
+ return;
+
+ store_for_later = true;
+ }
+
+ x2 = x + width - 1;
+ y2 = y + height - 1;
+ spin_lock_irqsave(&mfbdev->dirty_lock, flags);
+
+ if (mfbdev->y1 < y)
+ y = mfbdev->y1;
+ if (mfbdev->y2 > y2)
+ y2 = mfbdev->y2;
+ if (mfbdev->x1 < x)
+ x = mfbdev->x1;
+ if (mfbdev->x2 > x2)
+ x2 = mfbdev->x2;
+
+ if (store_for_later) {
+ mfbdev->x1 = x;
+ mfbdev->x2 = x2;
+ mfbdev->y1 = y;
+ mfbdev->y2 = y2;
+ spin_unlock_irqrestore(&mfbdev->dirty_lock, flags);
return;
}

+ mfbdev->x1 = mfbdev->y1 = INT_MAX;
+ mfbdev->x2 = mfbdev->y2 = 0;
+ spin_unlock_irqrestore(&mfbdev->dirty_lock, flags);
+
if (!bo->kmap.virtual) {
ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
if (ret) {
@@ -47,10 +83,10 @@ static void mga_dirty_update(struct mga_fbdev *mfbdev,
}
unmap = true;
}
- for (i = y; i < y + height; i++) {
+ for (i = y; i <= y2; i++) {
/* assume equal stride for now */
src_offset = dst_offset = i * mfbdev->mfb.base.pitches[0] + (x * bpp);
- memcpy_toio(bo->kmap.virtual + src_offset, mfbdev->sysram + src_offset, width * bpp);
+ memcpy_toio(bo->kmap.virtual + src_offset, mfbdev->sysram + src_offset, (x2 - x + 1) * bpp);

}
if (unmap)
@@ -269,6 +305,7 @@ int mgag200_fbdev_init(struct mga_device *mdev)

mdev->mfbdev = mfbdev;
mfbdev->helper.funcs = &mga_fb_helper_funcs;
+ spin_lock_init(&mfbdev->dirty_lock);

ret = drm_fb_helper_init(mdev->dev, &mfbdev->helper,
mdev->num_crtc, MGAG200FB_CONN_LIMIT);
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index 8fc9d92..401c989 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -315,8 +315,8 @@ int mgag200_bo_reserve(struct mgag200_bo *bo, bool no_wait)

ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
if (ret) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("reserve failed %p\n", bo);
+ if (ret != -ERESTARTSYS && ret != -EBUSY)
+ DRM_ERROR("reserve failed %p %d\n", bo, ret);
return ret;
}
return 0;
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index 5ce9bf5..43672b6 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -1389,10 +1389,10 @@ int atom_allocate_fb_scratch(struct atom_context *ctx)
firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset);

DRM_DEBUG("atom firmware requested %08x %dkb\n",
- firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware,
- firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb);
+ le32_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware),
+ le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb));

- usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024;
+ usage_bytes = le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb) * 1024;
}
ctx->scratch_size_bytes = 0;
if (usage_bytes == 0)
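
The fields in that BIOS table are stored little-endian, so big-endian hosts were reading garbage until the le32_to_cpu()/le16_to_cpu() wrappers were added. A portable userspace equivalent for the 16-bit case:

#include <stdint.h>

/* Read a little-endian u16 regardless of host byte order. */
static uint16_t le16_to_host(const uint8_t *p)
{
        return (uint16_t)p[0] | ((uint16_t)p[1] << 8);
}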
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 21a892c..6d6fdb3 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -557,6 +557,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
/* use frac fb div on APUs */
if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev))
radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
+ /* use frac fb div on RS780/RS880 */
+ if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
+ radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
if (ASIC_IS_DCE32(rdev) && mode->clock > 165000)
radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
} else {
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 1b0a4ec..90dc470 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -105,6 +105,27 @@ void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
}
}

+static bool dce4_is_in_vblank(struct radeon_device *rdev, int crtc)
+{
+ if (RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK)
+ return true;
+ else
+ return false;
+}
+
+static bool dce4_is_counter_moving(struct radeon_device *rdev, int crtc)
+{
+ u32 pos1, pos2;
+
+ pos1 = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);
+ pos2 = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);
+
+ if (pos1 != pos2)
+ return true;
+ else
+ return false;
+}
+
/**
* dce4_wait_for_vblank - vblank wait asic callback.
*
@@ -115,21 +136,28 @@ void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
*/
void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc)
{
- int i;
+ unsigned i = 0;

if (crtc >= rdev->num_crtc)
return;

- if (RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[crtc]) & EVERGREEN_CRTC_MASTER_EN) {
- for (i = 0; i < rdev->usec_timeout; i++) {
- if (!(RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK))
+ if (!(RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[crtc]) & EVERGREEN_CRTC_MASTER_EN))
+ return;
+
+ /* depending on when we hit vblank, we may be close to active; if so,
+ * wait for another frame.
+ */
+ while (dce4_is_in_vblank(rdev, crtc)) {
+ if (i++ % 100 == 0) {
+ if (!dce4_is_counter_moving(rdev, crtc))
break;
- udelay(1);
}
- for (i = 0; i < rdev->usec_timeout; i++) {
- if (RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK)
+ }
+
+ while (!dce4_is_in_vblank(rdev, crtc)) {
+ if (i++ % 100 == 0) {
+ if (!dce4_is_counter_moving(rdev, crtc))
break;
- udelay(1);
}
}
}
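
The rewritten wait (the r100 version below follows the same pattern) first lets any in-progress vblank finish, so the caller cannot return almost a frame early, then waits for the next one; every ~100 iterations it checks that the position counter is still moving and bails out if the display is effectively off. The control flow in isolation:

#include <stdbool.h>

static void wait_for_vblank(bool (*in_vblank)(void),
                            bool (*counter_moving)(void))
{
        unsigned int i = 0;

        /* if we start inside vblank, wait for it to end first */
        while (in_vblank())
                if (i++ % 100 == 0 && !counter_moving())
                        return;

        /* then wait for the next vblank to begin */
        while (!in_vblank())
                if (i++ % 100 == 0 && !counter_moving())
                        return;
}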
@@ -608,6 +636,16 @@ void evergreen_hpd_init(struct radeon_device *rdev)

list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+ connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
+ /* don't try to enable hpd on eDP or LVDS to avoid breaking the
+ * aux dp channel on imac and help (but not completely fix)
+ * https://bugzilla.redhat.com/show_bug.cgi?id=726143
+ * also avoid interrupt storms during dpms.
+ */
+ continue;
+ }
switch (radeon_connector->hpd.hpd) {
case RADEON_HPD_1:
WREG32(DC_HPD1_CONTROL, tmp);
@@ -1325,17 +1363,16 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) {
radeon_wait_for_vblank(rdev, i);
- tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+ tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
}
} else {
tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
if (!(tmp & EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE)) {
radeon_wait_for_vblank(rdev, i);
- tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+ tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
}
@@ -1347,6 +1384,15 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
break;
udelay(1);
}
+
+ /* XXX this is a hack to avoid strange behavior with EFI on certain systems */
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+ tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
+ tmp &= ~EVERGREEN_CRTC_MASTER_EN;
+ WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+ save->crtc_enabled[i] = false;
+ /* ***** */
} else {
save->crtc_enabled[i] = false;
}
@@ -1364,6 +1410,22 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
}
/* wait for the MC to settle */
udelay(100);
+
+ /* lock double buffered regs */
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (save->crtc_enabled[i]) {
+ tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
+ if (!(tmp & EVERGREEN_GRPH_UPDATE_LOCK)) {
+ tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
+ WREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i], tmp);
+ }
+ tmp = RREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i]);
+ if (!(tmp & 1)) {
+ tmp |= 1;
+ WREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
+ }
+ }
+ }
}

void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
@@ -1385,6 +1447,33 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);

+ /* unlock regs and wait for update */
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (save->crtc_enabled[i]) {
+ tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]);
+ if ((tmp & 0x3) != 0) {
+ tmp &= ~0x3;
+ WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
+ }
+ tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
+ if (tmp & EVERGREEN_GRPH_UPDATE_LOCK) {
+ tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
+ WREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i], tmp);
+ }
+ tmp = RREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i]);
+ if (tmp & 1) {
+ tmp &= ~1;
+ WREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
+ }
+ for (j = 0; j < rdev->usec_timeout; j++) {
+ tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
+ if ((tmp & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING) == 0)
+ break;
+ udelay(1);
+ }
+ }
+ }
+
/* unblackout the MC */
tmp = RREG32(MC_SHARED_BLACKOUT_CNTL);
tmp &= ~BLACKOUT_MODE_MASK;
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index 034f4c2..3e9773a 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -225,6 +225,8 @@
#define EVERGREEN_CRTC_STATUS_POSITION 0x6e90
#define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8
#define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4
+#define EVERGREEN_MASTER_UPDATE_LOCK 0x6ef4
+#define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8

#define EVERGREEN_DC_GPIO_HPD_MASK 0x64b0
#define EVERGREEN_DC_GPIO_HPD_A 0x64b4
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index b64e55d..10e1bd1 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -471,7 +471,8 @@ static void cayman_gpu_init(struct radeon_device *rdev)
(rdev->pdev->device == 0x990F) ||
(rdev->pdev->device == 0x9910) ||
(rdev->pdev->device == 0x9917) ||
- (rdev->pdev->device == 0x9999)) {
+ (rdev->pdev->device == 0x9999) ||
+ (rdev->pdev->device == 0x999C)) {
rdev->config.cayman.max_simds_per_se = 6;
rdev->config.cayman.max_backends_per_se = 2;
} else if ((rdev->pdev->device == 0x9903) ||
@@ -480,7 +481,8 @@ static void cayman_gpu_init(struct radeon_device *rdev)
(rdev->pdev->device == 0x990D) ||
(rdev->pdev->device == 0x990E) ||
(rdev->pdev->device == 0x9913) ||
- (rdev->pdev->device == 0x9918)) {
+ (rdev->pdev->device == 0x9918) ||
+ (rdev->pdev->device == 0x999D)) {
rdev->config.cayman.max_simds_per_se = 4;
rdev->config.cayman.max_backends_per_se = 2;
} else if ((rdev->pdev->device == 0x9919) ||
@@ -619,6 +621,8 @@ static void cayman_gpu_init(struct radeon_device *rdev)

WREG32(GB_ADDR_CONFIG, gb_addr_config);
WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
+ if (ASIC_IS_DCE6(rdev))
+ WREG32(DMIF_ADDR_CALC, gb_addr_config);
WREG32(HDP_ADDR_CONFIG, gb_addr_config);
WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index 48e5022..e045f8c 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -45,6 +45,10 @@
#define ARUBA_GB_ADDR_CONFIG_GOLDEN 0x12010001

#define DMIF_ADDR_CONFIG 0xBD4
+
+/* DCE6 only */
+#define DMIF_ADDR_CALC 0xC00
+
#define SRBM_GFX_CNTL 0x0E44
#define RINGID(x) (((x) & 0x3) << 0)
#define VMID(x) (((x) & 0x7) << 0)
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 8ff7cac..62719ec 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -69,6 +69,38 @@ MODULE_FIRMWARE(FIRMWARE_R520);
* and others in some cases.
*/

+static bool r100_is_in_vblank(struct radeon_device *rdev, int crtc)
+{
+ if (crtc == 0) {
+ if (RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR)
+ return true;
+ else
+ return false;
+ } else {
+ if (RREG32(RADEON_CRTC2_STATUS) & RADEON_CRTC2_VBLANK_CUR)
+ return true;
+ else
+ return false;
+ }
+}
+
+static bool r100_is_counter_moving(struct radeon_device *rdev, int crtc)
+{
+ u32 vline1, vline2;
+
+ if (crtc == 0) {
+ vline1 = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
+ vline2 = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
+ } else {
+ vline1 = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
+ vline2 = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
+ }
+ if (vline1 != vline2)
+ return true;
+ else
+ return false;
+}
+
/**
* r100_wait_for_vblank - vblank wait asic callback.
*
@@ -79,36 +111,33 @@ MODULE_FIRMWARE(FIRMWARE_R520);
*/
void r100_wait_for_vblank(struct radeon_device *rdev, int crtc)
{
- int i;
+ unsigned i = 0;

if (crtc >= rdev->num_crtc)
return;

if (crtc == 0) {
- if (RREG32(RADEON_CRTC_GEN_CNTL) & RADEON_CRTC_EN) {
- for (i = 0; i < rdev->usec_timeout; i++) {
- if (!(RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR))
- break;
- udelay(1);
- }
- for (i = 0; i < rdev->usec_timeout; i++) {
- if (RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR)
- break;
- udelay(1);
- }
- }
+ if (!(RREG32(RADEON_CRTC_GEN_CNTL) & RADEON_CRTC_EN))
+ return;
} else {
- if (RREG32(RADEON_CRTC2_GEN_CNTL) & RADEON_CRTC2_EN) {
- for (i = 0; i < rdev->usec_timeout; i++) {
- if (!(RREG32(RADEON_CRTC2_STATUS) & RADEON_CRTC2_VBLANK_CUR))
- break;
- udelay(1);
- }
- for (i = 0; i < rdev->usec_timeout; i++) {
- if (RREG32(RADEON_CRTC2_STATUS) & RADEON_CRTC2_VBLANK_CUR)
- break;
- udelay(1);
- }
+ if (!(RREG32(RADEON_CRTC2_GEN_CNTL) & RADEON_CRTC2_EN))
+ return;
+ }
+
+ /* depending on when we hit vblank, we may be close to active; if so,
+ * wait for another frame.
+ */
+ while (r100_is_in_vblank(rdev, crtc)) {
+ if (i++ % 100 == 0) {
+ if (!r100_is_counter_moving(rdev, crtc))
+ break;
+ }
+ }
+
+ while (!r100_is_in_vblank(rdev, crtc)) {
+ if (i++ % 100 == 0) {
+ if (!r100_is_counter_moving(rdev, crtc))
+ break;
}
}
}
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h
index ec576aa..8ec2376 100644
--- a/drivers/gpu/drm/radeon/r500_reg.h
+++ b/drivers/gpu/drm/radeon/r500_reg.h
@@ -357,7 +357,9 @@
#define AVIVO_D1CRTC_FRAME_COUNT 0x60a4
#define AVIVO_D1CRTC_STEREO_CONTROL 0x60c4

+#define AVIVO_D1MODE_MASTER_UPDATE_LOCK 0x60e0
#define AVIVO_D1MODE_MASTER_UPDATE_MODE 0x60e4
+#define AVIVO_D1CRTC_UPDATE_LOCK 0x60e8

/* master controls */
#define AVIVO_DC_CRTC_MASTER_EN 0x60f8
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 95970ec..d89a1f8 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -489,7 +489,7 @@ void r600_hdmi_enable(struct drm_encoder *encoder)
offset = dig->afmt->offset;

/* Older chipsets require setting HDMI and routing manually */
- if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
+ if (ASIC_IS_DCE2(rdev) && !ASIC_IS_DCE3(rdev)) {
hdmi = HDMI0_ERROR_ACK | HDMI0_ENABLE;
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
@@ -557,7 +557,7 @@ void r600_hdmi_disable(struct drm_encoder *encoder)
radeon_irq_kms_disable_afmt(rdev, dig->afmt->id);

/* Older chipsets not handled by AtomBIOS */
- if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
+ if (ASIC_IS_DCE2(rdev) && !ASIC_IS_DCE3(rdev)) {
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
WREG32_P(AVIVO_TMDSA_CNTL, 0,
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index f22eb57..96168ef 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -2028,6 +2028,8 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
num_modes = power_info->info.ucNumOfPowerModeEntries;
if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK)
num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK;
+ if (num_modes == 0)
+ return state_index;
rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * num_modes, GFP_KERNEL);
if (!rdev->pm.power_state)
return state_index;
@@ -2432,6 +2434,8 @@ static int radeon_atombios_parse_power_table_4_5(struct radeon_device *rdev)
power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController);
+ if (power_info->pplib.ucNumStates == 0)
+ return state_index;
rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) *
power_info->pplib.ucNumStates, GFP_KERNEL);
if (!rdev->pm.power_state)
@@ -2514,6 +2518,7 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
u16 data_offset;
u8 frev, crev;
+ u8 *power_state_offset;

if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
&frev, &crev, &data_offset))
@@ -2530,15 +2535,17 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
non_clock_info_array = (struct _NonClockInfoArray *)
(mode_info->atom_context->bios + data_offset +
le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
+ if (state_array->ucNumEntries == 0)
+ return state_index;
rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) *
state_array->ucNumEntries, GFP_KERNEL);
if (!rdev->pm.power_state)
return state_index;
+ power_state_offset = (u8 *)state_array->states;
for (i = 0; i < state_array->ucNumEntries; i++) {
mode_index = 0;
- power_state = (union pplib_power_state *)&state_array->states[i];
- /* XXX this might be an inagua bug... */
- non_clock_array_index = i; /* power_state->v2.nonClockInfoIndex */
+ power_state = (union pplib_power_state *)power_state_offset;
+ non_clock_array_index = power_state->v2.nonClockInfoIndex;
non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
&non_clock_info_array->nonClockInfo[non_clock_array_index];
rdev->pm.power_state[i].clock_info = kzalloc(sizeof(struct radeon_pm_clock_info) *
@@ -2550,9 +2557,6 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
if (power_state->v2.ucNumDPMLevels) {
for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
clock_array_index = power_state->v2.clockInfoIndex[j];
- /* XXX this might be an inagua bug... */
- if (clock_array_index >= clock_info_array->ucNumEntries)
- continue;
clock_info = (union pplib_clock_info *)
&clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
valid = radeon_atombios_parse_pplib_clock_info(rdev,
@@ -2574,6 +2578,7 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
non_clock_info);
state_index++;
}
+ power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
}
/* if multiple clock modes, mark the lowest as no display */
for (i = 0; i < state_index; i++) {
@@ -2620,7 +2625,9 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
default:
break;
}
- } else {
+ }
+
+ if (state_index == 0) {
rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state), GFP_KERNEL);
if (rdev->pm.power_state) {
rdev->pm.power_state[0].clock_info =
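
A note on the zero-entry guards added above: kzalloc(0) does not fail -- it
returns the non-NULL ZERO_SIZE_PTR -- so the existing NULL check never
rejects an empty power table, and the parser would go on to index a
zero-length array. A minimal userspace sketch of the same pattern
(illustration only, not part of the patch; the names are made up):

#include <stdio.h>
#include <stdlib.h>

struct power_state { int sclk; };

/* Returns the number of states parsed; 0 for an empty table. */
static int parse_table(unsigned int num_modes, struct power_state **out)
{
	if (num_modes == 0)		/* the guard the patch adds */
		return 0;
	*out = calloc(num_modes, sizeof(**out));
	if (!*out)
		return 0;
	return (int)num_modes;
}

int main(void)
{
	struct power_state *ps = NULL;

	/* malloc(0)/calloc(0, ...) may likewise return non-NULL, so
	 * testing the count first is the only reliable way to reject
	 * an empty table before the allocation. */
	printf("parsed %d states\n", parse_table(0, &ps));
	return 0;
}
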
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 9c312f9..bc36922 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -50,9 +50,13 @@ int radeon_driver_unload_kms(struct drm_device *dev)

if (rdev == NULL)
return 0;
+ if (rdev->rmmio == NULL)
+ goto done_free;
radeon_acpi_fini(rdev);
radeon_modeset_fini(rdev);
radeon_device_fini(rdev);
+
+done_free:
kfree(rdev);
dev->dev_private = NULL;
return 0;
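
The unload change above handles the case where device init failed before the
register mapping (rmmio) was set up: the fini helpers must be skipped, but
the private structure still has to be freed. A hedged sketch of that
early-out shape, with hypothetical stand-ins for the radeon helpers:

#include <stdlib.h>

struct dev_priv { void *rmmio; };

static void modeset_fini(struct dev_priv *p) { (void)p; /* ... */ }
static void device_fini(struct dev_priv *p)  { (void)p; /* ... */ }

static int driver_unload(struct dev_priv *p)
{
	if (p == NULL)
		return 0;
	if (p->rmmio == NULL)	/* init never completed */
		goto done_free;
	modeset_fini(p);
	device_fini(p);
done_free:
	free(p);
	return 0;
}

int main(void)
{
	/* calloc leaves rmmio NULL, modelling a failed early init */
	return driver_unload(calloc(1, sizeof(struct dev_priv)));
}
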
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 338fd6a..788c64c 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -843,7 +843,11 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
struct radeon_device *rdev = dev->dev_private;

seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk);
- seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
+ /* radeon_get_engine_clock is not reliable on APUs so just print the current clock */
+ if ((rdev->family >= CHIP_PALM) && (rdev->flags & RADEON_IS_IGP))
+ seq_printf(m, "current engine clock: %u0 kHz\n", rdev->pm.current_sclk);
+ else
+ seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk);
if (rdev->asic->pm.get_memory_clock)
seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index cd72062..8adc5b5 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -161,7 +161,8 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
radeon_semaphore_free(rdev, &ib->semaphore, NULL);
}
/* if we can't remember our last VM flush then flush now! */
- if (ib->vm && !ib->vm->last_flush) {
+ /* XXX figure out why we have to flush for every IB */
+ if (ib->vm /*&& !ib->vm->last_flush*/) {
radeon_ring_vm_flush(rdev, ib->ring, ib->vm);
}
if (const_ib) {
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 5a0fc74..46fa1b0 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -52,23 +52,59 @@ static const u32 crtc_offsets[2] =
AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
};

+static bool avivo_is_in_vblank(struct radeon_device *rdev, int crtc)
+{
+ if (RREG32(AVIVO_D1CRTC_STATUS + crtc_offsets[crtc]) & AVIVO_D1CRTC_V_BLANK)
+ return true;
+ else
+ return false;
+}
+
+static bool avivo_is_counter_moving(struct radeon_device *rdev, int crtc)
+{
+ u32 pos1, pos2;
+
+ pos1 = RREG32(AVIVO_D1CRTC_STATUS_POSITION + crtc_offsets[crtc]);
+ pos2 = RREG32(AVIVO_D1CRTC_STATUS_POSITION + crtc_offsets[crtc]);
+
+ if (pos1 != pos2)
+ return true;
+ else
+ return false;
+}
+
+/**
+ * avivo_wait_for_vblank - vblank wait asic callback.
+ *
+ * @rdev: radeon_device pointer
+ * @crtc: crtc to wait for vblank on
+ *
+ * Wait for vblank on the requested crtc (r5xx-r7xx).
+ */
void avivo_wait_for_vblank(struct radeon_device *rdev, int crtc)
{
- int i;
+ unsigned i = 0;

if (crtc >= rdev->num_crtc)
return;

- if (RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[crtc]) & AVIVO_CRTC_EN) {
- for (i = 0; i < rdev->usec_timeout; i++) {
- if (!(RREG32(AVIVO_D1CRTC_STATUS + crtc_offsets[crtc]) & AVIVO_D1CRTC_V_BLANK))
+ if (!(RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[crtc]) & AVIVO_CRTC_EN))
+ return;
+
+ /* depending on when we hit vblank, we may be close to active; if so,
+ * wait for another frame.
+ */
+ while (avivo_is_in_vblank(rdev, crtc)) {
+ if (i++ % 100 == 0) {
+ if (!avivo_is_counter_moving(rdev, crtc))
break;
- udelay(1);
}
- for (i = 0; i < rdev->usec_timeout; i++) {
- if (RREG32(AVIVO_D1CRTC_STATUS + crtc_offsets[crtc]) & AVIVO_D1CRTC_V_BLANK)
+ }
+
+ while (!avivo_is_in_vblank(rdev, crtc)) {
+ if (i++ % 100 == 0) {
+ if (!avivo_is_counter_moving(rdev, crtc))
break;
- udelay(1);
}
}
}
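
The reworked wait above polls the vblank status bit, but every 100
iterations it also checks that the scanout position counter is still
advancing; if it is not, the CRTC is effectively stopped and the wait gives
up rather than spinning for the full timeout. A toy userspace simulation of
that polling structure -- the hardware reads are faked and everything here
is assumption, not driver code:

#include <stdbool.h>
#include <stdio.h>

static unsigned int fake_position;
static bool counter_frozen;

static unsigned int read_position(void)
{
	if (!counter_frozen)
		fake_position++;
	return fake_position;
}

static bool in_vblank(void)
{
	return false;	/* simulated CRTC: never reports vblank */
}

static bool counter_moving(void)
{
	unsigned int p1 = read_position();
	unsigned int p2 = read_position();

	return p1 != p2;
}

int main(void)
{
	unsigned int i = 0;

	counter_frozen = true;	/* simulate a stopped CRTC */
	while (!in_vblank()) {
		if (i++ % 100 == 0 && !counter_moving())
			break;	/* bail out instead of hanging */
	}
	printf("gave up after %u poll(s)\n", i);
	return 0;
}
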
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 435ed35..ffcba73 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -303,8 +303,10 @@ void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
tmp = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]);
if (!(tmp & AVIVO_CRTC_DISP_READ_REQUEST_DISABLE)) {
radeon_wait_for_vblank(rdev, i);
+ WREG32(AVIVO_D1CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
tmp |= AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
WREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i], tmp);
+ WREG32(AVIVO_D1CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
}
/* wait for the next frame */
frame_count = radeon_get_vblank_counter(rdev, i);
@@ -313,6 +315,15 @@ void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
break;
udelay(1);
}
+
+ /* XXX this is a hack to avoid strange behavior with EFI on certain systems */
+ WREG32(AVIVO_D1CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+ tmp = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]);
+ tmp &= ~AVIVO_CRTC_EN;
+ WREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i], tmp);
+ WREG32(AVIVO_D1CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+ save->crtc_enabled[i] = false;
+ /* ***** */
} else {
save->crtc_enabled[i] = false;
}
@@ -338,6 +349,22 @@ void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
}
/* wait for the MC to settle */
udelay(100);
+
+ /* lock double buffered regs */
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (save->crtc_enabled[i]) {
+ tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]);
+ if (!(tmp & AVIVO_D1GRPH_UPDATE_LOCK)) {
+ tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
+ WREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i], tmp);
+ }
+ tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_LOCK + crtc_offsets[i]);
+ if (!(tmp & 1)) {
+ tmp |= 1;
+ WREG32(AVIVO_D1MODE_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
+ }
+ }
+ }
}

void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
@@ -348,7 +375,7 @@ void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
/* update crtc base addresses */
for (i = 0; i < rdev->num_crtc; i++) {
if (rdev->family >= CHIP_RV770) {
- if (i == 1) {
+ if (i == 0) {
WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH,
upper_32_bits(rdev->mc.vram_start));
WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH,
@@ -367,6 +394,33 @@ void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
}
WREG32(R_000310_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);

+ /* unlock regs and wait for update */
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (save->crtc_enabled[i]) {
+ tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i]);
+ if ((tmp & 0x3) != 0) {
+ tmp &= ~0x3;
+ WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
+ }
+ tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]);
+ if (tmp & AVIVO_D1GRPH_UPDATE_LOCK) {
+ tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK;
+ WREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i], tmp);
+ }
+ tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_LOCK + crtc_offsets[i]);
+ if (tmp & 1) {
+ tmp &= ~1;
+ WREG32(AVIVO_D1MODE_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
+ }
+ for (j = 0; j < rdev->usec_timeout; j++) {
+ tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]);
+ if ((tmp & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING) == 0)
+ break;
+ udelay(1);
+ }
+ }
+ }
+
if (rdev->family >= CHIP_R600) {
/* unblackout the MC */
if (rdev->family >= CHIP_RV770)
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index dd00721..40d766e 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -1374,7 +1374,7 @@ static void si_select_se_sh(struct radeon_device *rdev,
u32 data = INSTANCE_BROADCAST_WRITES;

if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
- data = SH_BROADCAST_WRITES | SE_BROADCAST_WRITES;
+ data |= SH_BROADCAST_WRITES | SE_BROADCAST_WRITES;
else if (se_num == 0xffffffff)
data |= SE_BROADCAST_WRITES | SH_INDEX(sh_num);
else if (sh_num == 0xffffffff)
@@ -1659,6 +1659,7 @@ static void si_gpu_init(struct radeon_device *rdev)

WREG32(GB_ADDR_CONFIG, gb_addr_config);
WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
+ WREG32(DMIF_ADDR_CALC, gb_addr_config);
WREG32(HDP_ADDR_CONFIG, gb_addr_config);
WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index c056aae..e9a01f0 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -60,6 +60,8 @@

#define DMIF_ADDR_CONFIG 0xBD4

+#define DMIF_ADDR_CALC 0xC00
+
#define SRBM_STATUS 0xE50

#define SRBM_SOFT_RESET 0x0E60
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 05bfe53..892cd87 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -100,6 +100,16 @@ static int alloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
return 0;
}

+static int alloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq, int user)
+{
+ int ret = -ENOSYS;
+ if (user)
+ ret = alloc_oc_sq(rdev, sq);
+ if (ret)
+ ret = alloc_host_sq(rdev, sq);
+ return ret;
+}
+
static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
struct c4iw_dev_ucontext *uctx)
{
@@ -168,18 +178,9 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
goto free_sw_rq;
}

- if (user) {
- ret = alloc_oc_sq(rdev, &wq->sq);
- if (ret)
- goto free_hwaddr;
-
- ret = alloc_host_sq(rdev, &wq->sq);
- if (ret)
- goto free_sq;
- } else
- ret = alloc_host_sq(rdev, &wq->sq);
- if (ret)
- goto free_hwaddr;
+ ret = alloc_sq(rdev, &wq->sq, user);
+ if (ret)
+ goto free_hwaddr;
memset(wq->sq.queue, 0, wq->sq.memsize);
dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);

diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 4647b50..4c867f4 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -3948,6 +3948,9 @@ static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic)
if (!table)
goto out;

+ /* Initialize table spin-lock */
+ spin_lock_init(&table->lock);
+
if (ioapic)
/* Keep the first 32 indexes free for IOAPIC interrupts */
table->min_index = 32;
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index f2fdbb7..82c63ac 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1326,7 +1326,7 @@ static const struct net_device_ops ibmveth_netdev_ops = {

static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
- int rc, i;
+ int rc, i, mac_len;
struct net_device *netdev;
struct ibmveth_adapter *adapter;
unsigned char *mac_addr_p;
@@ -1336,11 +1336,19 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
dev->unit_address);

mac_addr_p = (unsigned char *)vio_get_attribute(dev, VETH_MAC_ADDR,
- NULL);
+ &mac_len);
if (!mac_addr_p) {
dev_err(&dev->dev, "Can't find VETH_MAC_ADDR attribute\n");
return -EINVAL;
}
+ /* Workaround for old/broken pHyp */
+ if (mac_len == 8)
+ mac_addr_p += 2;
+ else if (mac_len != 6) {
+ dev_err(&dev->dev, "VETH_MAC_ADDR attribute wrong len %d\n",
+ mac_len);
+ return -EINVAL;
+ }

mcastFilterSize_p = (unsigned int *)vio_get_attribute(dev,
VETH_MCAST_FILTER_SIZE, NULL);
@@ -1365,17 +1373,6 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)

netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);

- /*
- * Some older boxes running PHYP non-natively have an OF that returns
- * a 8-byte local-mac-address field (and the first 2 bytes have to be
- * ignored) while newer boxes' OF return a 6-byte field. Note that
- * IEEE 1275 specifies that local-mac-address must be a 6-byte field.
- * The RPA doc specifies that the first byte must be 10b, so we'll
- * just look for it to solve this 8 vs. 6 byte field issue
- */
- if ((*mac_addr_p & 0x3) != 0x02)
- mac_addr_p += 2;
-
adapter->mac_addr = 0;
memcpy(&adapter->mac_addr, mac_addr_p, 6);
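
Rather than sniffing address bits, the probe now keys the workaround off the
attribute length that vio_get_attribute() reports: 8 bytes means old pHyp
with two leading pad bytes, 6 bytes is the IEEE 1275-conformant case, and
anything else is rejected. A hedged sketch of the same validation on a plain
buffer:

#include <stdio.h>
#include <string.h>

static int extract_mac(const unsigned char *attr, int attr_len,
		       unsigned char mac[6])
{
	if (attr_len == 8)	/* old firmware: skip two pad bytes */
		attr += 2;
	else if (attr_len != 6)	/* anything else is malformed */
		return -1;
	memcpy(mac, attr, 6);
	return 0;
}

int main(void)
{
	const unsigned char padded[8] = {
		0, 0, 0x02, 0x11, 0x22, 0x33, 0x44, 0x55
	};
	unsigned char mac[6];

	if (extract_mac(padded, sizeof(padded), mac) == 0)
		printf("mac starts %02x:%02x:...\n", mac[0], mac[1]);
	return 0;
}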

diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index fd4772a..522fb10 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -35,6 +35,7 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/vmalloc.h>
+#include <linux/pm_runtime.h>

#include "e1000.h"

@@ -2053,7 +2054,19 @@ static int e1000_get_rxnfc(struct net_device *netdev,
}
}

+static int e1000e_ethtool_begin(struct net_device *netdev)
+{
+ return pm_runtime_get_sync(netdev->dev.parent);
+}
+
+static void e1000e_ethtool_complete(struct net_device *netdev)
+{
+ pm_runtime_put_sync(netdev->dev.parent);
+}
+
static const struct ethtool_ops e1000_ethtool_ops = {
+ .begin = e1000e_ethtool_begin,
+ .complete = e1000e_ethtool_complete,
.get_settings = e1000_get_settings,
.set_settings = e1000_set_settings,
.get_drvinfo = e1000_get_drvinfo,
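
The begin/complete pair registered above brackets every ethtool operation
with a runtime-PM reference, so the NIC is resumed before any register
access the operation performs. A toy refcount model of that bracketing --
this is not the real pm_runtime API, just the shape of it:

#include <stdio.h>

static int pm_usage;

static int pm_get(void)
{
	if (pm_usage++ == 0)
		puts("resume device");
	return 0;
}

static void pm_put(void)
{
	if (--pm_usage == 0)
		puts("suspend device");
}

static void ethtool_op(const char *name)
{
	pm_get();				/* .begin */
	printf("run %s with device awake\n", name);
	pm_put();				/* .complete */
}

int main(void)
{
	ethtool_op("get_settings");
	ethtool_op("get_drvinfo");
	return 0;
}
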
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 1f93880..8692eca 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -3952,6 +3952,7 @@ static int e1000_open(struct net_device *netdev)
netif_start_queue(netdev);

adapter->idle_check = true;
+ hw->mac.get_link_status = true;
pm_runtime_put(&pdev->dev);

/* fire a link status change interrupt to start the watchdog */
@@ -4312,6 +4313,7 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter)
(adapter->hw.phy.media_type == e1000_media_type_copper)) {
int ret_val;

+ pm_runtime_get_sync(&adapter->pdev->dev);
ret_val = e1e_rphy(hw, PHY_CONTROL, &phy->bmcr);
ret_val |= e1e_rphy(hw, PHY_STATUS, &phy->bmsr);
ret_val |= e1e_rphy(hw, PHY_AUTONEG_ADV, &phy->advertise);
@@ -4322,6 +4324,7 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter)
ret_val |= e1e_rphy(hw, PHY_EXT_STATUS, &phy->estatus);
if (ret_val)
e_warn("Error reading PHY register\n");
+ pm_runtime_put_sync(&adapter->pdev->dev);
} else {
/* Do not read PHY registers if link is not up
* Set values to typical power-on defaults
@@ -5450,8 +5453,7 @@ release:
return retval;
}

-static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
- bool runtime)
+static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -5475,10 +5477,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
}
e1000e_reset_interrupt_capability(adapter);

- retval = pci_save_state(pdev);
- if (retval)
- return retval;
-
status = er32(STATUS);
if (status & E1000_STATUS_LU)
wufc &= ~E1000_WUFC_LNKC;
@@ -5534,13 +5532,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
ew32(WUFC, 0);
}

- *enable_wake = !!wufc;
-
- /* make sure adapter isn't asleep if manageability is enabled */
- if ((adapter->flags & FLAG_MNG_PT_ENABLED) ||
- (hw->mac.ops.check_mng_mode(hw)))
- *enable_wake = true;
-
if (adapter->hw.phy.type == e1000_phy_igp_3)
e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);

@@ -5551,26 +5542,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,

pci_clear_master(pdev);

- return 0;
-}
-
-static void e1000_power_off(struct pci_dev *pdev, bool sleep, bool wake)
-{
- if (sleep && wake) {
- pci_prepare_to_sleep(pdev);
- return;
- }
-
- pci_wake_from_d3(pdev, wake);
- pci_set_power_state(pdev, PCI_D3hot);
-}
-
-static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep,
- bool wake)
-{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct e1000_adapter *adapter = netdev_priv(netdev);
-
/* The pci-e switch on some quad port adapters will report a
* correctable error when the MAC transitions from D0 to D3. To
* prevent this we need to mask off the correctable errors on the
@@ -5584,12 +5555,13 @@ static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep,
pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL,
(devctl & ~PCI_EXP_DEVCTL_CERE));

- e1000_power_off(pdev, sleep, wake);
+ pci_save_state(pdev);
+ pci_prepare_to_sleep(pdev);

pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL, devctl);
- } else {
- e1000_power_off(pdev, sleep, wake);
}
+
+ return 0;
}

#ifdef CONFIG_PCIEASPM
@@ -5640,9 +5612,7 @@ static int __e1000_resume(struct pci_dev *pdev)
if (aspm_disable_flag)
e1000e_disable_aspm(pdev, aspm_disable_flag);

- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
- pci_save_state(pdev);
+ pci_set_master(pdev);

e1000e_set_interrupt_capability(adapter);
if (netif_running(netdev)) {
@@ -5708,14 +5678,8 @@ static int __e1000_resume(struct pci_dev *pdev)
static int e1000_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
- int retval;
- bool wake;
-
- retval = __e1000_shutdown(pdev, &wake, false);
- if (!retval)
- e1000_complete_shutdown(pdev, true, wake);

- return retval;
+ return __e1000_shutdown(pdev, false);
}

static int e1000_resume(struct device *dev)
@@ -5738,13 +5702,10 @@ static int e1000_runtime_suspend(struct device *dev)
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);

- if (e1000e_pm_ready(adapter)) {
- bool wake;
-
- __e1000_shutdown(pdev, &wake, true);
- }
+ if (!e1000e_pm_ready(adapter))
+ return 0;

- return 0;
+ return __e1000_shutdown(pdev, true);
}

static int e1000_idle(struct device *dev)
@@ -5782,12 +5743,7 @@ static int e1000_runtime_resume(struct device *dev)

static void e1000_shutdown(struct pci_dev *pdev)
{
- bool wake = false;
-
- __e1000_shutdown(pdev, &wake, false);
-
- if (system_state == SYSTEM_POWER_OFF)
- e1000_complete_shutdown(pdev, false, wake);
+ __e1000_shutdown(pdev, false);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -5908,9 +5864,9 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
"Cannot re-enable PCI device after reset.\n");
result = PCI_ERS_RESULT_DISCONNECT;
} else {
- pci_set_master(pdev);
pdev->state_saved = true;
pci_restore_state(pdev);
+ pci_set_master(pdev);

pci_enable_wake(pdev, PCI_D3hot, 0);
pci_enable_wake(pdev, PCI_D3cold, 0);
@@ -6341,7 +6297,11 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)

/* initialize the wol settings based on the eeprom settings */
adapter->wol = adapter->eeprom_wol;
- device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+
+ /* make sure adapter isn't asleep if manageability is enabled */
+ if (adapter->wol || (adapter->flags & FLAG_MNG_PT_ENABLED) ||
+ (hw->mac.ops.check_mng_mode(hw)))
+ device_wakeup_enable(&pdev->dev);

/* save off EEPROM version number */
e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers);
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 2d849da..2d56d71 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -5779,6 +5779,14 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
goto err_stop_0;
}

+ /* 8168evl does not automatically pad to minimum length. */
+ if (unlikely(tp->mac_version == RTL_GIGA_MAC_VER_34 &&
+ skb->len < ETH_ZLEN)) {
+ if (skb_padto(skb, ETH_ZLEN))
+ goto err_update_stats;
+ skb_put(skb, ETH_ZLEN - skb->len);
+ }
+
if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
goto err_stop_0;

@@ -5850,6 +5858,7 @@ err_dma_1:
rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd);
err_dma_0:
dev_kfree_skb(skb);
+err_update_stats:
dev->stats.tx_dropped++;
return NETDEV_TX_OK;
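
Background for the hunk above: the 8168evl does not pad short frames in
hardware, so anything below the 60-byte Ethernet minimum (ETH_ZLEN) must be
zero-padded before it reaches the descriptor ring; skb_padto() grows the
buffer and skb_put() extends the logical length. A plain-buffer sketch of
the same step (ETH_ZLEN is the real constant, the rest is illustrative):

#include <stdio.h>
#include <string.h>

#define ETH_ZLEN 60

static int pad_frame(unsigned char *buf, size_t cap, size_t *len)
{
	if (*len >= ETH_ZLEN)
		return 0;
	if (cap < ETH_ZLEN)
		return -1;	/* cannot pad in place */
	memset(buf + *len, 0, ETH_ZLEN - *len);
	*len = ETH_ZLEN;
	return 0;
}

int main(void)
{
	unsigned char frame[64] = { 0xde, 0xad };
	size_t len = 2;

	if (pad_frame(frame, sizeof(frame), &len) == 0)
		printf("padded to %zu bytes\n", len);
	return 0;
}
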

diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index f79cbcd..8c1ecc5 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -628,6 +628,7 @@ static int pci_pm_suspend(struct device *dev)
goto Fixup;
}

+ pci_dev->state_saved = false;
if (pm->suspend) {
pci_power_t prev = pci_dev->current_state;
int error;
@@ -774,6 +775,7 @@ static int pci_pm_freeze(struct device *dev)
return 0;
}

+ pci_dev->state_saved = false;
if (pm->freeze) {
int error;

@@ -862,6 +864,7 @@ static int pci_pm_poweroff(struct device *dev)
goto Fixup;
}

+ pci_dev->state_saved = false;
if (pm->poweroff) {
int error;

@@ -987,6 +990,7 @@ static int pci_pm_runtime_suspend(struct device *dev)
if (!pm || !pm->runtime_suspend)
return -ENOSYS;

+ pci_dev->state_saved = false;
pci_dev->no_d3cold = false;
error = pm->runtime_suspend(dev);
suspend_report_result(pm->runtime_suspend, error);
diff --git a/drivers/pwm/pwm-spear.c b/drivers/pwm/pwm-spear.c
index 83b21d9..0c644e7 100644
--- a/drivers/pwm/pwm-spear.c
+++ b/drivers/pwm/pwm-spear.c
@@ -143,7 +143,7 @@ static int spear_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
u32 val;

rc = clk_enable(pc->clk);
- if (!rc)
+ if (rc)
return rc;

val = spear_pwm_readl(pc, pwm->hwpwm, PWMCR);
@@ -209,12 +209,12 @@ static int spear_pwm_probe(struct platform_device *pdev)
pc->chip.npwm = NUM_PWM;

ret = clk_prepare(pc->clk);
- if (!ret)
+ if (ret)
return ret;

if (of_device_is_compatible(np, "st,spear1340-pwm")) {
ret = clk_enable(pc->clk);
- if (!ret) {
+ if (ret) {
clk_unprepare(pc->clk);
return ret;
}
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index f2845f1..2573cf4 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -2461,14 +2461,21 @@ static int handle_tx_event(struct xhci_hcd *xhci,
* TD list.
*/
if (list_empty(&ep_ring->td_list)) {
- xhci_warn(xhci, "WARN Event TRB for slot %d ep %d "
- "with no TDs queued?\n",
- TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
- ep_index);
- xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
- (le32_to_cpu(event->flags) &
- TRB_TYPE_BITMASK)>>10);
- xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
+ /*
+ * A stopped endpoint may generate an extra completion
+ * event if the device was suspended. Don't print
+ * warnings.
+ */
+ if (!(trb_comp_code == COMP_STOP ||
+ trb_comp_code == COMP_STOP_INVAL)) {
+ xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
+ TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
+ ep_index);
+ xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
+ (le32_to_cpu(event->flags) &
+ TRB_TYPE_BITMASK)>>10);
+ xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
+ }
if (ep->skip) {
ep->skip = false;
xhci_dbg(xhci, "td_list is empty while skip "
diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c
index 01443ce..13ddec9 100644
--- a/fs/autofs4/expire.c
+++ b/fs/autofs4/expire.c
@@ -61,15 +61,6 @@ static int autofs4_mount_busy(struct vfsmount *mnt, struct dentry *dentry)
/* This is an autofs submount, we can't expire it */
if (autofs_type_indirect(sbi->type))
goto done;
-
- /*
- * Otherwise it's an offset mount and we need to check
- * if we can umount its mount, if there is one.
- */
- if (!d_mountpoint(path.dentry)) {
- status = 0;
- goto done;
- }
}

/* Update the expiry counter if fs is busy */
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index ae94117..105b265 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -36,16 +36,19 @@
* compare two delayed tree backrefs with same bytenr and type
*/
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2,
- struct btrfs_delayed_tree_ref *ref1)
+ struct btrfs_delayed_tree_ref *ref1, int type)
{
- if (ref1->root < ref2->root)
- return -1;
- if (ref1->root > ref2->root)
- return 1;
- if (ref1->parent < ref2->parent)
- return -1;
- if (ref1->parent > ref2->parent)
- return 1;
+ if (type == BTRFS_TREE_BLOCK_REF_KEY) {
+ if (ref1->root < ref2->root)
+ return -1;
+ if (ref1->root > ref2->root)
+ return 1;
+ } else {
+ if (ref1->parent < ref2->parent)
+ return -1;
+ if (ref1->parent > ref2->parent)
+ return 1;
+ }
return 0;
}

@@ -109,7 +112,8 @@ static int comp_entry(struct btrfs_delayed_ref_node *ref2,
if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
ref1->type == BTRFS_SHARED_BLOCK_REF_KEY) {
return comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref2),
- btrfs_delayed_node_to_tree_ref(ref1));
+ btrfs_delayed_node_to_tree_ref(ref1),
+ ref1->type);
} else if (ref1->type == BTRFS_EXTENT_DATA_REF_KEY ||
ref1->type == BTRFS_SHARED_DATA_REF_KEY) {
return comp_data_refs(btrfs_delayed_node_to_data_ref(ref2),
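
The comparator previously ordered every tree backref by both root and
parent, but only one of the two keys is meaningful for a given ref type; the
fix selects the key by type. A generic three-way comparator in that shape,
with hypothetical types standing in for the btrfs ones:

#include <stdio.h>

enum ref_type { TREE_BLOCK_REF, SHARED_BLOCK_REF };

struct tree_ref { unsigned long long root, parent; };

static int comp_tree_refs(const struct tree_ref *a, const struct tree_ref *b,
			  enum ref_type type)
{
	unsigned long long ka = (type == TREE_BLOCK_REF) ? a->root : a->parent;
	unsigned long long kb = (type == TREE_BLOCK_REF) ? b->root : b->parent;

	return (ka > kb) - (ka < kb);	/* -1, 0 or 1 without overflow */
}

int main(void)
{
	struct tree_ref a = { .root = 5, .parent = 9 };
	struct tree_ref b = { .root = 7, .parent = 9 };

	printf("%d %d\n",
	       comp_tree_refs(&a, &b, TREE_BLOCK_REF),	/* -1: roots differ */
	       comp_tree_refs(&a, &b, SHARED_BLOCK_REF)); /* 0: same parent */
	return 0;
}
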
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 7c4e6cc..4b5398c 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -5794,7 +5794,9 @@ out:
* block must be cow'd
*/
static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans,
- struct inode *inode, u64 offset, u64 len)
+ struct inode *inode, u64 offset, u64 *len,
+ u64 *orig_start, u64 *orig_block_len,
+ u64 *ram_bytes)
{
struct btrfs_path *path;
int ret;
@@ -5851,8 +5853,12 @@ static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans,
disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
backref_offset = btrfs_file_extent_offset(leaf, fi);

+ *orig_start = key.offset - backref_offset;
+ *orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi);
+ *ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
+
extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
- if (extent_end < offset + len) {
+ if (extent_end < offset + *len) {
/* extent doesn't include our full range, must cow */
goto out;
}
@@ -5876,13 +5882,14 @@ static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans,
*/
disk_bytenr += backref_offset;
disk_bytenr += offset - key.offset;
- num_bytes = min(offset + len, extent_end) - offset;
+ num_bytes = min(offset + *len, extent_end) - offset;
if (csum_exist_in_range(root, disk_bytenr, num_bytes))
goto out;
/*
* all of the above have passed, it is safe to overwrite this extent
* without cow
*/
+ *len = num_bytes;
ret = 1;
out:
btrfs_free_path(path);
@@ -6092,7 +6099,7 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
em->block_start != EXTENT_MAP_HOLE)) {
int type;
int ret;
- u64 block_start;
+ u64 block_start, orig_start, orig_block_len, ram_bytes;

if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
type = BTRFS_ORDERED_PREALLOC;
@@ -6110,10 +6117,8 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
if (IS_ERR(trans))
goto must_cow;

- if (can_nocow_odirect(trans, inode, start, len) == 1) {
- u64 orig_start = em->orig_start;
- u64 orig_block_len = em->orig_block_len;
-
+ if (can_nocow_odirect(trans, inode, start, &len, &orig_start,
+ &orig_block_len, &ram_bytes) == 1) {
if (type == BTRFS_ORDERED_PREALLOC) {
free_extent_map(em);
em = create_pinned_em(inode, start, len,
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index a2fe9f5..0cfa2f4 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -1880,6 +1880,10 @@ retry:
return 0;

ext4_get_group_no_and_offset(sb, n_blocks_count - 1, &n_group, &offset);
+ if (n_group > (0xFFFFFFFFUL / EXT4_INODES_PER_GROUP(sb))) {
+ ext4_warning(sb, "resize would cause inodes_count overflow");
+ return -EINVAL;
+ }
ext4_get_group_no_and_offset(sb, o_blocks_count - 1, &o_group, &offset);

n_desc_blocks = num_desc_blocks(sb, n_group + 1);
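
The check added above uses the standard division idiom for overflow
detection: a * b exceeds MAX exactly when a > MAX / b, which can be tested
without ever performing the overflowing multiply. A standalone illustration
of the idiom against a 32-bit counter:

#include <stdint.h>
#include <stdio.h>

static int would_overflow_u32(uint64_t groups, uint32_t inodes_per_group)
{
	return groups > UINT32_MAX / inodes_per_group;
}

int main(void)
{
	/* 2^20 groups * 8192 inodes = 2^33: overflows, prints 1 */
	printf("%d\n", would_overflow_u32(1u << 20, 8192));
	/* 2^10 groups * 8192 inodes = 2^23: fits, prints 0 */
	printf("%d\n", would_overflow_u32(1u << 10, 8192));
	return 0;
}
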
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index ccee8cc..d0de769 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -908,19 +908,19 @@ static int can_do_hugetlb_shm(void)

static int get_hstate_idx(int page_size_log)
{
- struct hstate *h;
+ struct hstate *h = hstate_sizelog(page_size_log);

- if (!page_size_log)
- return default_hstate_idx;
- h = size_to_hstate(1 << page_size_log);
if (!h)
return -1;
return h - hstates;
}

-struct file *hugetlb_file_setup(const char *name, unsigned long addr,
- size_t size, vm_flags_t acctflag,
- struct user_struct **user,
+/*
+ * Note that size should be aligned to proper hugepage size in caller side,
+ * otherwise hugetlb_reserve_pages reserves one less hugepages than intended.
+ */
+struct file *hugetlb_file_setup(const char *name, size_t size,
+ vm_flags_t acctflag, struct user_struct **user,
int creat_flags, int page_size_log)
{
int error = -ENOMEM;
@@ -929,8 +929,6 @@ struct file *hugetlb_file_setup(const char *name, unsigned long addr,
struct path path;
struct dentry *root;
struct qstr quick_string;
- struct hstate *hstate;
- unsigned long num_pages;
int hstate_idx;

hstate_idx = get_hstate_idx(page_size_log);
@@ -969,12 +967,10 @@ struct file *hugetlb_file_setup(const char *name, unsigned long addr,
if (!inode)
goto out_dentry;

- hstate = hstate_inode(inode);
- size += addr & ~huge_page_mask(hstate);
- num_pages = ALIGN(size, huge_page_size(hstate)) >>
- huge_page_shift(hstate);
error = -ENOMEM;
- if (hugetlb_reserve_pages(inode, 0, num_pages, NULL, acctflag))
+ if (hugetlb_reserve_pages(inode, 0,
+ size >> huge_page_shift(hstate_inode(inode)), NULL,
+ acctflag))
goto out_inode;

d_instantiate(path.dentry, inode);
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 51d53a4..e3c6121 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -4513,9 +4513,9 @@ static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *
if (status != 0)
goto out;
/* Is this a delegated lock? */
- if (test_bit(NFS_DELEGATED_STATE, &state->flags))
- goto out;
lsp = request->fl_u.nfs4_fl.owner;
+ if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) == 0)
+ goto out;
seqid = nfs_alloc_seqid(&lsp->ls_seqid, GFP_KERNEL);
status = -ENOMEM;
if (seqid == NULL)
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index fad21c9..881fb15 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -1559,9 +1559,8 @@ extern void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *s

void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv);
void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv);
-int drm_prime_add_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle);
-int drm_prime_lookup_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle);
-void drm_prime_remove_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf);
+int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle);
+void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf);

int drm_prime_add_dma_buf(struct drm_device *dev, struct drm_gem_object *obj);
int drm_prime_lookup_obj(struct drm_device *dev, struct dma_buf *buf,
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index 1cdbfe9..d7da55c 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -227,6 +227,7 @@
{0x1002, 0x6819, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6821, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6822, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6823, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6824, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
@@ -234,11 +235,13 @@
{0x1002, 0x6827, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6829, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x682A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x682B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x682D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x682F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
@@ -590,6 +593,8 @@
{0x1002, 0x9999, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x999A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x999B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x999C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x999D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x99A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x99A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x99A4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index f94bc83..1dbdb1a 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -836,7 +836,7 @@ static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
unsigned int cmd_flags)
{
if (unlikely(cmd_flags & REQ_DISCARD))
- return q->limits.max_discard_sectors;
+ return min(q->limits.max_discard_sectors, UINT_MAX >> 9);

if (unlikely(cmd_flags & REQ_WRITE_SAME))
return q->limits.max_write_same_sectors;
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 0c80d3f..db695d5 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -185,8 +185,7 @@ static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
-struct file *hugetlb_file_setup(const char *name, unsigned long addr,
- size_t size, vm_flags_t acct,
+struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
struct user_struct **user, int creat_flags,
int page_size_log);

@@ -205,8 +204,8 @@ static inline int is_file_hugepages(struct file *file)

#define is_file_hugepages(file) 0
static inline struct file *
-hugetlb_file_setup(const char *name, unsigned long addr, size_t size,
- vm_flags_t acctflag, struct user_struct **user, int creat_flags,
+hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
+ struct user_struct **user, int creat_flags,
int page_size_log)
{
return ERR_PTR(-ENOSYS);
@@ -284,6 +283,13 @@ static inline struct hstate *hstate_file(struct file *f)
return hstate_inode(f->f_dentry->d_inode);
}

+static inline struct hstate *hstate_sizelog(int page_size_log)
+{
+ if (!page_size_log)
+ return &default_hstate;
+ return size_to_hstate(1 << page_size_log);
+}
+
static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
return hstate_file(vma->vm_file);
@@ -348,11 +354,12 @@ static inline int hstate_index(struct hstate *h)
return h - hstates;
}

-#else
+#else /* CONFIG_HUGETLB_PAGE */
struct hstate {};
#define alloc_huge_page_node(h, nid) NULL
#define alloc_bootmem_huge_page(h) NULL
#define hstate_file(f) NULL
+#define hstate_sizelog(s) NULL
#define hstate_vma(v) NULL
#define hstate_inode(i) NULL
#define huge_page_size(h) PAGE_SIZE
@@ -367,6 +374,6 @@ static inline unsigned int pages_per_huge_page(struct hstate *h)
}
#define hstate_index_to_shift(index) 0
#define hstate_index(h) 0
-#endif
+#endif /* CONFIG_HUGETLB_PAGE */

#endif /* _LINUX_HUGETLB_H */
diff --git a/ipc/shm.c b/ipc/shm.c
index 9bab650..9ec2316 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -491,10 +491,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)

sprintf (name, "SYSV%08x", key);
if (shmflg & SHM_HUGETLB) {
+ struct hstate *hs = hstate_sizelog((shmflg >> SHM_HUGE_SHIFT)
+ & SHM_HUGE_MASK);
+ size_t hugesize = ALIGN(size, huge_page_size(hs));
+
/* hugetlb_file_setup applies strict accounting */
if (shmflg & SHM_NORESERVE)
acctflag = VM_NORESERVE;
- file = hugetlb_file_setup(name, 0, size, acctflag,
+ file = hugetlb_file_setup(name, hugesize, acctflag,
&shp->mlock_user, HUGETLB_SHMFS_INODE,
(shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
} else {
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index 642a89c..a291aa2 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -617,9 +617,9 @@ void audit_trim_trees(void)
}
spin_unlock(&hash_lock);
trim_marked(tree);
- put_tree(tree);
drop_collected_mounts(root_mnt);
skip_it:
+ put_tree(tree);
mutex_lock(&audit_filter_mutex);
}
list_del(&cursor);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 1c82852..55a9d05 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -5020,36 +5020,32 @@ void trace_init_global_iter(struct trace_iterator *iter)
iter->cpu_file = TRACE_PIPE_ALL_CPU;
}

-static void
-__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
+void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
- static arch_spinlock_t ftrace_dump_lock =
- (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
/* use static because iter can be a bit big for the stack */
static struct trace_iterator iter;
+ static atomic_t dump_running;
unsigned int old_userobj;
- static int dump_ran;
unsigned long flags;
int cnt = 0, cpu;

- /* only one dump */
- local_irq_save(flags);
- arch_spin_lock(&ftrace_dump_lock);
- if (dump_ran)
- goto out;
-
- dump_ran = 1;
+ /* Only allow one dump user at a time. */
+ if (atomic_inc_return(&dump_running) != 1) {
+ atomic_dec(&dump_running);
+ return;
+ }

+ /*
+ * Always turn off tracing when we dump.
+ * We don't need to show trace output of what happens
+ * between multiple crashes.
+ *
+ * If the user does a sysrq-z, then they can re-enable
+ * tracing with echo 1 > tracing_on.
+ */
tracing_off();

- /* Did function tracer already get disabled? */
- if (ftrace_is_dead()) {
- printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
- printk("# MAY BE MISSING FUNCTION EVENTS\n");
- }
-
- if (disable_tracing)
- ftrace_kill();
+ local_irq_save(flags);

trace_init_global_iter(&iter);

@@ -5082,6 +5078,12 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)

printk(KERN_TRACE "Dumping ftrace buffer:\n");

+ /* Did function tracer already get disabled? */
+ if (ftrace_is_dead()) {
+ printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
+ printk("# MAY BE MISSING FUNCTION EVENTS\n");
+ }
+
/*
* We need to stop all tracing on all CPUS to read the
* the next buffer. This is a bit expensive, but is
@@ -5121,26 +5123,14 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
printk(KERN_TRACE "---------------------------------\n");

out_enable:
- /* Re-enable tracing if requested */
- if (!disable_tracing) {
- trace_flags |= old_userobj;
+ trace_flags |= old_userobj;

- for_each_tracing_cpu(cpu) {
- atomic_dec(&iter.tr->data[cpu]->disabled);
- }
- tracing_on();
+ for_each_tracing_cpu(cpu) {
+ atomic_dec(&iter.tr->data[cpu]->disabled);
}
-
- out:
- arch_spin_unlock(&ftrace_dump_lock);
+ atomic_dec(&dump_running);
local_irq_restore(flags);
}
-
-/* By default: disable tracing after the dump */
-void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
-{
- __ftrace_dump(true, oops_dump_mode);
-}
EXPORT_SYMBOL_GPL(ftrace_dump);

__init static int tracer_alloc_buffers(void)
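
The rewrite above replaces the arch spinlock plus dump_ran flag with a
single atomic counter: the caller that raises it from 0 to 1 owns the dump,
everyone else decrements and leaves immediately, and the gate reopens once
the owner finishes. A C11 rendition of that gate (sketch only; the kernel
uses atomic_inc_return/atomic_dec):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int dump_running;

static void ftrace_dump_like(void)
{
	/* atomic_fetch_add returns the previous value, so "!= 0" here
	 * matches the kernel's atomic_inc_return(...) != 1 test. */
	if (atomic_fetch_add(&dump_running, 1) != 0) {
		atomic_fetch_sub(&dump_running, 1);
		return;			/* someone else is dumping */
	}

	puts("dumping buffers...");	/* exclusive section */

	atomic_fetch_sub(&dump_running, 1);
}

int main(void)
{
	ftrace_dump_like();
	ftrace_dump_like();	/* also runs: the first dump completed */
	return 0;
}
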
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 5fc7aa5..81f6275 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -702,8 +702,6 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST 100000000

-static void
-__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode);
static unsigned int graph_hang_thresh;

/* Wrap the real function entry probe to avoid possible hanging */
@@ -713,8 +711,11 @@ static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
ftrace_graph_stop();
printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
- if (ftrace_dump_on_oops)
- __ftrace_dump(false, DUMP_ALL);
+ if (ftrace_dump_on_oops) {
+ ftrace_dump(DUMP_ALL);
+ /* ftrace_dump() disables tracing */
+ tracing_on();
+ }
return 0;
}

diff --git a/mm/mmap.c b/mm/mmap.c
index 32f3372..e6beac4 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1296,15 +1296,20 @@ SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
file = fget(fd);
if (!file)
goto out;
+ if (is_file_hugepages(file))
+ len = ALIGN(len, huge_page_size(hstate_file(file)));
} else if (flags & MAP_HUGETLB) {
struct user_struct *user = NULL;
+
+ len = ALIGN(len, huge_page_size(hstate_sizelog(
+ (flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK)));
/*
* VM_NORESERVE is used because the reservations will be
* taken when vm_ops->mmap() is called
* A dummy user value is used because we are not locking
* memory so no accounting is necessary
*/
- file = hugetlb_file_setup(HUGETLB_ANON_FILE, addr, len,
+ file = hugetlb_file_setup(HUGETLB_ANON_FILE, len,
VM_NORESERVE,
&user, HUGETLB_ANONHUGE_INODE,
(flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
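
Both hugetlb paths above now round len up to the huge page size before the
reservation is taken, so hugetlb_reserve_pages() no longer reserves one page
too few for unaligned requests. The power-of-two rounding idiom, shown
standalone:

#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long huge = 2UL << 20;			/* 2 MiB huge page */
	unsigned long len  = (3UL << 20) + 4096;	/* 3 MiB + one page */

	/* rounds up to 4 MiB, i.e. two whole huge pages */
	printf("aligned len: %lu MiB\n", ALIGN(len, huge) >> 20);
	return 0;
}
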
diff --git a/net/ipv4/netfilter/ipt_rpfilter.c b/net/ipv4/netfilter/ipt_rpfilter.c
index c301300..c49dcd0 100644
--- a/net/ipv4/netfilter/ipt_rpfilter.c
+++ b/net/ipv4/netfilter/ipt_rpfilter.c
@@ -66,6 +66,12 @@ static bool rpfilter_lookup_reverse(struct flowi4 *fl4,
return dev_match;
}

+static bool rpfilter_is_local(const struct sk_buff *skb)
+{
+ const struct rtable *rt = skb_rtable(skb);
+ return rt && (rt->rt_flags & RTCF_LOCAL);
+}
+
static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_rpfilter_info *info;
@@ -76,7 +82,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
info = par->matchinfo;
invert = info->flags & XT_RPFILTER_INVERT;

- if (par->in->flags & IFF_LOOPBACK)
+ if (rpfilter_is_local(skb))
return true ^ invert;

iph = ip_hdr(skb);
diff --git a/net/ipv6/netfilter/ip6t_NPT.c b/net/ipv6/netfilter/ip6t_NPT.c
index 83acc14..0ea43c7 100644
--- a/net/ipv6/netfilter/ip6t_NPT.c
+++ b/net/ipv6/netfilter/ip6t_NPT.c
@@ -57,7 +57,7 @@ static bool ip6t_npt_map_pfx(const struct ip6t_npt_tginfo *npt,
if (pfx_len - i >= 32)
mask = 0;
else
- mask = htonl(~((1 << (pfx_len - i)) - 1));
+ mask = htonl((1 << (i - pfx_len + 32)) - 1);

idx = i / 32;
addr->s6_addr32[idx] &= mask;
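
For the 32-bit word that straddles the prefix boundary, the mask must keep
the low (host) bits and clear the high (prefix) bits; with pfx_len - i
prefix bits in the word, that is (1 << (32 - (pfx_len - i))) - 1 in host
byte order, which the fix writes as (1 << (i - pfx_len + 32)) - 1. The old
expression kept the prefix bits instead. A standalone check of the fixed
expression (illustration only):

#include <stdint.h>
#include <stdio.h>

static uint32_t host_mask(unsigned int i, unsigned int pfx_len)
{
	if (pfx_len - i >= 32)
		return 0;	/* the whole word is prefix bits */
	/* caller guarantees i < pfx_len, so the shift stays in 1..31 */
	return (1u << (i - pfx_len + 32)) - 1;
}

int main(void)
{
	/* word starting at bit 32 with a /48 prefix: 16 prefix bits in
	 * this word, so the low 16 host bits survive */
	printf("0x%08x\n", host_mask(32, 48));	/* prints 0x0000ffff */
	return 0;
}
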
diff --git a/net/ipv6/netfilter/ip6t_rpfilter.c b/net/ipv6/netfilter/ip6t_rpfilter.c
index 5060d54..e0983f3 100644
--- a/net/ipv6/netfilter/ip6t_rpfilter.c
+++ b/net/ipv6/netfilter/ip6t_rpfilter.c
@@ -71,6 +71,12 @@ static bool rpfilter_lookup_reverse6(const struct sk_buff *skb,
return ret;
}

+static bool rpfilter_is_local(const struct sk_buff *skb)
+{
+ const struct rt6_info *rt = (const void *) skb_dst(skb);
+ return rt && (rt->rt6i_flags & RTF_LOCAL);
+}
+
static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_rpfilter_info *info = par->matchinfo;
@@ -78,7 +84,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
struct ipv6hdr *iph;
bool invert = info->flags & XT_RPFILTER_INVERT;

- if (par->in->flags & IFF_LOOPBACK)
+ if (rpfilter_is_local(skb))
return true ^ invert;

iph = ipv6_hdr(skb);
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index 6d6d8f2..38ca630 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -1470,7 +1470,8 @@ ip_set_utest(struct sock *ctnl, struct sk_buff *skb,
if (ret == -EAGAIN)
ret = 1;

- return ret < 0 ? ret : ret > 0 ? 0 : -IPSET_ERR_EXIST;
+ return (ret < 0 && ret != -ENOTEMPTY) ? ret :
+ ret > 0 ? 0 : -IPSET_ERR_EXIST;
}

/* Get headed data of a set */
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
index 8371c2b..09c744a 100644
--- a/net/netfilter/ipset/ip_set_list_set.c
+++ b/net/netfilter/ipset/ip_set_list_set.c
@@ -174,9 +174,13 @@ list_set_add(struct list_set *map, u32 i, ip_set_id_t id,
{
const struct set_elem *e = list_set_elem(map, i);

- if (i == map->size - 1 && e->id != IPSET_INVALID_ID)
- /* Last element replaced: e.g. add new,before,last */
- ip_set_put_byindex(e->id);
+ if (e->id != IPSET_INVALID_ID) {
+ const struct set_elem *x = list_set_elem(map, map->size - 1);
+
+ /* Last element replaced or pushed off */
+ if (x->id != IPSET_INVALID_ID)
+ ip_set_put_byindex(x->id);
+ }
if (with_timeout(map->timeout))
list_elem_tadd(map, i, id, ip_set_timeout_set(timeout));
else
diff --git a/net/netfilter/ipvs/ip_vs_pe_sip.c b/net/netfilter/ipvs/ip_vs_pe_sip.c
index 12475ef..e5920fb 100644
--- a/net/netfilter/ipvs/ip_vs_pe_sip.c
+++ b/net/netfilter/ipvs/ip_vs_pe_sip.c
@@ -37,14 +37,10 @@ static int get_callid(const char *dptr, unsigned int dataoff,
if (ret > 0)
break;
if (!ret)
- return 0;
+ return -EINVAL;
dataoff += *matchoff;
}

- /* Empty callid is useless */
- if (!*matchlen)
- return -EINVAL;
-
/* Too large is useless */
if (*matchlen > IP_VS_PEDATA_MAXLEN)
return -EINVAL;
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index 884f2b3..91527d5 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -236,7 +236,9 @@ int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
/* We only allow helper re-assignment of the same sort since
* we cannot reallocate the helper extension area.
*/
- if (help->helper != helper) {
+ struct nf_conntrack_helper *tmp = rcu_dereference(help->helper);
+
+ if (tmp && tmp->help != helper->help) {
RCU_INIT_POINTER(help->helper, NULL);
goto out;
}
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 627b0e5..a081915 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1705,6 +1705,9 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
if (nlh->nlmsg_flags & NLM_F_CREATE) {
enum ip_conntrack_events events;

+ if (!cda[CTA_TUPLE_ORIG] || !cda[CTA_TUPLE_REPLY])
+ return -EINVAL;
+
ct = ctnetlink_create_conntrack(net, zone, cda, &otuple,
&rtuple, u3);
if (IS_ERR(ct))
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index df8f4f2..b4e0d1c 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -1547,7 +1547,7 @@ static int sip_help_tcp(struct sk_buff *skb, unsigned int protoff,

msglen = origlen = end - dptr;
if (msglen > datalen)
- return NF_DROP;
+ return NF_ACCEPT;

ret = process_sip_msg(skb, ct, protoff, dataoff,
&dptr, &msglen);
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index 5f2f910..4bc2aaf 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -468,33 +468,22 @@ EXPORT_SYMBOL_GPL(nf_nat_packet);
struct nf_nat_proto_clean {
u8 l3proto;
u8 l4proto;
- bool hash;
};

-/* Clear NAT section of all conntracks, in case we're loaded again. */
-static int nf_nat_proto_clean(struct nf_conn *i, void *data)
+/* kill conntracks with affected NAT section */
+static int nf_nat_proto_remove(struct nf_conn *i, void *data)
{
const struct nf_nat_proto_clean *clean = data;
struct nf_conn_nat *nat = nfct_nat(i);

if (!nat)
return 0;
- if (!(i->status & IPS_SRC_NAT_DONE))
- return 0;
+
if ((clean->l3proto && nf_ct_l3num(i) != clean->l3proto) ||
(clean->l4proto && nf_ct_protonum(i) != clean->l4proto))
return 0;

- if (clean->hash) {
- spin_lock_bh(&nf_nat_lock);
- hlist_del_rcu(&nat->bysource);
- spin_unlock_bh(&nf_nat_lock);
- } else {
- memset(nat, 0, sizeof(*nat));
- i->status &= ~(IPS_NAT_MASK | IPS_NAT_DONE_MASK |
- IPS_SEQ_ADJUST);
- }
- return 0;
+ return i->status & IPS_NAT_MASK ? 1 : 0;
}

static void nf_nat_l4proto_clean(u8 l3proto, u8 l4proto)
@@ -506,16 +495,8 @@ static void nf_nat_l4proto_clean(u8 l3proto, u8 l4proto)
struct net *net;

rtnl_lock();
- /* Step 1 - remove from bysource hash */
- clean.hash = true;
for_each_net(net)
- nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean);
- synchronize_rcu();
-
- /* Step 2 - clean NAT section */
- clean.hash = false;
- for_each_net(net)
- nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean);
+ nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean);
rtnl_unlock();
}

@@ -527,16 +508,9 @@ static void nf_nat_l3proto_clean(u8 l3proto)
struct net *net;

rtnl_lock();
- /* Step 1 - remove from bysource hash */
- clean.hash = true;
- for_each_net(net)
- nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean);
- synchronize_rcu();

- /* Step 2 - clean NAT section */
- clean.hash = false;
for_each_net(net)
- nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean);
+ nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean);
rtnl_unlock();
}

@@ -774,7 +748,7 @@ static void __net_exit nf_nat_net_exit(struct net *net)
{
struct nf_nat_proto_clean clean = {};

- nf_ct_iterate_cleanup(net, &nf_nat_proto_clean, &clean);
+ nf_ct_iterate_cleanup(net, &nf_nat_proto_remove, &clean);
synchronize_rcu();
nf_ct_free_hashtable(net->ct.nat_bysource, net->ct.nat_htable_size);
}