Re: Linux 3.12.9

From: Greg KH
Date: Sat Jan 25 2014 - 12:51:47 EST


diff --git a/Makefile b/Makefile
index 5d0ec13bb77d..4ee77eaa7b1f 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 3
PATCHLEVEL = 12
-SUBLEVEL = 8
+SUBLEVEL = 9
EXTRAVERSION =
NAME = One Giant Leap for Frogkind

diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c
index f35906b3d8c9..41960fb0daff 100644
--- a/arch/arm/kernel/devtree.c
+++ b/arch/arm/kernel/devtree.c
@@ -171,7 +171,7 @@ void __init arm_dt_init_cpu_maps(void)

bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
- return (phys_id & MPIDR_HWID_BITMASK) == cpu_logical_map(cpu);
+ return phys_id == cpu_logical_map(cpu);
}

/**
diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c
index 8e44973b0139..2f176a495c32 100644
--- a/arch/arm/mach-highbank/highbank.c
+++ b/arch/arm/mach-highbank/highbank.c
@@ -66,6 +66,7 @@ void highbank_set_cpu_jump(int cpu, void *jump_addr)

static void highbank_l2x0_disable(void)
{
+ outer_flush_all();
/* Disable PL310 L2 Cache controller */
highbank_smc1(0x102, 0x0);
}
diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c
index 57911430324e..3f44b162fcab 100644
--- a/arch/arm/mach-omap2/omap4-common.c
+++ b/arch/arm/mach-omap2/omap4-common.c
@@ -163,6 +163,7 @@ void __iomem *omap4_get_l2cache_base(void)

static void omap4_l2x0_disable(void)
{
+ outer_flush_all();
/* Disable PL310 L2 Cache controller */
omap_smc1(0x102, 0x0);
}
diff --git a/arch/x86/kernel/cpu/perf_event_amd_ibs.c b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
index e09f0bfb7b8f..4b8e4d3cd6ea 100644
--- a/arch/x86/kernel/cpu/perf_event_amd_ibs.c
+++ b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
@@ -10,6 +10,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/ptrace.h>
+#include <linux/syscore_ops.h>

#include <asm/apic.h>

@@ -816,6 +817,18 @@ out:
return ret;
}

+static void ibs_eilvt_setup(void)
+{
+ /*
+ * Force LVT offset assignment for family 10h: The offsets are
+ * not assigned by the BIOS for this family, so the OS is
+ * responsible for doing it. If the OS assignment fails, fall
+ * back to BIOS settings and try to setup this.
+ */
+ if (boot_cpu_data.x86 == 0x10)
+ force_ibs_eilvt_setup();
+}
+
static inline int get_ibs_lvt_offset(void)
{
u64 val;
@@ -851,6 +864,36 @@ static void clear_APIC_ibs(void *dummy)
setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1);
}

+#ifdef CONFIG_PM
+
+static int perf_ibs_suspend(void)
+{
+ clear_APIC_ibs(NULL);
+ return 0;
+}
+
+static void perf_ibs_resume(void)
+{
+ ibs_eilvt_setup();
+ setup_APIC_ibs(NULL);
+}
+
+static struct syscore_ops perf_ibs_syscore_ops = {
+ .resume = perf_ibs_resume,
+ .suspend = perf_ibs_suspend,
+};
+
+static void perf_ibs_pm_init(void)
+{
+ register_syscore_ops(&perf_ibs_syscore_ops);
+}
+
+#else
+
+static inline void perf_ibs_pm_init(void) { }
+
+#endif
+
static int
perf_ibs_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
@@ -877,18 +920,12 @@ static __init int amd_ibs_init(void)
if (!caps)
return -ENODEV; /* ibs not supported by the cpu */

- /*
- * Force LVT offset assignment for family 10h: The offsets are
- * not assigned by the BIOS for this family, so the OS is
- * responsible for doing it. If the OS assignment fails, fall
- * back to BIOS settings and try to setup this.
- */
- if (boot_cpu_data.x86 == 0x10)
- force_ibs_eilvt_setup();
+ ibs_eilvt_setup();

if (!ibs_eilvt_valid())
goto out;

+ perf_ibs_pm_init();
get_online_cpus();
ibs_caps = caps;
/* make ibs_caps visible to other cpus: */
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index f0dcb0ceb6a2..15a569a47b4d 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -1085,7 +1085,7 @@ ENTRY(ftrace_caller)
pushl $0 /* Pass NULL as regs pointer */
movl 4*4(%esp), %eax
movl 0x4(%ebp), %edx
- leal function_trace_op, %ecx
+ movl function_trace_op, %ecx
subl $MCOUNT_INSN_SIZE, %eax

.globl ftrace_call
@@ -1143,7 +1143,7 @@ ENTRY(ftrace_regs_caller)
movl 12*4(%esp), %eax /* Load ip (1st parameter) */
subl $MCOUNT_INSN_SIZE, %eax /* Adjust ip */
movl 0x4(%ebp), %edx /* Load parent ip (2nd parameter) */
- leal function_trace_op, %ecx /* Save ftrace_pos in 3rd parameter */
+ movl function_trace_op, %ecx /* Save ftrace_pos in 3rd parameter */
pushl %esp /* Save pt_regs as 4th parameter */

GLOBAL(ftrace_regs_call)
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index b077f4cc225a..9ce256739175 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -88,7 +88,7 @@ END(function_hook)
MCOUNT_SAVE_FRAME \skip

/* Load the ftrace_ops into the 3rd parameter */
- leaq function_trace_op, %rdx
+ movq function_trace_op(%rip), %rdx

/* Load ip into the first parameter */
movq RIP(%rsp), %rdi
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index ab19263baf39..fb78bb9ad8f6 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -156,7 +156,6 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = {
{ "80860F14", (unsigned long)&byt_sdio_dev_desc },
{ "80860F41", (unsigned long)&byt_i2c_dev_desc },
{ "INT33B2", },
- { "INT33FC", },

{ }
};
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index a069b5e2a2d2..920cd19edc69 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -961,12 +961,18 @@ void intel_ddi_setup_hw_pll_state(struct drm_device *dev)
enum pipe pipe;
struct intel_crtc *intel_crtc;

+ dev_priv->ddi_plls.spll_refcount = 0;
+ dev_priv->ddi_plls.wrpll1_refcount = 0;
+ dev_priv->ddi_plls.wrpll2_refcount = 0;
+
for_each_pipe(pipe) {
intel_crtc =
to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

- if (!intel_crtc->active)
+ if (!intel_crtc->active) {
+ intel_crtc->ddi_pll_sel = PORT_CLK_SEL_NONE;
continue;
+ }

intel_crtc->ddi_pll_sel = intel_ddi_get_crtc_pll(dev_priv,
pipe);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index aad6f7bfc589..dd2d542e4651 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -10592,9 +10592,9 @@ void intel_modeset_gem_init(struct drm_device *dev)

intel_setup_overlay(dev);

- drm_modeset_lock_all(dev);
+ mutex_lock(&dev->mode_config.mutex);
intel_modeset_setup_hw_state(dev, false);
- drm_modeset_unlock_all(dev);
+ mutex_unlock(&dev->mode_config.mutex);
}

void intel_modeset_cleanup(struct drm_device *dev)
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 78be66176840..942509892895 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -52,7 +52,7 @@ MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");

#define BASE_SYSFS_ATTR_NO 2 /* Sysfs Base attr no for coretemp */
#define NUM_REAL_CORES 32 /* Number of Real cores per cpu */
-#define CORETEMP_NAME_LENGTH 17 /* String Length of attrs */
+#define CORETEMP_NAME_LENGTH 19 /* String Length of attrs */
#define MAX_CORE_ATTRS 4 /* Maximum no of basic attrs */
#define TOTAL_ATTRS (MAX_CORE_ATTRS + 1)
#define MAX_CORE_DATA (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index ba46d9749a0b..015bc455cf1c 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1119,6 +1119,7 @@ static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
rdev->raid_disk = -1;
clear_bit(Faulty, &rdev->flags);
clear_bit(In_sync, &rdev->flags);
+ clear_bit(Bitmap_sync, &rdev->flags);
clear_bit(WriteMostly, &rdev->flags);

if (mddev->raid_disks == 0) {
@@ -1197,6 +1198,8 @@ static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
*/
if (ev1 < mddev->bitmap->events_cleared)
return 0;
+ if (ev1 < mddev->events)
+ set_bit(Bitmap_sync, &rdev->flags);
} else {
if (ev1 < mddev->events)
/* just a hot-add of a new device, leave raid_disk at -1 */
@@ -1605,6 +1608,7 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
rdev->raid_disk = -1;
clear_bit(Faulty, &rdev->flags);
clear_bit(In_sync, &rdev->flags);
+ clear_bit(Bitmap_sync, &rdev->flags);
clear_bit(WriteMostly, &rdev->flags);

if (mddev->raid_disks == 0) {
@@ -1687,6 +1691,8 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
*/
if (ev1 < mddev->bitmap->events_cleared)
return 0;
+ if (ev1 < mddev->events)
+ set_bit(Bitmap_sync, &rdev->flags);
} else {
if (ev1 < mddev->events)
/* just a hot-add of a new device, leave raid_disk at -1 */
@@ -2830,6 +2836,7 @@ slot_store(struct md_rdev *rdev, const char *buf, size_t len)
else
rdev->saved_raid_disk = -1;
clear_bit(In_sync, &rdev->flags);
+ clear_bit(Bitmap_sync, &rdev->flags);
err = rdev->mddev->pers->
hot_add_disk(rdev->mddev, rdev);
if (err) {
@@ -5773,6 +5780,7 @@ static int add_new_disk(struct mddev * mddev, mdu_disk_info_t *info)
info->raid_disk < mddev->raid_disks) {
rdev->raid_disk = info->raid_disk;
set_bit(In_sync, &rdev->flags);
+ clear_bit(Bitmap_sync, &rdev->flags);
} else
rdev->raid_disk = -1;
} else
@@ -7731,7 +7739,8 @@ static int remove_and_add_spares(struct mddev *mddev,
if (test_bit(Faulty, &rdev->flags))
continue;
if (mddev->ro &&
- rdev->saved_raid_disk < 0)
+ ! (rdev->saved_raid_disk >= 0 &&
+ !test_bit(Bitmap_sync, &rdev->flags)))
continue;

rdev->recovery_offset = 0;
@@ -7812,9 +7821,12 @@ void md_check_recovery(struct mddev *mddev)
* As we only add devices that are already in-sync,
* we can activate the spares immediately.
*/
- clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
remove_and_add_spares(mddev, NULL);
- mddev->pers->spare_active(mddev);
+ /* There is no thread, but we need to call
+ * ->spare_active and clear saved_raid_disk
+ */
+ md_reap_sync_thread(mddev);
+ clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
goto unlock;
}

diff --git a/drivers/md/md.h b/drivers/md/md.h
index 608050c43f17..636756450a19 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -129,6 +129,9 @@ struct md_rdev {
enum flag_bits {
Faulty, /* device is known to have a fault */
In_sync, /* device is in_sync with rest of array */
+ Bitmap_sync, /* ..actually, not quite In_sync. Need a
+ * bitmap-based recovery to get fully in sync
+ */
Unmerged, /* device is being added to array and should
* be considerred for bvec_merge_fn but not
* yet for actual IO
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 73dc8a377522..308575d23550 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1319,7 +1319,7 @@ read_again:
/* Could not read all from this device, so we will
* need another r10_bio.
*/
- sectors_handled = (r10_bio->sectors + max_sectors
+ sectors_handled = (r10_bio->sector + max_sectors
- bio->bi_sector);
r10_bio->sectors = max_sectors;
spin_lock_irq(&conf->device_lock);
@@ -1327,7 +1327,7 @@ read_again:
bio->bi_phys_segments = 2;
else
bio->bi_phys_segments++;
- spin_unlock(&conf->device_lock);
+ spin_unlock_irq(&conf->device_lock);
/* Cannot call generic_make_request directly
* as that will be queued in __generic_make_request
* and subsequent mempool_alloc might block
@@ -3220,10 +3220,6 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
if (j == conf->copies) {
/* Cannot recover, so abort the recovery or
* record a bad block */
- put_buf(r10_bio);
- if (rb2)
- atomic_dec(&rb2->remaining);
- r10_bio = rb2;
if (any_working) {
/* problem is that there are bad blocks
* on other device(s)
@@ -3255,6 +3251,10 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
mirror->recovery_disabled
= mddev->recovery_disabled;
}
+ put_buf(r10_bio);
+ if (rb2)
+ atomic_dec(&rb2->remaining);
+ r10_bio = rb2;
break;
}
}
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 8a0665d04567..93174c6ab37c 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3502,7 +3502,7 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
*/
set_bit(R5_Insync, &dev->flags);

- if (rdev && test_bit(R5_WriteError, &dev->flags)) {
+ if (test_bit(R5_WriteError, &dev->flags)) {
/* This flag does not apply to '.replacement'
* only to .rdev, so make sure to check that*/
struct md_rdev *rdev2 = rcu_dereference(
@@ -3515,7 +3515,7 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
} else
clear_bit(R5_WriteError, &dev->flags);
}
- if (rdev && test_bit(R5_MadeGood, &dev->flags)) {
+ if (test_bit(R5_MadeGood, &dev->flags)) {
/* This flag does not apply to '.replacement'
* only to .rdev, so make sure to check that*/
struct md_rdev *rdev2 = rcu_dereference(
diff --git a/drivers/pinctrl/pinctrl-baytrail.c b/drivers/pinctrl/pinctrl-baytrail.c
index 114f5ef4b73a..2832576d8b12 100644
--- a/drivers/pinctrl/pinctrl-baytrail.c
+++ b/drivers/pinctrl/pinctrl-baytrail.c
@@ -512,7 +512,6 @@ static const struct dev_pm_ops byt_gpio_pm_ops = {

static const struct acpi_device_id byt_gpio_acpi_match[] = {
{ "INT33B2", 0 },
- { "INT33FC", 0 },
{ }
};
MODULE_DEVICE_TABLE(acpi, byt_gpio_acpi_match);
diff --git a/drivers/staging/comedi/drivers/addi_apci_1032.c b/drivers/staging/comedi/drivers/addi_apci_1032.c
index 34ab0679e992..b95a8b3395ae 100644
--- a/drivers/staging/comedi/drivers/addi_apci_1032.c
+++ b/drivers/staging/comedi/drivers/addi_apci_1032.c
@@ -325,8 +325,8 @@ static int apci1032_auto_attach(struct comedi_device *dev,
s = &dev->subdevices[1];
if (dev->irq) {
dev->read_subdev = s;
- s->type = COMEDI_SUBD_DI | SDF_CMD_READ;
- s->subdev_flags = SDF_READABLE;
+ s->type = COMEDI_SUBD_DI;
+ s->subdev_flags = SDF_READABLE | SDF_CMD_READ;
s->n_chan = 1;
s->maxdata = 1;
s->range_table = &range_digital;
diff --git a/drivers/staging/comedi/drivers/adl_pci9111.c b/drivers/staging/comedi/drivers/adl_pci9111.c
index 78cea193504f..c9702bfa88be 100644
--- a/drivers/staging/comedi/drivers/adl_pci9111.c
+++ b/drivers/staging/comedi/drivers/adl_pci9111.c
@@ -869,7 +869,7 @@ static int pci9111_auto_attach(struct comedi_device *dev,
pci9111_reset(dev);

if (pcidev->irq > 0) {
- ret = request_irq(dev->irq, pci9111_interrupt,
+ ret = request_irq(pcidev->irq, pci9111_interrupt,
IRQF_SHARED, dev->board_name, dev);
if (ret)
return ret;
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index aaa22867e656..1440d0b4a7bc 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -1537,6 +1537,8 @@ static int pl011_startup(struct uart_port *port)
/*
* Provoke TX FIFO interrupt into asserting.
*/
+ spin_lock_irq(&uap->port.lock);
+
cr = UART01x_CR_UARTEN | UART011_CR_TXE | UART011_CR_LBE;
writew(cr, uap->port.membase + UART011_CR);
writew(0, uap->port.membase + UART011_FBRD);
@@ -1561,6 +1563,8 @@ static int pl011_startup(struct uart_port *port)
cr |= UART01x_CR_UARTEN | UART011_CR_RXE | UART011_CR_TXE;
writew(cr, uap->port.membase + UART011_CR);

+ spin_unlock_irq(&uap->port.lock);
+
/*
* initialise the old status of the modem signals
*/
@@ -1629,11 +1633,13 @@ static void pl011_shutdown(struct uart_port *port)
* it during startup().
*/
uap->autorts = false;
+ spin_lock_irq(&uap->port.lock);
cr = readw(uap->port.membase + UART011_CR);
uap->old_cr = cr;
cr &= UART011_CR_RTS | UART011_CR_DTR;
cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
writew(cr, uap->port.membase + UART011_CR);
+ spin_unlock_irq(&uap->port.lock);

/*
* disable break condition and fifos
diff --git a/fs/dcache.c b/fs/dcache.c
index 89f96719a29b..f27c1d12a1fa 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -3064,8 +3064,13 @@ char *d_path(const struct path *path, char *buf, int buflen)
* thus don't need to be hashed. They also don't need a name until a
* user wants to identify the object in /proc/pid/fd/. The little hack
* below allows us to generate a name for these objects on demand:
+ *
+ * Some pseudo inodes are mountable. When they are mounted
+ * path->dentry == path->mnt->mnt_root. In that case don't call d_dname
+ * and instead have d_path return the mounted path.
*/
- if (path->dentry->d_op && path->dentry->d_op->d_dname)
+ if (path->dentry->d_op && path->dentry->d_op->d_dname &&
+ (!IS_ROOT(path->dentry) || path->dentry != path->mnt->mnt_root))
return path->dentry->d_op->d_dname(path->dentry, buf, buflen);

rcu_read_lock();
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 9f4935b8f208..3595180b62ac 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -510,13 +510,16 @@ writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
}
WARN_ON(inode->i_state & I_SYNC);
/*
- * Skip inode if it is clean. We don't want to mess with writeback
- * lists in this function since flusher thread may be doing for example
- * sync in parallel and if we move the inode, it could get skipped. So
- * here we make sure inode is on some writeback list and leave it there
- * unless we have completely cleaned the inode.
+ * Skip inode if it is clean and we have no outstanding writeback in
+ * WB_SYNC_ALL mode. We don't want to mess with writeback lists in this
+ * function since flusher thread may be doing for example sync in
+ * parallel and if we move the inode, it could get skipped. So here we
+ * make sure inode is on some writeback list and leave it there unless
+ * we have completely cleaned the inode.
*/
- if (!(inode->i_state & I_DIRTY))
+ if (!(inode->i_state & I_DIRTY) &&
+ (wbc->sync_mode != WB_SYNC_ALL ||
+ !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK)))
goto out;
inode->i_state |= I_SYNC;
spin_unlock(&inode->i_lock);
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 12987666e5f0..630db362a2d1 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -1610,10 +1610,22 @@ static int setattr_chown(struct inode *inode, struct iattr *attr)
if (!(attr->ia_valid & ATTR_GID) || gid_eq(ogid, ngid))
ogid = ngid = NO_GID_QUOTA_CHANGE;

- error = gfs2_quota_lock(ip, nuid, ngid);
+ error = get_write_access(inode);
if (error)
return error;

+ error = gfs2_rs_alloc(ip);
+ if (error)
+ goto out;
+
+ error = gfs2_rindex_update(sdp);
+ if (error)
+ goto out;
+
+ error = gfs2_quota_lock(ip, nuid, ngid);
+ if (error)
+ goto out;
+
if (!uid_eq(ouid, NO_UID_QUOTA_CHANGE) ||
!gid_eq(ogid, NO_GID_QUOTA_CHANGE)) {
error = gfs2_quota_check(ip, nuid, ngid);
@@ -1640,6 +1652,8 @@ out_end_trans:
gfs2_trans_end(sdp);
out_gunlock_q:
gfs2_quota_unlock(ip);
+out:
+ put_write_access(inode);
return error;
}

diff --git a/fs/namespace.c b/fs/namespace.c
index da5c49483430..84447dbcb650 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -2888,7 +2888,7 @@ bool fs_fully_visible(struct file_system_type *type)
struct inode *inode = child->mnt_mountpoint->d_inode;
if (!S_ISDIR(inode->i_mode))
goto next;
- if (inode->i_nlink != 2)
+ if (inode->i_nlink > 2)
goto next;
}
visible = true;
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 9f6b486b6c01..a1a191634abc 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -1440,17 +1440,19 @@ static int nilfs_segctor_collect(struct nilfs_sc_info *sci,

nilfs_clear_logs(&sci->sc_segbufs);

- err = nilfs_segctor_extend_segments(sci, nilfs, nadd);
- if (unlikely(err))
- return err;
-
if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
err = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
sci->sc_freesegs,
sci->sc_nfreesegs,
NULL);
WARN_ON(err); /* do not happen */
+ sci->sc_stage.flags &= ~NILFS_CF_SUFREED;
}
+
+ err = nilfs_segctor_extend_segments(sci, nilfs, nadd);
+ if (unlikely(err))
+ return err;
+
nadd = min_t(int, nadd << 1, SC_MAX_SEGDELTA);
sci->sc_stage = prev_stage;
}
diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
index fe68a5a98583..7032518f8542 100644
--- a/include/linux/crash_dump.h
+++ b/include/linux/crash_dump.h
@@ -6,6 +6,8 @@
#include <linux/proc_fs.h>
#include <linux/elf.h>

+#include <asm/pgtable.h> /* for pgprot_t */
+
#define ELFCORE_ADDR_MAX (-1ULL)
#define ELFCORE_ADDR_ERR (-2ULL)

diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 2ab11dc38077..5677fb58e688 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -447,7 +447,7 @@ static inline void i2c_set_adapdata(struct i2c_adapter *dev, void *data)
static inline struct i2c_adapter *
i2c_parent_is_i2c_adapter(const struct i2c_adapter *adapter)
{
-#if IS_ENABLED(I2C_MUX)
+#if IS_ENABLED(CONFIG_I2C_MUX)
struct device *parent = adapter->dev.parent;

if (parent != NULL && parent->type == &i2c_adapter_type)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 8b6e55ee8855..fed08c0c543b 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -762,11 +762,14 @@ static __always_inline void *lowmem_page_address(const struct page *page)
#endif

#if defined(WANT_PAGE_VIRTUAL)
-#define page_address(page) ((page)->virtual)
-#define set_page_address(page, address) \
- do { \
- (page)->virtual = (address); \
- } while(0)
+static inline void *page_address(const struct page *page)
+{
+ return page->virtual;
+}
+static inline void set_page_address(struct page *page, void *address)
+{
+ page->virtual = address;
+}
#define page_address_init() do { } while(0)
#endif

diff --git a/kernel/fork.c b/kernel/fork.c
index 690cfacaed71..458953ca4d50 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1175,7 +1175,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
* do not allow it to share a thread group or signal handlers or
* parent with the forking task.
*/
- if (clone_flags & (CLONE_SIGHAND | CLONE_PARENT)) {
+ if (clone_flags & CLONE_SIGHAND) {
if ((clone_flags & (CLONE_NEWUSER | CLONE_NEWPID)) ||
(task_active_pid_ns(current) !=
current->nsproxy->pid_ns_for_children))
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 47962456ed87..292a266e0d42 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1154,7 +1154,7 @@ alloc:
new_page = NULL;

if (unlikely(!new_page)) {
- if (is_huge_zero_pmd(orig_pmd)) {
+ if (!page) {
ret = do_huge_pmd_wp_zero_page_fallback(mm, vma,
address, pmd, orig_pmd, haddr);
} else {
@@ -1181,7 +1181,7 @@ alloc:

count_vm_event(THP_FAULT_ALLOC);

- if (is_huge_zero_pmd(orig_pmd))
+ if (!page)
clear_huge_page(new_page, haddr, HPAGE_PMD_NR);
else
copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR);
@@ -1207,7 +1207,7 @@ alloc:
page_add_new_anon_rmap(new_page, vma, haddr);
set_pmd_at(mm, haddr, pmd, entry);
update_mmu_cache_pmd(vma, address, pmd);
- if (is_huge_zero_pmd(orig_pmd)) {
+ if (!page) {
add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
put_huge_zero_page();
} else {
diff --git a/mm/util.c b/mm/util.c
index eaf63fc2c92f..96da2d7c076c 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -387,7 +387,10 @@ struct address_space *page_mapping(struct page *page)
{
struct address_space *mapping = page->mapping;

- VM_BUG_ON(PageSlab(page));
+ /* This happens if someone calls flush_dcache_page on slab page */
+ if (unlikely(PageSlab(page)))
+ return NULL;
+
if (unlikely(PageSwapCache(page))) {
swp_entry_t entry;

diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 392a0445265c..25d5ebaf25f9 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -220,6 +220,14 @@ static int inode_alloc_security(struct inode *inode)
return 0;
}

+static void inode_free_rcu(struct rcu_head *head)
+{
+ struct inode_security_struct *isec;
+
+ isec = container_of(head, struct inode_security_struct, rcu);
+ kmem_cache_free(sel_inode_cache, isec);
+}
+
static void inode_free_security(struct inode *inode)
{
struct inode_security_struct *isec = inode->i_security;
@@ -230,8 +238,16 @@ static void inode_free_security(struct inode *inode)
list_del_init(&isec->list);
spin_unlock(&sbsec->isec_lock);

- inode->i_security = NULL;
- kmem_cache_free(sel_inode_cache, isec);
+ /*
+ * The inode may still be referenced in a path walk and
+ * a call to selinux_inode_permission() can be made
+ * after inode_free_security() is called. Ideally, the VFS
+ * wouldn't do this, but fixing that is a much harder
+ * job. For now, simply free the i_security via RCU, and
+ * leave the current inode->i_security pointer intact.
+ * The inode will be freed after the RCU grace period too.
+ */
+ call_rcu(&isec->rcu, inode_free_rcu);
}

static int file_alloc_security(struct file *file)
diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h
index aa47bcabb5f6..6fd9dd256a62 100644
--- a/security/selinux/include/objsec.h
+++ b/security/selinux/include/objsec.h
@@ -38,7 +38,10 @@ struct task_security_struct {

struct inode_security_struct {
struct inode *inode; /* back pointer to inode object */
- struct list_head list; /* list of inode_security_struct */
+ union {
+ struct list_head list; /* list of inode_security_struct */
+ struct rcu_head rcu; /* for freeing the inode_security_struct */
+ };
u32 task_sid; /* SID of creating task */
u32 sid; /* SID of this object */
u16 sclass; /* security class of this object */
--