Re: Linux 4.13.2

From: Greg KH
Date: Thu Sep 14 2017 - 12:05:27 EST


diff --git a/Documentation/driver-api/firmware/request_firmware.rst b/Documentation/driver-api/firmware/request_firmware.rst
index 1c2c4967cd43..cc0aea880824 100644
--- a/Documentation/driver-api/firmware/request_firmware.rst
+++ b/Documentation/driver-api/firmware/request_firmware.rst
@@ -44,17 +44,6 @@ request_firmware_nowait
.. kernel-doc:: drivers/base/firmware_class.c
:functions: request_firmware_nowait

-Considerations for suspend and resume
-=====================================
-
-During suspend and resume only the built-in firmware and the firmware cache
-elements of the firmware API can be used. This is managed by fw_pm_notify().
-
-fw_pm_notify
-------------
-.. kernel-doc:: drivers/base/firmware_class.c
- :functions: fw_pm_notify
-
request firmware API expected driver use
========================================

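For reference, the synchronous path this documentation section covers is normally used as in the rough sketch below; the firmware name and the pdev device pointer are placeholders, not taken from this patch:

	const struct firmware *fw;
	int ret;

	ret = request_firmware(&fw, "vendor/device.bin", &pdev->dev);
	if (ret)
		return ret;	/* firmware lookup failed */

	/* ... copy fw->data (fw->size bytes) into the device ... */

	release_firmware(fw);
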
diff --git a/Makefile b/Makefile
index 41a976854cad..8aad6bc50d52 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 13
-SUBLEVEL = 1
+SUBLEVEL = 2
EXTRAVERSION =
NAME = Fearless Coyote

diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index ff8b0aa2dfde..42f585379e19 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -315,8 +315,11 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
* signal first. We do not need to release the mmap_sem because
* it would already be released in __lock_page_or_retry in
* mm/filemap.c. */
- if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+ if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
+ if (!user_mode(regs))
+ goto no_context;
return 0;
+ }

/*
* Major/minor page fault accounting is only done on the
diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
index 51763d674050..a92ac63addf0 100644
--- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
@@ -323,6 +323,7 @@
interrupt-controller;
reg = <0x1d00000 0x10000>, /* GICD */
<0x1d40000 0x40000>; /* GICR */
+ interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
};
};

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 9b1dd114956a..56e68dfac974 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -4839,7 +4839,8 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
* Note: AMD only (since it supports the PFERR_GUEST_PAGE_MASK used
* in PFERR_NEXT_GUEST_PAGE)
*/
- if (error_code == PFERR_NESTED_GUEST_PAGE) {
+ if (vcpu->arch.mmu.direct_map &&
+ error_code == PFERR_NESTED_GUEST_PAGE) {
kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(cr2));
return 1;
}
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index bfbe1e154128..19b63d20f5d3 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -256,38 +256,6 @@ static int fw_cache_piggyback_on_request(const char *name);
* guarding for corner cases a global lock should be OK */
static DEFINE_MUTEX(fw_lock);

-static bool __enable_firmware = false;
-
-static void enable_firmware(void)
-{
- mutex_lock(&fw_lock);
- __enable_firmware = true;
- mutex_unlock(&fw_lock);
-}
-
-static void disable_firmware(void)
-{
- mutex_lock(&fw_lock);
- __enable_firmware = false;
- mutex_unlock(&fw_lock);
-}
-
-/*
- * When disabled only the built-in firmware and the firmware cache will be
- * used to look for firmware.
- */
-static bool firmware_enabled(void)
-{
- bool enabled = false;
-
- mutex_lock(&fw_lock);
- if (__enable_firmware)
- enabled = true;
- mutex_unlock(&fw_lock);
-
- return enabled;
-}
-
static struct firmware_cache fw_cache;

static struct firmware_buf *__allocate_fw_buf(const char *fw_name,
@@ -1239,12 +1207,6 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
if (ret <= 0) /* error or already assigned */
goto out;

- if (!firmware_enabled()) {
- WARN(1, "firmware request while host is not available\n");
- ret = -EHOSTDOWN;
- goto out;
- }
-
ret = fw_get_filesystem_firmware(device, fw->priv);
if (ret) {
if (!(opt_flags & FW_OPT_NO_WARN))
@@ -1755,62 +1717,6 @@ static void device_uncache_fw_images_delay(unsigned long delay)
msecs_to_jiffies(delay));
}

-/**
- * fw_pm_notify - notifier for suspend/resume
- * @notify_block: unused
- * @mode: mode we are switching to
- * @unused: unused
- *
- * Used to modify the firmware_class state as we move in between states.
- * The firmware_class implements a firmware cache to enable device driver
- * to fetch firmware upon resume before the root filesystem is ready. We
- * disable API calls which do not use the built-in firmware or the firmware
- * cache when we know these calls will not work.
- *
- * The inner logic behind all this is a bit complex so it is worth summarizing
- * the kernel's own suspend/resume process with context and focus on how this
- * can impact the firmware API.
- *
- * First a review on how we go to suspend::
- *
- * pm_suspend() --> enter_state() -->
- * sys_sync()
- * suspend_prepare() -->
- * __pm_notifier_call_chain(PM_SUSPEND_PREPARE, ...);
- * suspend_freeze_processes() -->
- * freeze_processes() -->
- * __usermodehelper_set_disable_depth(UMH_DISABLED);
- * freeze all tasks ...
- * freeze_kernel_threads()
- * suspend_devices_and_enter() -->
- * dpm_suspend_start() -->
- * dpm_prepare()
- * dpm_suspend()
- * suspend_enter() -->
- * platform_suspend_prepare()
- * dpm_suspend_late()
- * freeze_enter()
- * syscore_suspend()
- *
- * When we resume we bail out of a loop from suspend_devices_and_enter() and
- * unwind back out to the caller enter_state() where we were before as follows::
- *
- * enter_state() -->
- * suspend_devices_and_enter() --> (bail from loop)
- * dpm_resume_end() -->
- * dpm_resume()
- * dpm_complete()
- * suspend_finish() -->
- * suspend_thaw_processes() -->
- * thaw_processes() -->
- * __usermodehelper_set_disable_depth(UMH_FREEZING);
- * thaw_workqueues();
- * thaw all processes ...
- * usermodehelper_enable();
- * pm_notifier_call_chain(PM_POST_SUSPEND);
- *
- * fw_pm_notify() works through pm_notifier_call_chain().
- */
static int fw_pm_notify(struct notifier_block *notify_block,
unsigned long mode, void *unused)
{
@@ -1824,7 +1730,6 @@ static int fw_pm_notify(struct notifier_block *notify_block,
*/
kill_pending_fw_fallback_reqs(true);
device_cache_fw_images();
- disable_firmware();
break;

case PM_POST_SUSPEND:
@@ -1837,7 +1742,6 @@ static int fw_pm_notify(struct notifier_block *notify_block,
mutex_lock(&fw_lock);
fw_cache.state = FW_LOADER_NO_CACHE;
mutex_unlock(&fw_lock);
- enable_firmware();

device_uncache_fw_images_delay(10 * MSEC_PER_SEC);
break;
@@ -1886,7 +1790,6 @@ static void __init fw_cache_init(void)
static int fw_shutdown_notify(struct notifier_block *unused1,
unsigned long unused2, void *unused3)
{
- disable_firmware();
/*
* Kill all pending fallback requests to avoid both stalling shutdown,
* and avoid a deadlock with the usermode_lock.
@@ -1902,7 +1805,6 @@ static struct notifier_block fw_shutdown_nb = {

static int __init firmware_class_init(void)
{
- enable_firmware();
fw_cache_init();
register_reboot_notifier(&fw_shutdown_nb);
#ifdef CONFIG_FW_LOADER_USER_HELPER
@@ -1914,7 +1816,6 @@ static int __init firmware_class_init(void)

static void __exit firmware_class_exit(void)
{
- disable_firmware();
#ifdef CONFIG_PM_SLEEP
unregister_syscore_ops(&fw_syscore_ops);
unregister_pm_notifier(&fw_cache.pm_notify);
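Drivers that cannot afford to block in probe or resume usually take the asynchronous request_firmware_nowait() path instead; a minimal sketch, where my_fw_cont(), struct my_device, my_dev and the firmware name are illustrative placeholders rather than anything from this patch:

	static void my_fw_cont(const struct firmware *fw, void *context)
	{
		struct my_device *my_dev = context;	/* driver state to program */

		if (!fw)
			return;		/* load failed or was aborted */
		/* ... program fw->data / fw->size into the hardware ... */
		release_firmware(fw);
	}

	/* in probe: returns immediately, my_fw_cont() runs later */
	ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
				      "vendor/device.bin", &pdev->dev,
				      GFP_KERNEL, my_dev, my_fw_cont);
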
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index a764d5ca7536..5bedf7bc3d88 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -876,6 +876,8 @@ static void mxc_do_addr_cycle(struct mtd_info *mtd, int column, int page_addr)
}
}

+#define MXC_V1_ECCBYTES 5
+
static int mxc_v1_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
@@ -885,7 +887,7 @@ static int mxc_v1_ooblayout_ecc(struct mtd_info *mtd, int section,
return -ERANGE;

oobregion->offset = (section * 16) + 6;
- oobregion->length = nand_chip->ecc.bytes;
+ oobregion->length = MXC_V1_ECCBYTES;

return 0;
}
@@ -907,8 +909,7 @@ static int mxc_v1_ooblayout_free(struct mtd_info *mtd, int section,
oobregion->length = 4;
}
} else {
- oobregion->offset = ((section - 1) * 16) +
- nand_chip->ecc.bytes + 6;
+ oobregion->offset = ((section - 1) * 16) + MXC_V1_ECCBYTES + 6;
if (section < nand_chip->ecc.steps)
oobregion->length = (section * 16) + 6 -
oobregion->offset;
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index c6c18b82f8f4..c05cf874cbb8 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -3993,10 +3993,13 @@ static void nand_manufacturer_detect(struct nand_chip *chip)
* nand_decode_ext_id() otherwise.
*/
if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
- chip->manufacturer.desc->ops->detect)
+ chip->manufacturer.desc->ops->detect) {
+ /* The 3rd id byte holds MLC / multichip data */
+ chip->bits_per_cell = nand_get_bits_per_cell(chip->id.data[2]);
chip->manufacturer.desc->ops->detect(chip);
- else
+ } else {
nand_decode_ext_id(chip);
+ }
}

/*
diff --git a/drivers/mtd/nand/nand_hynix.c b/drivers/mtd/nand/nand_hynix.c
index b12dc7325378..bd9a6e343848 100644
--- a/drivers/mtd/nand/nand_hynix.c
+++ b/drivers/mtd/nand/nand_hynix.c
@@ -477,7 +477,7 @@ static void hynix_nand_extract_ecc_requirements(struct nand_chip *chip,
* The ECC requirements field meaning depends on the
* NAND technology.
*/
- u8 nand_tech = chip->id.data[5] & 0x3;
+ u8 nand_tech = chip->id.data[5] & 0x7;

if (nand_tech < 3) {
/* > 26nm, reference: H27UBG8T2A datasheet */
@@ -533,7 +533,7 @@ static void hynix_nand_extract_scrambling_requirements(struct nand_chip *chip,
if (nand_tech > 0)
chip->options |= NAND_NEED_SCRAMBLING;
} else {
- nand_tech = chip->id.data[5] & 0x3;
+ nand_tech = chip->id.data[5] & 0x7;

/* < 32nm */
if (nand_tech > 2)
diff --git a/drivers/mtd/nand/qcom_nandc.c b/drivers/mtd/nand/qcom_nandc.c
index 88af7145a51a..8928500b5bde 100644
--- a/drivers/mtd/nand/qcom_nandc.c
+++ b/drivers/mtd/nand/qcom_nandc.c
@@ -109,7 +109,11 @@
#define READ_ADDR 0

/* NAND_DEV_CMD_VLD bits */
-#define READ_START_VLD 0
+#define READ_START_VLD BIT(0)
+#define READ_STOP_VLD BIT(1)
+#define WRITE_START_VLD BIT(2)
+#define ERASE_START_VLD BIT(3)
+#define SEQ_READ_START_VLD BIT(4)

/* NAND_EBI2_ECC_BUF_CFG bits */
#define NUM_STEPS 0
@@ -148,6 +152,10 @@
#define FETCH_ID 0xb
#define RESET_DEVICE 0xd

+/* Default Value for NAND_DEV_CMD_VLD */
+#define NAND_DEV_CMD_VLD_VAL (READ_START_VLD | WRITE_START_VLD | \
+ ERASE_START_VLD | SEQ_READ_START_VLD)
+
/*
* the NAND controller performs reads/writes with ECC in 516 byte chunks.
* the driver calls the chunks 'step' or 'codeword' interchangeably
@@ -672,8 +680,7 @@ static int nandc_param(struct qcom_nand_host *host)

/* configure CMD1 and VLD for ONFI param probing */
nandc_set_reg(nandc, NAND_DEV_CMD_VLD,
- (nandc->vld & ~(1 << READ_START_VLD))
- | 0 << READ_START_VLD);
+ (nandc->vld & ~READ_START_VLD));
nandc_set_reg(nandc, NAND_DEV_CMD1,
(nandc->cmd1 & ~(0xFF << READ_ADDR))
| NAND_CMD_PARAM << READ_ADDR);
@@ -1893,7 +1900,7 @@ static int qcom_nand_host_setup(struct qcom_nand_host *host)
| wide_bus << WIDE_FLASH
| 1 << DEV0_CFG1_ECC_DISABLE;

- host->ecc_bch_cfg = host->bch_enabled << ECC_CFG_ECC_DISABLE
+ host->ecc_bch_cfg = !host->bch_enabled << ECC_CFG_ECC_DISABLE
| 0 << ECC_SW_RESET
| host->cw_data << ECC_NUM_DATA_BYTES
| 1 << ECC_FORCE_CLK_OPEN
@@ -1972,13 +1979,14 @@ static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
{
/* kill onenand */
nandc_write(nandc, SFLASHC_BURST_CFG, 0);
+ nandc_write(nandc, NAND_DEV_CMD_VLD, NAND_DEV_CMD_VLD_VAL);

/* enable ADM DMA */
nandc_write(nandc, NAND_FLASH_CHIP_SELECT, DM_EN);

/* save the original values of these registers */
nandc->cmd1 = nandc_read(nandc, NAND_DEV_CMD1);
- nandc->vld = nandc_read(nandc, NAND_DEV_CMD_VLD);
+ nandc->vld = NAND_DEV_CMD_VLD_VAL;

return 0;
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
index f1b60740e020..53ae30259989 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
@@ -159,7 +159,8 @@ void brcmf_feat_attach(struct brcmf_pub *drvr)

brcmf_feat_firmware_capabilities(ifp);
memset(&gscan_cfg, 0, sizeof(gscan_cfg));
- if (drvr->bus_if->chip != BRCM_CC_43430_CHIP_ID)
+ if (drvr->bus_if->chip != BRCM_CC_43430_CHIP_ID &&
+ drvr->bus_if->chip != BRCM_CC_4345_CHIP_ID)
brcmf_feat_iovar_data_set(ifp, BRCMF_FEAT_GSCAN,
"pfn_gscan_cfg",
&gscan_cfg, sizeof(gscan_cfg));
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
index 0b75def39c6c..d2c289446c00 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
@@ -3702,7 +3702,10 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
if (rt2x00_rt(rt2x00dev, RT3572))
rt2800_rfcsr_write(rt2x00dev, 8, 0);

- tx_pin = rt2800_register_read(rt2x00dev, TX_PIN_CFG);
+ if (rt2x00_rt(rt2x00dev, RT6352))
+ tx_pin = rt2800_register_read(rt2x00dev, TX_PIN_CFG);
+ else
+ tx_pin = 0;

switch (rt2x00dev->default_ant.tx_chain_num) {
case 3:
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
index 31965f0ef69d..e8f07573aed9 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
@@ -1183,7 +1183,10 @@ static void btc8723b2ant_set_ant_path(struct btc_coexist *btcoexist,
}

/* fixed internal switch S1->WiFi, S0->BT */
- btcoexist->btc_write_4byte(btcoexist, 0x948, 0x0);
+ if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT)
+ btcoexist->btc_write_2byte(btcoexist, 0x948, 0x0);
+ else
+ btcoexist->btc_write_2byte(btcoexist, 0x948, 0x280);

switch (antpos_type) {
case BTC_ANT_WIFI_AT_MAIN:
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
index e6024b013ca5..00eea3440290 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
@@ -173,6 +173,16 @@ static u8 halbtc_get_wifi_central_chnl(struct btc_coexist *btcoexist)

u8 rtl_get_hwpg_single_ant_path(struct rtl_priv *rtlpriv)
{
+ struct rtl_mod_params *mod_params = rtlpriv->cfg->mod_params;
+
+ /* override ant_num / ant_path */
+ if (mod_params->ant_sel) {
+ rtlpriv->btcoexist.btc_info.ant_num =
+ (mod_params->ant_sel == 1 ? ANT_X2 : ANT_X1);
+
+ rtlpriv->btcoexist.btc_info.single_ant_path =
+ (mod_params->ant_sel == 1 ? 0 : 1);
+ }
return rtlpriv->btcoexist.btc_info.single_ant_path;
}

@@ -183,6 +193,7 @@ u8 rtl_get_hwpg_bt_type(struct rtl_priv *rtlpriv)

u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv)
{
+ struct rtl_mod_params *mod_params = rtlpriv->cfg->mod_params;
u8 num;

if (rtlpriv->btcoexist.btc_info.ant_num == ANT_X2)
@@ -190,6 +201,10 @@ u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv)
else
num = 1;

+ /* override ant_num / ant_path */
+ if (mod_params->ant_sel)
+ num = (mod_params->ant_sel == 1 ? ANT_X2 : ANT_X1) + 1;
+
return num;
}

@@ -861,7 +876,7 @@ bool exhalbtc_bind_bt_coex_withadapter(void *adapter)
{
struct btc_coexist *btcoexist = &gl_bt_coexist;
struct rtl_priv *rtlpriv = adapter;
- u8 ant_num = 2, chip_type, single_ant_path = 0;
+ u8 ant_num = 2, chip_type;

if (btcoexist->binded)
return false;
@@ -896,12 +911,6 @@ bool exhalbtc_bind_bt_coex_withadapter(void *adapter)
ant_num = rtl_get_hwpg_ant_num(rtlpriv);
exhalbtc_set_ant_num(rtlpriv, BT_COEX_ANT_TYPE_PG, ant_num);

- /* set default antenna position to main port */
- btcoexist->board_info.btdm_ant_pos = BTC_ANTENNA_AT_MAIN_PORT;
-
- single_ant_path = rtl_get_hwpg_single_ant_path(rtlpriv);
- exhalbtc_set_single_ant_path(single_ant_path);
-
if (rtl_get_hwpg_package_type(rtlpriv) == 0)
btcoexist->board_info.tfbga_package = false;
else if (rtl_get_hwpg_package_type(rtlpriv) == 1)
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 5f5cd306f76d..ffa7191ddfa5 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -75,7 +75,7 @@ static struct nvmf_host *nvmf_host_default(void)

kref_init(&host->ref);
snprintf(host->nqn, NVMF_NQN_SIZE,
- "nqn.2014-08.org.nvmexpress:NVMf:uuid:%pUb", &host->id);
+ "nqn.2014-08.org.nvmexpress:uuid:%pUb", &host->id);

mutex_lock(&nvmf_hosts_mutex);
list_add_tail(&host->list, &nvmf_hosts);
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 12540b6104b5..1618dac7bf74 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -1814,6 +1814,8 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
goto restore;
}

+ btrfs_qgroup_rescan_resume(fs_info);
+
if (!fs_info->uuid_root) {
btrfs_info(fs_info, "creating UUID tree");
ret = btrfs_create_uuid_tree(fs_info);
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index af330c31f627..a85d1cf9b4a8 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -631,11 +631,11 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from)
if (result <= 0)
goto out;

- result = generic_write_sync(iocb, result);
- if (result < 0)
- goto out;
written = result;
iocb->ki_pos += written;
+ result = generic_write_sync(iocb, written);
+ if (result < 0)
+ goto out;

/* Return error values */
if (nfs_need_check_write(file, inode)) {
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index dc456416d2be..68cc22083639 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -251,7 +251,6 @@ int nfs_iocounter_wait(struct nfs_lock_context *l_ctx);
extern const struct nfs_pageio_ops nfs_pgio_rw_ops;
struct nfs_pgio_header *nfs_pgio_header_alloc(const struct nfs_rw_ops *);
void nfs_pgio_header_free(struct nfs_pgio_header *);
-void nfs_pgio_data_destroy(struct nfs_pgio_header *);
int nfs_generic_pgio(struct nfs_pageio_descriptor *, struct nfs_pgio_header *);
int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_header *hdr,
struct rpc_cred *cred, const struct nfs_rpc_ops *rpc_ops,
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index de9066a92c0d..d291e6e72573 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -530,16 +530,6 @@ struct nfs_pgio_header *nfs_pgio_header_alloc(const struct nfs_rw_ops *ops)
}
EXPORT_SYMBOL_GPL(nfs_pgio_header_alloc);

-/*
- * nfs_pgio_header_free - Free a read or write header
- * @hdr: The header to free
- */
-void nfs_pgio_header_free(struct nfs_pgio_header *hdr)
-{
- hdr->rw_ops->rw_free_header(hdr);
-}
-EXPORT_SYMBOL_GPL(nfs_pgio_header_free);
-
/**
* nfs_pgio_data_destroy - make @hdr suitable for reuse
*
@@ -548,14 +538,24 @@ EXPORT_SYMBOL_GPL(nfs_pgio_header_free);
*
* @hdr: A header that has had nfs_generic_pgio called
*/
-void nfs_pgio_data_destroy(struct nfs_pgio_header *hdr)
+static void nfs_pgio_data_destroy(struct nfs_pgio_header *hdr)
{
if (hdr->args.context)
put_nfs_open_context(hdr->args.context);
if (hdr->page_array.pagevec != hdr->page_array.page_array)
kfree(hdr->page_array.pagevec);
}
-EXPORT_SYMBOL_GPL(nfs_pgio_data_destroy);
+
+/*
+ * nfs_pgio_header_free - Free a read or write header
+ * @hdr: The header to free
+ */
+void nfs_pgio_header_free(struct nfs_pgio_header *hdr)
+{
+ nfs_pgio_data_destroy(hdr);
+ hdr->rw_ops->rw_free_header(hdr);
+}
+EXPORT_SYMBOL_GPL(nfs_pgio_header_free);

/**
* nfs_pgio_rpcsetup - Set up arguments for a pageio call
@@ -669,7 +669,6 @@ EXPORT_SYMBOL_GPL(nfs_initiate_pgio);
static void nfs_pgio_error(struct nfs_pgio_header *hdr)
{
set_bit(NFS_IOHDR_REDO, &hdr->flags);
- nfs_pgio_data_destroy(hdr);
hdr->completion_ops->completion(hdr);
}

@@ -680,7 +679,6 @@ static void nfs_pgio_error(struct nfs_pgio_header *hdr)
static void nfs_pgio_release(void *calldata)
{
struct nfs_pgio_header *hdr = calldata;
- nfs_pgio_data_destroy(hdr);
hdr->completion_ops->completion(hdr);
}

@@ -714,9 +712,6 @@ void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
int io_flags,
gfp_t gfp_flags)
{
- struct nfs_pgio_mirror *new;
- int i;
-
desc->pg_moreio = 0;
desc->pg_inode = inode;
desc->pg_ops = pg_ops;
@@ -732,21 +727,9 @@ void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
desc->pg_mirror_count = 1;
desc->pg_mirror_idx = 0;

- if (pg_ops->pg_get_mirror_count) {
- /* until we have a request, we don't have an lseg and no
- * idea how many mirrors there will be */
- new = kcalloc(NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX,
- sizeof(struct nfs_pgio_mirror), gfp_flags);
- desc->pg_mirrors_dynamic = new;
- desc->pg_mirrors = new;
-
- for (i = 0; i < NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX; i++)
- nfs_pageio_mirror_init(&desc->pg_mirrors[i], bsize);
- } else {
- desc->pg_mirrors_dynamic = NULL;
- desc->pg_mirrors = desc->pg_mirrors_static;
- nfs_pageio_mirror_init(&desc->pg_mirrors[0], bsize);
- }
+ desc->pg_mirrors_dynamic = NULL;
+ desc->pg_mirrors = desc->pg_mirrors_static;
+ nfs_pageio_mirror_init(&desc->pg_mirrors[0], bsize);
}
EXPORT_SYMBOL_GPL(nfs_pageio_init);

@@ -865,32 +848,52 @@ static int nfs_generic_pg_pgios(struct nfs_pageio_descriptor *desc)
return ret;
}

+static struct nfs_pgio_mirror *
+nfs_pageio_alloc_mirrors(struct nfs_pageio_descriptor *desc,
+ unsigned int mirror_count)
+{
+ struct nfs_pgio_mirror *ret;
+ unsigned int i;
+
+ kfree(desc->pg_mirrors_dynamic);
+ desc->pg_mirrors_dynamic = NULL;
+ if (mirror_count == 1)
+ return desc->pg_mirrors_static;
+ ret = kmalloc_array(mirror_count, sizeof(*ret), GFP_NOFS);
+ if (ret != NULL) {
+ for (i = 0; i < mirror_count; i++)
+ nfs_pageio_mirror_init(&ret[i], desc->pg_bsize);
+ desc->pg_mirrors_dynamic = ret;
+ }
+ return ret;
+}
+
/*
* nfs_pageio_setup_mirroring - determine if mirroring is to be used
* by calling the pg_get_mirror_count op
*/
-static int nfs_pageio_setup_mirroring(struct nfs_pageio_descriptor *pgio,
+static void nfs_pageio_setup_mirroring(struct nfs_pageio_descriptor *pgio,
struct nfs_page *req)
{
- int mirror_count = 1;
+ unsigned int mirror_count = 1;

- if (!pgio->pg_ops->pg_get_mirror_count)
- return 0;
-
- mirror_count = pgio->pg_ops->pg_get_mirror_count(pgio, req);
-
- if (pgio->pg_error < 0)
- return pgio->pg_error;
-
- if (!mirror_count || mirror_count > NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX)
- return -EINVAL;
+ if (pgio->pg_ops->pg_get_mirror_count)
+ mirror_count = pgio->pg_ops->pg_get_mirror_count(pgio, req);
+ if (mirror_count == pgio->pg_mirror_count || pgio->pg_error < 0)
+ return;

- if (WARN_ON_ONCE(!pgio->pg_mirrors_dynamic))
- return -EINVAL;
+ if (!mirror_count || mirror_count > NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX) {
+ pgio->pg_error = -EINVAL;
+ return;
+ }

+ pgio->pg_mirrors = nfs_pageio_alloc_mirrors(pgio, mirror_count);
+ if (pgio->pg_mirrors == NULL) {
+ pgio->pg_error = -ENOMEM;
+ pgio->pg_mirrors = pgio->pg_mirrors_static;
+ mirror_count = 1;
+ }
pgio->pg_mirror_count = mirror_count;
-
- return 0;
}

/*
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index c383d0913b54..64bb20130edf 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -2274,7 +2274,6 @@ pnfs_write_through_mds(struct nfs_pageio_descriptor *desc,
nfs_pageio_reset_write_mds(desc);
mirror->pg_recoalesce = 1;
}
- nfs_pgio_data_destroy(hdr);
hdr->release(hdr);
}

@@ -2398,7 +2397,6 @@ pnfs_read_through_mds(struct nfs_pageio_descriptor *desc,
nfs_pageio_reset_read_mds(desc);
mirror->pg_recoalesce = 1;
}
- nfs_pgio_data_destroy(hdr);
hdr->release(hdr);
}

diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
index 9301c5a6060b..dcd1292664b3 100644
--- a/fs/xfs/xfs_linux.h
+++ b/fs/xfs/xfs_linux.h
@@ -270,7 +270,14 @@ static inline uint64_t howmany_64(uint64_t x, uint32_t y)
#endif /* DEBUG */

#ifdef CONFIG_XFS_RT
-#define XFS_IS_REALTIME_INODE(ip) ((ip)->i_d.di_flags & XFS_DIFLAG_REALTIME)
+
+/*
+ * make sure we ignore the inode flag if the filesystem doesn't have a
+ * configured realtime device.
+ */
+#define XFS_IS_REALTIME_INODE(ip) \
+ (((ip)->i_d.di_flags & XFS_DIFLAG_REALTIME) && \
+ (ip)->i_mount->m_rtdev_targp)
#else
#define XFS_IS_REALTIME_INODE(ip) (0)
#endif
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 898e87998417..79a804f1aab9 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -463,7 +463,7 @@ radix_tree_node_free(struct radix_tree_node *node)
* To make use of this facility, the radix tree must be initialised without
* __GFP_DIRECT_RECLAIM being passed to INIT_RADIX_TREE().
*/
-static int __radix_tree_preload(gfp_t gfp_mask, unsigned nr)
+static __must_check int __radix_tree_preload(gfp_t gfp_mask, unsigned nr)
{
struct radix_tree_preload *rtp;
struct radix_tree_node *node;
@@ -2103,7 +2103,8 @@ EXPORT_SYMBOL(radix_tree_tagged);
*/
void idr_preload(gfp_t gfp_mask)
{
- __radix_tree_preload(gfp_mask, IDR_PRELOAD_SIZE);
+ if (__radix_tree_preload(gfp_mask, IDR_PRELOAD_SIZE))
+ preempt_disable();
}
EXPORT_SYMBOL(idr_preload);

@@ -2117,13 +2118,13 @@ EXPORT_SYMBOL(idr_preload);
*/
int ida_pre_get(struct ida *ida, gfp_t gfp)
{
- __radix_tree_preload(gfp, IDA_PRELOAD_SIZE);
/*
* The IDA API has no preload_end() equivalent. Instead,
* ida_get_new() can return -EAGAIN, prompting the caller
* to return to the ida_pre_get() step.
*/
- preempt_enable();
+ if (!__radix_tree_preload(gfp, IDA_PRELOAD_SIZE))
+ preempt_enable();

if (!this_cpu_read(ida_bitmap)) {
struct ida_bitmap *bitmap = kmalloc(sizeof(*bitmap), gfp);
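These hunks keep the preemption count balanced when __radix_tree_preload() fails; the caller-visible ida_pre_get()/ida_get_new() contract does not change, so callers keep using the usual retry loop. A minimal sketch of that loop, with my_ida, assign_id() and the GFP flags as illustrative choices:

	#include <linux/idr.h>

	static DEFINE_IDA(my_ida);

	static int assign_id(void)
	{
		int id, ret;

	again:
		if (!ida_pre_get(&my_ida, GFP_KERNEL))
			return -ENOMEM;
		ret = ida_get_new(&my_ida, &id);
		if (ret == -EAGAIN)
			goto again;	/* preallocation used up, refill and retry */
		if (ret)
			return ret;	/* e.g. -ENOSPC */
		return id;
	}
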
diff --git a/mm/memory.c b/mm/memory.c
index 56e48e4593cb..274547075486 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3888,6 +3888,11 @@ int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
/* do counter updates before entering really critical section. */
check_sync_rss_stat(current);

+ if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE,
+ flags & FAULT_FLAG_INSTRUCTION,
+ flags & FAULT_FLAG_REMOTE))
+ return VM_FAULT_SIGSEGV;
+
/*
* Enable the memcg OOM handling for faults triggered in user
* space. Kernel faults are handled more gracefully.
@@ -3895,11 +3900,6 @@ int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
if (flags & FAULT_FLAG_USER)
mem_cgroup_oom_enable();

- if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE,
- flags & FAULT_FLAG_INSTRUCTION,
- flags & FAULT_FLAG_REMOTE))
- return VM_FAULT_SIGSEGV;
-
if (unlikely(is_vm_hugetlb_page(vma)))
ret = hugetlb_fault(vma->vm_mm, vma, address, flags);
else
diff --git a/mm/sparse.c b/mm/sparse.c
index 7b4be3fd5cac..cdce7a7bb3f3 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -630,7 +630,7 @@ void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
unsigned long pfn;

for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
- unsigned long section_nr = pfn_to_section_nr(start_pfn);
+ unsigned long section_nr = pfn_to_section_nr(pfn);
struct mem_section *ms;

/* onlining code should never touch invalid ranges */
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 6ba4aab2db0b..a8952b6563c6 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -3052,7 +3052,8 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
p->flags = 0;
spin_unlock(&swap_lock);
vfree(swap_map);
- vfree(cluster_info);
+ kvfree(cluster_info);
+ kvfree(frontswap_map);
if (swap_file) {
if (inode && S_ISREG(inode->i_mode)) {
inode_unlock(inode);
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 303c779bfe38..43ba91c440bc 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -58,7 +58,7 @@ static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
u8 code, u8 ident, u16 dlen, void *data);
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
void *data);
-static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
+static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);

static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
@@ -1473,7 +1473,7 @@ static void l2cap_conn_start(struct l2cap_conn *conn)

set_bit(CONF_REQ_SENT, &chan->conf_state);
l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
- l2cap_build_conf_req(chan, buf), buf);
+ l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
chan->num_conf_req++;
}

@@ -2987,12 +2987,15 @@ static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
return len;
}

-static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
+static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
{
struct l2cap_conf_opt *opt = *ptr;

BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);

+ if (size < L2CAP_CONF_OPT_SIZE + len)
+ return;
+
opt->type = type;
opt->len = len;

@@ -3017,7 +3020,7 @@ static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
*ptr += L2CAP_CONF_OPT_SIZE + len;
}

-static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
+static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
{
struct l2cap_conf_efs efs;

@@ -3045,7 +3048,7 @@ static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
}

l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
- (unsigned long) &efs);
+ (unsigned long) &efs, size);
}

static void l2cap_ack_timeout(struct work_struct *work)
@@ -3191,11 +3194,12 @@ static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
chan->ack_win = chan->tx_win;
}

-static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
+static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
struct l2cap_conf_req *req = data;
struct l2cap_conf_rfc rfc = { .mode = chan->mode };
void *ptr = req->data;
+ void *endptr = data + data_size;
u16 size;

BT_DBG("chan %p", chan);
@@ -3220,7 +3224,7 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)

done:
if (chan->imtu != L2CAP_DEFAULT_MTU)
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);

switch (chan->mode) {
case L2CAP_MODE_BASIC:
@@ -3239,7 +3243,7 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
rfc.max_pdu_size = 0;

l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
- (unsigned long) &rfc);
+ (unsigned long) &rfc, endptr - ptr);
break;

case L2CAP_MODE_ERTM:
@@ -3259,21 +3263,21 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
L2CAP_DEFAULT_TX_WINDOW);

l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
- (unsigned long) &rfc);
+ (unsigned long) &rfc, endptr - ptr);

if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
- l2cap_add_opt_efs(&ptr, chan);
+ l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

if (test_bit(FLAG_EXT_CTRL, &chan->flags))
l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
- chan->tx_win);
+ chan->tx_win, endptr - ptr);

if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
if (chan->fcs == L2CAP_FCS_NONE ||
test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
chan->fcs = L2CAP_FCS_NONE;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
- chan->fcs);
+ chan->fcs, endptr - ptr);
}
break;

@@ -3291,17 +3295,17 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
rfc.max_pdu_size = cpu_to_le16(size);

l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
- (unsigned long) &rfc);
+ (unsigned long) &rfc, endptr - ptr);

if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
- l2cap_add_opt_efs(&ptr, chan);
+ l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
if (chan->fcs == L2CAP_FCS_NONE ||
test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
chan->fcs = L2CAP_FCS_NONE;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
- chan->fcs);
+ chan->fcs, endptr - ptr);
}
break;
}
@@ -3312,10 +3316,11 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
return ptr - data;
}

-static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
+static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
struct l2cap_conf_rsp *rsp = data;
void *ptr = rsp->data;
+ void *endptr = data + data_size;
void *req = chan->conf_req;
int len = chan->conf_len;
int type, hint, olen;
@@ -3417,7 +3422,7 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
return -ECONNREFUSED;

l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
- (unsigned long) &rfc);
+ (unsigned long) &rfc, endptr - ptr);
}

if (result == L2CAP_CONF_SUCCESS) {
@@ -3430,7 +3435,7 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
chan->omtu = mtu;
set_bit(CONF_MTU_DONE, &chan->conf_state);
}
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);

if (remote_efs) {
if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
@@ -3444,7 +3449,7 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)

l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
sizeof(efs),
- (unsigned long) &efs);
+ (unsigned long) &efs, endptr - ptr);
} else {
/* Send PENDING Conf Rsp */
result = L2CAP_CONF_PENDING;
@@ -3477,7 +3482,7 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
set_bit(CONF_MODE_DONE, &chan->conf_state);

l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
- sizeof(rfc), (unsigned long) &rfc);
+ sizeof(rfc), (unsigned long) &rfc, endptr - ptr);

if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
chan->remote_id = efs.id;
@@ -3491,7 +3496,7 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
le32_to_cpu(efs.sdu_itime);
l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
sizeof(efs),
- (unsigned long) &efs);
+ (unsigned long) &efs, endptr - ptr);
}
break;

@@ -3505,7 +3510,7 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
set_bit(CONF_MODE_DONE, &chan->conf_state);

l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
- (unsigned long) &rfc);
+ (unsigned long) &rfc, endptr - ptr);

break;

@@ -3527,10 +3532,11 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
}

static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
- void *data, u16 *result)
+ void *data, size_t size, u16 *result)
{
struct l2cap_conf_req *req = data;
void *ptr = req->data;
+ void *endptr = data + size;
int type, olen;
unsigned long val;
struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
@@ -3548,13 +3554,13 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
chan->imtu = L2CAP_DEFAULT_MIN_MTU;
} else
chan->imtu = val;
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);
break;

case L2CAP_CONF_FLUSH_TO:
chan->flush_to = val;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
- 2, chan->flush_to);
+ 2, chan->flush_to, endptr - ptr);
break;

case L2CAP_CONF_RFC:
@@ -3568,13 +3574,13 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
chan->fcs = 0;

l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
- sizeof(rfc), (unsigned long) &rfc);
+ sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
break;

case L2CAP_CONF_EWS:
chan->ack_win = min_t(u16, val, chan->ack_win);
l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
- chan->tx_win);
+ chan->tx_win, endptr - ptr);
break;

case L2CAP_CONF_EFS:
@@ -3587,7 +3593,7 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
return -ECONNREFUSED;

l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
- (unsigned long) &efs);
+ (unsigned long) &efs, endptr - ptr);
break;

case L2CAP_CONF_FCS:
@@ -3692,7 +3698,7 @@ void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
return;

l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
- l2cap_build_conf_req(chan, buf), buf);
+ l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
chan->num_conf_req++;
}

@@ -3900,7 +3906,7 @@ static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
u8 buf[128];
set_bit(CONF_REQ_SENT, &chan->conf_state);
l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
- l2cap_build_conf_req(chan, buf), buf);
+ l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
chan->num_conf_req++;
}

@@ -3978,7 +3984,7 @@ static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
break;

l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
- l2cap_build_conf_req(chan, req), req);
+ l2cap_build_conf_req(chan, req, sizeof(req)), req);
chan->num_conf_req++;
break;

@@ -4090,7 +4096,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn,
}

/* Complete config. */
- len = l2cap_parse_conf_req(chan, rsp);
+ len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
if (len < 0) {
l2cap_send_disconn_req(chan, ECONNRESET);
goto unlock;
@@ -4124,7 +4130,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn,
if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
u8 buf[64];
l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
- l2cap_build_conf_req(chan, buf), buf);
+ l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
chan->num_conf_req++;
}

@@ -4184,7 +4190,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn,
char buf[64];

len = l2cap_parse_conf_rsp(chan, rsp->data, len,
- buf, &result);
+ buf, sizeof(buf), &result);
if (len < 0) {
l2cap_send_disconn_req(chan, ECONNRESET);
goto done;
@@ -4214,7 +4220,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn,
/* throw out any old stored conf requests */
result = L2CAP_CONF_SUCCESS;
len = l2cap_parse_conf_rsp(chan, rsp->data, len,
- req, &result);
+ req, sizeof(req), &result);
if (len < 0) {
l2cap_send_disconn_req(chan, ECONNRESET);
goto done;
@@ -4791,7 +4797,7 @@ static void l2cap_do_create(struct l2cap_chan *chan, int result,
set_bit(CONF_REQ_SENT, &chan->conf_state);
l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
L2CAP_CONF_REQ,
- l2cap_build_conf_req(chan, buf), buf);
+ l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
chan->num_conf_req++;
}
}
@@ -7465,7 +7471,7 @@ static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
set_bit(CONF_REQ_SENT, &chan->conf_state);
l2cap_send_cmd(conn, l2cap_get_ident(conn),
L2CAP_CONF_REQ,
- l2cap_build_conf_req(chan, buf),
+ l2cap_build_conf_req(chan, buf, sizeof(buf)),
buf);
chan->num_conf_req++;
}
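All of the l2cap changes above follow one pattern: each writer of a configuration option now receives the space remaining in the response buffer (endptr - ptr), and l2cap_add_conf_opt() refuses to append an option that would not fit. A stand-alone userspace sketch of that bounded-append idiom; add_opt() and the 2-byte option header are illustrative, not the kernel's exact layout:

	#include <stdint.h>
	#include <string.h>

	static void add_opt(uint8_t **ptr, const uint8_t *end,
			    uint8_t type, uint8_t len, const void *val)
	{
		/* option = 1 type byte + 1 length byte + payload */
		if ((size_t)(end - *ptr) < 2u + len)
			return;			/* silently drop, never overflow */

		(*ptr)[0] = type;
		(*ptr)[1] = len;
		memcpy(*ptr + 2, val, len);
		*ptr += 2 + len;
	}
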
diff --git a/tools/testing/selftests/timers/Makefile b/tools/testing/selftests/timers/Makefile
index a9b86133b9b3..dfa916e651fb 100644
--- a/tools/testing/selftests/timers/Makefile
+++ b/tools/testing/selftests/timers/Makefile
@@ -14,20 +14,20 @@ TEST_GEN_PROGS_EXTENDED = alarmtimer-suspend valid-adjtimex adjtick change_skew

include ../lib.mk

+define RUN_DESTRUCTIVE_TESTS
+ @for TEST in $(TEST_GEN_PROGS_EXTENDED); do \
+ BASENAME_TEST=`basename $$TEST`; \
+ if [ ! -x $$BASENAME_TEST ]; then \
+ echo "selftests: Warning: file $$BASENAME_TEST is not executable, correct this.";\
+ echo "selftests: $$BASENAME_TEST [FAIL]"; \
+ else \
+ cd `dirname $$TEST`; (./$$BASENAME_TEST && echo "selftests: $$BASENAME_TEST [PASS]") || echo "selftests: $$BASENAME_TEST [FAIL]"; cd -;\
+ fi; \
+ done;
+endef
+
# these tests require escalated privileges
# and may modify the system time or trigger
# other behavior like suspend
run_destructive_tests: run_tests
- ./alarmtimer-suspend
- ./valid-adjtimex
- ./adjtick
- ./change_skew
- ./skew_consistency
- ./clocksource-switch
- ./freq-step
- ./leap-a-day -s -i 10
- ./leapcrash
- ./set-tz
- ./set-tai
- ./set-2038
-
+ $(RUN_DESTRUCTIVE_TESTS)
diff --git a/tools/testing/selftests/timers/leap-a-day.c b/tools/testing/selftests/timers/leap-a-day.c
index fb46ad6ac92c..067017634057 100644
--- a/tools/testing/selftests/timers/leap-a-day.c
+++ b/tools/testing/selftests/timers/leap-a-day.c
@@ -190,18 +190,18 @@ int main(int argc, char **argv)
struct sigevent se;
struct sigaction act;
int signum = SIGRTMAX;
- int settime = 0;
+ int settime = 1;
int tai_time = 0;
int insert = 1;
- int iterations = -1;
+ int iterations = 10;
int opt;

/* Process arguments */
while ((opt = getopt(argc, argv, "sti:")) != -1) {
switch (opt) {
- case 's':
- printf("Setting time to speed up testing\n");
- settime = 1;
+ case 'w':
+ printf("Only setting leap-flag, not changing time. It could take up to a day for leap to trigger.\n");
+ settime = 0;
break;
case 'i':
iterations = atoi(optarg);
@@ -210,9 +210,10 @@ int main(int argc, char **argv)
tai_time = 1;
break;
default:
- printf("Usage: %s [-s] [-i <iterations>]\n", argv[0]);
- printf(" -s: Set time to right before leap second each iteration\n");
- printf(" -i: Number of iterations\n");
+ printf("Usage: %s [-w] [-i <iterations>]\n", argv[0]);
+ printf(" -w: Set flag and wait for leap second each iteration");
+ printf(" (default sets time to right before leapsecond)\n");
+ printf(" -i: Number of iterations (-1 = infinite, default is 10)\n");
printf(" -t: Print TAI time\n");
exit(-1);
}
diff --git a/tools/testing/selftests/x86/fsgsbase.c b/tools/testing/selftests/x86/fsgsbase.c
index b4967d875236..f249e042b3b5 100644
--- a/tools/testing/selftests/x86/fsgsbase.c
+++ b/tools/testing/selftests/x86/fsgsbase.c
@@ -285,9 +285,12 @@ static void *threadproc(void *ctx)
}
}

-static void set_gs_and_switch_to(unsigned long local, unsigned long remote)
+static void set_gs_and_switch_to(unsigned long local,
+ unsigned short force_sel,
+ unsigned long remote)
{
unsigned long base;
+ unsigned short sel_pre_sched, sel_post_sched;

bool hard_zero = false;
if (local == HARD_ZERO) {
@@ -297,6 +300,8 @@ static void set_gs_and_switch_to(unsigned long local, unsigned long remote)

printf("[RUN]\tARCH_SET_GS(0x%lx)%s, then schedule to 0x%lx\n",
local, hard_zero ? " and clear gs" : "", remote);
+ if (force_sel)
+ printf("\tBefore schedule, set selector to 0x%hx\n", force_sel);
if (syscall(SYS_arch_prctl, ARCH_SET_GS, local) != 0)
err(1, "ARCH_SET_GS");
if (hard_zero)
@@ -307,18 +312,35 @@ static void set_gs_and_switch_to(unsigned long local, unsigned long remote)
printf("[FAIL]\tGSBASE wasn't set as expected\n");
}

+ if (force_sel) {
+ asm volatile ("mov %0, %%gs" : : "rm" (force_sel));
+ sel_pre_sched = force_sel;
+ local = read_base(GS);
+
+ /*
+ * Signal delivery seems to mess up weird selectors. Put it
+ * back.
+ */
+ asm volatile ("mov %0, %%gs" : : "rm" (force_sel));
+ } else {
+ asm volatile ("mov %%gs, %0" : "=rm" (sel_pre_sched));
+ }
+
remote_base = remote;
ftx = 1;
syscall(SYS_futex, &ftx, FUTEX_WAKE, 0, NULL, NULL, 0);
while (ftx != 0)
syscall(SYS_futex, &ftx, FUTEX_WAIT, 1, NULL, NULL, 0);

+ asm volatile ("mov %%gs, %0" : "=rm" (sel_post_sched));
base = read_base(GS);
- if (base == local) {
- printf("[OK]\tGSBASE remained 0x%lx\n", local);
+ if (base == local && sel_pre_sched == sel_post_sched) {
+ printf("[OK]\tGS/BASE remained 0x%hx/0x%lx\n",
+ sel_pre_sched, local);
} else {
nerrs++;
- printf("[FAIL]\tGSBASE changed to 0x%lx\n", base);
+ printf("[FAIL]\tGS/BASE changed from 0x%hx/0x%lx to 0x%hx/0x%lx\n",
+ sel_pre_sched, local, sel_post_sched, base);
}
}

@@ -381,8 +403,15 @@ int main()

for (int local = 0; local < 4; local++) {
for (int remote = 0; remote < 4; remote++) {
- set_gs_and_switch_to(bases_with_hard_zero[local],
- bases_with_hard_zero[remote]);
+ for (unsigned short s = 0; s < 5; s++) {
+ unsigned short sel = s;
+ if (s == 4)
+ asm ("mov %%ss, %0" : "=rm" (sel));
+ set_gs_and_switch_to(
+ bases_with_hard_zero[local],
+ sel,
+ bases_with_hard_zero[remote]);
+ }
}
}