author      Anthony G. Basile <blueness@gentoo.org>    2017-03-13 09:56:26 -0400
committer   Anthony G. Basile <blueness@gentoo.org>    2017-03-13 09:56:26 -0400
commit      543fcca01ae724bf3e27aa69e98282afc8e41645 (patch)
tree        84c9c490e59aca9d58303d2098468ebd7e485d44
parent      grsecurity-3.1-4.9.13-201703052141 (diff)
download    hardened-patchset-543fcca01ae724bf3e27aa69e98282afc8e41645.tar.gz
            hardened-patchset-543fcca01ae724bf3e27aa69e98282afc8e41645.tar.bz2
            hardened-patchset-543fcca01ae724bf3e27aa69e98282afc8e41645.zip
grsecurity-3.1-4.9.14-201703121245
-rw-r--r-- | 4.9.13/1012_linux-4.9.13.patch | 1079
-rw-r--r-- | 4.9.14/0000_README (renamed from 4.9.13/0000_README) | 6
-rw-r--r-- | 4.9.14/1013_linux-4.9.14.patch | 6768
-rw-r--r-- | 4.9.14/4420_grsecurity-3.1-4.9.14-201703121245.patch (renamed from 4.9.13/4420_grsecurity-3.1-4.9.13-201703052141.patch) | 447
-rw-r--r-- | 4.9.14/4425_grsec_remove_EI_PAX.patch (renamed from 4.9.13/4425_grsec_remove_EI_PAX.patch) | 0
-rw-r--r-- | 4.9.14/4426_default_XATTR_PAX_FLAGS.patch (renamed from 4.9.13/4426_default_XATTR_PAX_FLAGS.patch) | 0
-rw-r--r-- | 4.9.14/4427_force_XATTR_PAX_tmpfs.patch (renamed from 4.9.13/4427_force_XATTR_PAX_tmpfs.patch) | 0
-rw-r--r-- | 4.9.14/4430_grsec-remove-localversion-grsec.patch (renamed from 4.9.13/4430_grsec-remove-localversion-grsec.patch) | 0
-rw-r--r-- | 4.9.14/4435_grsec-mute-warnings.patch (renamed from 4.9.13/4435_grsec-mute-warnings.patch) | 0
-rw-r--r-- | 4.9.14/4440_grsec-remove-protected-paths.patch (renamed from 4.9.13/4440_grsec-remove-protected-paths.patch) | 0
-rw-r--r-- | 4.9.14/4450_grsec-kconfig-default-gids.patch (renamed from 4.9.13/4450_grsec-kconfig-default-gids.patch) | 0
-rw-r--r-- | 4.9.14/4465_selinux-avc_audit-log-curr_ip.patch (renamed from 4.9.13/4465_selinux-avc_audit-log-curr_ip.patch) | 0
-rw-r--r-- | 4.9.14/4470_disable-compat_vdso.patch (renamed from 4.9.13/4470_disable-compat_vdso.patch) | 0
-rw-r--r-- | 4.9.14/4475_emutramp_default_on.patch (renamed from 4.9.13/4475_emutramp_default_on.patch) | 0
14 files changed, 7138 insertions, 1162 deletions
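
The download row names snapshot tarballs of this commit, and the diffstat lists the 4.9.14 patch queue (1013_linux, 4420_grsecurity, then the 44xx fix-ups) in the order 0000_README describes. As a rough guide only, a minimal shell sketch of fetching a snapshot and applying that queue follows; the snapshot URL and the linux-4.9.13 base tree are assumptions rather than facts from this commit, and the hardened-sources ebuild remains the authoritative way these patches are applied.

    # Fetch and unpack the snapshot (the URL path is an assumption;
    # the tarball name comes from the download row above).
    wget https://gitweb.gentoo.org/proj/hardened-patchset.git/snapshot/hardened-patchset-543fcca01ae724bf3e27aa69e98282afc8e41645.tar.gz
    tar xf hardened-patchset-543fcca01ae724bf3e27aa69e98282afc8e41645.tar.gz

    # Apply the 4.9.14 queue in numeric (here also lexical) order on top of
    # a kernel source tree; linux-4.9.13 as the base is an assumption.
    cd linux-4.9.13
    for p in ../hardened-patchset-543fcca01ae724bf3e27aa69e98282afc8e41645/4.9.14/*.patch; do
        patch -p1 < "$p"
    done
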
diff --git a/4.9.13/1012_linux-4.9.13.patch b/4.9.13/1012_linux-4.9.13.patch deleted file mode 100644 index 8a48815..0000000 --- a/4.9.13/1012_linux-4.9.13.patch +++ /dev/null @@ -1,1079 +0,0 @@ -diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt -index 922dec8..65b05ba 100644 ---- a/Documentation/kernel-parameters.txt -+++ b/Documentation/kernel-parameters.txt -@@ -1391,6 +1391,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted. - When zero, profiling data is discarded and associated - debugfs files are removed at module unload time. - -+ goldfish [X86] Enable the goldfish android emulator platform. -+ Don't use this when you are not running on the -+ android emulator -+ - gpt [EFI] Forces disk with valid GPT signature but - invalid Protective MBR to be treated as GPT. If the - primary GPT is corrupted, it enables the backup/alternate -diff --git a/Makefile b/Makefile -index 3cd6f6f..14dc275 100644 ---- a/Makefile -+++ b/Makefile -@@ -1,6 +1,6 @@ - VERSION = 4 - PATCHLEVEL = 9 --SUBLEVEL = 12 -+SUBLEVEL = 13 - EXTRAVERSION = - NAME = Roaring Lionus - -diff --git a/arch/x86/platform/goldfish/goldfish.c b/arch/x86/platform/goldfish/goldfish.c -index 1693107..0d17c0a 100644 ---- a/arch/x86/platform/goldfish/goldfish.c -+++ b/arch/x86/platform/goldfish/goldfish.c -@@ -42,10 +42,22 @@ static struct resource goldfish_pdev_bus_resources[] = { - } - }; - -+static bool goldfish_enable __initdata; -+ -+static int __init goldfish_setup(char *str) -+{ -+ goldfish_enable = true; -+ return 0; -+} -+__setup("goldfish", goldfish_setup); -+ - static int __init goldfish_init(void) - { -+ if (!goldfish_enable) -+ return -ENODEV; -+ - platform_device_register_simple("goldfish_pdev_bus", -1, -- goldfish_pdev_bus_resources, 2); -+ goldfish_pdev_bus_resources, 2); - return 0; - } - device_initcall(goldfish_init); -diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c -index 6bb21b3..a543ea6 100644 ---- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c -+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c -@@ -567,10 +567,14 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv, - - mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse); - -+ preempt_disable(); -+ - tcf_exts_to_list(f->exts, &actions); - list_for_each_entry(a, &actions, list) - tcf_action_stats_update(a, bytes, packets, lastuse); - -+ preempt_enable(); -+ - return 0; - } - -diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c -index b9087b8..3f1971d 100644 ---- a/drivers/net/ethernet/ti/cpsw.c -+++ b/drivers/net/ethernet/ti/cpsw.c -@@ -2925,7 +2925,7 @@ static int cpsw_resume(struct device *dev) - { - struct platform_device *pdev = to_platform_device(dev); - struct net_device *ndev = platform_get_drvdata(pdev); -- struct cpsw_common *cpsw = netdev_priv(ndev); -+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev); - - /* Select default pin state */ - pinctrl_pm_select_default_state(dev); -diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c -index 0fafaa9..d4f495b 100644 ---- a/drivers/net/vxlan.c -+++ b/drivers/net/vxlan.c -@@ -2449,7 +2449,8 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) - return -EINVAL; - rt = vxlan_get_route(vxlan, skb, 0, info->key.tos, - info->key.u.ipv4.dst, -- &info->key.u.ipv4.src, NULL, info); -+ &info->key.u.ipv4.src, -+ &info->dst_cache, info); - if (IS_ERR(rt)) - return PTR_ERR(rt); - ip_rt_put(rt); -@@ -2459,7 +2460,8 @@ static int 
vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) - - ndst = vxlan6_get_route(vxlan, skb, 0, info->key.tos, - info->key.label, &info->key.u.ipv6.dst, -- &info->key.u.ipv6.src, NULL, info); -+ &info->key.u.ipv6.src, -+ &info->dst_cache, info); - if (IS_ERR(ndst)) - return PTR_ERR(ndst); - dst_release(ndst); -diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c -index 3837bbd..ae0c48f 100644 ---- a/drivers/net/wireless/realtek/rtlwifi/usb.c -+++ b/drivers/net/wireless/realtek/rtlwifi/usb.c -@@ -831,12 +831,30 @@ static void rtl_usb_stop(struct ieee80211_hw *hw) - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); -+ struct urb *urb; - - /* should after adapter start and interrupt enable. */ - set_hal_stop(rtlhal); - cancel_work_sync(&rtlpriv->works.fill_h2c_cmd); - /* Enable software */ - SET_USB_STOP(rtlusb); -+ -+ /* free pre-allocated URBs from rtl_usb_start() */ -+ usb_kill_anchored_urbs(&rtlusb->rx_submitted); -+ -+ tasklet_kill(&rtlusb->rx_work_tasklet); -+ cancel_work_sync(&rtlpriv->works.lps_change_work); -+ -+ flush_workqueue(rtlpriv->works.rtl_wq); -+ -+ skb_queue_purge(&rtlusb->rx_queue); -+ -+ while ((urb = usb_get_from_anchor(&rtlusb->rx_cleanup_urbs))) { -+ usb_free_coherent(urb->dev, urb->transfer_buffer_length, -+ urb->transfer_buffer, urb->transfer_dma); -+ usb_free_urb(urb); -+ } -+ - rtlpriv->cfg->ops->hw_disable(hw); - } - -diff --git a/drivers/platform/goldfish/pdev_bus.c b/drivers/platform/goldfish/pdev_bus.c -index 1f52462..dd9ea46 100644 ---- a/drivers/platform/goldfish/pdev_bus.c -+++ b/drivers/platform/goldfish/pdev_bus.c -@@ -157,23 +157,26 @@ static int goldfish_new_pdev(void) - static irqreturn_t goldfish_pdev_bus_interrupt(int irq, void *dev_id) - { - irqreturn_t ret = IRQ_NONE; -+ - while (1) { - u32 op = readl(pdev_bus_base + PDEV_BUS_OP); -- switch (op) { -- case PDEV_BUS_OP_DONE: -- return IRQ_NONE; - -+ switch (op) { - case PDEV_BUS_OP_REMOVE_DEV: - goldfish_pdev_remove(); -+ ret = IRQ_HANDLED; - break; - - case PDEV_BUS_OP_ADD_DEV: - goldfish_new_pdev(); -+ ret = IRQ_HANDLED; - break; -+ -+ case PDEV_BUS_OP_DONE: -+ default: -+ return ret; - } -- ret = IRQ_HANDLED; - } -- return ret; - } - - static int goldfish_pdev_bus_probe(struct platform_device *pdev) -diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c -index 7312e7e..6788e75 100644 ---- a/drivers/tty/serial/msm_serial.c -+++ b/drivers/tty/serial/msm_serial.c -@@ -1809,6 +1809,7 @@ static const struct of_device_id msm_match_table[] = { - { .compatible = "qcom,msm-uartdm" }, - {} - }; -+MODULE_DEVICE_TABLE(of, msm_match_table); - - static struct platform_driver msm_platform_driver = { - .remove = msm_serial_remove, -diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c -index 1532cde..7812052 100644 ---- a/drivers/usb/serial/ark3116.c -+++ b/drivers/usb/serial/ark3116.c -@@ -99,10 +99,17 @@ static int ark3116_read_reg(struct usb_serial *serial, - usb_rcvctrlpipe(serial->dev, 0), - 0xfe, 0xc0, 0, reg, - buf, 1, ARK_TIMEOUT); -- if (result < 0) -+ if (result < 1) { -+ dev_err(&serial->interface->dev, -+ "failed to read register %u: %d\n", -+ reg, result); -+ if (result >= 0) -+ result = -EIO; -+ - return result; -- else -- return buf[0]; -+ } -+ -+ return buf[0]; - } - - static inline int calc_divisor(int bps) -diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c -index 
8967715..b6f1ade 100644 ---- a/drivers/usb/serial/console.c -+++ b/drivers/usb/serial/console.c -@@ -143,6 +143,7 @@ static int usb_console_setup(struct console *co, char *options) - tty->driver = usb_serial_tty_driver; - tty->index = co->index; - init_ldsem(&tty->ldisc_sem); -+ spin_lock_init(&tty->files_lock); - INIT_LIST_HEAD(&tty->tty_files); - kref_get(&tty->driver->kref); - __module_get(tty->driver->owner); -diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c -index 243ac5e..8bb4875 100644 ---- a/drivers/usb/serial/cp210x.c -+++ b/drivers/usb/serial/cp210x.c -@@ -172,6 +172,8 @@ static const struct usb_device_id id_table[] = { - { USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */ - { USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */ - { USB_DEVICE(0x1901, 0x0194) }, /* GE Healthcare Remote Alarm Box */ -+ { USB_DEVICE(0x1901, 0x0195) }, /* GE B850/B650/B450 CP2104 DP UART interface */ -+ { USB_DEVICE(0x1901, 0x0196) }, /* GE B850 CP2105 DP UART interface */ - { USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */ - { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */ - { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */ -diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c -index 6e9fc8b..99a0a5f 100644 ---- a/drivers/usb/serial/ftdi_sio.c -+++ b/drivers/usb/serial/ftdi_sio.c -@@ -1807,8 +1807,6 @@ static int ftdi_sio_port_probe(struct usb_serial_port *port) - - mutex_init(&priv->cfg_lock); - -- priv->flags = ASYNC_LOW_LATENCY; -- - if (quirk && quirk->port_probe) - quirk->port_probe(priv); - -@@ -2072,6 +2070,20 @@ static int ftdi_process_packet(struct usb_serial_port *port, - priv->prev_status = status; - } - -+ /* save if the transmitter is empty or not */ -+ if (packet[1] & FTDI_RS_TEMT) -+ priv->transmit_empty = 1; -+ else -+ priv->transmit_empty = 0; -+ -+ len -= 2; -+ if (!len) -+ return 0; /* status only */ -+ -+ /* -+ * Break and error status must only be processed for packets with -+ * data payload to avoid over-reporting. -+ */ - flag = TTY_NORMAL; - if (packet[1] & FTDI_RS_ERR_MASK) { - /* Break takes precedence over parity, which takes precedence -@@ -2094,15 +2106,6 @@ static int ftdi_process_packet(struct usb_serial_port *port, - } - } - -- /* save if the transmitter is empty or not */ -- if (packet[1] & FTDI_RS_TEMT) -- priv->transmit_empty = 1; -- else -- priv->transmit_empty = 0; -- -- len -= 2; -- if (!len) -- return 0; /* status only */ - port->icount.rx += len; - ch = packet + 2; - -@@ -2433,8 +2436,12 @@ static int ftdi_get_modem_status(struct usb_serial_port *port, - FTDI_SIO_GET_MODEM_STATUS_REQUEST_TYPE, - 0, priv->interface, - buf, len, WDR_TIMEOUT); -- if (ret < 0) { -+ -+ /* NOTE: We allow short responses and handle that below. */ -+ if (ret < 1) { - dev_err(&port->dev, "failed to get modem status: %d\n", ret); -+ if (ret >= 0) -+ ret = -EIO; - ret = usb_translate_errors(ret); - goto out; - } -diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c -index 4f9af47..5c4fc3a 100644 ---- a/drivers/usb/serial/mos7840.c -+++ b/drivers/usb/serial/mos7840.c -@@ -1024,6 +1024,7 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port) - * (can't set it up in mos7840_startup as the structures * - * were not set up at that time.) */ - if (port0->open_ports == 1) { -+ /* FIXME: Buffer never NULL, so URB is not submitted. 
*/ - if (serial->port[0]->interrupt_in_buffer == NULL) { - /* set up interrupt urb */ - usb_fill_int_urb(serial->port[0]->interrupt_in_urb, -@@ -2119,7 +2120,8 @@ static int mos7840_calc_num_ports(struct usb_serial *serial) - static int mos7840_attach(struct usb_serial *serial) - { - if (serial->num_bulk_in < serial->num_ports || -- serial->num_bulk_out < serial->num_ports) { -+ serial->num_bulk_out < serial->num_ports || -+ serial->num_interrupt_in < 1) { - dev_err(&serial->interface->dev, "missing endpoints\n"); - return -ENODEV; - } -diff --git a/drivers/usb/serial/opticon.c b/drivers/usb/serial/opticon.c -index 4b7bfb3..64bf258 100644 ---- a/drivers/usb/serial/opticon.c -+++ b/drivers/usb/serial/opticon.c -@@ -142,7 +142,7 @@ static int opticon_open(struct tty_struct *tty, struct usb_serial_port *port) - usb_clear_halt(port->serial->dev, port->read_urb->pipe); - - res = usb_serial_generic_open(tty, port); -- if (!res) -+ if (res) - return res; - - /* Request CTS line state, sometimes during opening the current -diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c -index 475e6c3..ddfd787 100644 ---- a/drivers/usb/serial/spcp8x5.c -+++ b/drivers/usb/serial/spcp8x5.c -@@ -232,11 +232,17 @@ static int spcp8x5_get_msr(struct usb_serial_port *port, u8 *status) - ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), - GET_UART_STATUS, GET_UART_STATUS_TYPE, - 0, GET_UART_STATUS_MSR, buf, 1, 100); -- if (ret < 0) -+ if (ret < 1) { - dev_err(&port->dev, "failed to get modem status: %d\n", ret); -+ if (ret >= 0) -+ ret = -EIO; -+ goto out; -+ } - - dev_dbg(&port->dev, "0xc0:0x22:0:6 %d - 0x02%x\n", ret, *buf); - *status = *buf; -+ ret = 0; -+out: - kfree(buf); - - return ret; -diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c -index cdc6bdd..e888961 100644 ---- a/fs/xfs/xfs_iomap.c -+++ b/fs/xfs/xfs_iomap.c -@@ -1068,7 +1068,15 @@ xfs_file_iomap_end_delalloc( - xfs_fileoff_t end_fsb; - int error = 0; - -- start_fsb = XFS_B_TO_FSB(mp, offset + written); -+ /* -+ * start_fsb refers to the first unused block after a short write. If -+ * nothing was written, round offset down to point at the first block in -+ * the range. -+ */ -+ if (unlikely(!written)) -+ start_fsb = XFS_B_TO_FSBT(mp, offset); -+ else -+ start_fsb = XFS_B_TO_FSB(mp, offset + written); - end_fsb = XFS_B_TO_FSB(mp, offset + length); - - /* -@@ -1080,6 +1088,9 @@ xfs_file_iomap_end_delalloc( - * blocks in the range, they are ours. - */ - if (start_fsb < end_fsb) { -+ truncate_pagecache_range(VFS_I(ip), XFS_FSB_TO_B(mp, start_fsb), -+ XFS_FSB_TO_B(mp, end_fsb) - 1); -+ - xfs_ilock(ip, XFS_ILOCK_EXCL); - error = xfs_bmap_punch_delalloc_range(ip, start_fsb, - end_fsb - start_fsb); -diff --git a/include/acpi/platform/acenv.h b/include/acpi/platform/acenv.h -index 34cce72..fca1539 100644 ---- a/include/acpi/platform/acenv.h -+++ b/include/acpi/platform/acenv.h -@@ -177,7 +177,7 @@ - #include "acmsvc.h" - - #elif defined(__INTEL_COMPILER) --#include "acintel.h" -+#include <acpi/platform/acintel.h> - - #endif - -diff --git a/include/acpi/platform/acintel.h b/include/acpi/platform/acintel.h -new file mode 100644 -index 0000000..17bd3b7 ---- /dev/null -+++ b/include/acpi/platform/acintel.h -@@ -0,0 +1,87 @@ -+/****************************************************************************** -+ * -+ * Name: acintel.h - VC specific defines, etc. -+ * -+ *****************************************************************************/ -+ -+/* -+ * Copyright (C) 2000 - 2017, Intel Corp. -+ * All rights reserved. 
-+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions -+ * are met: -+ * 1. Redistributions of source code must retain the above copyright -+ * notice, this list of conditions, and the following disclaimer, -+ * without modification. -+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer -+ * substantially similar to the "NO WARRANTY" disclaimer below -+ * ("Disclaimer") and any redistribution must be conditioned upon -+ * including a substantially similar Disclaimer requirement for further -+ * binary redistribution. -+ * 3. Neither the names of the above-listed copyright holders nor the names -+ * of any contributors may be used to endorse or promote products derived -+ * from this software without specific prior written permission. -+ * -+ * Alternatively, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") version 2 as published by the Free -+ * Software Foundation. -+ * -+ * NO WARRANTY -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR -+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS -+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) -+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, -+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING -+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGES. -+ */ -+ -+#ifndef __ACINTEL_H__ -+#define __ACINTEL_H__ -+ -+/* -+ * Use compiler specific <stdarg.h> is a good practice for even when -+ * -nostdinc is specified (i.e., ACPI_USE_STANDARD_HEADERS undefined. 
-+ */ -+#include <stdarg.h> -+ -+/* Configuration specific to Intel 64-bit C compiler */ -+ -+#define COMPILER_DEPENDENT_INT64 __int64 -+#define COMPILER_DEPENDENT_UINT64 unsigned __int64 -+#define ACPI_INLINE __inline -+ -+/* -+ * Calling conventions: -+ * -+ * ACPI_SYSTEM_XFACE - Interfaces to host OS (handlers, threads) -+ * ACPI_EXTERNAL_XFACE - External ACPI interfaces -+ * ACPI_INTERNAL_XFACE - Internal ACPI interfaces -+ * ACPI_INTERNAL_VAR_XFACE - Internal variable-parameter list interfaces -+ */ -+#define ACPI_SYSTEM_XFACE -+#define ACPI_EXTERNAL_XFACE -+#define ACPI_INTERNAL_XFACE -+#define ACPI_INTERNAL_VAR_XFACE -+ -+/* remark 981 - operands evaluated in no particular order */ -+#pragma warning(disable:981) -+ -+/* warn C4100: unreferenced formal parameter */ -+#pragma warning(disable:4100) -+ -+/* warn C4127: conditional expression is constant */ -+#pragma warning(disable:4127) -+ -+/* warn C4706: assignment within conditional expression */ -+#pragma warning(disable:4706) -+ -+/* warn C4214: bit field types other than int */ -+#pragma warning(disable:4214) -+ -+#endif /* __ACINTEL_H__ */ -diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h -index 2052011..6c70444 100644 ---- a/include/linux/ptr_ring.h -+++ b/include/linux/ptr_ring.h -@@ -111,6 +111,11 @@ static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr) - return 0; - } - -+/* -+ * Note: resize (below) nests producer lock within consumer lock, so if you -+ * consume in interrupt or BH context, you must disable interrupts/BH when -+ * calling this. -+ */ - static inline int ptr_ring_produce(struct ptr_ring *r, void *ptr) - { - int ret; -@@ -242,6 +247,11 @@ static inline void *__ptr_ring_consume(struct ptr_ring *r) - return ptr; - } - -+/* -+ * Note: resize (below) nests producer lock within consumer lock, so if you -+ * call this in interrupt or BH context, you must disable interrupts/BH when -+ * producing. -+ */ - static inline void *ptr_ring_consume(struct ptr_ring *r) - { - void *ptr; -@@ -357,7 +367,7 @@ static inline void **__ptr_ring_swap_queue(struct ptr_ring *r, void **queue, - void **old; - void *ptr; - -- while ((ptr = ptr_ring_consume(r))) -+ while ((ptr = __ptr_ring_consume(r))) - if (producer < size) - queue[producer++] = ptr; - else if (destroy) -@@ -372,6 +382,12 @@ static inline void **__ptr_ring_swap_queue(struct ptr_ring *r, void **queue, - return old; - } - -+/* -+ * Note: producer lock is nested within consumer lock, so if you -+ * resize you must make sure all uses nest correctly. -+ * In particular if you consume ring in interrupt or BH context, you must -+ * disable interrupts/BH when doing so. -+ */ - static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp, - void (*destroy)(void *)) - { -@@ -382,17 +398,25 @@ static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp, - if (!queue) - return -ENOMEM; - -- spin_lock_irqsave(&(r)->producer_lock, flags); -+ spin_lock_irqsave(&(r)->consumer_lock, flags); -+ spin_lock(&(r)->producer_lock); - - old = __ptr_ring_swap_queue(r, queue, size, gfp, destroy); - -- spin_unlock_irqrestore(&(r)->producer_lock, flags); -+ spin_unlock(&(r)->producer_lock); -+ spin_unlock_irqrestore(&(r)->consumer_lock, flags); - - kfree(old); - - return 0; - } - -+/* -+ * Note: producer lock is nested within consumer lock, so if you -+ * resize you must make sure all uses nest correctly. -+ * In particular if you consume ring in interrupt or BH context, you must -+ * disable interrupts/BH when doing so. 
-+ */ - static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, int nrings, - int size, - gfp_t gfp, void (*destroy)(void *)) -@@ -412,10 +436,12 @@ static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, int nrings, - } - - for (i = 0; i < nrings; ++i) { -- spin_lock_irqsave(&(rings[i])->producer_lock, flags); -+ spin_lock_irqsave(&(rings[i])->consumer_lock, flags); -+ spin_lock(&(rings[i])->producer_lock); - queues[i] = __ptr_ring_swap_queue(rings[i], queues[i], - size, gfp, destroy); -- spin_unlock_irqrestore(&(rings[i])->producer_lock, flags); -+ spin_unlock(&(rings[i])->producer_lock); -+ spin_unlock_irqrestore(&(rings[i])->consumer_lock, flags); - } - - for (i = 0; i < nrings; ++i) -diff --git a/mm/backing-dev.c b/mm/backing-dev.c -index 8fde443..6ff2d77 100644 ---- a/mm/backing-dev.c -+++ b/mm/backing-dev.c -@@ -757,15 +757,20 @@ static int cgwb_bdi_init(struct backing_dev_info *bdi) - if (!bdi->wb_congested) - return -ENOMEM; - -+ atomic_set(&bdi->wb_congested->refcnt, 1); -+ - err = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL); - if (err) { -- kfree(bdi->wb_congested); -+ wb_congested_put(bdi->wb_congested); - return err; - } - return 0; - } - --static void cgwb_bdi_destroy(struct backing_dev_info *bdi) { } -+static void cgwb_bdi_destroy(struct backing_dev_info *bdi) -+{ -+ wb_congested_put(bdi->wb_congested); -+} - - #endif /* CONFIG_CGROUP_WRITEBACK */ - -diff --git a/net/core/neighbour.c b/net/core/neighbour.c -index 2ae929f..9901e5b 100644 ---- a/net/core/neighbour.c -+++ b/net/core/neighbour.c -@@ -2927,7 +2927,8 @@ static void neigh_proc_update(struct ctl_table *ctl, int write) - return; - - set_bit(index, p->data_state); -- call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p); -+ if (index == NEIGH_VAR_DELAY_PROBE_TIME) -+ call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p); - if (!dev) /* NULL dev means this is default value */ - neigh_copy_dflt_parms(net, p, index); - } -diff --git a/net/dccp/input.c b/net/dccp/input.c -index ba34718..8fedc2d 100644 ---- a/net/dccp/input.c -+++ b/net/dccp/input.c -@@ -606,7 +606,8 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb, - if (inet_csk(sk)->icsk_af_ops->conn_request(sk, - skb) < 0) - return 1; -- goto discard; -+ consume_skb(skb); -+ return 0; - } - if (dh->dccph_type == DCCP_PKT_RESET) - goto discard; -diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c -index 65336f3..9826695 100644 ---- a/net/ipv4/ip_sockglue.c -+++ b/net/ipv4/ip_sockglue.c -@@ -105,10 +105,10 @@ static void ip_cmsg_recv_checksum(struct msghdr *msg, struct sk_buff *skb, - if (skb->ip_summed != CHECKSUM_COMPLETE) - return; - -- if (offset != 0) -- csum = csum_sub(csum, -- csum_partial(skb_transport_header(skb) + tlen, -- offset, 0)); -+ if (offset != 0) { -+ int tend_off = skb_transport_offset(skb) + tlen; -+ csum = csum_sub(csum, skb_checksum(skb, tend_off, offset, 0)); -+ } - - put_cmsg(msg, SOL_IP, IP_CHECKSUM, sizeof(__wsum), &csum); - } -diff --git a/net/irda/irqueue.c b/net/irda/irqueue.c -index acbe61c..160dc89 100644 ---- a/net/irda/irqueue.c -+++ b/net/irda/irqueue.c -@@ -383,9 +383,6 @@ EXPORT_SYMBOL(hashbin_new); - * for deallocating this structure if it's complex. If not the user can - * just supply kfree, which should take care of the job. 
- */ --#ifdef CONFIG_LOCKDEP --static int hashbin_lock_depth = 0; --#endif - int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func) - { - irda_queue_t* queue; -@@ -396,22 +393,27 @@ int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func) - IRDA_ASSERT(hashbin->magic == HB_MAGIC, return -1;); - - /* Synchronize */ -- if ( hashbin->hb_type & HB_LOCK ) { -- spin_lock_irqsave_nested(&hashbin->hb_spinlock, flags, -- hashbin_lock_depth++); -- } -+ if (hashbin->hb_type & HB_LOCK) -+ spin_lock_irqsave(&hashbin->hb_spinlock, flags); - - /* - * Free the entries in the hashbin, TODO: use hashbin_clear when - * it has been shown to work - */ - for (i = 0; i < HASHBIN_SIZE; i ++ ) { -- queue = dequeue_first((irda_queue_t**) &hashbin->hb_queue[i]); -- while (queue ) { -- if (free_func) -- (*free_func)(queue); -- queue = dequeue_first( -- (irda_queue_t**) &hashbin->hb_queue[i]); -+ while (1) { -+ queue = dequeue_first((irda_queue_t**) &hashbin->hb_queue[i]); -+ -+ if (!queue) -+ break; -+ -+ if (free_func) { -+ if (hashbin->hb_type & HB_LOCK) -+ spin_unlock_irqrestore(&hashbin->hb_spinlock, flags); -+ free_func(queue); -+ if (hashbin->hb_type & HB_LOCK) -+ spin_lock_irqsave(&hashbin->hb_spinlock, flags); -+ } - } - } - -@@ -420,12 +422,8 @@ int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func) - hashbin->magic = ~HB_MAGIC; - - /* Release lock */ -- if ( hashbin->hb_type & HB_LOCK) { -+ if (hashbin->hb_type & HB_LOCK) - spin_unlock_irqrestore(&hashbin->hb_spinlock, flags); --#ifdef CONFIG_LOCKDEP -- hashbin_lock_depth--; --#endif -- } - - /* - * Free the hashbin structure -diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c -index 7e08a4d..a646f34 100644 ---- a/net/kcm/kcmsock.c -+++ b/net/kcm/kcmsock.c -@@ -929,23 +929,25 @@ static int kcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) - goto out_error; - } - -- /* New message, alloc head skb */ -- head = alloc_skb(0, sk->sk_allocation); -- while (!head) { -- kcm_push(kcm); -- err = sk_stream_wait_memory(sk, &timeo); -- if (err) -- goto out_error; -- -+ if (msg_data_left(msg)) { -+ /* New message, alloc head skb */ - head = alloc_skb(0, sk->sk_allocation); -- } -+ while (!head) { -+ kcm_push(kcm); -+ err = sk_stream_wait_memory(sk, &timeo); -+ if (err) -+ goto out_error; - -- skb = head; -+ head = alloc_skb(0, sk->sk_allocation); -+ } - -- /* Set ip_summed to CHECKSUM_UNNECESSARY to avoid calling -- * csum_and_copy_from_iter from skb_do_copy_data_nocache. -- */ -- skb->ip_summed = CHECKSUM_UNNECESSARY; -+ skb = head; -+ -+ /* Set ip_summed to CHECKSUM_UNNECESSARY to avoid calling -+ * csum_and_copy_from_iter from skb_do_copy_data_nocache. 
-+ */ -+ skb->ip_summed = CHECKSUM_UNNECESSARY; -+ } - - start: - while (msg_data_left(msg)) { -@@ -1018,10 +1020,12 @@ static int kcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) - if (eor) { - bool not_busy = skb_queue_empty(&sk->sk_write_queue); - -- /* Message complete, queue it on send buffer */ -- __skb_queue_tail(&sk->sk_write_queue, head); -- kcm->seq_skb = NULL; -- KCM_STATS_INCR(kcm->stats.tx_msgs); -+ if (head) { -+ /* Message complete, queue it on send buffer */ -+ __skb_queue_tail(&sk->sk_write_queue, head); -+ kcm->seq_skb = NULL; -+ KCM_STATS_INCR(kcm->stats.tx_msgs); -+ } - - if (msg->msg_flags & MSG_BATCH) { - kcm->tx_wait_more = true; -@@ -1040,8 +1044,10 @@ static int kcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) - } else { - /* Message not complete, save state */ - partial_message: -- kcm->seq_skb = head; -- kcm_tx_msg(head)->last_skb = skb; -+ if (head) { -+ kcm->seq_skb = head; -+ kcm_tx_msg(head)->last_skb = skb; -+ } - } - - KCM_STATS_ADD(kcm->stats.tx_bytes, copied); -diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c -index 3e821da..8bc5a1b 100644 ---- a/net/llc/llc_conn.c -+++ b/net/llc/llc_conn.c -@@ -821,7 +821,10 @@ void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb) - * another trick required to cope with how the PROCOM state - * machine works. -acme - */ -+ skb_orphan(skb); -+ sock_hold(sk); - skb->sk = sk; -+ skb->destructor = sock_efree; - } - if (!sock_owned_by_user(sk)) - llc_conn_rcv(sk, skb); -diff --git a/net/llc/llc_sap.c b/net/llc/llc_sap.c -index d0e1e80..5404d0d 100644 ---- a/net/llc/llc_sap.c -+++ b/net/llc/llc_sap.c -@@ -290,7 +290,10 @@ static void llc_sap_rcv(struct llc_sap *sap, struct sk_buff *skb, - - ev->type = LLC_SAP_EV_TYPE_PDU; - ev->reason = 0; -+ skb_orphan(skb); -+ sock_hold(sk); - skb->sk = sk; -+ skb->destructor = sock_efree; - llc_sap_state_process(sap, skb); - } - -diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c -index 7341adf..6dc44d9 100644 ---- a/net/netfilter/nf_conntrack_helper.c -+++ b/net/netfilter/nf_conntrack_helper.c -@@ -188,6 +188,26 @@ nf_ct_helper_ext_add(struct nf_conn *ct, - } - EXPORT_SYMBOL_GPL(nf_ct_helper_ext_add); - -+static struct nf_conntrack_helper * -+nf_ct_lookup_helper(struct nf_conn *ct, struct net *net) -+{ -+ if (!net->ct.sysctl_auto_assign_helper) { -+ if (net->ct.auto_assign_helper_warned) -+ return NULL; -+ if (!__nf_ct_helper_find(&ct->tuplehash[IP_CT_DIR_REPLY].tuple)) -+ return NULL; -+ pr_info("nf_conntrack: default automatic helper assignment " -+ "has been turned off for security reasons and CT-based " -+ " firewall rule not found. Use the iptables CT target " -+ "to attach helpers instead.\n"); -+ net->ct.auto_assign_helper_warned = 1; -+ return NULL; -+ } -+ -+ return __nf_ct_helper_find(&ct->tuplehash[IP_CT_DIR_REPLY].tuple); -+} -+ -+ - int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl, - gfp_t flags) - { -@@ -213,21 +233,14 @@ int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl, - } - - help = nfct_help(ct); -- if (net->ct.sysctl_auto_assign_helper && helper == NULL) { -- helper = __nf_ct_helper_find(&ct->tuplehash[IP_CT_DIR_REPLY].tuple); -- if (unlikely(!net->ct.auto_assign_helper_warned && helper)) { -- pr_info("nf_conntrack: automatic helper " -- "assignment is deprecated and it will " -- "be removed soon. 
Use the iptables CT target " -- "to attach helpers instead.\n"); -- net->ct.auto_assign_helper_warned = true; -- } -- } - - if (helper == NULL) { -- if (help) -- RCU_INIT_POINTER(help->helper, NULL); -- return 0; -+ helper = nf_ct_lookup_helper(ct, net); -+ if (helper == NULL) { -+ if (help) -+ RCU_INIT_POINTER(help->helper, NULL); -+ return 0; -+ } - } - - if (help == NULL) { -diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c -index 458722b..34de326 100644 ---- a/net/packet/af_packet.c -+++ b/net/packet/af_packet.c -@@ -1497,6 +1497,8 @@ static void __fanout_link(struct sock *sk, struct packet_sock *po) - f->arr[f->num_members] = sk; - smp_wmb(); - f->num_members++; -+ if (f->num_members == 1) -+ dev_add_pack(&f->prot_hook); - spin_unlock(&f->lock); - } - -@@ -1513,6 +1515,8 @@ static void __fanout_unlink(struct sock *sk, struct packet_sock *po) - BUG_ON(i >= f->num_members); - f->arr[i] = f->arr[f->num_members - 1]; - f->num_members--; -+ if (f->num_members == 0) -+ __dev_remove_pack(&f->prot_hook); - spin_unlock(&f->lock); - } - -@@ -1619,6 +1623,7 @@ static void fanout_release_data(struct packet_fanout *f) - - static int fanout_add(struct sock *sk, u16 id, u16 type_flags) - { -+ struct packet_rollover *rollover = NULL; - struct packet_sock *po = pkt_sk(sk); - struct packet_fanout *f, *match; - u8 type = type_flags & 0xff; -@@ -1641,23 +1646,28 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags) - return -EINVAL; - } - -+ mutex_lock(&fanout_mutex); -+ -+ err = -EINVAL; - if (!po->running) -- return -EINVAL; -+ goto out; - -+ err = -EALREADY; - if (po->fanout) -- return -EALREADY; -+ goto out; - - if (type == PACKET_FANOUT_ROLLOVER || - (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) { -- po->rollover = kzalloc(sizeof(*po->rollover), GFP_KERNEL); -- if (!po->rollover) -- return -ENOMEM; -- atomic_long_set(&po->rollover->num, 0); -- atomic_long_set(&po->rollover->num_huge, 0); -- atomic_long_set(&po->rollover->num_failed, 0); -+ err = -ENOMEM; -+ rollover = kzalloc(sizeof(*rollover), GFP_KERNEL); -+ if (!rollover) -+ goto out; -+ atomic_long_set(&rollover->num, 0); -+ atomic_long_set(&rollover->num_huge, 0); -+ atomic_long_set(&rollover->num_failed, 0); -+ po->rollover = rollover; - } - -- mutex_lock(&fanout_mutex); - match = NULL; - list_for_each_entry(f, &fanout_list, list) { - if (f->id == id && -@@ -1687,7 +1697,6 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags) - match->prot_hook.func = packet_rcv_fanout; - match->prot_hook.af_packet_priv = match; - match->prot_hook.id_match = match_fanout_group; -- dev_add_pack(&match->prot_hook); - list_add(&match->list, &fanout_list); - } - err = -EINVAL; -@@ -1704,36 +1713,40 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags) - } - } - out: -- mutex_unlock(&fanout_mutex); -- if (err) { -- kfree(po->rollover); -+ if (err && rollover) { -+ kfree(rollover); - po->rollover = NULL; - } -+ mutex_unlock(&fanout_mutex); - return err; - } - --static void fanout_release(struct sock *sk) -+/* If pkt_sk(sk)->fanout->sk_ref is zero, this function removes -+ * pkt_sk(sk)->fanout from fanout_list and returns pkt_sk(sk)->fanout. 
-+ * It is the responsibility of the caller to call fanout_release_data() and -+ * free the returned packet_fanout (after synchronize_net()) -+ */ -+static struct packet_fanout *fanout_release(struct sock *sk) - { - struct packet_sock *po = pkt_sk(sk); - struct packet_fanout *f; - -+ mutex_lock(&fanout_mutex); - f = po->fanout; -- if (!f) -- return; -+ if (f) { -+ po->fanout = NULL; - -- mutex_lock(&fanout_mutex); -- po->fanout = NULL; -+ if (atomic_dec_and_test(&f->sk_ref)) -+ list_del(&f->list); -+ else -+ f = NULL; - -- if (atomic_dec_and_test(&f->sk_ref)) { -- list_del(&f->list); -- dev_remove_pack(&f->prot_hook); -- fanout_release_data(f); -- kfree(f); -+ if (po->rollover) -+ kfree_rcu(po->rollover, rcu); - } - mutex_unlock(&fanout_mutex); - -- if (po->rollover) -- kfree_rcu(po->rollover, rcu); -+ return f; - } - - static bool packet_extra_vlan_len_allowed(const struct net_device *dev, -@@ -2965,6 +2978,7 @@ static int packet_release(struct socket *sock) - { - struct sock *sk = sock->sk; - struct packet_sock *po; -+ struct packet_fanout *f; - struct net *net; - union tpacket_req_u req_u; - -@@ -3004,9 +3018,14 @@ static int packet_release(struct socket *sock) - packet_set_ring(sk, &req_u, 1, 1); - } - -- fanout_release(sk); -+ f = fanout_release(sk); - - synchronize_net(); -+ -+ if (f) { -+ fanout_release_data(f); -+ kfree(f); -+ } - /* - * Now the socket is dead. No more input will appear. - */ -@@ -3958,7 +3977,6 @@ static int packet_notifier(struct notifier_block *this, - } - if (msg == NETDEV_UNREGISTER) { - packet_cached_dev_reset(po); -- fanout_release(sk); - po->ifindex = -1; - if (po->prot_hook.dev) - dev_put(po->prot_hook.dev); -diff --git a/net/socket.c b/net/socket.c -index 73dc69f..6bbccf0 100644 ---- a/net/socket.c -+++ b/net/socket.c -@@ -2197,8 +2197,10 @@ int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, - return err; - - err = sock_error(sock->sk); -- if (err) -+ if (err) { -+ datagrams = err; - goto out_put; -+ } - - entry = mmsg; - compat_entry = (struct compat_mmsghdr __user *)mmsg; diff --git a/4.9.13/0000_README b/4.9.14/0000_README index f323ffe..fc6a48b 100644 --- a/4.9.13/0000_README +++ b/4.9.14/0000_README @@ -2,11 +2,11 @@ README ----------------------------------------------------------------------------- Individual Patch Descriptions: ----------------------------------------------------------------------------- -Patch: 1012_linux-4.9.13.patch +Patch: 1013_linux-4.9.14.patch From: http://www.kernel.org -Desc: Linux 4.9.13 +Desc: Linux 4.9.14 -Patch: 4420_grsecurity-3.1-4.9.13-201703052141.patch +Patch: 4420_grsecurity-3.1-4.9.14-201703121245.patch From: http://www.grsecurity.net Desc: hardened-sources base patch from upstream grsecurity diff --git a/4.9.14/1013_linux-4.9.14.patch b/4.9.14/1013_linux-4.9.14.patch new file mode 100644 index 0000000..5d8c119 --- /dev/null +++ b/4.9.14/1013_linux-4.9.14.patch @@ -0,0 +1,6768 @@ +diff --git a/Makefile b/Makefile +index 14dc275..5e7706e 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 9 +-SUBLEVEL = 13 ++SUBLEVEL = 14 + EXTRAVERSION = + NAME = Roaring Lionus + +diff --git a/arch/arm/boot/dts/at91-sama5d2_xplained.dts b/arch/arm/boot/dts/at91-sama5d2_xplained.dts +index 0b9a59d..30fac04 100644 +--- a/arch/arm/boot/dts/at91-sama5d2_xplained.dts ++++ b/arch/arm/boot/dts/at91-sama5d2_xplained.dts +@@ -148,6 +148,8 @@ + uart1: serial@f8020000 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_uart1_default>; ++ atmel,use-dma-rx; ++ atmel,use-dma-tx; + 
status = "okay"; + }; + +diff --git a/arch/arm/boot/dts/at91-sama5d4_xplained.dts b/arch/arm/boot/dts/at91-sama5d4_xplained.dts +index ed7fce2..44d1171 100644 +--- a/arch/arm/boot/dts/at91-sama5d4_xplained.dts ++++ b/arch/arm/boot/dts/at91-sama5d4_xplained.dts +@@ -110,6 +110,8 @@ + }; + + usart3: serial@fc00c000 { ++ atmel,use-dma-rx; ++ atmel,use-dma-tx; + status = "okay"; + }; + +diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h +index 74a44727..a58bbaa 100644 +--- a/arch/arm/include/asm/kvm_mmu.h ++++ b/arch/arm/include/asm/kvm_mmu.h +@@ -150,18 +150,12 @@ static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, + * and iterate over the range. + */ + +- bool need_flush = !vcpu_has_cache_enabled(vcpu) || ipa_uncached; +- + VM_BUG_ON(size & ~PAGE_MASK); + +- if (!need_flush && !icache_is_pipt()) +- goto vipt_cache; +- + while (size) { + void *va = kmap_atomic_pfn(pfn); + +- if (need_flush) +- kvm_flush_dcache_to_poc(va, PAGE_SIZE); ++ kvm_flush_dcache_to_poc(va, PAGE_SIZE); + + if (icache_is_pipt()) + __cpuc_coherent_user_range((unsigned long)va, +@@ -173,7 +167,6 @@ static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, + kunmap_atomic(va); + } + +-vipt_cache: + if (!icache_is_pipt() && !icache_is_vivt_asid_tagged()) { + /* any kind of VIPT cache */ + __flush_icache_all(); +diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h +index 6f72fe8..6d22017 100644 +--- a/arch/arm64/include/asm/kvm_mmu.h ++++ b/arch/arm64/include/asm/kvm_mmu.h +@@ -241,8 +241,7 @@ static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, + { + void *va = page_address(pfn_to_page(pfn)); + +- if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached) +- kvm_flush_dcache_to_poc(va, size); ++ kvm_flush_dcache_to_poc(va, size); + + if (!icache_is_aliasing()) { /* PIPT */ + flush_icache_range((unsigned long)va, +diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c +index c02504e..3a129d4 100644 +--- a/arch/arm64/kernel/cpufeature.c ++++ b/arch/arm64/kernel/cpufeature.c +@@ -653,15 +653,15 @@ static u64 __raw_read_system_reg(u32 sys_id) + case SYS_ID_ISAR2_EL1: return read_cpuid(ID_ISAR2_EL1); + case SYS_ID_ISAR3_EL1: return read_cpuid(ID_ISAR3_EL1); + case SYS_ID_ISAR4_EL1: return read_cpuid(ID_ISAR4_EL1); +- case SYS_ID_ISAR5_EL1: return read_cpuid(ID_ISAR4_EL1); ++ case SYS_ID_ISAR5_EL1: return read_cpuid(ID_ISAR5_EL1); + case SYS_MVFR0_EL1: return read_cpuid(MVFR0_EL1); + case SYS_MVFR1_EL1: return read_cpuid(MVFR1_EL1); + case SYS_MVFR2_EL1: return read_cpuid(MVFR2_EL1); + + case SYS_ID_AA64PFR0_EL1: return read_cpuid(ID_AA64PFR0_EL1); +- case SYS_ID_AA64PFR1_EL1: return read_cpuid(ID_AA64PFR0_EL1); ++ case SYS_ID_AA64PFR1_EL1: return read_cpuid(ID_AA64PFR1_EL1); + case SYS_ID_AA64DFR0_EL1: return read_cpuid(ID_AA64DFR0_EL1); +- case SYS_ID_AA64DFR1_EL1: return read_cpuid(ID_AA64DFR0_EL1); ++ case SYS_ID_AA64DFR1_EL1: return read_cpuid(ID_AA64DFR1_EL1); + case SYS_ID_AA64MMFR0_EL1: return read_cpuid(ID_AA64MMFR0_EL1); + case SYS_ID_AA64MMFR1_EL1: return read_cpuid(ID_AA64MMFR1_EL1); + case SYS_ID_AA64MMFR2_EL1: return read_cpuid(ID_AA64MMFR2_EL1); +diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c +index 02265a5..b5bf46c 100644 +--- a/arch/arm64/mm/dma-mapping.c ++++ b/arch/arm64/mm/dma-mapping.c +@@ -352,6 +352,13 @@ static int __swiotlb_dma_supported(struct device *hwdev, u64 mask) + return 1; + } + ++static int __swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t addr) ++{ 
++ if (swiotlb) ++ return swiotlb_dma_mapping_error(hwdev, addr); ++ return 0; ++} ++ + static struct dma_map_ops swiotlb_dma_ops = { + .alloc = __dma_alloc, + .free = __dma_free, +@@ -366,7 +373,7 @@ static struct dma_map_ops swiotlb_dma_ops = { + .sync_sg_for_cpu = __swiotlb_sync_sg_for_cpu, + .sync_sg_for_device = __swiotlb_sync_sg_for_device, + .dma_supported = __swiotlb_dma_supported, +- .mapping_error = swiotlb_dma_mapping_error, ++ .mapping_error = __swiotlb_dma_mapping_error, + }; + + static int __init atomic_pool_init(void) +diff --git a/arch/mips/bcm47xx/buttons.c b/arch/mips/bcm47xx/buttons.c +index 52caa75..e2f50d6 100644 +--- a/arch/mips/bcm47xx/buttons.c ++++ b/arch/mips/bcm47xx/buttons.c +@@ -17,6 +17,12 @@ + .active_low = 1, \ + } + ++#define BCM47XX_GPIO_KEY_H(_gpio, _code) \ ++ { \ ++ .code = _code, \ ++ .gpio = _gpio, \ ++ } ++ + /* Asus */ + + static const struct gpio_keys_button +@@ -79,8 +85,8 @@ bcm47xx_buttons_asus_wl500gpv2[] __initconst = { + + static const struct gpio_keys_button + bcm47xx_buttons_asus_wl500w[] __initconst = { +- BCM47XX_GPIO_KEY(6, KEY_RESTART), +- BCM47XX_GPIO_KEY(7, KEY_WPS_BUTTON), ++ BCM47XX_GPIO_KEY_H(6, KEY_RESTART), ++ BCM47XX_GPIO_KEY_H(7, KEY_WPS_BUTTON), + }; + + static const struct gpio_keys_button +diff --git a/arch/mips/cavium-octeon/octeon-memcpy.S b/arch/mips/cavium-octeon/octeon-memcpy.S +index 64e08df..8b70041 100644 +--- a/arch/mips/cavium-octeon/octeon-memcpy.S ++++ b/arch/mips/cavium-octeon/octeon-memcpy.S +@@ -208,18 +208,18 @@ EXC( STORE t2, UNIT(6)(dst), s_exc_p10u) + ADD src, src, 16*NBYTES + EXC( STORE t3, UNIT(7)(dst), s_exc_p9u) + ADD dst, dst, 16*NBYTES +-EXC( LOAD t0, UNIT(-8)(src), l_exc_copy) +-EXC( LOAD t1, UNIT(-7)(src), l_exc_copy) +-EXC( LOAD t2, UNIT(-6)(src), l_exc_copy) +-EXC( LOAD t3, UNIT(-5)(src), l_exc_copy) ++EXC( LOAD t0, UNIT(-8)(src), l_exc_copy_rewind16) ++EXC( LOAD t1, UNIT(-7)(src), l_exc_copy_rewind16) ++EXC( LOAD t2, UNIT(-6)(src), l_exc_copy_rewind16) ++EXC( LOAD t3, UNIT(-5)(src), l_exc_copy_rewind16) + EXC( STORE t0, UNIT(-8)(dst), s_exc_p8u) + EXC( STORE t1, UNIT(-7)(dst), s_exc_p7u) + EXC( STORE t2, UNIT(-6)(dst), s_exc_p6u) + EXC( STORE t3, UNIT(-5)(dst), s_exc_p5u) +-EXC( LOAD t0, UNIT(-4)(src), l_exc_copy) +-EXC( LOAD t1, UNIT(-3)(src), l_exc_copy) +-EXC( LOAD t2, UNIT(-2)(src), l_exc_copy) +-EXC( LOAD t3, UNIT(-1)(src), l_exc_copy) ++EXC( LOAD t0, UNIT(-4)(src), l_exc_copy_rewind16) ++EXC( LOAD t1, UNIT(-3)(src), l_exc_copy_rewind16) ++EXC( LOAD t2, UNIT(-2)(src), l_exc_copy_rewind16) ++EXC( LOAD t3, UNIT(-1)(src), l_exc_copy_rewind16) + EXC( STORE t0, UNIT(-4)(dst), s_exc_p4u) + EXC( STORE t1, UNIT(-3)(dst), s_exc_p3u) + EXC( STORE t2, UNIT(-2)(dst), s_exc_p2u) +@@ -383,6 +383,10 @@ done: + nop + END(memcpy) + ++l_exc_copy_rewind16: ++ /* Rewind src and dst by 16*NBYTES for l_exc_copy */ ++ SUB src, src, 16*NBYTES ++ SUB dst, dst, 16*NBYTES + l_exc_copy: + /* + * Copy bytes from src until faulting load address (or until a +diff --git a/arch/mips/include/asm/checksum.h b/arch/mips/include/asm/checksum.h +index bce1ce5..0e23197 100644 +--- a/arch/mips/include/asm/checksum.h ++++ b/arch/mips/include/asm/checksum.h +@@ -186,7 +186,9 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, + " daddu %0, %4 \n" + " dsll32 $1, %0, 0 \n" + " daddu %0, $1 \n" ++ " sltu $1, %0, $1 \n" + " dsra32 %0, %0, 0 \n" ++ " addu %0, $1 \n" + #endif + " .set pop" + : "=r" (sum) +diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c +index 9514e5f..1652f36 100644 +--- 
a/arch/mips/kernel/process.c ++++ b/arch/mips/kernel/process.c +@@ -195,11 +195,9 @@ struct mips_frame_info { + #define J_TARGET(pc,target) \ + (((unsigned long)(pc) & 0xf0000000) | ((target) << 2)) + +-static inline int is_ra_save_ins(union mips_instruction *ip) ++static inline int is_ra_save_ins(union mips_instruction *ip, int *poff) + { + #ifdef CONFIG_CPU_MICROMIPS +- union mips_instruction mmi; +- + /* + * swsp ra,offset + * swm16 reglist,offset(sp) +@@ -209,29 +207,71 @@ static inline int is_ra_save_ins(union mips_instruction *ip) + * + * microMIPS is way more fun... + */ +- if (mm_insn_16bit(ip->halfword[0])) { +- mmi.word = (ip->halfword[0] << 16); +- return (mmi.mm16_r5_format.opcode == mm_swsp16_op && +- mmi.mm16_r5_format.rt == 31) || +- (mmi.mm16_m_format.opcode == mm_pool16c_op && +- mmi.mm16_m_format.func == mm_swm16_op); ++ if (mm_insn_16bit(ip->halfword[1])) { ++ switch (ip->mm16_r5_format.opcode) { ++ case mm_swsp16_op: ++ if (ip->mm16_r5_format.rt != 31) ++ return 0; ++ ++ *poff = ip->mm16_r5_format.simmediate; ++ *poff = (*poff << 2) / sizeof(ulong); ++ return 1; ++ ++ case mm_pool16c_op: ++ switch (ip->mm16_m_format.func) { ++ case mm_swm16_op: ++ *poff = ip->mm16_m_format.imm; ++ *poff += 1 + ip->mm16_m_format.rlist; ++ *poff = (*poff << 2) / sizeof(ulong); ++ return 1; ++ ++ default: ++ return 0; ++ } ++ ++ default: ++ return 0; ++ } + } +- else { +- mmi.halfword[0] = ip->halfword[1]; +- mmi.halfword[1] = ip->halfword[0]; +- return (mmi.mm_m_format.opcode == mm_pool32b_op && +- mmi.mm_m_format.rd > 9 && +- mmi.mm_m_format.base == 29 && +- mmi.mm_m_format.func == mm_swm32_func) || +- (mmi.i_format.opcode == mm_sw32_op && +- mmi.i_format.rs == 29 && +- mmi.i_format.rt == 31); ++ ++ switch (ip->i_format.opcode) { ++ case mm_sw32_op: ++ if (ip->i_format.rs != 29) ++ return 0; ++ if (ip->i_format.rt != 31) ++ return 0; ++ ++ *poff = ip->i_format.simmediate / sizeof(ulong); ++ return 1; ++ ++ case mm_pool32b_op: ++ switch (ip->mm_m_format.func) { ++ case mm_swm32_func: ++ if (ip->mm_m_format.rd < 0x10) ++ return 0; ++ if (ip->mm_m_format.base != 29) ++ return 0; ++ ++ *poff = ip->mm_m_format.simmediate; ++ *poff += (ip->mm_m_format.rd & 0xf) * sizeof(u32); ++ *poff /= sizeof(ulong); ++ return 1; ++ default: ++ return 0; ++ } ++ ++ default: ++ return 0; + } + #else + /* sw / sd $ra, offset($sp) */ +- return (ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) && +- ip->i_format.rs == 29 && +- ip->i_format.rt == 31; ++ if ((ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) && ++ ip->i_format.rs == 29 && ip->i_format.rt == 31) { ++ *poff = ip->i_format.simmediate / sizeof(ulong); ++ return 1; ++ } ++ ++ return 0; + #endif + } + +@@ -246,13 +286,16 @@ static inline int is_jump_ins(union mips_instruction *ip) + * + * microMIPS is kind of more fun... 
+ */ +- union mips_instruction mmi; +- +- mmi.word = (ip->halfword[0] << 16); ++ if (mm_insn_16bit(ip->halfword[1])) { ++ if ((ip->mm16_r5_format.opcode == mm_pool16c_op && ++ (ip->mm16_r5_format.rt & mm_jr16_op) == mm_jr16_op)) ++ return 1; ++ return 0; ++ } + +- if ((mmi.mm16_r5_format.opcode == mm_pool16c_op && +- (mmi.mm16_r5_format.rt & mm_jr16_op) == mm_jr16_op) || +- ip->j_format.opcode == mm_jal32_op) ++ if (ip->j_format.opcode == mm_j32_op) ++ return 1; ++ if (ip->j_format.opcode == mm_jal32_op) + return 1; + if (ip->r_format.opcode != mm_pool32a_op || + ip->r_format.func != mm_pool32axf_op) +@@ -280,15 +323,13 @@ static inline int is_sp_move_ins(union mips_instruction *ip) + * + * microMIPS is not more fun... + */ +- if (mm_insn_16bit(ip->halfword[0])) { +- union mips_instruction mmi; +- +- mmi.word = (ip->halfword[0] << 16); +- return (mmi.mm16_r3_format.opcode == mm_pool16d_op && +- mmi.mm16_r3_format.simmediate && mm_addiusp_func) || +- (mmi.mm16_r5_format.opcode == mm_pool16d_op && +- mmi.mm16_r5_format.rt == 29); ++ if (mm_insn_16bit(ip->halfword[1])) { ++ return (ip->mm16_r3_format.opcode == mm_pool16d_op && ++ ip->mm16_r3_format.simmediate && mm_addiusp_func) || ++ (ip->mm16_r5_format.opcode == mm_pool16d_op && ++ ip->mm16_r5_format.rt == 29); + } ++ + return ip->mm_i_format.opcode == mm_addiu32_op && + ip->mm_i_format.rt == 29 && ip->mm_i_format.rs == 29; + #else +@@ -303,30 +344,36 @@ static inline int is_sp_move_ins(union mips_instruction *ip) + + static int get_frame_info(struct mips_frame_info *info) + { +-#ifdef CONFIG_CPU_MICROMIPS +- union mips_instruction *ip = (void *) (((char *) info->func) - 1); +-#else +- union mips_instruction *ip = info->func; +-#endif +- unsigned max_insns = info->func_size / sizeof(union mips_instruction); +- unsigned i; ++ bool is_mmips = IS_ENABLED(CONFIG_CPU_MICROMIPS); ++ union mips_instruction insn, *ip, *ip_end; ++ const unsigned int max_insns = 128; ++ unsigned int i; + + info->pc_offset = -1; + info->frame_size = 0; + ++ ip = (void *)msk_isa16_mode((ulong)info->func); + if (!ip) + goto err; + +- if (max_insns == 0) +- max_insns = 128U; /* unknown function size */ +- max_insns = min(128U, max_insns); ++ ip_end = (void *)ip + info->func_size; + +- for (i = 0; i < max_insns; i++, ip++) { ++ for (i = 0; i < max_insns && ip < ip_end; i++, ip++) { ++ if (is_mmips && mm_insn_16bit(ip->halfword[0])) { ++ insn.halfword[0] = 0; ++ insn.halfword[1] = ip->halfword[0]; ++ } else if (is_mmips) { ++ insn.halfword[0] = ip->halfword[1]; ++ insn.halfword[1] = ip->halfword[0]; ++ } else { ++ insn.word = ip->word; ++ } + +- if (is_jump_ins(ip)) ++ if (is_jump_ins(&insn)) + break; ++ + if (!info->frame_size) { +- if (is_sp_move_ins(ip)) ++ if (is_sp_move_ins(&insn)) + { + #ifdef CONFIG_CPU_MICROMIPS + if (mm_insn_16bit(ip->halfword[0])) +@@ -349,11 +396,9 @@ static int get_frame_info(struct mips_frame_info *info) + } + continue; + } +- if (info->pc_offset == -1 && is_ra_save_ins(ip)) { +- info->pc_offset = +- ip->i_format.simmediate / sizeof(long); ++ if (info->pc_offset == -1 && ++ is_ra_save_ins(&insn, &info->pc_offset)) + break; +- } + } + if (info->frame_size && info->pc_offset >= 0) /* nested */ + return 0; +diff --git a/arch/mips/lantiq/xway/sysctrl.c b/arch/mips/lantiq/xway/sysctrl.c +index 236193b..9a61671 100644 +--- a/arch/mips/lantiq/xway/sysctrl.c ++++ b/arch/mips/lantiq/xway/sysctrl.c +@@ -545,7 +545,7 @@ void __init ltq_soc_init(void) + clkdev_add_pmu("1a800000.pcie", "msi", 1, 1, PMU1_PCIE2_MSI); + clkdev_add_pmu("1a800000.pcie", "pdi", 
1, 1, PMU1_PCIE2_PDI); + clkdev_add_pmu("1a800000.pcie", "ctl", 1, 1, PMU1_PCIE2_CTL); +- clkdev_add_pmu("1e108000.eth", NULL, 1, 0, PMU_SWITCH | PMU_PPE_DP); ++ clkdev_add_pmu("1e108000.eth", NULL, 0, 0, PMU_SWITCH | PMU_PPE_DP); + clkdev_add_pmu("1da00000.usif", "NULL", 1, 0, PMU_USIF); + clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU); + } else if (of_machine_is_compatible("lantiq,ar10")) { +@@ -553,7 +553,7 @@ void __init ltq_soc_init(void) + ltq_ar10_fpi_hz(), ltq_ar10_pp32_hz()); + clkdev_add_pmu("1e101000.usb", "ctl", 1, 0, PMU_USB0); + clkdev_add_pmu("1e106000.usb", "ctl", 1, 0, PMU_USB1); +- clkdev_add_pmu("1e108000.eth", NULL, 1, 0, PMU_SWITCH | ++ clkdev_add_pmu("1e108000.eth", NULL, 0, 0, PMU_SWITCH | + PMU_PPE_DP | PMU_PPE_TC); + clkdev_add_pmu("1da00000.usif", "NULL", 1, 0, PMU_USIF); + clkdev_add_pmu("1f203000.rcu", "gphy", 1, 0, PMU_GPHY); +@@ -575,11 +575,11 @@ void __init ltq_soc_init(void) + clkdev_add_pmu(NULL, "ahb", 1, 0, PMU_AHBM | PMU_AHBS); + + clkdev_add_pmu("1da00000.usif", "NULL", 1, 0, PMU_USIF); +- clkdev_add_pmu("1e108000.eth", NULL, 1, 0, ++ clkdev_add_pmu("1e108000.eth", NULL, 0, 0, + PMU_SWITCH | PMU_PPE_DPLUS | PMU_PPE_DPLUM | + PMU_PPE_EMA | PMU_PPE_TC | PMU_PPE_SLL01 | + PMU_PPE_QSB | PMU_PPE_TOP); +- clkdev_add_pmu("1f203000.rcu", "gphy", 1, 0, PMU_GPHY); ++ clkdev_add_pmu("1f203000.rcu", "gphy", 0, 0, PMU_GPHY); + clkdev_add_pmu("1e103000.sdio", NULL, 1, 0, PMU_SDIO); + clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU); + clkdev_add_pmu("1e116000.mei", "dfe", 1, 0, PMU_DFE); +diff --git a/arch/mips/mm/sc-ip22.c b/arch/mips/mm/sc-ip22.c +index 026cb59..f293a97 100644 +--- a/arch/mips/mm/sc-ip22.c ++++ b/arch/mips/mm/sc-ip22.c +@@ -31,26 +31,40 @@ static inline void indy_sc_wipe(unsigned long first, unsigned long last) + unsigned long tmp; + + __asm__ __volatile__( +- ".set\tpush\t\t\t# indy_sc_wipe\n\t" +- ".set\tnoreorder\n\t" +- ".set\tmips3\n\t" +- ".set\tnoat\n\t" +- "mfc0\t%2, $12\n\t" +- "li\t$1, 0x80\t\t\t# Go 64 bit\n\t" +- "mtc0\t$1, $12\n\t" +- +- "dli\t$1, 0x9000000080000000\n\t" +- "or\t%0, $1\t\t\t# first line to flush\n\t" +- "or\t%1, $1\t\t\t# last line to flush\n\t" +- ".set\tat\n\t" +- +- "1:\tsw\t$0, 0(%0)\n\t" +- "bne\t%0, %1, 1b\n\t" +- " daddu\t%0, 32\n\t" +- +- "mtc0\t%2, $12\t\t\t# Back to 32 bit\n\t" +- "nop; nop; nop; nop;\n\t" +- ".set\tpop" ++ " .set push # indy_sc_wipe \n" ++ " .set noreorder \n" ++ " .set mips3 \n" ++ " .set noat \n" ++ " mfc0 %2, $12 \n" ++ " li $1, 0x80 # Go 64 bit \n" ++ " mtc0 $1, $12 \n" ++ " \n" ++ " # \n" ++ " # Open code a dli $1, 0x9000000080000000 \n" ++ " # \n" ++ " # Required because binutils 2.25 will happily accept \n" ++ " # 64 bit instructions in .set mips3 mode but puke on \n" ++ " # 64 bit constants when generating 32 bit ELF \n" ++ " # \n" ++ " lui $1,0x9000 \n" ++ " dsll $1,$1,0x10 \n" ++ " ori $1,$1,0x8000 \n" ++ " dsll $1,$1,0x10 \n" ++ " \n" ++ " or %0, $1 # first line to flush \n" ++ " or %1, $1 # last line to flush \n" ++ " .set at \n" ++ " \n" ++ "1: sw $0, 0(%0) \n" ++ " bne %0, %1, 1b \n" ++ " daddu %0, 32 \n" ++ " \n" ++ " mtc0 %2, $12 # Back to 32 bit \n" ++ " nop # pipeline hazard \n" ++ " nop \n" ++ " nop \n" ++ " nop \n" ++ " .set pop \n" + : "=r" (first), "=r" (last), "=&r" (tmp) + : "0" (first), "1" (last)); + } +diff --git a/arch/mips/pic32/pic32mzda/Makefile b/arch/mips/pic32/pic32mzda/Makefile +index 4a4c272..c286496 100644 +--- a/arch/mips/pic32/pic32mzda/Makefile ++++ b/arch/mips/pic32/pic32mzda/Makefile +@@ -2,8 +2,7 @@ + # Joshua Henderson, 
<joshua.henderson@microchip.com> + # Copyright (C) 2015 Microchip Technology, Inc. All rights reserved. + # +-obj-y := init.o time.o config.o ++obj-y := config.o early_clk.o init.o time.o + + obj-$(CONFIG_EARLY_PRINTK) += early_console.o \ +- early_pin.o \ +- early_clk.o ++ early_pin.o +diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h +index a244e09..5d22b0b 100644 +--- a/arch/powerpc/include/asm/mmu.h ++++ b/arch/powerpc/include/asm/mmu.h +@@ -136,6 +136,7 @@ enum { + MMU_FTR_NO_SLBIE_B | MMU_FTR_16M_PAGE | MMU_FTR_TLBIEL | + MMU_FTR_LOCKLESS_TLBIE | MMU_FTR_CI_LARGE_PAGE | + MMU_FTR_1T_SEGMENT | MMU_FTR_TLBIE_CROP_VA | ++ MMU_FTR_KERNEL_RO | + #ifdef CONFIG_PPC_RADIX_MMU + MMU_FTR_TYPE_RADIX | + #endif +diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S +index 37c027c..7803756 100644 +--- a/arch/powerpc/kernel/cpu_setup_power.S ++++ b/arch/powerpc/kernel/cpu_setup_power.S +@@ -100,6 +100,8 @@ _GLOBAL(__setup_cpu_power9) + mfspr r3,SPRN_LPCR + LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE) + or r3, r3, r4 ++ LOAD_REG_IMMEDIATE(r4, LPCR_UPRT | LPCR_HR) ++ andc r3, r3, r4 + bl __init_LPCR + bl __init_HFSCR + bl __init_tlb_power9 +@@ -120,6 +122,8 @@ _GLOBAL(__restore_cpu_power9) + mfspr r3,SPRN_LPCR + LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE) + or r3, r3, r4 ++ LOAD_REG_IMMEDIATE(r4, LPCR_UPRT | LPCR_HR) ++ andc r3, r3, r4 + bl __init_LPCR + bl __init_HFSCR + bl __init_tlb_power9 +diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c +index 03d089b..469d86d 100644 +--- a/arch/powerpc/kernel/hw_breakpoint.c ++++ b/arch/powerpc/kernel/hw_breakpoint.c +@@ -228,8 +228,10 @@ int hw_breakpoint_handler(struct die_args *args) + rcu_read_lock(); + + bp = __this_cpu_read(bp_per_reg); +- if (!bp) ++ if (!bp) { ++ rc = NOTIFY_DONE; + goto out; ++ } + info = counter_arch_bp(bp); + + /* +diff --git a/arch/x86/include/asm/pkeys.h b/arch/x86/include/asm/pkeys.h +index 34684ad..b3b09b9 100644 +--- a/arch/x86/include/asm/pkeys.h ++++ b/arch/x86/include/asm/pkeys.h +@@ -46,6 +46,15 @@ extern int __arch_set_user_pkey_access(struct task_struct *tsk, int pkey, + static inline + bool mm_pkey_is_allocated(struct mm_struct *mm, int pkey) + { ++ /* ++ * "Allocated" pkeys are those that have been returned ++ * from pkey_alloc(). pkey 0 is special, and never ++ * returned from pkey_alloc(). ++ */ ++ if (pkey <= 0) ++ return false; ++ if (pkey >= arch_max_pkey()) ++ return false; + return mm_pkey_allocation_map(mm) & (1U << pkey); + } + +@@ -82,12 +91,6 @@ int mm_pkey_alloc(struct mm_struct *mm) + static inline + int mm_pkey_free(struct mm_struct *mm, int pkey) + { +- /* +- * pkey 0 is special, always allocated and can never +- * be freed. 
+- */ +- if (!pkey) +- return -EINVAL; + if (!mm_pkey_is_allocated(mm, pkey)) + return -EINVAL; + +diff --git a/crypto/testmgr.h b/crypto/testmgr.h +index e64a4ef..9033088 100644 +--- a/crypto/testmgr.h ++++ b/crypto/testmgr.h +@@ -22813,7 +22813,7 @@ static struct aead_testvec aes_ccm_enc_tv_template[] = { + "\x09\x75\x9a\x9b\x3c\x9b\x27\x39", + .klen = 32, + .iv = "\x03\xf9\xd9\x4e\x63\xb5\x3d\x9d" +- "\x43\xf6\x1e\x50", ++ "\x43\xf6\x1e\x50\0\0\0\0", + .assoc = "\x57\xf5\x6b\x8b\x57\x5c\x3d\x3b" + "\x13\x02\x01\x0c\x83\x4c\x96\x35" + "\x8e\xd6\x39\xcf\x7d\x14\x9b\x94" +diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c +index 2c1798e..3868823 100644 +--- a/drivers/bcma/main.c ++++ b/drivers/bcma/main.c +@@ -633,8 +633,11 @@ static int bcma_device_probe(struct device *dev) + drv); + int err = 0; + ++ get_device(dev); + if (adrv->probe) + err = adrv->probe(core); ++ if (err) ++ put_device(dev); + + return err; + } +@@ -647,6 +650,7 @@ static int bcma_device_remove(struct device *dev) + + if (adrv->remove) + adrv->remove(core); ++ put_device(dev); + + return 0; + } +diff --git a/drivers/block/loop.c b/drivers/block/loop.c +index 4af8187..24d6cef 100644 +--- a/drivers/block/loop.c ++++ b/drivers/block/loop.c +@@ -1097,9 +1097,12 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info) + if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE) + return -EINVAL; + ++ /* I/O need to be drained during transfer transition */ ++ blk_mq_freeze_queue(lo->lo_queue); ++ + err = loop_release_xfer(lo); + if (err) +- return err; ++ goto exit; + + if (info->lo_encrypt_type) { + unsigned int type = info->lo_encrypt_type; +@@ -1114,12 +1117,14 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info) + + err = loop_init_xfer(lo, xfer, info); + if (err) +- return err; ++ goto exit; + + if (lo->lo_offset != info->lo_offset || + lo->lo_sizelimit != info->lo_sizelimit) +- if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit)) +- return -EFBIG; ++ if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit)) { ++ err = -EFBIG; ++ goto exit; ++ } + + loop_config_discard(lo); + +@@ -1137,13 +1142,6 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info) + (info->lo_flags & LO_FLAGS_AUTOCLEAR)) + lo->lo_flags ^= LO_FLAGS_AUTOCLEAR; + +- if ((info->lo_flags & LO_FLAGS_PARTSCAN) && +- !(lo->lo_flags & LO_FLAGS_PARTSCAN)) { +- lo->lo_flags |= LO_FLAGS_PARTSCAN; +- lo->lo_disk->flags &= ~GENHD_FL_NO_PART_SCAN; +- loop_reread_partitions(lo, lo->lo_device); +- } +- + lo->lo_encrypt_key_size = info->lo_encrypt_key_size; + lo->lo_init[0] = info->lo_init[0]; + lo->lo_init[1] = info->lo_init[1]; +@@ -1156,7 +1154,17 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info) + /* update dio if lo_offset or transfer is changed */ + __loop_update_dio(lo, lo->use_dio); + +- return 0; ++ exit: ++ blk_mq_unfreeze_queue(lo->lo_queue); ++ ++ if (!err && (info->lo_flags & LO_FLAGS_PARTSCAN) && ++ !(lo->lo_flags & LO_FLAGS_PARTSCAN)) { ++ lo->lo_flags |= LO_FLAGS_PARTSCAN; ++ lo->lo_disk->flags &= ~GENHD_FL_NO_PART_SCAN; ++ loop_reread_partitions(lo, lo->lo_device); ++ } ++ ++ return err; + } + + static int +diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c +index eaf5730..8022bea 100644 +--- a/drivers/char/tpm/tpm_tis.c ++++ b/drivers/char/tpm/tpm_tis.c +@@ -421,7 +421,7 @@ static int __init init_tis(void) + acpi_bus_unregister_driver(&tis_acpi_driver); + err_acpi: + #endif +- platform_device_unregister(force_pdev); ++ 
platform_driver_unregister(&tis_drv); + err_platform: + if (force_pdev) + platform_device_unregister(force_pdev); +diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c +index 712592c..7309c08 100644 +--- a/drivers/devfreq/devfreq.c ++++ b/drivers/devfreq/devfreq.c +@@ -130,7 +130,7 @@ static void devfreq_set_freq_table(struct devfreq *devfreq) + * @devfreq: the devfreq instance + * @freq: the update target frequency + */ +-static int devfreq_update_status(struct devfreq *devfreq, unsigned long freq) ++int devfreq_update_status(struct devfreq *devfreq, unsigned long freq) + { + int lev, prev_lev, ret = 0; + unsigned long cur_time; +@@ -166,6 +166,7 @@ static int devfreq_update_status(struct devfreq *devfreq, unsigned long freq) + devfreq->last_stat_updated = cur_time; + return ret; + } ++EXPORT_SYMBOL(devfreq_update_status); + + /** + * find_devfreq_governor() - find devfreq governor from name +@@ -939,6 +940,9 @@ static ssize_t governor_store(struct device *dev, struct device_attribute *attr, + if (df->governor == governor) { + ret = 0; + goto out; ++ } else if (df->governor->immutable || governor->immutable) { ++ ret = -EINVAL; ++ goto out; + } + + if (df->governor) { +@@ -968,13 +972,33 @@ static ssize_t available_governors_show(struct device *d, + struct device_attribute *attr, + char *buf) + { +- struct devfreq_governor *tmp_governor; ++ struct devfreq *df = to_devfreq(d); + ssize_t count = 0; + + mutex_lock(&devfreq_list_lock); +- list_for_each_entry(tmp_governor, &devfreq_governor_list, node) +- count += scnprintf(&buf[count], (PAGE_SIZE - count - 2), +- "%s ", tmp_governor->name); ++ ++ /* ++ * The devfreq with immutable governor (e.g., passive) shows ++ * only own governor. ++ */ ++ if (df->governor->immutable) { ++ count = scnprintf(&buf[count], DEVFREQ_NAME_LEN, ++ "%s ", df->governor_name); ++ /* ++ * The devfreq device shows the registered governor except for ++ * immutable governors such as passive governor . 
++ */ ++ } else { ++ struct devfreq_governor *governor; ++ ++ list_for_each_entry(governor, &devfreq_governor_list, node) { ++ if (governor->immutable) ++ continue; ++ count += scnprintf(&buf[count], (PAGE_SIZE - count - 2), ++ "%s ", governor->name); ++ } ++ } ++ + mutex_unlock(&devfreq_list_lock); + + /* Truncate the trailing space */ +diff --git a/drivers/devfreq/governor.h b/drivers/devfreq/governor.h +index fad7d63..71576b8 100644 +--- a/drivers/devfreq/governor.h ++++ b/drivers/devfreq/governor.h +@@ -38,4 +38,6 @@ extern void devfreq_interval_update(struct devfreq *devfreq, + extern int devfreq_add_governor(struct devfreq_governor *governor); + extern int devfreq_remove_governor(struct devfreq_governor *governor); + ++extern int devfreq_update_status(struct devfreq *devfreq, unsigned long freq); ++ + #endif /* _GOVERNOR_H */ +diff --git a/drivers/devfreq/governor_passive.c b/drivers/devfreq/governor_passive.c +index 9ef46e2..5be96b2 100644 +--- a/drivers/devfreq/governor_passive.c ++++ b/drivers/devfreq/governor_passive.c +@@ -112,6 +112,11 @@ static int update_devfreq_passive(struct devfreq *devfreq, unsigned long freq) + if (ret < 0) + goto out; + ++ if (devfreq->profile->freq_table ++ && (devfreq_update_status(devfreq, freq))) ++ dev_err(&devfreq->dev, ++ "Couldn't update frequency transition information.\n"); ++ + devfreq->previous_freq = freq; + + out: +@@ -179,6 +184,7 @@ static int devfreq_passive_event_handler(struct devfreq *devfreq, + + static struct devfreq_governor devfreq_passive = { + .name = "passive", ++ .immutable = 1, + .get_target_freq = devfreq_passive_get_target_freq, + .event_handler = devfreq_passive_event_handler, + }; +diff --git a/drivers/dma/ipu/ipu_irq.c b/drivers/dma/ipu/ipu_irq.c +index dd184b5..2846278 100644 +--- a/drivers/dma/ipu/ipu_irq.c ++++ b/drivers/dma/ipu/ipu_irq.c +@@ -272,7 +272,7 @@ static void ipu_irq_handler(struct irq_desc *desc) + u32 status; + int i, line; + +- for (i = IPU_IRQ_NR_FN_BANKS; i < IPU_IRQ_NR_BANKS; i++) { ++ for (i = 0; i < IPU_IRQ_NR_BANKS; i++) { + struct ipu_irq_bank *bank = irq_bank + i; + + raw_spin_lock(&bank_lock); +diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c +index 5fb4c6d..be34547 100644 +--- a/drivers/hv/channel.c ++++ b/drivers/hv/channel.c +@@ -157,6 +157,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size, + } + + init_completion(&open_info->waitevent); ++ open_info->waiting_channel = newchannel; + + open_msg = (struct vmbus_channel_open_channel *)open_info->msg; + open_msg->header.msgtype = CHANNELMSG_OPENCHANNEL; +@@ -181,7 +182,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size, + spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags); + + ret = vmbus_post_msg(open_msg, +- sizeof(struct vmbus_channel_open_channel)); ++ sizeof(struct vmbus_channel_open_channel), true); + + if (ret != 0) { + err = ret; +@@ -194,6 +195,11 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size, + list_del(&open_info->msglistentry); + spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags); + ++ if (newchannel->rescind) { ++ err = -ENODEV; ++ goto error_free_gpadl; ++ } ++ + if (open_info->response.open_result.status) { + err = -EAGAIN; + goto error_free_gpadl; +@@ -233,7 +239,7 @@ int vmbus_send_tl_connect_request(const uuid_le *shv_guest_servie_id, + conn_msg.guest_endpoint_id = *shv_guest_servie_id; + conn_msg.host_service_id = *shv_host_servie_id; + +- return vmbus_post_msg(&conn_msg, sizeof(conn_msg)); ++ return 
vmbus_post_msg(&conn_msg, sizeof(conn_msg), true); + } + EXPORT_SYMBOL_GPL(vmbus_send_tl_connect_request); + +@@ -405,6 +411,7 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer, + return ret; + + init_completion(&msginfo->waitevent); ++ msginfo->waiting_channel = channel; + + gpadlmsg = (struct vmbus_channel_gpadl_header *)msginfo->msg; + gpadlmsg->header.msgtype = CHANNELMSG_GPADL_HEADER; +@@ -419,7 +426,7 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer, + spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags); + + ret = vmbus_post_msg(gpadlmsg, msginfo->msgsize - +- sizeof(*msginfo)); ++ sizeof(*msginfo), true); + if (ret != 0) + goto cleanup; + +@@ -433,14 +440,19 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer, + gpadl_body->gpadl = next_gpadl_handle; + + ret = vmbus_post_msg(gpadl_body, +- submsginfo->msgsize - +- sizeof(*submsginfo)); ++ submsginfo->msgsize - sizeof(*submsginfo), ++ true); + if (ret != 0) + goto cleanup; + + } + wait_for_completion(&msginfo->waitevent); + ++ if (channel->rescind) { ++ ret = -ENODEV; ++ goto cleanup; ++ } ++ + /* At this point, we received the gpadl created msg */ + *gpadl_handle = gpadlmsg->gpadl; + +@@ -474,6 +486,7 @@ int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle) + return -ENOMEM; + + init_completion(&info->waitevent); ++ info->waiting_channel = channel; + + msg = (struct vmbus_channel_gpadl_teardown *)info->msg; + +@@ -485,14 +498,19 @@ int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle) + list_add_tail(&info->msglistentry, + &vmbus_connection.chn_msg_list); + spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags); +- ret = vmbus_post_msg(msg, +- sizeof(struct vmbus_channel_gpadl_teardown)); ++ ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_gpadl_teardown), ++ true); + + if (ret) + goto post_msg_err; + + wait_for_completion(&info->waitevent); + ++ if (channel->rescind) { ++ ret = -ENODEV; ++ goto post_msg_err; ++ } ++ + post_msg_err: + spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags); + list_del(&info->msglistentry); +@@ -557,7 +575,8 @@ static int vmbus_close_internal(struct vmbus_channel *channel) + msg->header.msgtype = CHANNELMSG_CLOSECHANNEL; + msg->child_relid = channel->offermsg.child_relid; + +- ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_close_channel)); ++ ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_close_channel), ++ true); + + if (ret) { + pr_err("Close failed: close post msg return is %d\n", ret); +diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c +index caf3418..cb95315 100644 +--- a/drivers/hv/channel_mgmt.c ++++ b/drivers/hv/channel_mgmt.c +@@ -147,6 +147,29 @@ static const struct { + { HV_RDV_GUID }, + }; + ++/* ++ * The rescinded channel may be blocked waiting for a response from the host; ++ * take care of that. 
++ */ ++static void vmbus_rescind_cleanup(struct vmbus_channel *channel) ++{ ++ struct vmbus_channel_msginfo *msginfo; ++ unsigned long flags; ++ ++ ++ spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags); ++ ++ list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list, ++ msglistentry) { ++ ++ if (msginfo->waiting_channel == channel) { ++ complete(&msginfo->waitevent); ++ break; ++ } ++ } ++ spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags); ++} ++ + static bool is_unsupported_vmbus_devs(const uuid_le *guid) + { + int i; +@@ -321,7 +344,8 @@ static void vmbus_release_relid(u32 relid) + memset(&msg, 0, sizeof(struct vmbus_channel_relid_released)); + msg.child_relid = relid; + msg.header.msgtype = CHANNELMSG_RELID_RELEASED; +- vmbus_post_msg(&msg, sizeof(struct vmbus_channel_relid_released)); ++ vmbus_post_msg(&msg, sizeof(struct vmbus_channel_relid_released), ++ true); + } + + void hv_event_tasklet_disable(struct vmbus_channel *channel) +@@ -728,7 +752,8 @@ void vmbus_initiate_unload(bool crash) + init_completion(&vmbus_connection.unload_event); + memset(&hdr, 0, sizeof(struct vmbus_channel_message_header)); + hdr.msgtype = CHANNELMSG_UNLOAD; +- vmbus_post_msg(&hdr, sizeof(struct vmbus_channel_message_header)); ++ vmbus_post_msg(&hdr, sizeof(struct vmbus_channel_message_header), ++ !crash); + + /* + * vmbus_initiate_unload() is also called on crash and the crash can be +@@ -823,6 +848,8 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr) + channel->rescind = true; + spin_unlock_irqrestore(&channel->lock, flags); + ++ vmbus_rescind_cleanup(channel); ++ + if (channel->device_obj) { + if (channel->chn_rescind_callback) { + channel->chn_rescind_callback(channel); +@@ -1116,8 +1143,8 @@ int vmbus_request_offers(void) + msg->msgtype = CHANNELMSG_REQUESTOFFERS; + + +- ret = vmbus_post_msg(msg, +- sizeof(struct vmbus_channel_message_header)); ++ ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_message_header), ++ true); + if (ret != 0) { + pr_err("Unable to request offers - %d\n", ret); + +diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c +index 78e6368..840b6db 100644 +--- a/drivers/hv/connection.c ++++ b/drivers/hv/connection.c +@@ -110,7 +110,8 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, + spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags); + + ret = vmbus_post_msg(msg, +- sizeof(struct vmbus_channel_initiate_contact)); ++ sizeof(struct vmbus_channel_initiate_contact), ++ true); + if (ret != 0) { + spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags); + list_del(&msginfo->msglistentry); +@@ -434,7 +435,7 @@ void vmbus_on_event(unsigned long data) + /* + * vmbus_post_msg - Send a msg on the vmbus's message connection + */ +-int vmbus_post_msg(void *buffer, size_t buflen) ++int vmbus_post_msg(void *buffer, size_t buflen, bool can_sleep) + { + union hv_connection_id conn_id; + int ret = 0; +@@ -449,7 +450,7 @@ int vmbus_post_msg(void *buffer, size_t buflen) + * insufficient resources. Retry the operation a couple of + * times before giving up. 
+ */ +- while (retries < 20) { ++ while (retries < 100) { + ret = hv_post_message(conn_id, 1, buffer, buflen); + + switch (ret) { +@@ -472,8 +473,14 @@ int vmbus_post_msg(void *buffer, size_t buflen) + } + + retries++; +- udelay(usec); +- if (usec < 2048) ++ if (can_sleep && usec > 1000) ++ msleep(usec / 1000); ++ else if (usec < MAX_UDELAY_MS * 1000) ++ udelay(usec); ++ else ++ mdelay(usec / 1000); ++ ++ if (usec < 256000) + usec *= 2; + } + return ret; +diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c +index 60dbd6c..6e49a4d 100644 +--- a/drivers/hv/hv.c ++++ b/drivers/hv/hv.c +@@ -309,9 +309,10 @@ void hv_cleanup(bool crash) + + hypercall_msr.as_uint64 = 0; + wrmsrl(HV_X64_MSR_REFERENCE_TSC, hypercall_msr.as_uint64); +- if (!crash) ++ if (!crash) { + vfree(hv_context.tsc_page); +- hv_context.tsc_page = NULL; ++ hv_context.tsc_page = NULL; ++ } + } + #endif + } +@@ -411,7 +412,7 @@ int hv_synic_alloc(void) + goto err; + } + +- for_each_online_cpu(cpu) { ++ for_each_present_cpu(cpu) { + hv_context.event_dpc[cpu] = kmalloc(size, GFP_ATOMIC); + if (hv_context.event_dpc[cpu] == NULL) { + pr_err("Unable to allocate event dpc\n"); +@@ -457,6 +458,8 @@ int hv_synic_alloc(void) + pr_err("Unable to allocate post msg page\n"); + goto err; + } ++ ++ INIT_LIST_HEAD(&hv_context.percpu_list[cpu]); + } + + return 0; +@@ -482,7 +485,7 @@ void hv_synic_free(void) + int cpu; + + kfree(hv_context.hv_numa_map); +- for_each_online_cpu(cpu) ++ for_each_present_cpu(cpu) + hv_synic_free_cpu(cpu); + } + +@@ -552,8 +555,6 @@ void hv_synic_init(void *arg) + rdmsrl(HV_X64_MSR_VP_INDEX, vp_index); + hv_context.vp_index[cpu] = (u32)vp_index; + +- INIT_LIST_HEAD(&hv_context.percpu_list[cpu]); +- + /* + * Register the per-cpu clockevent source. + */ +diff --git a/drivers/hv/hv_fcopy.c b/drivers/hv/hv_fcopy.c +index 8b2ba98..e47d8c9 100644 +--- a/drivers/hv/hv_fcopy.c ++++ b/drivers/hv/hv_fcopy.c +@@ -61,6 +61,7 @@ static DECLARE_WORK(fcopy_send_work, fcopy_send_data); + static const char fcopy_devname[] = "vmbus/hv_fcopy"; + static u8 *recv_buffer; + static struct hvutil_transport *hvt; ++static struct completion release_event; + /* + * This state maintains the version number registered by the daemon. + */ +@@ -317,6 +318,7 @@ static void fcopy_on_reset(void) + + if (cancel_delayed_work_sync(&fcopy_timeout_work)) + fcopy_respond_to_host(HV_E_FAIL); ++ complete(&release_event); + } + + int hv_fcopy_init(struct hv_util_service *srv) +@@ -324,6 +326,7 @@ int hv_fcopy_init(struct hv_util_service *srv) + recv_buffer = srv->recv_buffer; + fcopy_transaction.recv_channel = srv->channel; + ++ init_completion(&release_event); + /* + * When this driver loads, the user level daemon that + * processes the host requests may not yet be running. +@@ -345,4 +348,5 @@ void hv_fcopy_deinit(void) + fcopy_transaction.state = HVUTIL_DEVICE_DYING; + cancel_delayed_work_sync(&fcopy_timeout_work); + hvutil_transport_destroy(hvt); ++ wait_for_completion(&release_event); + } +diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c +index 5e1fdc8..3abfc59 100644 +--- a/drivers/hv/hv_kvp.c ++++ b/drivers/hv/hv_kvp.c +@@ -88,6 +88,7 @@ static DECLARE_WORK(kvp_sendkey_work, kvp_send_key); + static const char kvp_devname[] = "vmbus/hv_kvp"; + static u8 *recv_buffer; + static struct hvutil_transport *hvt; ++static struct completion release_event; + /* + * Register the kernel component with the user-level daemon. + * As part of this registration, pass the LIC version number. 
+@@ -716,6 +717,7 @@ static void kvp_on_reset(void) + if (cancel_delayed_work_sync(&kvp_timeout_work)) + kvp_respond_to_host(NULL, HV_E_FAIL); + kvp_transaction.state = HVUTIL_DEVICE_INIT; ++ complete(&release_event); + } + + int +@@ -724,6 +726,7 @@ hv_kvp_init(struct hv_util_service *srv) + recv_buffer = srv->recv_buffer; + kvp_transaction.recv_channel = srv->channel; + ++ init_completion(&release_event); + /* + * When this driver loads, the user level daemon that + * processes the host requests may not yet be running. +@@ -747,4 +750,5 @@ void hv_kvp_deinit(void) + cancel_delayed_work_sync(&kvp_timeout_work); + cancel_work_sync(&kvp_sendkey_work); + hvutil_transport_destroy(hvt); ++ wait_for_completion(&release_event); + } +diff --git a/drivers/hv/hv_snapshot.c b/drivers/hv/hv_snapshot.c +index a670713..a76e3db 100644 +--- a/drivers/hv/hv_snapshot.c ++++ b/drivers/hv/hv_snapshot.c +@@ -66,6 +66,7 @@ static int dm_reg_value; + static const char vss_devname[] = "vmbus/hv_vss"; + static __u8 *recv_buffer; + static struct hvutil_transport *hvt; ++static struct completion release_event; + + static void vss_timeout_func(struct work_struct *dummy); + static void vss_handle_request(struct work_struct *dummy); +@@ -330,11 +331,13 @@ static void vss_on_reset(void) + if (cancel_delayed_work_sync(&vss_timeout_work)) + vss_respond_to_host(HV_E_FAIL); + vss_transaction.state = HVUTIL_DEVICE_INIT; ++ complete(&release_event); + } + + int + hv_vss_init(struct hv_util_service *srv) + { ++ init_completion(&release_event); + if (vmbus_proto_version < VERSION_WIN8_1) { + pr_warn("Integration service 'Backup (volume snapshot)'" + " not supported on this host version.\n"); +@@ -365,4 +368,5 @@ void hv_vss_deinit(void) + cancel_delayed_work_sync(&vss_timeout_work); + cancel_work_sync(&vss_handle_request_work); + hvutil_transport_destroy(hvt); ++ wait_for_completion(&release_event); + } +diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h +index 2b13f2a..8d7f865 100644 +--- a/drivers/hv/hyperv_vmbus.h ++++ b/drivers/hv/hyperv_vmbus.h +@@ -683,7 +683,7 @@ void vmbus_free_channels(void); + int vmbus_connect(void); + void vmbus_disconnect(void); + +-int vmbus_post_msg(void *buffer, size_t buflen); ++int vmbus_post_msg(void *buffer, size_t buflen, bool can_sleep); + + void vmbus_on_event(unsigned long data); + void vmbus_on_msg_dpc(unsigned long data); +diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c +index 308dbda..e94ed1c 100644 +--- a/drivers/hv/ring_buffer.c ++++ b/drivers/hv/ring_buffer.c +@@ -298,6 +298,9 @@ int hv_ringbuffer_write(struct vmbus_channel *channel, + unsigned long flags = 0; + struct hv_ring_buffer_info *outring_info = &channel->outbound; + ++ if (channel->rescind) ++ return -ENODEV; ++ + for (i = 0; i < kv_count; i++) + totalbytes_towrite += kv_list[i].iov_len; + +@@ -350,6 +353,10 @@ int hv_ringbuffer_write(struct vmbus_channel *channel, + spin_unlock_irqrestore(&outring_info->ring_lock, flags); + + hv_signal_on_write(old_write, channel, kick_q); ++ ++ if (channel->rescind) ++ return -ENODEV; ++ + return 0; + } + +diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c +index ad82cb2..4314616 100644 +--- a/drivers/hwmon/it87.c ++++ b/drivers/hwmon/it87.c +@@ -1300,25 +1300,35 @@ static ssize_t set_pwm_enable(struct device *dev, struct device_attribute *attr, + it87_write_value(data, IT87_REG_FAN_MAIN_CTRL, + data->fan_main_ctrl); + } else { ++ u8 ctrl; ++ + /* No on/off mode, set maximum pwm value */ + data->pwm_duty[nr] = pwm_to_reg(data, 0xff); + 
it87_write_value(data, IT87_REG_PWM_DUTY[nr], + data->pwm_duty[nr]); + /* and set manual mode */ +- data->pwm_ctrl[nr] = has_newer_autopwm(data) ? +- data->pwm_temp_map[nr] : +- data->pwm_duty[nr]; +- it87_write_value(data, IT87_REG_PWM[nr], +- data->pwm_ctrl[nr]); ++ if (has_newer_autopwm(data)) { ++ ctrl = (data->pwm_ctrl[nr] & 0x7c) | ++ data->pwm_temp_map[nr]; ++ } else { ++ ctrl = data->pwm_duty[nr]; ++ } ++ data->pwm_ctrl[nr] = ctrl; ++ it87_write_value(data, IT87_REG_PWM[nr], ctrl); + } + } else { +- if (val == 1) /* Manual mode */ +- data->pwm_ctrl[nr] = has_newer_autopwm(data) ? +- data->pwm_temp_map[nr] : +- data->pwm_duty[nr]; +- else /* Automatic mode */ +- data->pwm_ctrl[nr] = 0x80 | data->pwm_temp_map[nr]; +- it87_write_value(data, IT87_REG_PWM[nr], data->pwm_ctrl[nr]); ++ u8 ctrl; ++ ++ if (has_newer_autopwm(data)) { ++ ctrl = (data->pwm_ctrl[nr] & 0x7c) | ++ data->pwm_temp_map[nr]; ++ if (val != 1) ++ ctrl |= 0x80; ++ } else { ++ ctrl = (val == 1 ? data->pwm_duty[nr] : 0x80); ++ } ++ data->pwm_ctrl[nr] = ctrl; ++ it87_write_value(data, IT87_REG_PWM[nr], ctrl); + + if (data->type != it8603 && nr < 3) { + /* set SmartGuardian mode */ +@@ -1344,6 +1354,7 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *attr, + return -EINVAL; + + mutex_lock(&data->update_lock); ++ it87_update_pwm_ctrl(data, nr); + if (has_newer_autopwm(data)) { + /* + * If we are in automatic mode, the PWM duty cycle register +@@ -1456,13 +1467,15 @@ static ssize_t set_pwm_temp_map(struct device *dev, + } + + mutex_lock(&data->update_lock); ++ it87_update_pwm_ctrl(data, nr); + data->pwm_temp_map[nr] = reg; + /* + * If we are in automatic mode, write the temp mapping immediately; + * otherwise, just store it for later use. + */ + if (data->pwm_ctrl[nr] & 0x80) { +- data->pwm_ctrl[nr] = 0x80 | data->pwm_temp_map[nr]; ++ data->pwm_ctrl[nr] = (data->pwm_ctrl[nr] & 0xfc) | ++ data->pwm_temp_map[nr]; + it87_write_value(data, IT87_REG_PWM[nr], data->pwm_ctrl[nr]); + } + mutex_unlock(&data->update_lock); +diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c +index 49e0f1b..8e79056 100644 +--- a/drivers/hwtracing/coresight/coresight-stm.c ++++ b/drivers/hwtracing/coresight/coresight-stm.c +@@ -356,7 +356,7 @@ static void stm_generic_unlink(struct stm_data *stm_data, + if (!drvdata || !drvdata->csdev) + return; + +- stm_disable(drvdata->csdev, NULL); ++ coresight_disable(drvdata->csdev); + } + + static phys_addr_t +diff --git a/drivers/iio/pressure/mpl115.c b/drivers/iio/pressure/mpl115.c +index 73f2f0c..8f2bce2 100644 +--- a/drivers/iio/pressure/mpl115.c ++++ b/drivers/iio/pressure/mpl115.c +@@ -137,6 +137,7 @@ static const struct iio_chan_spec mpl115_channels[] = { + { + .type = IIO_TEMP, + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), ++ .info_mask_shared_by_type = + BIT(IIO_CHAN_INFO_OFFSET) | BIT(IIO_CHAN_INFO_SCALE), + }, + }; +diff --git a/drivers/iio/pressure/mpl3115.c b/drivers/iio/pressure/mpl3115.c +index 6392d7b..eb87948 100644 +--- a/drivers/iio/pressure/mpl3115.c ++++ b/drivers/iio/pressure/mpl3115.c +@@ -182,7 +182,7 @@ static const struct iio_chan_spec mpl3115_channels[] = { + { + .type = IIO_PRESSURE, + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), +- BIT(IIO_CHAN_INFO_SCALE), ++ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), + .scan_index = 0, + .scan_type = { + .sign = 'u', +@@ -195,7 +195,7 @@ static const struct iio_chan_spec mpl3115_channels[] = { + { + .type = IIO_TEMP, + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), +- 
BIT(IIO_CHAN_INFO_SCALE), ++ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), + .scan_index = 1, + .scan_type = { + .sign = 's', +diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c +index c25768c..f2d40c0 100644 +--- a/drivers/infiniband/core/cma.c ++++ b/drivers/infiniband/core/cma.c +@@ -3540,6 +3540,9 @@ static int cma_accept_iw(struct rdma_id_private *id_priv, + struct iw_cm_conn_param iw_param; + int ret; + ++ if (!conn_param) ++ return -EINVAL; ++ + ret = cma_modify_qp_rtr(id_priv, conn_param); + if (ret) + return ret; +diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c +index d82637a..34be95e 100644 +--- a/drivers/iommu/intel-iommu.c ++++ b/drivers/iommu/intel-iommu.c +@@ -3325,13 +3325,14 @@ static int __init init_dmars(void) + iommu_identity_mapping |= IDENTMAP_GFX; + #endif + ++ check_tylersburg_isoch(); ++ + if (iommu_identity_mapping) { + ret = si_domain_init(hw_pass_through); + if (ret) + goto free_iommu; + } + +- check_tylersburg_isoch(); + + /* + * If we copied translations from a previous kernel in the kdump +diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c +index 59b2c50..c817627 100644 +--- a/drivers/md/dm-cache-target.c ++++ b/drivers/md/dm-cache-target.c +@@ -248,7 +248,7 @@ struct cache { + /* + * Fields for converting from sectors to blocks. + */ +- uint32_t sectors_per_block; ++ sector_t sectors_per_block; + int sectors_per_block_shift; + + spinlock_t lock; +@@ -3546,11 +3546,11 @@ static void cache_status(struct dm_target *ti, status_type_t type, + + residency = policy_residency(cache->policy); + +- DMEMIT("%u %llu/%llu %u %llu/%llu %u %u %u %u %u %u %lu ", ++ DMEMIT("%u %llu/%llu %llu %llu/%llu %u %u %u %u %u %u %lu ", + (unsigned)DM_CACHE_METADATA_BLOCK_SIZE, + (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata), + (unsigned long long)nr_blocks_metadata, +- cache->sectors_per_block, ++ (unsigned long long)cache->sectors_per_block, + (unsigned long long) from_cblock(residency), + (unsigned long long) from_cblock(cache->cache_size), + (unsigned) atomic_read(&cache->stats.read_hit), +diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c +index af2d79b..15daa36 100644 +--- a/drivers/md/dm-raid.c ++++ b/drivers/md/dm-raid.c +@@ -3621,6 +3621,8 @@ static int raid_preresume(struct dm_target *ti) + return r; + } + ++#define RESUME_STAY_FROZEN_FLAGS (CTR_FLAG_DELTA_DISKS | CTR_FLAG_DATA_OFFSET) ++ + static void raid_resume(struct dm_target *ti) + { + struct raid_set *rs = ti->private; +@@ -3638,7 +3640,15 @@ static void raid_resume(struct dm_target *ti) + mddev->ro = 0; + mddev->in_sync = 0; + +- clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); ++ /* ++ * Keep the RAID set frozen if reshape/rebuild flags are set. ++ * The RAID set is unfrozen once the next table load/resume, ++ * which clears the reshape/rebuild flags, occurs. ++ * This ensures that the constructor for the inactive table ++ * retrieves an up-to-date reshape_position. 
++ */ ++ if (!(rs->ctr_flags & RESUME_STAY_FROZEN_FLAGS)) ++ clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); + + if (mddev->suspended) + mddev_resume(mddev); +diff --git a/drivers/md/dm-round-robin.c b/drivers/md/dm-round-robin.c +index 6c25213..bdbb7e6 100644 +--- a/drivers/md/dm-round-robin.c ++++ b/drivers/md/dm-round-robin.c +@@ -17,8 +17,8 @@ + #include <linux/module.h> + + #define DM_MSG_PREFIX "multipath round-robin" +-#define RR_MIN_IO 1000 +-#define RR_VERSION "1.1.0" ++#define RR_MIN_IO 1 ++#define RR_VERSION "1.2.0" + + /*----------------------------------------------------------------- + * Path-handling code, paths are held in lists +@@ -47,44 +47,19 @@ struct selector { + struct list_head valid_paths; + struct list_head invalid_paths; + spinlock_t lock; +- struct dm_path * __percpu *current_path; +- struct percpu_counter repeat_count; + }; + +-static void set_percpu_current_path(struct selector *s, struct dm_path *path) +-{ +- int cpu; +- +- for_each_possible_cpu(cpu) +- *per_cpu_ptr(s->current_path, cpu) = path; +-} +- + static struct selector *alloc_selector(void) + { + struct selector *s = kmalloc(sizeof(*s), GFP_KERNEL); + +- if (!s) +- return NULL; +- +- INIT_LIST_HEAD(&s->valid_paths); +- INIT_LIST_HEAD(&s->invalid_paths); +- spin_lock_init(&s->lock); +- +- s->current_path = alloc_percpu(struct dm_path *); +- if (!s->current_path) +- goto out_current_path; +- set_percpu_current_path(s, NULL); +- +- if (percpu_counter_init(&s->repeat_count, 0, GFP_KERNEL)) +- goto out_repeat_count; ++ if (s) { ++ INIT_LIST_HEAD(&s->valid_paths); ++ INIT_LIST_HEAD(&s->invalid_paths); ++ spin_lock_init(&s->lock); ++ } + + return s; +- +-out_repeat_count: +- free_percpu(s->current_path); +-out_current_path: +- kfree(s); +- return NULL;; + } + + static int rr_create(struct path_selector *ps, unsigned argc, char **argv) +@@ -105,8 +80,6 @@ static void rr_destroy(struct path_selector *ps) + + free_paths(&s->valid_paths); + free_paths(&s->invalid_paths); +- free_percpu(s->current_path); +- percpu_counter_destroy(&s->repeat_count); + kfree(s); + ps->context = NULL; + } +@@ -157,6 +130,11 @@ static int rr_add_path(struct path_selector *ps, struct dm_path *path, + return -EINVAL; + } + ++ if (repeat_count > 1) { ++ DMWARN_LIMIT("repeat_count > 1 is deprecated, using 1 instead"); ++ repeat_count = 1; ++ } ++ + /* allocate the path */ + pi = kmalloc(sizeof(*pi), GFP_KERNEL); + if (!pi) { +@@ -183,9 +161,6 @@ static void rr_fail_path(struct path_selector *ps, struct dm_path *p) + struct path_info *pi = p->pscontext; + + spin_lock_irqsave(&s->lock, flags); +- if (p == *this_cpu_ptr(s->current_path)) +- set_percpu_current_path(s, NULL); +- + list_move(&pi->list, &s->invalid_paths); + spin_unlock_irqrestore(&s->lock, flags); + } +@@ -208,29 +183,15 @@ static struct dm_path *rr_select_path(struct path_selector *ps, size_t nr_bytes) + unsigned long flags; + struct selector *s = ps->context; + struct path_info *pi = NULL; +- struct dm_path *current_path = NULL; +- +- local_irq_save(flags); +- current_path = *this_cpu_ptr(s->current_path); +- if (current_path) { +- percpu_counter_dec(&s->repeat_count); +- if (percpu_counter_read_positive(&s->repeat_count) > 0) { +- local_irq_restore(flags); +- return current_path; +- } +- } + +- spin_lock(&s->lock); ++ spin_lock_irqsave(&s->lock, flags); + if (!list_empty(&s->valid_paths)) { + pi = list_entry(s->valid_paths.next, struct path_info, list); + list_move_tail(&pi->list, &s->valid_paths); +- percpu_counter_set(&s->repeat_count, pi->repeat_count); +- 
set_percpu_current_path(s, pi->path); +- current_path = pi->path; + } + spin_unlock_irqrestore(&s->lock, flags); + +- return current_path; ++ return pi ? pi->path : NULL; + } + + static struct path_selector_type rr_ps = { +diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c +index 38b05f2..0250e7e 100644 +--- a/drivers/md/dm-stats.c ++++ b/drivers/md/dm-stats.c +@@ -175,6 +175,7 @@ static void dm_stat_free(struct rcu_head *head) + int cpu; + struct dm_stat *s = container_of(head, struct dm_stat, rcu_head); + ++ kfree(s->histogram_boundaries); + kfree(s->program_id); + kfree(s->aux_data); + for_each_possible_cpu(cpu) { +diff --git a/drivers/md/linear.c b/drivers/md/linear.c +index 86f5d43..b0c0aef 100644 +--- a/drivers/md/linear.c ++++ b/drivers/md/linear.c +@@ -52,18 +52,26 @@ static inline struct dev_info *which_dev(struct mddev *mddev, sector_t sector) + return conf->disks + lo; + } + ++/* ++ * In linear_congested() conf->raid_disks is used as a copy of ++ * mddev->raid_disks to iterate conf->disks[], because conf->raid_disks ++ * and conf->disks[] are created in linear_conf(), they are always ++ * consitent with each other, but mddev->raid_disks does not. ++ */ + static int linear_congested(struct mddev *mddev, int bits) + { + struct linear_conf *conf; + int i, ret = 0; + +- conf = mddev->private; ++ rcu_read_lock(); ++ conf = rcu_dereference(mddev->private); + +- for (i = 0; i < mddev->raid_disks && !ret ; i++) { ++ for (i = 0; i < conf->raid_disks && !ret ; i++) { + struct request_queue *q = bdev_get_queue(conf->disks[i].rdev->bdev); + ret |= bdi_congested(&q->backing_dev_info, bits); + } + ++ rcu_read_unlock(); + return ret; + } + +@@ -143,6 +151,19 @@ static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks) + conf->disks[i-1].end_sector + + conf->disks[i].rdev->sectors; + ++ /* ++ * conf->raid_disks is copy of mddev->raid_disks. The reason to ++ * keep a copy of mddev->raid_disks in struct linear_conf is, ++ * mddev->raid_disks may not be consistent with pointers number of ++ * conf->disks[] when it is updated in linear_add() and used to ++ * iterate old conf->disks[] earray in linear_congested(). ++ * Here conf->raid_disks is always consitent with number of ++ * pointers in conf->disks[] array, and mddev->private is updated ++ * with rcu_assign_pointer() in linear_addr(), such race can be ++ * avoided. ++ */ ++ conf->raid_disks = raid_disks; ++ + return conf; + + out: +@@ -195,15 +216,23 @@ static int linear_add(struct mddev *mddev, struct md_rdev *rdev) + if (!newconf) + return -ENOMEM; + ++ /* newconf->raid_disks already keeps a copy of * the increased ++ * value of mddev->raid_disks, WARN_ONCE() is just used to make ++ * sure of this. It is possible that oldconf is still referenced ++ * in linear_congested(), therefore kfree_rcu() is used to free ++ * oldconf until no one uses it anymore. 
++ */ + mddev_suspend(mddev); +- oldconf = mddev->private; ++ oldconf = rcu_dereference(mddev->private); + mddev->raid_disks++; +- mddev->private = newconf; ++ WARN_ONCE(mddev->raid_disks != newconf->raid_disks, ++ "copied raid_disks doesn't match mddev->raid_disks"); ++ rcu_assign_pointer(mddev->private, newconf); + md_set_array_sectors(mddev, linear_size(mddev, 0, 0)); + set_capacity(mddev->gendisk, mddev->array_sectors); + mddev_resume(mddev); + revalidate_disk(mddev->gendisk); +- kfree(oldconf); ++ kfree_rcu(oldconf, rcu); + return 0; + } + +diff --git a/drivers/md/linear.h b/drivers/md/linear.h +index b685ddd..8d392e6 100644 +--- a/drivers/md/linear.h ++++ b/drivers/md/linear.h +@@ -10,6 +10,7 @@ struct linear_conf + { + struct rcu_head rcu; + sector_t array_sectors; ++ int raid_disks; /* a copy of mddev->raid_disks */ + struct dev_info disks[0]; + }; + #endif +diff --git a/drivers/media/dvb-frontends/cxd2820r_core.c b/drivers/media/dvb-frontends/cxd2820r_core.c +index 95267c6..f6ebbb4 100644 +--- a/drivers/media/dvb-frontends/cxd2820r_core.c ++++ b/drivers/media/dvb-frontends/cxd2820r_core.c +@@ -615,6 +615,7 @@ static int cxd2820r_probe(struct i2c_client *client, + } + + priv->client[0] = client; ++ priv->fe.demodulator_priv = priv; + priv->i2c = client->adapter; + priv->ts_mode = pdata->ts_mode; + priv->ts_clk_inv = pdata->ts_clk_inv; +@@ -697,7 +698,6 @@ static int cxd2820r_probe(struct i2c_client *client, + memcpy(&priv->fe.ops, &cxd2820r_ops, sizeof(priv->fe.ops)); + if (!pdata->attach_in_use) + priv->fe.ops.release = NULL; +- priv->fe.demodulator_priv = priv; + i2c_set_clientdata(client, priv); + + /* Setup callbacks */ +diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c +index 2783531..4462d8c 100644 +--- a/drivers/media/media-device.c ++++ b/drivers/media/media-device.c +@@ -130,7 +130,7 @@ static long media_device_enum_entities(struct media_device *mdev, + * old range. 
+ */ + if (ent->function < MEDIA_ENT_F_OLD_BASE || +- ent->function > MEDIA_ENT_T_DEVNODE_UNKNOWN) { ++ ent->function > MEDIA_ENT_F_TUNER) { + if (is_media_entity_v4l2_subdev(ent)) + entd->type = MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN; + else if (ent->function != MEDIA_ENT_F_IO_V4L) +diff --git a/drivers/media/pci/dm1105/Kconfig b/drivers/media/pci/dm1105/Kconfig +index 173daf0..14fa7e4 100644 +--- a/drivers/media/pci/dm1105/Kconfig ++++ b/drivers/media/pci/dm1105/Kconfig +@@ -1,6 +1,6 @@ + config DVB_DM1105 + tristate "SDMC DM1105 based PCI cards" +- depends on DVB_CORE && PCI && I2C ++ depends on DVB_CORE && PCI && I2C && I2C_ALGOBIT + select DVB_PLL if MEDIA_SUBDRV_AUTOSELECT + select DVB_STV0299 if MEDIA_SUBDRV_AUTOSELECT + select DVB_STV0288 if MEDIA_SUBDRV_AUTOSELECT +diff --git a/drivers/media/platform/am437x/am437x-vpfe.c b/drivers/media/platform/am437x/am437x-vpfe.c +index b33b9e3..05489a4 100644 +--- a/drivers/media/platform/am437x/am437x-vpfe.c ++++ b/drivers/media/platform/am437x/am437x-vpfe.c +@@ -1576,7 +1576,7 @@ static int vpfe_s_fmt(struct file *file, void *priv, + return -EBUSY; + } + +- ret = vpfe_try_fmt(file, priv, &format); ++ ret = __vpfe_get_format(vpfe, &format, &bpp); + if (ret) + return ret; + +diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c +index 91f9bb8..6ebe895 100644 +--- a/drivers/media/rc/lirc_dev.c ++++ b/drivers/media/rc/lirc_dev.c +@@ -589,7 +589,7 @@ long lirc_dev_fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg) + result = put_user(ir->d.features, (__u32 __user *)arg); + break; + case LIRC_GET_REC_MODE: +- if (LIRC_CAN_REC(ir->d.features)) { ++ if (!LIRC_CAN_REC(ir->d.features)) { + result = -ENOTTY; + break; + } +@@ -599,7 +599,7 @@ long lirc_dev_fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg) + (__u32 __user *)arg); + break; + case LIRC_SET_REC_MODE: +- if (LIRC_CAN_REC(ir->d.features)) { ++ if (!LIRC_CAN_REC(ir->d.features)) { + result = -ENOTTY; + break; + } +diff --git a/drivers/media/usb/uvc/uvc_queue.c b/drivers/media/usb/uvc/uvc_queue.c +index 77edd20..40e5a6b 100644 +--- a/drivers/media/usb/uvc/uvc_queue.c ++++ b/drivers/media/usb/uvc/uvc_queue.c +@@ -412,7 +412,7 @@ struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue, + nextbuf = NULL; + spin_unlock_irqrestore(&queue->irqlock, flags); + +- buf->state = buf->error ? VB2_BUF_STATE_ERROR : UVC_BUF_STATE_DONE; ++ buf->state = buf->error ? 
UVC_BUF_STATE_ERROR : UVC_BUF_STATE_DONE; + vb2_set_plane_payload(&buf->buf.vb2_buf, 0, buf->bytesused); + vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE); + +diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c +index fa50635..41f3186 100644 +--- a/drivers/misc/mei/main.c ++++ b/drivers/misc/mei/main.c +@@ -182,32 +182,36 @@ static ssize_t mei_read(struct file *file, char __user *ubuf, + goto out; + } + +- if (rets == -EBUSY && +- !mei_cl_enqueue_ctrl_wr_cb(cl, length, MEI_FOP_READ, file)) { +- rets = -ENOMEM; +- goto out; +- } + +- do { +- mutex_unlock(&dev->device_lock); +- +- if (wait_event_interruptible(cl->rx_wait, +- (!list_empty(&cl->rd_completed)) || +- (!mei_cl_is_connected(cl)))) { ++again: ++ mutex_unlock(&dev->device_lock); ++ if (wait_event_interruptible(cl->rx_wait, ++ !list_empty(&cl->rd_completed) || ++ !mei_cl_is_connected(cl))) { ++ if (signal_pending(current)) ++ return -EINTR; ++ return -ERESTARTSYS; ++ } ++ mutex_lock(&dev->device_lock); + +- if (signal_pending(current)) +- return -EINTR; +- return -ERESTARTSYS; +- } ++ if (!mei_cl_is_connected(cl)) { ++ rets = -ENODEV; ++ goto out; ++ } + +- mutex_lock(&dev->device_lock); +- if (!mei_cl_is_connected(cl)) { +- rets = -ENODEV; +- goto out; +- } ++ cb = mei_cl_read_cb(cl, file); ++ if (!cb) { ++ /* ++ * For amthif all the waiters are woken up, ++ * but only fp with matching cb->fp get the cb, ++ * the others have to return to wait on read. ++ */ ++ if (cl == &dev->iamthif_cl) ++ goto again; + +- cb = mei_cl_read_cb(cl, file); +- } while (!cb); ++ rets = 0; ++ goto out; ++ } + + copy_buffer: + /* now copy the data to user space */ +diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c +index fddd0be..80918ab 100644 +--- a/drivers/mmc/host/sdhci-acpi.c ++++ b/drivers/mmc/host/sdhci-acpi.c +@@ -466,7 +466,10 @@ static int sdhci_acpi_probe(struct platform_device *pdev) + if (sdhci_acpi_flag(c, SDHCI_ACPI_SD_CD)) { + bool v = sdhci_acpi_flag(c, SDHCI_ACPI_SD_CD_OVERRIDE_LEVEL); + +- if (mmc_gpiod_request_cd(host->mmc, NULL, 0, v, 0, NULL)) { ++ err = mmc_gpiod_request_cd(host->mmc, NULL, 0, v, 0, NULL); ++ if (err) { ++ if (err == -EPROBE_DEFER) ++ goto err_free; + dev_warn(dev, "failed to setup card detect gpio\n"); + c->use_runtime_pm = false; + } +diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c +index 0a177b1..d1570f5 100644 +--- a/drivers/mtd/nand/fsl_ifc_nand.c ++++ b/drivers/mtd/nand/fsl_ifc_nand.c +@@ -258,9 +258,15 @@ static void fsl_ifc_run_command(struct mtd_info *mtd) + int bufnum = nctrl->page & priv->bufnum_mask; + int sector = bufnum * chip->ecc.steps; + int sector_end = sector + chip->ecc.steps - 1; ++ __be32 *eccstat_regs; ++ ++ if (ctrl->version >= FSL_IFC_VERSION_2_0_0) ++ eccstat_regs = ifc->ifc_nand.v2_nand_eccstat; ++ else ++ eccstat_regs = ifc->ifc_nand.v1_nand_eccstat; + + for (i = sector / 4; i <= sector_end / 4; i++) +- eccstat[i] = ifc_in32(&ifc->ifc_nand.nand_eccstat[i]); ++ eccstat[i] = ifc_in32(&eccstat_regs[i]); + + for (i = sector; i <= sector_end; i++) { + errors = check_read_ecc(mtd, ctrl, eccstat, i); +diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c +index 77e3cc0..a0dabd4 100644 +--- a/drivers/net/can/usb/gs_usb.c ++++ b/drivers/net/can/usb/gs_usb.c +@@ -908,10 +908,14 @@ static int gs_usb_probe(struct usb_interface *intf, + struct gs_usb *dev; + int rc = -ENOMEM; + unsigned int icount, i; +- struct gs_host_config hconf = { +- .byte_order = 0x0000beef, +- }; +- struct gs_device_config dconf; ++ 
struct gs_host_config *hconf; ++ struct gs_device_config *dconf; ++ ++ hconf = kmalloc(sizeof(*hconf), GFP_KERNEL); ++ if (!hconf) ++ return -ENOMEM; ++ ++ hconf->byte_order = 0x0000beef; + + /* send host config */ + rc = usb_control_msg(interface_to_usbdev(intf), +@@ -920,16 +924,22 @@ static int gs_usb_probe(struct usb_interface *intf, + USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE, + 1, + intf->altsetting[0].desc.bInterfaceNumber, +- &hconf, +- sizeof(hconf), ++ hconf, ++ sizeof(*hconf), + 1000); + ++ kfree(hconf); ++ + if (rc < 0) { + dev_err(&intf->dev, "Couldn't send data format (err=%d)\n", + rc); + return rc; + } + ++ dconf = kmalloc(sizeof(*dconf), GFP_KERNEL); ++ if (!dconf) ++ return -ENOMEM; ++ + /* read device config */ + rc = usb_control_msg(interface_to_usbdev(intf), + usb_rcvctrlpipe(interface_to_usbdev(intf), 0), +@@ -937,28 +947,33 @@ static int gs_usb_probe(struct usb_interface *intf, + USB_DIR_IN|USB_TYPE_VENDOR|USB_RECIP_INTERFACE, + 1, + intf->altsetting[0].desc.bInterfaceNumber, +- &dconf, +- sizeof(dconf), ++ dconf, ++ sizeof(*dconf), + 1000); + if (rc < 0) { + dev_err(&intf->dev, "Couldn't get device config: (err=%d)\n", + rc); ++ kfree(dconf); + return rc; + } + +- icount = dconf.icount + 1; ++ icount = dconf->icount + 1; + dev_info(&intf->dev, "Configuring for %d interfaces\n", icount); + + if (icount > GS_MAX_INTF) { + dev_err(&intf->dev, + "Driver cannot handle more that %d CAN interfaces\n", + GS_MAX_INTF); ++ kfree(dconf); + return -EINVAL; + } + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); +- if (!dev) ++ if (!dev) { ++ kfree(dconf); + return -ENOMEM; ++ } ++ + init_usb_anchor(&dev->rx_submitted); + + atomic_set(&dev->active_channels, 0); +@@ -967,7 +982,7 @@ static int gs_usb_probe(struct usb_interface *intf, + dev->udev = interface_to_usbdev(intf); + + for (i = 0; i < icount; i++) { +- dev->canch[i] = gs_make_candev(i, intf, &dconf); ++ dev->canch[i] = gs_make_candev(i, intf, dconf); + if (IS_ERR_OR_NULL(dev->canch[i])) { + /* save error code to return later */ + rc = PTR_ERR(dev->canch[i]); +@@ -978,12 +993,15 @@ static int gs_usb_probe(struct usb_interface *intf, + gs_destroy_candev(dev->canch[i]); + + usb_kill_anchored_urbs(&dev->rx_submitted); ++ kfree(dconf); + kfree(dev); + return rc; + } + dev->canch[i]->parent = dev; + } + ++ kfree(dconf); ++ + return 0; + } + +diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c +index 108a30e..d000cb6 100644 +--- a/drivers/net/can/usb/usb_8dev.c ++++ b/drivers/net/can/usb/usb_8dev.c +@@ -951,8 +951,8 @@ static int usb_8dev_probe(struct usb_interface *intf, + for (i = 0; i < MAX_TX_URBS; i++) + priv->tx_contexts[i].echo_index = MAX_TX_URBS; + +- priv->cmd_msg_buffer = kzalloc(sizeof(struct usb_8dev_cmd_msg), +- GFP_KERNEL); ++ priv->cmd_msg_buffer = devm_kzalloc(&intf->dev, sizeof(struct usb_8dev_cmd_msg), ++ GFP_KERNEL); + if (!priv->cmd_msg_buffer) + goto cleanup_candev; + +@@ -966,7 +966,7 @@ static int usb_8dev_probe(struct usb_interface *intf, + if (err) { + netdev_err(netdev, + "couldn't register CAN device: %d\n", err); +- goto cleanup_cmd_msg_buffer; ++ goto cleanup_candev; + } + + err = usb_8dev_cmd_version(priv, &version); +@@ -987,9 +987,6 @@ static int usb_8dev_probe(struct usb_interface *intf, + cleanup_unregister_candev: + unregister_netdev(priv->netdev); + +-cleanup_cmd_msg_buffer: +- kfree(priv->cmd_msg_buffer); +- + cleanup_candev: + free_candev(netdev); + +diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c +index 0c45322..972b5e2 
100644 +--- a/drivers/net/wireless/ath/ath10k/core.c ++++ b/drivers/net/wireless/ath/ath10k/core.c +@@ -1901,7 +1901,8 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode, + ath10k_dbg(ar, ATH10K_DBG_BOOT, "firmware %s booted\n", + ar->hw->wiphy->fw_version); + +- if (test_bit(WMI_SERVICE_EXT_RES_CFG_SUPPORT, ar->wmi.svc_map)) { ++ if (test_bit(WMI_SERVICE_EXT_RES_CFG_SUPPORT, ar->wmi.svc_map) && ++ mode == ATH10K_FIRMWARE_MODE_NORMAL) { + val = 0; + if (ath10k_peer_stats_enabled(ar)) + val = WMI_10_4_PEER_STATS; +@@ -1954,10 +1955,13 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode, + * possible to implicitly make it correct by creating a dummy vdev and + * then deleting it. + */ +- status = ath10k_core_reset_rx_filter(ar); +- if (status) { +- ath10k_err(ar, "failed to reset rx filter: %d\n", status); +- goto err_hif_stop; ++ if (mode == ATH10K_FIRMWARE_MODE_NORMAL) { ++ status = ath10k_core_reset_rx_filter(ar); ++ if (status) { ++ ath10k_err(ar, ++ "failed to reset rx filter: %d\n", status); ++ goto err_hif_stop; ++ } + } + + /* If firmware indicates Full Rx Reorder support it must be used in a +diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c +index dc44cfe..16e052d 100644 +--- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c ++++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c +@@ -502,8 +502,7 @@ ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + break; + return -EOPNOTSUPP; + default: +- WARN_ON(1); +- return -EINVAL; ++ return -EOPNOTSUPP; + } + + mutex_lock(&ah->lock); +diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h +index 107bcfb..cb37bf0 100644 +--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h ++++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h +@@ -73,13 +73,13 @@ + #define AR9300_OTP_BASE \ + ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x30000 : 0x14000) + #define AR9300_OTP_STATUS \ +- ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x30018 : 0x15f18) ++ ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x31018 : 0x15f18) + #define AR9300_OTP_STATUS_TYPE 0x7 + #define AR9300_OTP_STATUS_VALID 0x4 + #define AR9300_OTP_STATUS_ACCESS_BUSY 0x2 + #define AR9300_OTP_STATUS_SM_BUSY 0x1 + #define AR9300_OTP_READ_DATA \ +- ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x3001c : 0x15f1c) ++ ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 
0x3101c : 0x15f1c) + + enum targetPowerHTRates { + HT_TARGET_RATE_0_8_16, +diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h +index 26fc8ec..a731671 100644 +--- a/drivers/net/wireless/ath/ath9k/ath9k.h ++++ b/drivers/net/wireless/ath/ath9k/ath9k.h +@@ -959,6 +959,7 @@ struct ath_softc { + struct survey_info *cur_survey; + struct survey_info survey[ATH9K_NUM_CHANNELS]; + ++ spinlock_t intr_lock; + struct tasklet_struct intr_tq; + struct tasklet_struct bcon_tasklet; + struct ath_hw *sc_ah; +diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c +index cfa3fe8..297d4bb 100644 +--- a/drivers/net/wireless/ath/ath9k/init.c ++++ b/drivers/net/wireless/ath/ath9k/init.c +@@ -626,6 +626,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, + common->bt_ant_diversity = 1; + + spin_lock_init(&common->cc_lock); ++ spin_lock_init(&sc->intr_lock); + spin_lock_init(&sc->sc_serial_rw); + spin_lock_init(&sc->sc_pm_lock); + spin_lock_init(&sc->chan_lock); +diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c +index bba85d1..d937c39 100644 +--- a/drivers/net/wireless/ath/ath9k/mac.c ++++ b/drivers/net/wireless/ath/ath9k/mac.c +@@ -805,21 +805,12 @@ void ath9k_hw_disable_interrupts(struct ath_hw *ah) + } + EXPORT_SYMBOL(ath9k_hw_disable_interrupts); + +-void ath9k_hw_enable_interrupts(struct ath_hw *ah) ++static void __ath9k_hw_enable_interrupts(struct ath_hw *ah) + { + struct ath_common *common = ath9k_hw_common(ah); + u32 sync_default = AR_INTR_SYNC_DEFAULT; + u32 async_mask; + +- if (!(ah->imask & ATH9K_INT_GLOBAL)) +- return; +- +- if (!atomic_inc_and_test(&ah->intr_ref_cnt)) { +- ath_dbg(common, INTERRUPT, "Do not enable IER ref count %d\n", +- atomic_read(&ah->intr_ref_cnt)); +- return; +- } +- + if (AR_SREV_9340(ah) || AR_SREV_9550(ah) || AR_SREV_9531(ah) || + AR_SREV_9561(ah)) + sync_default &= ~AR_INTR_SYNC_HOST1_FATAL; +@@ -841,6 +832,39 @@ void ath9k_hw_enable_interrupts(struct ath_hw *ah) + ath_dbg(common, INTERRUPT, "AR_IMR 0x%x IER 0x%x\n", + REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER)); + } ++ ++void ath9k_hw_resume_interrupts(struct ath_hw *ah) ++{ ++ struct ath_common *common = ath9k_hw_common(ah); ++ ++ if (!(ah->imask & ATH9K_INT_GLOBAL)) ++ return; ++ ++ if (atomic_read(&ah->intr_ref_cnt) != 0) { ++ ath_dbg(common, INTERRUPT, "Do not enable IER ref count %d\n", ++ atomic_read(&ah->intr_ref_cnt)); ++ return; ++ } ++ ++ __ath9k_hw_enable_interrupts(ah); ++} ++EXPORT_SYMBOL(ath9k_hw_resume_interrupts); ++ ++void ath9k_hw_enable_interrupts(struct ath_hw *ah) ++{ ++ struct ath_common *common = ath9k_hw_common(ah); ++ ++ if (!(ah->imask & ATH9K_INT_GLOBAL)) ++ return; ++ ++ if (!atomic_inc_and_test(&ah->intr_ref_cnt)) { ++ ath_dbg(common, INTERRUPT, "Do not enable IER ref count %d\n", ++ atomic_read(&ah->intr_ref_cnt)); ++ return; ++ } ++ ++ __ath9k_hw_enable_interrupts(ah); ++} + EXPORT_SYMBOL(ath9k_hw_enable_interrupts); + + void ath9k_hw_set_interrupts(struct ath_hw *ah) +diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h +index 3bab014..770fc11 100644 +--- a/drivers/net/wireless/ath/ath9k/mac.h ++++ b/drivers/net/wireless/ath/ath9k/mac.h +@@ -744,6 +744,7 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah); + void ath9k_hw_enable_interrupts(struct ath_hw *ah); + void ath9k_hw_disable_interrupts(struct ath_hw *ah); + void ath9k_hw_kill_interrupts(struct ath_hw *ah); ++void ath9k_hw_resume_interrupts(struct ath_hw *ah); + + void 
ar9002_hw_attach_mac_ops(struct ath_hw *ah); + +diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c +index e9f32b5..b868f02 100644 +--- a/drivers/net/wireless/ath/ath9k/main.c ++++ b/drivers/net/wireless/ath/ath9k/main.c +@@ -373,21 +373,20 @@ void ath9k_tasklet(unsigned long data) + struct ath_common *common = ath9k_hw_common(ah); + enum ath_reset_type type; + unsigned long flags; +- u32 status = sc->intrstatus; ++ u32 status; + u32 rxmask; + ++ spin_lock_irqsave(&sc->intr_lock, flags); ++ status = sc->intrstatus; ++ sc->intrstatus = 0; ++ spin_unlock_irqrestore(&sc->intr_lock, flags); ++ + ath9k_ps_wakeup(sc); + spin_lock(&sc->sc_pcu_lock); + + if (status & ATH9K_INT_FATAL) { + type = RESET_TYPE_FATAL_INT; + ath9k_queue_reset(sc, type); +- +- /* +- * Increment the ref. counter here so that +- * interrupts are enabled in the reset routine. +- */ +- atomic_inc(&ah->intr_ref_cnt); + ath_dbg(common, RESET, "FATAL: Skipping interrupts\n"); + goto out; + } +@@ -403,11 +402,6 @@ void ath9k_tasklet(unsigned long data) + type = RESET_TYPE_BB_WATCHDOG; + ath9k_queue_reset(sc, type); + +- /* +- * Increment the ref. counter here so that +- * interrupts are enabled in the reset routine. +- */ +- atomic_inc(&ah->intr_ref_cnt); + ath_dbg(common, RESET, + "BB_WATCHDOG: Skipping interrupts\n"); + goto out; +@@ -420,7 +414,6 @@ void ath9k_tasklet(unsigned long data) + if ((sc->gtt_cnt >= MAX_GTT_CNT) && !ath9k_hw_check_alive(ah)) { + type = RESET_TYPE_TX_GTT; + ath9k_queue_reset(sc, type); +- atomic_inc(&ah->intr_ref_cnt); + ath_dbg(common, RESET, + "GTT: Skipping interrupts\n"); + goto out; +@@ -477,7 +470,7 @@ void ath9k_tasklet(unsigned long data) + ath9k_btcoex_handle_interrupt(sc, status); + + /* re-enable hardware interrupt */ +- ath9k_hw_enable_interrupts(ah); ++ ath9k_hw_resume_interrupts(ah); + out: + spin_unlock(&sc->sc_pcu_lock); + ath9k_ps_restore(sc); +@@ -541,7 +534,9 @@ irqreturn_t ath_isr(int irq, void *dev) + return IRQ_NONE; + + /* Cache the status */ +- sc->intrstatus = status; ++ spin_lock(&sc->intr_lock); ++ sc->intrstatus |= status; ++ spin_unlock(&sc->intr_lock); + + if (status & SCHED_INTR) + sched = true; +@@ -587,7 +582,7 @@ irqreturn_t ath_isr(int irq, void *dev) + + if (sched) { + /* turn off every interrupt */ +- ath9k_hw_disable_interrupts(ah); ++ ath9k_hw_kill_interrupts(ah); + tasklet_schedule(&sc->intr_tq); + } + +diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.h b/drivers/net/wireless/realtek/rtlwifi/pci.h +index b951eba..d2f4dd4 100644 +--- a/drivers/net/wireless/realtek/rtlwifi/pci.h ++++ b/drivers/net/wireless/realtek/rtlwifi/pci.h +@@ -275,10 +275,10 @@ struct mp_adapter { + }; + + struct rtl_pci_priv { ++ struct bt_coexist_info bt_coexist; ++ struct rtl_led_ctl ledctl; + struct rtl_pci dev; + struct mp_adapter ndis_adapter; +- struct rtl_led_ctl ledctl; +- struct bt_coexist_info bt_coexist; + }; + + #define rtl_pcipriv(hw) (((struct rtl_pci_priv *)(rtl_priv(hw))->priv)) +diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c +index ebf663e..cab4601 100644 +--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c ++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c +@@ -1006,7 +1006,7 @@ static void _rtl92ee_hw_configure(struct ieee80211_hw *hw) + rtl_write_word(rtlpriv, REG_SIFS_TRX, 0x100a); + + /* Note Data sheet don't define */ +- rtl_write_word(rtlpriv, 0x4C7, 0x80); ++ rtl_write_byte(rtlpriv, 0x4C7, 0x80); + + rtl_write_byte(rtlpriv, 
REG_RX_PKT_LIMIT, 0x20); + +diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c +index 1281ebe..2cbef96 100644 +--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c ++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c +@@ -1128,7 +1128,7 @@ static u8 _rtl8821ae_dbi_read(struct rtl_priv *rtlpriv, u16 addr) + } + if (0 == tmp) { + read_addr = REG_DBI_RDATA + addr % 4; +- ret = rtl_read_word(rtlpriv, read_addr); ++ ret = rtl_read_byte(rtlpriv, read_addr); + } + return ret; + } +diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.h b/drivers/net/wireless/realtek/rtlwifi/usb.h +index 685273c..441c441 100644 +--- a/drivers/net/wireless/realtek/rtlwifi/usb.h ++++ b/drivers/net/wireless/realtek/rtlwifi/usb.h +@@ -150,8 +150,9 @@ struct rtl_usb { + }; + + struct rtl_usb_priv { +- struct rtl_usb dev; ++ struct bt_coexist_info bt_coexist; + struct rtl_led_ctl ledctl; ++ struct rtl_usb dev; + }; + + #define rtl_usbpriv(hw) (((struct rtl_usb_priv *)(rtl_priv(hw))->priv)) +diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c +index 763ff87..61fc349 100644 +--- a/drivers/pci/host/pci-hyperv.c ++++ b/drivers/pci/host/pci-hyperv.c +@@ -130,7 +130,8 @@ union pci_version { + */ + union win_slot_encoding { + struct { +- u32 func:8; ++ u32 dev:5; ++ u32 func:3; + u32 reserved:24; + } bits; + u32 slot; +@@ -483,7 +484,8 @@ static u32 devfn_to_wslot(int devfn) + union win_slot_encoding wslot; + + wslot.slot = 0; +- wslot.bits.func = PCI_SLOT(devfn) | (PCI_FUNC(devfn) << 5); ++ wslot.bits.dev = PCI_SLOT(devfn); ++ wslot.bits.func = PCI_FUNC(devfn); + + return wslot.slot; + } +@@ -501,7 +503,7 @@ static int wslot_to_devfn(u32 wslot) + union win_slot_encoding slot_no; + + slot_no.slot = wslot; +- return PCI_DEVFN(0, slot_no.bits.func); ++ return PCI_DEVFN(slot_no.bits.dev, slot_no.bits.func); + } + + /* +diff --git a/drivers/pci/host/pcie-altera.c b/drivers/pci/host/pcie-altera.c +index b0ac4df..f2907e7 100644 +--- a/drivers/pci/host/pcie-altera.c ++++ b/drivers/pci/host/pcie-altera.c +@@ -57,10 +57,14 @@ + #define TLP_WRITE_TAG 0x10 + #define RP_DEVFN 0 + #define TLP_REQ_ID(bus, devfn) (((bus) << 8) | (devfn)) +-#define TLP_CFG_DW0(pcie, bus) \ ++#define TLP_CFGRD_DW0(pcie, bus) \ + ((((bus == pcie->root_bus_nr) ? TLP_FMTTYPE_CFGRD0 \ + : TLP_FMTTYPE_CFGRD1) << 24) | \ + TLP_PAYLOAD_SIZE) ++#define TLP_CFGWR_DW0(pcie, bus) \ ++ ((((bus == pcie->root_bus_nr) ? 
TLP_FMTTYPE_CFGWR0 \ ++ : TLP_FMTTYPE_CFGWR1) << 24) | \ ++ TLP_PAYLOAD_SIZE) + #define TLP_CFG_DW1(pcie, tag, be) \ + (((TLP_REQ_ID(pcie->root_bus_nr, RP_DEVFN)) << 16) | (tag << 8) | (be)) + #define TLP_CFG_DW2(bus, devfn, offset) \ +@@ -222,7 +226,7 @@ static int tlp_cfg_dword_read(struct altera_pcie *pcie, u8 bus, u32 devfn, + { + u32 headers[TLP_HDR_SIZE]; + +- headers[0] = TLP_CFG_DW0(pcie, bus); ++ headers[0] = TLP_CFGRD_DW0(pcie, bus); + headers[1] = TLP_CFG_DW1(pcie, TLP_READ_TAG, byte_en); + headers[2] = TLP_CFG_DW2(bus, devfn, where); + +@@ -237,7 +241,7 @@ static int tlp_cfg_dword_write(struct altera_pcie *pcie, u8 bus, u32 devfn, + u32 headers[TLP_HDR_SIZE]; + int ret; + +- headers[0] = TLP_CFG_DW0(pcie, bus); ++ headers[0] = TLP_CFGWR_DW0(pcie, bus); + headers[1] = TLP_CFG_DW1(pcie, TLP_WRITE_TAG, byte_en); + headers[2] = TLP_CFG_DW2(bus, devfn, where); + +diff --git a/drivers/pci/hotplug/pnv_php.c b/drivers/pci/hotplug/pnv_php.c +index 56efaf7..acb2be0 100644 +--- a/drivers/pci/hotplug/pnv_php.c ++++ b/drivers/pci/hotplug/pnv_php.c +@@ -35,9 +35,11 @@ static void pnv_php_register(struct device_node *dn); + static void pnv_php_unregister_one(struct device_node *dn); + static void pnv_php_unregister(struct device_node *dn); + +-static void pnv_php_disable_irq(struct pnv_php_slot *php_slot) ++static void pnv_php_disable_irq(struct pnv_php_slot *php_slot, ++ bool disable_device) + { + struct pci_dev *pdev = php_slot->pdev; ++ int irq = php_slot->irq; + u16 ctrl; + + if (php_slot->irq > 0) { +@@ -56,10 +58,14 @@ static void pnv_php_disable_irq(struct pnv_php_slot *php_slot) + php_slot->wq = NULL; + } + +- if (pdev->msix_enabled) +- pci_disable_msix(pdev); +- else if (pdev->msi_enabled) +- pci_disable_msi(pdev); ++ if (disable_device || irq > 0) { ++ if (pdev->msix_enabled) ++ pci_disable_msix(pdev); ++ else if (pdev->msi_enabled) ++ pci_disable_msi(pdev); ++ ++ pci_disable_device(pdev); ++ } + } + + static void pnv_php_free_slot(struct kref *kref) +@@ -68,7 +74,7 @@ static void pnv_php_free_slot(struct kref *kref) + struct pnv_php_slot, kref); + + WARN_ON(!list_empty(&php_slot->children)); +- pnv_php_disable_irq(php_slot); ++ pnv_php_disable_irq(php_slot, false); + kfree(php_slot->name); + kfree(php_slot); + } +@@ -759,7 +765,7 @@ static void pnv_php_init_irq(struct pnv_php_slot *php_slot, int irq) + php_slot->wq = alloc_workqueue("pciehp-%s", 0, 0, php_slot->name); + if (!php_slot->wq) { + dev_warn(&pdev->dev, "Cannot alloc workqueue\n"); +- pnv_php_disable_irq(php_slot); ++ pnv_php_disable_irq(php_slot, true); + return; + } + +@@ -772,7 +778,7 @@ static void pnv_php_init_irq(struct pnv_php_slot *php_slot, int irq) + ret = request_irq(irq, pnv_php_interrupt, IRQF_SHARED, + php_slot->name, php_slot); + if (ret) { +- pnv_php_disable_irq(php_slot); ++ pnv_php_disable_irq(php_slot, true); + dev_warn(&pdev->dev, "Error %d enabling IRQ %d\n", ret, irq); + return; + } +diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig +index c74c3f6..02e46bb 100644 +--- a/drivers/power/reset/Kconfig ++++ b/drivers/power/reset/Kconfig +@@ -32,7 +32,7 @@ config POWER_RESET_AT91_RESET + + config POWER_RESET_AT91_SAMA5D2_SHDWC + tristate "Atmel AT91 SAMA5D2-Compatible shutdown controller driver" +- depends on ARCH_AT91 || COMPILE_TEST ++ depends on ARCH_AT91 + default SOC_SAMA5 + help + This driver supports the alternate shutdown controller for some Atmel +diff --git a/drivers/power/reset/at91-poweroff.c b/drivers/power/reset/at91-poweroff.c +index e9e24df..2579f02 100644 +--- 
a/drivers/power/reset/at91-poweroff.c ++++ b/drivers/power/reset/at91-poweroff.c +@@ -14,9 +14,12 @@ + #include <linux/io.h> + #include <linux/module.h> + #include <linux/of.h> ++#include <linux/of_address.h> + #include <linux/platform_device.h> + #include <linux/printk.h> + ++#include <soc/at91/at91sam9_ddrsdr.h> ++ + #define AT91_SHDW_CR 0x00 /* Shut Down Control Register */ + #define AT91_SHDW_SHDW BIT(0) /* Shut Down command */ + #define AT91_SHDW_KEY (0xa5 << 24) /* KEY Password */ +@@ -50,6 +53,7 @@ static const char *shdwc_wakeup_modes[] = { + + static void __iomem *at91_shdwc_base; + static struct clk *sclk; ++static void __iomem *mpddrc_base; + + static void __init at91_wakeup_status(void) + { +@@ -73,6 +77,29 @@ static void at91_poweroff(void) + writel(AT91_SHDW_KEY | AT91_SHDW_SHDW, at91_shdwc_base + AT91_SHDW_CR); + } + ++static void at91_lpddr_poweroff(void) ++{ ++ asm volatile( ++ /* Align to cache lines */ ++ ".balign 32\n\t" ++ ++ /* Ensure AT91_SHDW_CR is in the TLB by reading it */ ++ " ldr r6, [%2, #" __stringify(AT91_SHDW_CR) "]\n\t" ++ ++ /* Power down SDRAM0 */ ++ " str %1, [%0, #" __stringify(AT91_DDRSDRC_LPR) "]\n\t" ++ /* Shutdown CPU */ ++ " str %3, [%2, #" __stringify(AT91_SHDW_CR) "]\n\t" ++ ++ " b .\n\t" ++ : ++ : "r" (mpddrc_base), ++ "r" cpu_to_le32(AT91_DDRSDRC_LPDDR2_PWOFF), ++ "r" (at91_shdwc_base), ++ "r" cpu_to_le32(AT91_SHDW_KEY | AT91_SHDW_SHDW) ++ : "r0"); ++} ++ + static int at91_poweroff_get_wakeup_mode(struct device_node *np) + { + const char *pm; +@@ -124,6 +151,8 @@ static void at91_poweroff_dt_set_wakeup_mode(struct platform_device *pdev) + static int __init at91_poweroff_probe(struct platform_device *pdev) + { + struct resource *res; ++ struct device_node *np; ++ u32 ddr_type; + int ret; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); +@@ -150,12 +179,30 @@ static int __init at91_poweroff_probe(struct platform_device *pdev) + + pm_power_off = at91_poweroff; + ++ np = of_find_compatible_node(NULL, NULL, "atmel,sama5d3-ddramc"); ++ if (!np) ++ return 0; ++ ++ mpddrc_base = of_iomap(np, 0); ++ of_node_put(np); ++ ++ if (!mpddrc_base) ++ return 0; ++ ++ ddr_type = readl(mpddrc_base + AT91_DDRSDRC_MDR) & AT91_DDRSDRC_MD; ++ if ((ddr_type == AT91_DDRSDRC_MD_LPDDR2) || ++ (ddr_type == AT91_DDRSDRC_MD_LPDDR3)) ++ pm_power_off = at91_lpddr_poweroff; ++ else ++ iounmap(mpddrc_base); ++ + return 0; + } + + static int __exit at91_poweroff_remove(struct platform_device *pdev) + { +- if (pm_power_off == at91_poweroff) ++ if (pm_power_off == at91_poweroff || ++ pm_power_off == at91_lpddr_poweroff) + pm_power_off = NULL; + + clk_disable_unprepare(sclk); +@@ -163,6 +210,11 @@ static int __exit at91_poweroff_remove(struct platform_device *pdev) + return 0; + } + ++static const struct of_device_id at91_ramc_of_match[] = { ++ { .compatible = "atmel,sama5d3-ddramc", }, ++ { /* sentinel */ } ++}; ++ + static const struct of_device_id at91_poweroff_of_match[] = { + { .compatible = "atmel,at91sam9260-shdwc", }, + { .compatible = "atmel,at91sam9rl-shdwc", }, +diff --git a/drivers/power/reset/at91-sama5d2_shdwc.c b/drivers/power/reset/at91-sama5d2_shdwc.c +index 8a5ac97..90b0b5a 100644 +--- a/drivers/power/reset/at91-sama5d2_shdwc.c ++++ b/drivers/power/reset/at91-sama5d2_shdwc.c +@@ -22,9 +22,12 @@ + #include <linux/io.h> + #include <linux/module.h> + #include <linux/of.h> ++#include <linux/of_address.h> + #include <linux/platform_device.h> + #include <linux/printk.h> + ++#include <soc/at91/at91sam9_ddrsdr.h> ++ + #define SLOW_CLOCK_FREQ 32768 + + #define 
AT91_SHDW_CR 0x00 /* Shut Down Control Register */ +@@ -75,6 +78,7 @@ struct shdwc { + */ + static struct shdwc *at91_shdwc; + static struct clk *sclk; ++static void __iomem *mpddrc_base; + + static const unsigned long long sdwc_dbc_period[] = { + 0, 3, 32, 512, 4096, 32768, +@@ -108,6 +112,29 @@ static void at91_poweroff(void) + at91_shdwc->at91_shdwc_base + AT91_SHDW_CR); + } + ++static void at91_lpddr_poweroff(void) ++{ ++ asm volatile( ++ /* Align to cache lines */ ++ ".balign 32\n\t" ++ ++ /* Ensure AT91_SHDW_CR is in the TLB by reading it */ ++ " ldr r6, [%2, #" __stringify(AT91_SHDW_CR) "]\n\t" ++ ++ /* Power down SDRAM0 */ ++ " str %1, [%0, #" __stringify(AT91_DDRSDRC_LPR) "]\n\t" ++ /* Shutdown CPU */ ++ " str %3, [%2, #" __stringify(AT91_SHDW_CR) "]\n\t" ++ ++ " b .\n\t" ++ : ++ : "r" (mpddrc_base), ++ "r" cpu_to_le32(AT91_DDRSDRC_LPDDR2_PWOFF), ++ "r" (at91_shdwc->at91_shdwc_base), ++ "r" cpu_to_le32(AT91_SHDW_KEY | AT91_SHDW_SHDW) ++ : "r0"); ++} ++ + static u32 at91_shdwc_debouncer_value(struct platform_device *pdev, + u32 in_period_us) + { +@@ -212,6 +239,8 @@ static int __init at91_shdwc_probe(struct platform_device *pdev) + { + struct resource *res; + const struct of_device_id *match; ++ struct device_node *np; ++ u32 ddr_type; + int ret; + + if (!pdev->dev.of_node) +@@ -249,6 +278,23 @@ static int __init at91_shdwc_probe(struct platform_device *pdev) + + pm_power_off = at91_poweroff; + ++ np = of_find_compatible_node(NULL, NULL, "atmel,sama5d3-ddramc"); ++ if (!np) ++ return 0; ++ ++ mpddrc_base = of_iomap(np, 0); ++ of_node_put(np); ++ ++ if (!mpddrc_base) ++ return 0; ++ ++ ddr_type = readl(mpddrc_base + AT91_DDRSDRC_MDR) & AT91_DDRSDRC_MD; ++ if ((ddr_type == AT91_DDRSDRC_MD_LPDDR2) || ++ (ddr_type == AT91_DDRSDRC_MD_LPDDR3)) ++ pm_power_off = at91_lpddr_poweroff; ++ else ++ iounmap(mpddrc_base); ++ + return 0; + } + +@@ -256,7 +302,8 @@ static int __exit at91_shdwc_remove(struct platform_device *pdev) + { + struct shdwc *shdw = platform_get_drvdata(pdev); + +- if (pm_power_off == at91_poweroff) ++ if (pm_power_off == at91_poweroff || ++ pm_power_off == at91_lpddr_poweroff) + pm_power_off = NULL; + + /* Reset values to disable wake-up features */ +diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c +index 5c1519b..9faccfc 100644 +--- a/drivers/regulator/core.c ++++ b/drivers/regulator/core.c +@@ -4357,12 +4357,13 @@ static void regulator_summary_show_subtree(struct seq_file *s, + seq_puts(s, "\n"); + + list_for_each_entry(consumer, &rdev->consumer_list, list) { +- if (consumer->dev->class == ®ulator_class) ++ if (consumer->dev && consumer->dev->class == ®ulator_class) + continue; + + seq_printf(s, "%*s%-*s ", + (level + 1) * 3 + 1, "", +- 30 - (level + 1) * 3, dev_name(consumer->dev)); ++ 30 - (level + 1) * 3, ++ consumer->dev ? 
dev_name(consumer->dev) : "deviceless"); + + switch (rdev->desc->type) { + case REGULATOR_VOLTAGE: +diff --git a/drivers/remoteproc/qcom_mdt_loader.c b/drivers/remoteproc/qcom_mdt_loader.c +index 114e8e4..04db02d 100644 +--- a/drivers/remoteproc/qcom_mdt_loader.c ++++ b/drivers/remoteproc/qcom_mdt_loader.c +@@ -115,6 +115,7 @@ int qcom_mdt_load(struct rproc *rproc, + const struct elf32_phdr *phdrs; + const struct elf32_phdr *phdr; + const struct elf32_hdr *ehdr; ++ const struct firmware *seg_fw; + size_t fw_name_len; + char *fw_name; + void *ptr; +@@ -153,16 +154,16 @@ int qcom_mdt_load(struct rproc *rproc, + + if (phdr->p_filesz) { + sprintf(fw_name + fw_name_len - 3, "b%02d", i); +- ret = request_firmware(&fw, fw_name, &rproc->dev); ++ ret = request_firmware(&seg_fw, fw_name, &rproc->dev); + if (ret) { + dev_err(&rproc->dev, "failed to load %s\n", + fw_name); + break; + } + +- memcpy(ptr, fw->data, fw->size); ++ memcpy(ptr, seg_fw->data, seg_fw->size); + +- release_firmware(fw); ++ release_firmware(seg_fw); + } + + if (phdr->p_memsz > phdr->p_filesz) +diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig +index e859d14..0723c97 100644 +--- a/drivers/rtc/Kconfig ++++ b/drivers/rtc/Kconfig +@@ -1432,7 +1432,7 @@ config RTC_DRV_SUN4V + based RTC on SUN4V systems. + + config RTC_DRV_SUN6I +- tristate "Allwinner A31 RTC" ++ bool "Allwinner A31 RTC" + default MACH_SUN6I || MACH_SUN8I || COMPILE_TEST + depends on ARCH_SUNXI + help +diff --git a/drivers/rtc/rtc-sun6i.c b/drivers/rtc/rtc-sun6i.c +index c169a2c..b0d45d2 100644 +--- a/drivers/rtc/rtc-sun6i.c ++++ b/drivers/rtc/rtc-sun6i.c +@@ -37,9 +37,11 @@ + + /* Control register */ + #define SUN6I_LOSC_CTRL 0x0000 ++#define SUN6I_LOSC_CTRL_KEY (0x16aa << 16) + #define SUN6I_LOSC_CTRL_ALM_DHMS_ACC BIT(9) + #define SUN6I_LOSC_CTRL_RTC_HMS_ACC BIT(8) + #define SUN6I_LOSC_CTRL_RTC_YMD_ACC BIT(7) ++#define SUN6I_LOSC_CTRL_EXT_OSC BIT(0) + #define SUN6I_LOSC_CTRL_ACC_MASK GENMASK(9, 7) + + /* RTC */ +@@ -114,13 +116,17 @@ struct sun6i_rtc_dev { + void __iomem *base; + int irq; + unsigned long alarm; ++ ++ spinlock_t lock; + }; + + static irqreturn_t sun6i_rtc_alarmirq(int irq, void *id) + { + struct sun6i_rtc_dev *chip = (struct sun6i_rtc_dev *) id; ++ irqreturn_t ret = IRQ_NONE; + u32 val; + ++ spin_lock(&chip->lock); + val = readl(chip->base + SUN6I_ALRM_IRQ_STA); + + if (val & SUN6I_ALRM_IRQ_STA_CNT_IRQ_PEND) { +@@ -129,10 +135,11 @@ static irqreturn_t sun6i_rtc_alarmirq(int irq, void *id) + + rtc_update_irq(chip->rtc, 1, RTC_AF | RTC_IRQF); + +- return IRQ_HANDLED; ++ ret = IRQ_HANDLED; + } ++ spin_unlock(&chip->lock); + +- return IRQ_NONE; ++ return ret; + } + + static void sun6i_rtc_setaie(int to, struct sun6i_rtc_dev *chip) +@@ -140,6 +147,7 @@ static void sun6i_rtc_setaie(int to, struct sun6i_rtc_dev *chip) + u32 alrm_val = 0; + u32 alrm_irq_val = 0; + u32 alrm_wake_val = 0; ++ unsigned long flags; + + if (to) { + alrm_val = SUN6I_ALRM_EN_CNT_EN; +@@ -150,9 +158,11 @@ static void sun6i_rtc_setaie(int to, struct sun6i_rtc_dev *chip) + chip->base + SUN6I_ALRM_IRQ_STA); + } + ++ spin_lock_irqsave(&chip->lock, flags); + writel(alrm_val, chip->base + SUN6I_ALRM_EN); + writel(alrm_irq_val, chip->base + SUN6I_ALRM_IRQ_EN); + writel(alrm_wake_val, chip->base + SUN6I_ALARM_CONFIG); ++ spin_unlock_irqrestore(&chip->lock, flags); + } + + static int sun6i_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm) +@@ -191,11 +201,15 @@ static int sun6i_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm) + static int sun6i_rtc_getalarm(struct device 
*dev, struct rtc_wkalrm *wkalrm) + { + struct sun6i_rtc_dev *chip = dev_get_drvdata(dev); ++ unsigned long flags; + u32 alrm_st; + u32 alrm_en; + ++ spin_lock_irqsave(&chip->lock, flags); + alrm_en = readl(chip->base + SUN6I_ALRM_IRQ_EN); + alrm_st = readl(chip->base + SUN6I_ALRM_IRQ_STA); ++ spin_unlock_irqrestore(&chip->lock, flags); ++ + wkalrm->enabled = !!(alrm_en & SUN6I_ALRM_EN_CNT_EN); + wkalrm->pending = !!(alrm_st & SUN6I_ALRM_EN_CNT_EN); + rtc_time_to_tm(chip->alarm, &wkalrm->time); +@@ -356,6 +370,7 @@ static int sun6i_rtc_probe(struct platform_device *pdev) + chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL); + if (!chip) + return -ENOMEM; ++ spin_lock_init(&chip->lock); + + platform_set_drvdata(pdev, chip); + chip->dev = &pdev->dev; +@@ -404,6 +419,10 @@ static int sun6i_rtc_probe(struct platform_device *pdev) + /* disable alarm wakeup */ + writel(0, chip->base + SUN6I_ALARM_CONFIG); + ++ /* switch to the external, more precise, oscillator */ ++ writel(SUN6I_LOSC_CTRL_KEY | SUN6I_LOSC_CTRL_EXT_OSC, ++ chip->base + SUN6I_LOSC_CTRL); ++ + chip->rtc = rtc_device_register("rtc-sun6i", &pdev->dev, + &sun6i_rtc_ops, THIS_MODULE); + if (IS_ERR(chip->rtc)) { +@@ -439,9 +458,4 @@ static struct platform_driver sun6i_rtc_driver = { + .of_match_table = sun6i_rtc_dt_ids, + }, + }; +- +-module_platform_driver(sun6i_rtc_driver); +- +-MODULE_DESCRIPTION("sun6i RTC driver"); +-MODULE_AUTHOR("Chen-Yu Tsai <wens@csie.org>"); +-MODULE_LICENSE("GPL"); ++builtin_platform_driver(sun6i_rtc_driver); +diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c +index 0c45388..7b178d7 100644 +--- a/drivers/scsi/aacraid/src.c ++++ b/drivers/scsi/aacraid/src.c +@@ -414,16 +414,23 @@ static int aac_src_check_health(struct aac_dev *dev) + u32 status = src_readl(dev, MUnit.OMR); + + /* ++ * Check to see if the board panic'd. ++ */ ++ if (unlikely(status & KERNEL_PANIC)) ++ goto err_blink; ++ ++ /* + * Check to see if the board failed any self tests. + */ + if (unlikely(status & SELF_TEST_FAILED)) +- return -1; ++ goto err_out; + + /* +- * Check to see if the board panic'd. ++ * Check to see if the board failed any self tests. + */ +- if (unlikely(status & KERNEL_PANIC)) +- return (status >> 16) & 0xFF; ++ if (unlikely(status & MONITOR_PANIC)) ++ goto err_out; ++ + /* + * Wait for the adapter to be up and running. 
+ */ +@@ -433,6 +440,12 @@ static int aac_src_check_health(struct aac_dev *dev) + * Everything is OK + */ + return 0; ++ ++err_out: ++ return -1; ++ ++err_blink: ++ return (status > 16) & 0xFF; + } + + /** +diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h +index ee802273..55faa94 100644 +--- a/drivers/scsi/lpfc/lpfc_hw4.h ++++ b/drivers/scsi/lpfc/lpfc_hw4.h +@@ -1185,6 +1185,7 @@ struct lpfc_mbx_wq_create { + #define lpfc_mbx_wq_create_page_size_SHIFT 0 + #define lpfc_mbx_wq_create_page_size_MASK 0x000000FF + #define lpfc_mbx_wq_create_page_size_WORD word1 ++#define LPFC_WQ_PAGE_SIZE_4096 0x1 + #define lpfc_mbx_wq_create_wqe_size_SHIFT 8 + #define lpfc_mbx_wq_create_wqe_size_MASK 0x0000000F + #define lpfc_mbx_wq_create_wqe_size_WORD word1 +@@ -1256,6 +1257,7 @@ struct rq_context { + #define lpfc_rq_context_page_size_SHIFT 0 /* Version 1 Only */ + #define lpfc_rq_context_page_size_MASK 0x000000FF + #define lpfc_rq_context_page_size_WORD word0 ++#define LPFC_RQ_PAGE_SIZE_4096 0x1 + uint32_t reserved1; + uint32_t word2; + #define lpfc_rq_context_cq_id_SHIFT 16 +diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c +index f4f77c5..49b4c79 100644 +--- a/drivers/scsi/lpfc/lpfc_sli.c ++++ b/drivers/scsi/lpfc/lpfc_sli.c +@@ -13678,7 +13678,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, + LPFC_WQ_WQE_SIZE_128); + bf_set(lpfc_mbx_wq_create_page_size, + &wq_create->u.request_1, +- (PAGE_SIZE/SLI4_PAGE_SIZE)); ++ LPFC_WQ_PAGE_SIZE_4096); + page = wq_create->u.request_1.page; + break; + } +@@ -13704,8 +13704,9 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, + LPFC_WQ_WQE_SIZE_128); + break; + } +- bf_set(lpfc_mbx_wq_create_page_size, &wq_create->u.request_1, +- (PAGE_SIZE/SLI4_PAGE_SIZE)); ++ bf_set(lpfc_mbx_wq_create_page_size, ++ &wq_create->u.request_1, ++ LPFC_WQ_PAGE_SIZE_4096); + page = wq_create->u.request_1.page; + break; + default: +@@ -13891,7 +13892,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, + LPFC_RQE_SIZE_8); + bf_set(lpfc_rq_context_page_size, + &rq_create->u.request.context, +- (PAGE_SIZE/SLI4_PAGE_SIZE)); ++ LPFC_RQ_PAGE_SIZE_4096); + } else { + switch (hrq->entry_count) { + default: +diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c +index b8d3b97..84addee 100644 +--- a/drivers/scsi/scsi_dh.c ++++ b/drivers/scsi/scsi_dh.c +@@ -219,20 +219,6 @@ int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh) + } + EXPORT_SYMBOL_GPL(scsi_unregister_device_handler); + +-static struct scsi_device *get_sdev_from_queue(struct request_queue *q) +-{ +- struct scsi_device *sdev; +- unsigned long flags; +- +- spin_lock_irqsave(q->queue_lock, flags); +- sdev = q->queuedata; +- if (!sdev || !get_device(&sdev->sdev_gendev)) +- sdev = NULL; +- spin_unlock_irqrestore(q->queue_lock, flags); +- +- return sdev; +-} +- + /* + * scsi_dh_activate - activate the path associated with the scsi_device + * corresponding to the given request queue. 
+@@ -251,7 +237,7 @@ int scsi_dh_activate(struct request_queue *q, activate_complete fn, void *data) + struct scsi_device *sdev; + int err = SCSI_DH_NOSYS; + +- sdev = get_sdev_from_queue(q); ++ sdev = scsi_device_from_queue(q); + if (!sdev) { + if (fn) + fn(data, err); +@@ -298,7 +284,7 @@ int scsi_dh_set_params(struct request_queue *q, const char *params) + struct scsi_device *sdev; + int err = -SCSI_DH_NOSYS; + +- sdev = get_sdev_from_queue(q); ++ sdev = scsi_device_from_queue(q); + if (!sdev) + return err; + +@@ -321,7 +307,7 @@ int scsi_dh_attach(struct request_queue *q, const char *name) + struct scsi_device_handler *scsi_dh; + int err = 0; + +- sdev = get_sdev_from_queue(q); ++ sdev = scsi_device_from_queue(q); + if (!sdev) + return -ENODEV; + +@@ -359,7 +345,7 @@ const char *scsi_dh_attached_handler_name(struct request_queue *q, gfp_t gfp) + struct scsi_device *sdev; + const char *handler_name = NULL; + +- sdev = get_sdev_from_queue(q); ++ sdev = scsi_device_from_queue(q); + if (!sdev) + return NULL; + +diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c +index e64eae4..d8099c7 100644 +--- a/drivers/scsi/scsi_lib.c ++++ b/drivers/scsi/scsi_lib.c +@@ -2127,6 +2127,29 @@ void scsi_mq_destroy_tags(struct Scsi_Host *shost) + blk_mq_free_tag_set(&shost->tag_set); + } + ++/** ++ * scsi_device_from_queue - return sdev associated with a request_queue ++ * @q: The request queue to return the sdev from ++ * ++ * Return the sdev associated with a request queue or NULL if the ++ * request_queue does not reference a SCSI device. ++ */ ++struct scsi_device *scsi_device_from_queue(struct request_queue *q) ++{ ++ struct scsi_device *sdev = NULL; ++ ++ if (q->mq_ops) { ++ if (q->mq_ops == &scsi_mq_ops) ++ sdev = q->queuedata; ++ } else if (q->request_fn == scsi_request_fn) ++ sdev = q->queuedata; ++ if (!sdev || !get_device(&sdev->sdev_gendev)) ++ sdev = NULL; ++ ++ return sdev; ++} ++EXPORT_SYMBOL_GPL(scsi_device_from_queue); ++ + /* + * Function: scsi_block_requests() + * +diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c +index 8ccfc9e..3f218f5 100644 +--- a/drivers/scsi/storvsc_drv.c ++++ b/drivers/scsi/storvsc_drv.c +@@ -136,6 +136,8 @@ struct hv_fc_wwn_packet { + #define SRB_FLAGS_PORT_DRIVER_RESERVED 0x0F000000 + #define SRB_FLAGS_CLASS_DRIVER_RESERVED 0xF0000000 + ++#define SP_UNTAGGED ((unsigned char) ~0) ++#define SRB_SIMPLE_TAG_REQUEST 0x20 + + /* + * Platform neutral description of a scsi request - +@@ -375,6 +377,7 @@ enum storvsc_request_type { + #define SRB_STATUS_SUCCESS 0x01 + #define SRB_STATUS_ABORTED 0x02 + #define SRB_STATUS_ERROR 0x04 ++#define SRB_STATUS_DATA_OVERRUN 0x12 + + #define SRB_STATUS(status) \ + (status & ~(SRB_STATUS_AUTOSENSE_VALID | SRB_STATUS_QUEUE_FROZEN)) +@@ -889,6 +892,13 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb, + switch (SRB_STATUS(vm_srb->srb_status)) { + case SRB_STATUS_ERROR: + /* ++ * Let upper layer deal with error when ++ * sense message is present. ++ */ ++ ++ if (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID) ++ break; ++ /* + * If there is an error; offline the device since all + * error recovery strategies would have already been + * deployed on the host side. 
However, if the command +@@ -953,6 +963,7 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request, + struct scsi_cmnd *scmnd = cmd_request->cmd; + struct scsi_sense_hdr sense_hdr; + struct vmscsi_request *vm_srb; ++ u32 data_transfer_length; + struct Scsi_Host *host; + u32 payload_sz = cmd_request->payload_sz; + void *payload = cmd_request->payload; +@@ -960,6 +971,7 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request, + host = stor_dev->host; + + vm_srb = &cmd_request->vstor_packet.vm_srb; ++ data_transfer_length = vm_srb->data_transfer_length; + + scmnd->result = vm_srb->scsi_status; + +@@ -973,13 +985,20 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request, + &sense_hdr); + } + +- if (vm_srb->srb_status != SRB_STATUS_SUCCESS) ++ if (vm_srb->srb_status != SRB_STATUS_SUCCESS) { + storvsc_handle_error(vm_srb, scmnd, host, sense_hdr.asc, + sense_hdr.ascq); ++ /* ++ * The Windows driver set data_transfer_length on ++ * SRB_STATUS_DATA_OVERRUN. On other errors, this value ++ * is untouched. In these cases we set it to 0. ++ */ ++ if (vm_srb->srb_status != SRB_STATUS_DATA_OVERRUN) ++ data_transfer_length = 0; ++ } + + scsi_set_resid(scmnd, +- cmd_request->payload->range.len - +- vm_srb->data_transfer_length); ++ cmd_request->payload->range.len - data_transfer_length); + + scmnd->scsi_done(scmnd); + +@@ -1451,6 +1470,13 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd) + vm_srb->win8_extension.srb_flags |= + SRB_FLAGS_DISABLE_SYNCH_TRANSFER; + ++ if (scmnd->device->tagged_supported) { ++ vm_srb->win8_extension.srb_flags |= ++ (SRB_FLAGS_QUEUE_ACTION_ENABLE | SRB_FLAGS_NO_QUEUE_FREEZE); ++ vm_srb->win8_extension.queue_tag = SP_UNTAGGED; ++ vm_srb->win8_extension.queue_action = SRB_SIMPLE_TAG_REQUEST; ++ } ++ + /* Build the SRB */ + switch (scmnd->sc_data_direction) { + case DMA_TO_DEVICE: +diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c +index 3c09e94..186342b 100644 +--- a/drivers/spi/spi-s3c64xx.c ++++ b/drivers/spi/spi-s3c64xx.c +@@ -1003,7 +1003,7 @@ static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev) + sci->num_cs = temp; + } + +- sci->no_cs = of_property_read_bool(dev->of_node, "broken-cs"); ++ sci->no_cs = of_property_read_bool(dev->of_node, "no-cs-readback"); + + return sci; + } +diff --git a/drivers/staging/greybus/loopback.c b/drivers/staging/greybus/loopback.c +index 7882306..29dc249 100644 +--- a/drivers/staging/greybus/loopback.c ++++ b/drivers/staging/greybus/loopback.c +@@ -1051,8 +1051,13 @@ static int gb_loopback_fn(void *data) + gb_loopback_calculate_stats(gb, !!error); + } + gb->send_count++; +- if (us_wait) +- udelay(us_wait); ++ ++ if (us_wait) { ++ if (us_wait < 20000) ++ usleep_range(us_wait, us_wait + 100); ++ else ++ msleep(us_wait / 1000); ++ } + } + + gb_pm_runtime_put_autosuspend(bundle); +diff --git a/drivers/staging/lustre/lnet/selftest/rpc.c b/drivers/staging/lustre/lnet/selftest/rpc.c +index f5619d8..0256d65 100644 +--- a/drivers/staging/lustre/lnet/selftest/rpc.c ++++ b/drivers/staging/lustre/lnet/selftest/rpc.c +@@ -252,7 +252,7 @@ srpc_service_init(struct srpc_service *svc) + svc->sv_shuttingdown = 0; + + svc->sv_cpt_data = cfs_percpt_alloc(lnet_cpt_table(), +- sizeof(*svc->sv_cpt_data)); ++ sizeof(**svc->sv_cpt_data)); + if (!svc->sv_cpt_data) + return -ENOMEM; + +diff --git a/drivers/staging/rtl8188eu/core/rtw_recv.c b/drivers/staging/rtl8188eu/core/rtw_recv.c +index b87cbbb..b39fd1e 100644 +--- 
a/drivers/staging/rtl8188eu/core/rtw_recv.c ++++ b/drivers/staging/rtl8188eu/core/rtw_recv.c +@@ -1383,6 +1383,9 @@ static int wlanhdr_to_ethhdr(struct recv_frame *precvframe) + ptr = recvframe_pull(precvframe, (rmv_len-sizeof(struct ethhdr) + (bsnaphdr ? 2 : 0))); + } + ++ if (!ptr) ++ return _FAIL; ++ + memcpy(ptr, pattrib->dst, ETH_ALEN); + memcpy(ptr+ETH_ALEN, pattrib->src, ETH_ALEN); + +diff --git a/drivers/staging/rtl8712/rtl871x_recv.c b/drivers/staging/rtl8712/rtl871x_recv.c +index cbd2e51..cedf25b 100644 +--- a/drivers/staging/rtl8712/rtl871x_recv.c ++++ b/drivers/staging/rtl8712/rtl871x_recv.c +@@ -643,11 +643,16 @@ sint r8712_wlanhdr_to_ethhdr(union recv_frame *precvframe) + /* append rx status for mp test packets */ + ptr = recvframe_pull(precvframe, (rmv_len - + sizeof(struct ethhdr) + 2) - 24); ++ if (!ptr) ++ return _FAIL; + memcpy(ptr, get_rxmem(precvframe), 24); + ptr += 24; +- } else ++ } else { + ptr = recvframe_pull(precvframe, (rmv_len - + sizeof(struct ethhdr) + (bsnaphdr ? 2 : 0))); ++ if (!ptr) ++ return _FAIL; ++ } + + memcpy(ptr, pattrib->dst, ETH_ALEN); + memcpy(ptr + ETH_ALEN, pattrib->src, ETH_ALEN); +diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c +index d2b860e..5dc6bfc 100644 +--- a/drivers/usb/dwc3/gadget.c ++++ b/drivers/usb/dwc3/gadget.c +@@ -1234,6 +1234,9 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol) + unsigned transfer_in_flight; + unsigned started; + ++ if (dep->flags & DWC3_EP_STALL) ++ return 0; ++ + if (dep->number > 1) + trb = dwc3_ep_prev_trb(dep, dep->trb_enqueue); + else +@@ -1258,6 +1261,8 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol) + else + dep->flags |= DWC3_EP_STALL; + } else { ++ if (!(dep->flags & DWC3_EP_STALL)) ++ return 0; + + ret = dwc3_send_clear_stall_ep_cmd(dep); + if (ret) +diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c +index e2966f8..b0f7195 100644 +--- a/drivers/usb/gadget/function/f_hid.c ++++ b/drivers/usb/gadget/function/f_hid.c +@@ -50,12 +50,12 @@ struct f_hidg { + + /* recv report */ + struct list_head completed_out_req; +- spinlock_t spinlock; ++ spinlock_t read_spinlock; + wait_queue_head_t read_queue; + unsigned int qlen; + + /* send report */ +- struct mutex lock; ++ spinlock_t write_spinlock; + bool write_pending; + wait_queue_head_t write_queue; + struct usb_request *req; +@@ -204,28 +204,35 @@ static ssize_t f_hidg_read(struct file *file, char __user *buffer, + if (!access_ok(VERIFY_WRITE, buffer, count)) + return -EFAULT; + +- spin_lock_irqsave(&hidg->spinlock, flags); ++ spin_lock_irqsave(&hidg->read_spinlock, flags); + + #define READ_COND (!list_empty(&hidg->completed_out_req)) + + /* wait for at least one buffer to complete */ + while (!READ_COND) { +- spin_unlock_irqrestore(&hidg->spinlock, flags); ++ spin_unlock_irqrestore(&hidg->read_spinlock, flags); + if (file->f_flags & O_NONBLOCK) + return -EAGAIN; + + if (wait_event_interruptible(hidg->read_queue, READ_COND)) + return -ERESTARTSYS; + +- spin_lock_irqsave(&hidg->spinlock, flags); ++ spin_lock_irqsave(&hidg->read_spinlock, flags); + } + + /* pick the first one */ + list = list_first_entry(&hidg->completed_out_req, + struct f_hidg_req_list, list); ++ ++ /* ++ * Remove this from list to protect it from beign free() ++ * while host disables our function ++ */ ++ list_del(&list->list); ++ + req = list->req; + count = min_t(unsigned int, count, req->actual - list->pos); +- spin_unlock_irqrestore(&hidg->spinlock, flags); ++ 
spin_unlock_irqrestore(&hidg->read_spinlock, flags); + + /* copy to user outside spinlock */ + count -= copy_to_user(buffer, req->buf + list->pos, count); +@@ -238,15 +245,20 @@ static ssize_t f_hidg_read(struct file *file, char __user *buffer, + * call, taking into account its current read position. + */ + if (list->pos == req->actual) { +- spin_lock_irqsave(&hidg->spinlock, flags); +- list_del(&list->list); + kfree(list); +- spin_unlock_irqrestore(&hidg->spinlock, flags); + + req->length = hidg->report_length; + ret = usb_ep_queue(hidg->out_ep, req, GFP_KERNEL); +- if (ret < 0) ++ if (ret < 0) { ++ free_ep_req(hidg->out_ep, req); + return ret; ++ } ++ } else { ++ spin_lock_irqsave(&hidg->read_spinlock, flags); ++ list_add(&list->list, &hidg->completed_out_req); ++ spin_unlock_irqrestore(&hidg->read_spinlock, flags); ++ ++ wake_up(&hidg->read_queue); + } + + return count; +@@ -255,13 +267,16 @@ static ssize_t f_hidg_read(struct file *file, char __user *buffer, + static void f_hidg_req_complete(struct usb_ep *ep, struct usb_request *req) + { + struct f_hidg *hidg = (struct f_hidg *)ep->driver_data; ++ unsigned long flags; + + if (req->status != 0) { + ERROR(hidg->func.config->cdev, + "End Point Request ERROR: %d\n", req->status); + } + ++ spin_lock_irqsave(&hidg->write_spinlock, flags); + hidg->write_pending = 0; ++ spin_unlock_irqrestore(&hidg->write_spinlock, flags); + wake_up(&hidg->write_queue); + } + +@@ -269,18 +284,19 @@ static ssize_t f_hidg_write(struct file *file, const char __user *buffer, + size_t count, loff_t *offp) + { + struct f_hidg *hidg = file->private_data; ++ unsigned long flags; + ssize_t status = -ENOMEM; + + if (!access_ok(VERIFY_READ, buffer, count)) + return -EFAULT; + +- mutex_lock(&hidg->lock); ++ spin_lock_irqsave(&hidg->write_spinlock, flags); + + #define WRITE_COND (!hidg->write_pending) + + /* write queue */ + while (!WRITE_COND) { +- mutex_unlock(&hidg->lock); ++ spin_unlock_irqrestore(&hidg->write_spinlock, flags); + if (file->f_flags & O_NONBLOCK) + return -EAGAIN; + +@@ -288,17 +304,20 @@ static ssize_t f_hidg_write(struct file *file, const char __user *buffer, + hidg->write_queue, WRITE_COND)) + return -ERESTARTSYS; + +- mutex_lock(&hidg->lock); ++ spin_lock_irqsave(&hidg->write_spinlock, flags); + } + ++ hidg->write_pending = 1; + count = min_t(unsigned, count, hidg->report_length); ++ ++ spin_unlock_irqrestore(&hidg->write_spinlock, flags); + status = copy_from_user(hidg->req->buf, buffer, count); + + if (status != 0) { + ERROR(hidg->func.config->cdev, + "copy_from_user error\n"); +- mutex_unlock(&hidg->lock); +- return -EINVAL; ++ status = -EINVAL; ++ goto release_write_pending; + } + + hidg->req->status = 0; +@@ -306,19 +325,23 @@ static ssize_t f_hidg_write(struct file *file, const char __user *buffer, + hidg->req->length = count; + hidg->req->complete = f_hidg_req_complete; + hidg->req->context = hidg; +- hidg->write_pending = 1; + + status = usb_ep_queue(hidg->in_ep, hidg->req, GFP_ATOMIC); + if (status < 0) { + ERROR(hidg->func.config->cdev, + "usb_ep_queue error on int endpoint %zd\n", status); +- hidg->write_pending = 0; +- wake_up(&hidg->write_queue); ++ goto release_write_pending; + } else { + status = count; + } + +- mutex_unlock(&hidg->lock); ++ return status; ++release_write_pending: ++ spin_lock_irqsave(&hidg->write_spinlock, flags); ++ hidg->write_pending = 0; ++ spin_unlock_irqrestore(&hidg->write_spinlock, flags); ++ ++ wake_up(&hidg->write_queue); + + return status; + } +@@ -371,20 +394,36 @@ static inline struct usb_request 
*hidg_alloc_ep_req(struct usb_ep *ep, + static void hidg_set_report_complete(struct usb_ep *ep, struct usb_request *req) + { + struct f_hidg *hidg = (struct f_hidg *) req->context; ++ struct usb_composite_dev *cdev = hidg->func.config->cdev; + struct f_hidg_req_list *req_list; + unsigned long flags; + +- req_list = kzalloc(sizeof(*req_list), GFP_ATOMIC); +- if (!req_list) +- return; ++ switch (req->status) { ++ case 0: ++ req_list = kzalloc(sizeof(*req_list), GFP_ATOMIC); ++ if (!req_list) { ++ ERROR(cdev, "Unable to allocate mem for req_list\n"); ++ goto free_req; ++ } + +- req_list->req = req; ++ req_list->req = req; + +- spin_lock_irqsave(&hidg->spinlock, flags); +- list_add_tail(&req_list->list, &hidg->completed_out_req); +- spin_unlock_irqrestore(&hidg->spinlock, flags); ++ spin_lock_irqsave(&hidg->read_spinlock, flags); ++ list_add_tail(&req_list->list, &hidg->completed_out_req); ++ spin_unlock_irqrestore(&hidg->read_spinlock, flags); + +- wake_up(&hidg->read_queue); ++ wake_up(&hidg->read_queue); ++ break; ++ default: ++ ERROR(cdev, "Set report failed %d\n", req->status); ++ /* FALLTHROUGH */ ++ case -ECONNABORTED: /* hardware forced ep reset */ ++ case -ECONNRESET: /* request dequeued */ ++ case -ESHUTDOWN: /* disconnect from host */ ++free_req: ++ free_ep_req(ep, req); ++ return; ++ } + } + + static int hidg_setup(struct usb_function *f, +@@ -490,14 +529,18 @@ static void hidg_disable(struct usb_function *f) + { + struct f_hidg *hidg = func_to_hidg(f); + struct f_hidg_req_list *list, *next; ++ unsigned long flags; + + usb_ep_disable(hidg->in_ep); + usb_ep_disable(hidg->out_ep); + ++ spin_lock_irqsave(&hidg->read_spinlock, flags); + list_for_each_entry_safe(list, next, &hidg->completed_out_req, list) { ++ free_ep_req(hidg->out_ep, list->req); + list_del(&list->list); + kfree(list); + } ++ spin_unlock_irqrestore(&hidg->read_spinlock, flags); + } + + static int hidg_set_alt(struct usb_function *f, unsigned intf, unsigned alt) +@@ -646,8 +689,8 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f) + if (status) + goto fail; + +- mutex_init(&hidg->lock); +- spin_lock_init(&hidg->spinlock); ++ spin_lock_init(&hidg->write_spinlock); ++ spin_lock_init(&hidg->read_spinlock); + init_waitqueue_head(&hidg->write_queue); + init_waitqueue_head(&hidg->read_queue); + INIT_LIST_HEAD(&hidg->completed_out_req); +diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c +index 0402177..d685d82 100644 +--- a/drivers/usb/gadget/udc/core.c ++++ b/drivers/usb/gadget/udc/core.c +@@ -1080,6 +1080,24 @@ static void usb_udc_nop_release(struct device *dev) + dev_vdbg(dev, "%s\n", __func__); + } + ++/* should be called with udc_lock held */ ++static int check_pending_gadget_drivers(struct usb_udc *udc) ++{ ++ struct usb_gadget_driver *driver; ++ int ret = 0; ++ ++ list_for_each_entry(driver, &gadget_driver_pending_list, pending) ++ if (!driver->udc_name || strcmp(driver->udc_name, ++ dev_name(&udc->dev)) == 0) { ++ ret = udc_bind_to_driver(udc, driver); ++ if (ret != -EPROBE_DEFER) ++ list_del(&driver->pending); ++ break; ++ } ++ ++ return ret; ++} ++ + /** + * usb_add_gadget_udc_release - adds a new gadget to the udc class driver list + * @parent: the parent device to this udc. 
Usually the controller driver's +@@ -1093,7 +1111,6 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget, + void (*release)(struct device *dev)) + { + struct usb_udc *udc; +- struct usb_gadget_driver *driver; + int ret = -ENOMEM; + + udc = kzalloc(sizeof(*udc), GFP_KERNEL); +@@ -1136,17 +1153,9 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget, + udc->vbus = true; + + /* pick up one of pending gadget drivers */ +- list_for_each_entry(driver, &gadget_driver_pending_list, pending) { +- if (!driver->udc_name || strcmp(driver->udc_name, +- dev_name(&udc->dev)) == 0) { +- ret = udc_bind_to_driver(udc, driver); +- if (ret != -EPROBE_DEFER) +- list_del(&driver->pending); +- if (ret) +- goto err5; +- break; +- } +- } ++ ret = check_pending_gadget_drivers(udc); ++ if (ret) ++ goto err5; + + mutex_unlock(&udc_lock); + +@@ -1356,14 +1365,22 @@ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) + return -EINVAL; + + mutex_lock(&udc_lock); +- list_for_each_entry(udc, &udc_list, list) ++ list_for_each_entry(udc, &udc_list, list) { + if (udc->driver == driver) { + usb_gadget_remove_driver(udc); + usb_gadget_set_state(udc->gadget, +- USB_STATE_NOTATTACHED); ++ USB_STATE_NOTATTACHED); ++ ++ /* Maybe there is someone waiting for this UDC? */ ++ check_pending_gadget_drivers(udc); ++ /* ++ * For now we ignore bind errors as probably it's ++ * not a valid reason to fail other's gadget unbind ++ */ + ret = 0; + break; + } ++ } + + if (ret) { + list_del(&driver->pending); +diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c +index aab5221..aac0ce8 100644 +--- a/drivers/usb/gadget/udc/fsl_udc_core.c ++++ b/drivers/usb/gadget/udc/fsl_udc_core.c +@@ -1249,6 +1249,12 @@ static const struct usb_gadget_ops fsl_gadget_ops = { + .udc_stop = fsl_udc_stop, + }; + ++/* ++ * Empty complete function used by this driver to fill in the req->complete ++ * field when creating a request since the complete field is mandatory. 
++ */ ++static void fsl_noop_complete(struct usb_ep *ep, struct usb_request *req) { } ++ + /* Set protocol stall on ep0, protocol stall will automatically be cleared + on new transaction */ + static void ep0stall(struct fsl_udc *udc) +@@ -1283,7 +1289,7 @@ static int ep0_prime_status(struct fsl_udc *udc, int direction) + req->req.length = 0; + req->req.status = -EINPROGRESS; + req->req.actual = 0; +- req->req.complete = NULL; ++ req->req.complete = fsl_noop_complete; + req->dtd_count = 0; + + ret = usb_gadget_map_request(&ep->udc->gadget, &req->req, ep_is_in(ep)); +@@ -1366,7 +1372,7 @@ static void ch9getstatus(struct fsl_udc *udc, u8 request_type, u16 value, + req->req.length = 2; + req->req.status = -EINPROGRESS; + req->req.actual = 0; +- req->req.complete = NULL; ++ req->req.complete = fsl_noop_complete; + req->dtd_count = 0; + + ret = usb_gadget_map_request(&ep->udc->gadget, &req->req, ep_is_in(ep)); +diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c +index ed56bf9..abe3606 100644 +--- a/drivers/usb/host/xhci-plat.c ++++ b/drivers/usb/host/xhci-plat.c +@@ -223,9 +223,6 @@ static int xhci_plat_probe(struct platform_device *pdev) + if (device_property_read_bool(&pdev->dev, "usb3-lpm-capable")) + xhci->quirks |= XHCI_LPM_SUPPORT; + +- if (HCC_MAX_PSA(xhci->hcc_params) >= 4) +- xhci->shared_hcd->can_do_streams = 1; +- + hcd->usb_phy = devm_usb_get_phy_by_phandle(&pdev->dev, "usb-phy", 0); + if (IS_ERR(hcd->usb_phy)) { + ret = PTR_ERR(hcd->usb_phy); +@@ -242,6 +239,9 @@ static int xhci_plat_probe(struct platform_device *pdev) + if (ret) + goto disable_usb_phy; + ++ if (HCC_MAX_PSA(xhci->hcc_params) >= 4) ++ xhci->shared_hcd->can_do_streams = 1; ++ + ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED); + if (ret) + goto dealloc_usb2_hcd; +diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c +index 2440f88..bacee0f 100644 +--- a/drivers/usb/musb/da8xx.c ++++ b/drivers/usb/musb/da8xx.c +@@ -434,15 +434,11 @@ static int da8xx_musb_exit(struct musb *musb) + } + + static const struct musb_platform_ops da8xx_ops = { +- .quirks = MUSB_DMA_CPPI | MUSB_INDEXED_EP, ++ .quirks = MUSB_INDEXED_EP, + .init = da8xx_musb_init, + .exit = da8xx_musb_exit, + + .fifo_mode = 2, +-#ifdef CONFIG_USB_TI_CPPI_DMA +- .dma_init = cppi_dma_controller_create, +- .dma_exit = cppi_dma_controller_destroy, +-#endif + .enable = da8xx_musb_enable, + .disable = da8xx_musb_disable, + +diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c +index 181793f..9d2738e 100644 +--- a/drivers/virtio/virtio_balloon.c ++++ b/drivers/virtio/virtio_balloon.c +@@ -615,8 +615,12 @@ static void virtballoon_remove(struct virtio_device *vdev) + cancel_work_sync(&vb->update_balloon_stats_work); + + remove_common(vb); ++#ifdef CONFIG_BALLOON_COMPACTION + if (vb->vb_dev_info.inode) + iput(vb->vb_dev_info.inode); ++ ++ kern_unmount(balloon_mnt); ++#endif + kfree(vb); + } + +diff --git a/drivers/vme/vme.c b/drivers/vme/vme.c +index bdbadaa..0035cf7 100644 +--- a/drivers/vme/vme.c ++++ b/drivers/vme/vme.c +@@ -1625,10 +1625,25 @@ static int vme_bus_probe(struct device *dev) + return retval; + } + ++static int vme_bus_remove(struct device *dev) ++{ ++ int retval = -ENODEV; ++ struct vme_driver *driver; ++ struct vme_dev *vdev = dev_to_vme_dev(dev); ++ ++ driver = dev->platform_data; ++ ++ if (driver->remove != NULL) ++ retval = driver->remove(vdev); ++ ++ return retval; ++} ++ + struct bus_type vme_bus_type = { + .name = "vme", + .match = vme_bus_match, + .probe = vme_bus_probe, ++ .remove = 
vme_bus_remove, + }; + EXPORT_SYMBOL(vme_bus_type); + +diff --git a/drivers/w1/masters/ds2490.c b/drivers/w1/masters/ds2490.c +index 049a884..59d74d1 100644 +--- a/drivers/w1/masters/ds2490.c ++++ b/drivers/w1/masters/ds2490.c +@@ -153,6 +153,9 @@ struct ds_device + */ + u16 spu_bit; + ++ u8 st_buf[ST_SIZE]; ++ u8 byte_buf; ++ + struct w1_bus_master master; + }; + +@@ -174,7 +177,6 @@ struct ds_status + u8 data_in_buffer_status; + u8 reserved1; + u8 reserved2; +- + }; + + static struct usb_device_id ds_id_table [] = { +@@ -244,28 +246,6 @@ static int ds_send_control(struct ds_device *dev, u16 value, u16 index) + return err; + } + +-static int ds_recv_status_nodump(struct ds_device *dev, struct ds_status *st, +- unsigned char *buf, int size) +-{ +- int count, err; +- +- memset(st, 0, sizeof(*st)); +- +- count = 0; +- err = usb_interrupt_msg(dev->udev, usb_rcvintpipe(dev->udev, +- dev->ep[EP_STATUS]), buf, size, &count, 1000); +- if (err < 0) { +- pr_err("Failed to read 1-wire data from 0x%x: err=%d.\n", +- dev->ep[EP_STATUS], err); +- return err; +- } +- +- if (count >= sizeof(*st)) +- memcpy(st, buf, sizeof(*st)); +- +- return count; +-} +- + static inline void ds_print_msg(unsigned char *buf, unsigned char *str, int off) + { + pr_info("%45s: %8x\n", str, buf[off]); +@@ -324,6 +304,35 @@ static void ds_dump_status(struct ds_device *dev, unsigned char *buf, int count) + } + } + ++static int ds_recv_status(struct ds_device *dev, struct ds_status *st, ++ bool dump) ++{ ++ int count, err; ++ ++ if (st) ++ memset(st, 0, sizeof(*st)); ++ ++ count = 0; ++ err = usb_interrupt_msg(dev->udev, ++ usb_rcvintpipe(dev->udev, ++ dev->ep[EP_STATUS]), ++ dev->st_buf, sizeof(dev->st_buf), ++ &count, 1000); ++ if (err < 0) { ++ pr_err("Failed to read 1-wire data from 0x%x: err=%d.\n", ++ dev->ep[EP_STATUS], err); ++ return err; ++ } ++ ++ if (dump) ++ ds_dump_status(dev, dev->st_buf, count); ++ ++ if (st && count >= sizeof(*st)) ++ memcpy(st, dev->st_buf, sizeof(*st)); ++ ++ return count; ++} ++ + static void ds_reset_device(struct ds_device *dev) + { + ds_send_control_cmd(dev, CTL_RESET_DEVICE, 0); +@@ -344,7 +353,6 @@ static void ds_reset_device(struct ds_device *dev) + static int ds_recv_data(struct ds_device *dev, unsigned char *buf, int size) + { + int count, err; +- struct ds_status st; + + /* Careful on size. 
If size is less than what is available in + * the input buffer, the device fails the bulk transfer and +@@ -359,14 +367,9 @@ static int ds_recv_data(struct ds_device *dev, unsigned char *buf, int size) + err = usb_bulk_msg(dev->udev, usb_rcvbulkpipe(dev->udev, dev->ep[EP_DATA_IN]), + buf, size, &count, 1000); + if (err < 0) { +- u8 buf[ST_SIZE]; +- int count; +- + pr_info("Clearing ep0x%x.\n", dev->ep[EP_DATA_IN]); + usb_clear_halt(dev->udev, usb_rcvbulkpipe(dev->udev, dev->ep[EP_DATA_IN])); +- +- count = ds_recv_status_nodump(dev, &st, buf, sizeof(buf)); +- ds_dump_status(dev, buf, count); ++ ds_recv_status(dev, NULL, true); + return err; + } + +@@ -404,7 +407,6 @@ int ds_stop_pulse(struct ds_device *dev, int limit) + { + struct ds_status st; + int count = 0, err = 0; +- u8 buf[ST_SIZE]; + + do { + err = ds_send_control(dev, CTL_HALT_EXE_IDLE, 0); +@@ -413,7 +415,7 @@ int ds_stop_pulse(struct ds_device *dev, int limit) + err = ds_send_control(dev, CTL_RESUME_EXE, 0); + if (err) + break; +- err = ds_recv_status_nodump(dev, &st, buf, sizeof(buf)); ++ err = ds_recv_status(dev, &st, false); + if (err) + break; + +@@ -456,18 +458,17 @@ int ds_detect(struct ds_device *dev, struct ds_status *st) + + static int ds_wait_status(struct ds_device *dev, struct ds_status *st) + { +- u8 buf[ST_SIZE]; + int err, count = 0; + + do { + st->status = 0; +- err = ds_recv_status_nodump(dev, st, buf, sizeof(buf)); ++ err = ds_recv_status(dev, st, false); + #if 0 + if (err >= 0) { + int i; + printk("0x%x: count=%d, status: ", dev->ep[EP_STATUS], err); + for (i=0; i<err; ++i) +- printk("%02x ", buf[i]); ++ printk("%02x ", dev->st_buf[i]); + printk("\n"); + } + #endif +@@ -485,7 +486,7 @@ static int ds_wait_status(struct ds_device *dev, struct ds_status *st) + * can do something with it). + */ + if (err > 16 || count >= 100 || err < 0) +- ds_dump_status(dev, buf, err); ++ ds_dump_status(dev, dev->st_buf, err); + + /* Extended data isn't an error. Well, a short is, but the dump + * would have already told the user that and we can't do anything +@@ -608,7 +609,6 @@ static int ds_write_byte(struct ds_device *dev, u8 byte) + { + int err; + struct ds_status st; +- u8 rbyte; + + err = ds_send_control(dev, COMM_BYTE_IO | COMM_IM | dev->spu_bit, byte); + if (err) +@@ -621,11 +621,11 @@ static int ds_write_byte(struct ds_device *dev, u8 byte) + if (err) + return err; + +- err = ds_recv_data(dev, &rbyte, sizeof(rbyte)); ++ err = ds_recv_data(dev, &dev->byte_buf, 1); + if (err < 0) + return err; + +- return !(byte == rbyte); ++ return !(byte == dev->byte_buf); + } + + static int ds_read_byte(struct ds_device *dev, u8 *byte) +@@ -712,7 +712,6 @@ static void ds9490r_search(void *data, struct w1_master *master, + int err; + u16 value, index; + struct ds_status st; +- u8 st_buf[ST_SIZE]; + int search_limit; + int found = 0; + int i; +@@ -724,7 +723,12 @@ static void ds9490r_search(void *data, struct w1_master *master, + /* FIFO 128 bytes, bulk packet size 64, read a multiple of the + * packet size. 
+ */ +- u64 buf[2*64/8]; ++ const size_t bufsize = 2 * 64; ++ u64 *buf; ++ ++ buf = kmalloc(bufsize, GFP_KERNEL); ++ if (!buf) ++ return; + + mutex_lock(&master->bus_mutex); + +@@ -745,10 +749,9 @@ static void ds9490r_search(void *data, struct w1_master *master, + do { + schedule_timeout(jtime); + +- if (ds_recv_status_nodump(dev, &st, st_buf, sizeof(st_buf)) < +- sizeof(st)) { ++ err = ds_recv_status(dev, &st, false); ++ if (err < 0 || err < sizeof(st)) + break; +- } + + if (st.data_in_buffer_status) { + /* Bulk in can receive partial ids, but when it does +@@ -758,7 +761,7 @@ static void ds9490r_search(void *data, struct w1_master *master, + * bulk without first checking if status says there + * is data to read. + */ +- err = ds_recv_data(dev, (u8 *)buf, sizeof(buf)); ++ err = ds_recv_data(dev, (u8 *)buf, bufsize); + if (err < 0) + break; + for (i = 0; i < err/8; ++i) { +@@ -794,9 +797,14 @@ static void ds9490r_search(void *data, struct w1_master *master, + } + search_out: + mutex_unlock(&master->bus_mutex); ++ kfree(buf); + } + + #if 0 ++/* ++ * FIXME: if this disabled code is ever used in the future all ds_send_data() ++ * calls must be changed to use a DMAable buffer. ++ */ + static int ds_match_access(struct ds_device *dev, u64 init) + { + int err; +@@ -845,13 +853,12 @@ static int ds_set_path(struct ds_device *dev, u64 init) + + static u8 ds9490r_touch_bit(void *data, u8 bit) + { +- u8 ret; + struct ds_device *dev = data; + +- if (ds_touch_bit(dev, bit, &ret)) ++ if (ds_touch_bit(dev, bit, &dev->byte_buf)) + return 0; + +- return ret; ++ return dev->byte_buf; + } + + #if 0 +@@ -866,13 +873,12 @@ static u8 ds9490r_read_bit(void *data) + { + struct ds_device *dev = data; + int err; +- u8 bit = 0; + +- err = ds_touch_bit(dev, 1, &bit); ++ err = ds_touch_bit(dev, 1, &dev->byte_buf); + if (err) + return 0; + +- return bit & 1; ++ return dev->byte_buf & 1; + } + #endif + +@@ -887,32 +893,52 @@ static u8 ds9490r_read_byte(void *data) + { + struct ds_device *dev = data; + int err; +- u8 byte = 0; + +- err = ds_read_byte(dev, &byte); ++ err = ds_read_byte(dev, &dev->byte_buf); + if (err) + return 0; + +- return byte; ++ return dev->byte_buf; + } + + static void ds9490r_write_block(void *data, const u8 *buf, int len) + { + struct ds_device *dev = data; ++ u8 *tbuf; ++ ++ if (len <= 0) ++ return; ++ ++ tbuf = kmalloc(len, GFP_KERNEL); ++ if (!tbuf) ++ return; + +- ds_write_block(dev, (u8 *)buf, len); ++ memcpy(tbuf, buf, len); ++ ds_write_block(dev, tbuf, len); ++ ++ kfree(tbuf); + } + + static u8 ds9490r_read_block(void *data, u8 *buf, int len) + { + struct ds_device *dev = data; + int err; ++ u8 *tbuf; + +- err = ds_read_block(dev, buf, len); +- if (err < 0) ++ if (len <= 0) ++ return 0; ++ ++ tbuf = kmalloc(len, GFP_KERNEL); ++ if (!tbuf) + return 0; + +- return len; ++ err = ds_read_block(dev, tbuf, len); ++ if (err >= 0) ++ memcpy(buf, tbuf, len); ++ ++ kfree(tbuf); ++ ++ return err >= 0 ? 
len : 0; + } + + static u8 ds9490r_reset(void *data) +diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c +index e213c67..ab0931e 100644 +--- a/drivers/w1/w1.c ++++ b/drivers/w1/w1.c +@@ -763,6 +763,7 @@ int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn) + dev_err(&dev->dev, "%s: Attaching %s failed.\n", __func__, + sl->name); + w1_family_put(sl->family); ++ atomic_dec(&sl->master->refcnt); + kfree(sl); + return err; + } +diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c +index ef3ebd7..1e643c7 100644 +--- a/fs/ceph/addr.c ++++ b/fs/ceph/addr.c +@@ -363,6 +363,7 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max) + nr_pages = i; + if (nr_pages > 0) { + len = nr_pages << PAGE_SHIFT; ++ osd_req_op_extent_update(req, 0, len); + break; + } + goto out_pages; +diff --git a/fs/cifs/file.c b/fs/cifs/file.c +index 18a1e1d..1cd0e2e 100644 +--- a/fs/cifs/file.c ++++ b/fs/cifs/file.c +@@ -2884,7 +2884,15 @@ cifs_readdata_to_iov(struct cifs_readdata *rdata, struct iov_iter *iter) + for (i = 0; i < rdata->nr_pages; i++) { + struct page *page = rdata->pages[i]; + size_t copy = min_t(size_t, remaining, PAGE_SIZE); +- size_t written = copy_page_to_iter(page, 0, copy, iter); ++ size_t written; ++ ++ if (unlikely(iter->type & ITER_PIPE)) { ++ void *addr = kmap_atomic(page); ++ ++ written = copy_to_iter(addr, copy, iter); ++ kunmap_atomic(addr); ++ } else ++ written = copy_page_to_iter(page, 0, copy, iter); + remaining -= written; + if (written < copy && iov_iter_count(iter) > 0) + break; +diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c +index c930a01..9fbf92c 100644 +--- a/fs/ext4/extents.c ++++ b/fs/ext4/extents.c +@@ -5344,7 +5344,8 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle, + ext4_lblk_t stop, *iterator, ex_start, ex_end; + + /* Let path point to the last extent */ +- path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL, 0); ++ path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL, ++ EXT4_EX_NOCACHE); + if (IS_ERR(path)) + return PTR_ERR(path); + +@@ -5353,15 +5354,15 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle, + if (!extent) + goto out; + +- stop = le32_to_cpu(extent->ee_block) + +- ext4_ext_get_actual_len(extent); ++ stop = le32_to_cpu(extent->ee_block); + + /* + * In case of left shift, Don't start shifting extents until we make + * sure the hole is big enough to accommodate the shift. + */ + if (SHIFT == SHIFT_LEFT) { +- path = ext4_find_extent(inode, start - 1, &path, 0); ++ path = ext4_find_extent(inode, start - 1, &path, ++ EXT4_EX_NOCACHE); + if (IS_ERR(path)) + return PTR_ERR(path); + depth = path->p_depth; +@@ -5393,9 +5394,14 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle, + else + iterator = &stop; + +- /* Its safe to start updating extents */ +- while (start < stop) { +- path = ext4_find_extent(inode, *iterator, &path, 0); ++ /* ++ * Its safe to start updating extents. Start and stop are unsigned, so ++ * in case of right shift if extent with 0 block is reached, iterator ++ * becomes NULL to indicate the end of the loop. ++ */ ++ while (iterator && start <= stop) { ++ path = ext4_find_extent(inode, *iterator, &path, ++ EXT4_EX_NOCACHE); + if (IS_ERR(path)) + return PTR_ERR(path); + depth = path->p_depth; +@@ -5422,8 +5428,11 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle, + ext4_ext_get_actual_len(extent); + } else { + extent = EXT_FIRST_EXTENT(path[depth].p_hdr); +- *iterator = le32_to_cpu(extent->ee_block) > 0 ? 
+- le32_to_cpu(extent->ee_block) - 1 : 0; ++ if (le32_to_cpu(extent->ee_block) > 0) ++ *iterator = le32_to_cpu(extent->ee_block) - 1; ++ else ++ /* Beginning is reached, end of the loop */ ++ iterator = NULL; + /* Update path extent in case we need to stop */ + while (le32_to_cpu(extent->ee_block) < start) + extent++; +diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c +index d8ca4b9..37b521e 100644 +--- a/fs/ext4/inline.c ++++ b/fs/ext4/inline.c +@@ -376,7 +376,7 @@ static int ext4_update_inline_data(handle_t *handle, struct inode *inode, + static int ext4_prepare_inline_data(handle_t *handle, struct inode *inode, + unsigned int len) + { +- int ret, size; ++ int ret, size, no_expand; + struct ext4_inode_info *ei = EXT4_I(inode); + + if (!ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) +@@ -386,15 +386,14 @@ static int ext4_prepare_inline_data(handle_t *handle, struct inode *inode, + if (size < len) + return -ENOSPC; + +- down_write(&EXT4_I(inode)->xattr_sem); ++ ext4_write_lock_xattr(inode, &no_expand); + + if (ei->i_inline_off) + ret = ext4_update_inline_data(handle, inode, len); + else + ret = ext4_create_inline_data(handle, inode, len); + +- up_write(&EXT4_I(inode)->xattr_sem); +- ++ ext4_write_unlock_xattr(inode, &no_expand); + return ret; + } + +@@ -523,7 +522,7 @@ static int ext4_convert_inline_data_to_extent(struct address_space *mapping, + struct inode *inode, + unsigned flags) + { +- int ret, needed_blocks; ++ int ret, needed_blocks, no_expand; + handle_t *handle = NULL; + int retries = 0, sem_held = 0; + struct page *page = NULL; +@@ -563,7 +562,7 @@ static int ext4_convert_inline_data_to_extent(struct address_space *mapping, + goto out; + } + +- down_write(&EXT4_I(inode)->xattr_sem); ++ ext4_write_lock_xattr(inode, &no_expand); + sem_held = 1; + /* If some one has already done this for us, just exit. */ + if (!ext4_has_inline_data(inode)) { +@@ -600,7 +599,7 @@ static int ext4_convert_inline_data_to_extent(struct address_space *mapping, + put_page(page); + page = NULL; + ext4_orphan_add(handle, inode); +- up_write(&EXT4_I(inode)->xattr_sem); ++ ext4_write_unlock_xattr(inode, &no_expand); + sem_held = 0; + ext4_journal_stop(handle); + handle = NULL; +@@ -626,7 +625,7 @@ static int ext4_convert_inline_data_to_extent(struct address_space *mapping, + put_page(page); + } + if (sem_held) +- up_write(&EXT4_I(inode)->xattr_sem); ++ ext4_write_unlock_xattr(inode, &no_expand); + if (handle) + ext4_journal_stop(handle); + brelse(iloc.bh); +@@ -719,7 +718,7 @@ int ext4_try_to_write_inline_data(struct address_space *mapping, + int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len, + unsigned copied, struct page *page) + { +- int ret; ++ int ret, no_expand; + void *kaddr; + struct ext4_iloc iloc; + +@@ -737,7 +736,7 @@ int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len, + goto out; + } + +- down_write(&EXT4_I(inode)->xattr_sem); ++ ext4_write_lock_xattr(inode, &no_expand); + BUG_ON(!ext4_has_inline_data(inode)); + + kaddr = kmap_atomic(page); +@@ -747,7 +746,7 @@ int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len, + /* clear page dirty so that writepages wouldn't work for us. 
*/ + ClearPageDirty(page); + +- up_write(&EXT4_I(inode)->xattr_sem); ++ ext4_write_unlock_xattr(inode, &no_expand); + brelse(iloc.bh); + out: + return copied; +@@ -758,7 +757,7 @@ ext4_journalled_write_inline_data(struct inode *inode, + unsigned len, + struct page *page) + { +- int ret; ++ int ret, no_expand; + void *kaddr; + struct ext4_iloc iloc; + +@@ -768,11 +767,11 @@ ext4_journalled_write_inline_data(struct inode *inode, + return NULL; + } + +- down_write(&EXT4_I(inode)->xattr_sem); ++ ext4_write_lock_xattr(inode, &no_expand); + kaddr = kmap_atomic(page); + ext4_write_inline_data(inode, &iloc, kaddr, 0, len); + kunmap_atomic(kaddr); +- up_write(&EXT4_I(inode)->xattr_sem); ++ ext4_write_unlock_xattr(inode, &no_expand); + + return iloc.bh; + } +@@ -934,8 +933,15 @@ int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos, + struct page *page) + { + int i_size_changed = 0; ++ int ret; + +- copied = ext4_write_inline_data_end(inode, pos, len, copied, page); ++ ret = ext4_write_inline_data_end(inode, pos, len, copied, page); ++ if (ret < 0) { ++ unlock_page(page); ++ put_page(page); ++ return ret; ++ } ++ copied = ret; + + /* + * No need to use i_size_read() here, the i_size +@@ -1249,7 +1255,7 @@ static int ext4_convert_inline_data_nolock(handle_t *handle, + int ext4_try_add_inline_entry(handle_t *handle, struct ext4_filename *fname, + struct inode *dir, struct inode *inode) + { +- int ret, inline_size; ++ int ret, inline_size, no_expand; + void *inline_start; + struct ext4_iloc iloc; + +@@ -1257,7 +1263,7 @@ int ext4_try_add_inline_entry(handle_t *handle, struct ext4_filename *fname, + if (ret) + return ret; + +- down_write(&EXT4_I(dir)->xattr_sem); ++ ext4_write_lock_xattr(dir, &no_expand); + if (!ext4_has_inline_data(dir)) + goto out; + +@@ -1303,7 +1309,7 @@ int ext4_try_add_inline_entry(handle_t *handle, struct ext4_filename *fname, + + out: + ext4_mark_inode_dirty(handle, dir); +- up_write(&EXT4_I(dir)->xattr_sem); ++ ext4_write_unlock_xattr(dir, &no_expand); + brelse(iloc.bh); + return ret; + } +@@ -1663,7 +1669,7 @@ int ext4_delete_inline_entry(handle_t *handle, + struct buffer_head *bh, + int *has_inline_data) + { +- int err, inline_size; ++ int err, inline_size, no_expand; + struct ext4_iloc iloc; + void *inline_start; + +@@ -1671,7 +1677,7 @@ int ext4_delete_inline_entry(handle_t *handle, + if (err) + return err; + +- down_write(&EXT4_I(dir)->xattr_sem); ++ ext4_write_lock_xattr(dir, &no_expand); + if (!ext4_has_inline_data(dir)) { + *has_inline_data = 0; + goto out; +@@ -1705,7 +1711,7 @@ int ext4_delete_inline_entry(handle_t *handle, + + ext4_show_inline_dir(dir, iloc.bh, inline_start, inline_size); + out: +- up_write(&EXT4_I(dir)->xattr_sem); ++ ext4_write_unlock_xattr(dir, &no_expand); + brelse(iloc.bh); + if (err != -ENOENT) + ext4_std_error(dir->i_sb, err); +@@ -1804,11 +1810,11 @@ bool empty_inline_dir(struct inode *dir, int *has_inline_data) + + int ext4_destroy_inline_data(handle_t *handle, struct inode *inode) + { +- int ret; ++ int ret, no_expand; + +- down_write(&EXT4_I(inode)->xattr_sem); ++ ext4_write_lock_xattr(inode, &no_expand); + ret = ext4_destroy_inline_data_nolock(handle, inode); +- up_write(&EXT4_I(inode)->xattr_sem); ++ ext4_write_unlock_xattr(inode, &no_expand); + + return ret; + } +@@ -1893,7 +1899,7 @@ int ext4_try_to_evict_inline_data(handle_t *handle, + void ext4_inline_data_truncate(struct inode *inode, int *has_inline) + { + handle_t *handle; +- int inline_size, value_len, needed_blocks; ++ int inline_size, value_len, needed_blocks, 
no_expand; + size_t i_size; + void *value = NULL; + struct ext4_xattr_ibody_find is = { +@@ -1910,7 +1916,7 @@ void ext4_inline_data_truncate(struct inode *inode, int *has_inline) + if (IS_ERR(handle)) + return; + +- down_write(&EXT4_I(inode)->xattr_sem); ++ ext4_write_lock_xattr(inode, &no_expand); + if (!ext4_has_inline_data(inode)) { + *has_inline = 0; + ext4_journal_stop(handle); +@@ -1968,7 +1974,7 @@ void ext4_inline_data_truncate(struct inode *inode, int *has_inline) + up_write(&EXT4_I(inode)->i_data_sem); + out: + brelse(is.iloc.bh); +- up_write(&EXT4_I(inode)->xattr_sem); ++ ext4_write_unlock_xattr(inode, &no_expand); + kfree(value); + if (inode->i_nlink) + ext4_orphan_del(handle, inode); +@@ -1984,7 +1990,7 @@ void ext4_inline_data_truncate(struct inode *inode, int *has_inline) + + int ext4_convert_inline_data(struct inode *inode) + { +- int error, needed_blocks; ++ int error, needed_blocks, no_expand; + handle_t *handle; + struct ext4_iloc iloc; + +@@ -2006,15 +2012,10 @@ int ext4_convert_inline_data(struct inode *inode) + goto out_free; + } + +- down_write(&EXT4_I(inode)->xattr_sem); +- if (!ext4_has_inline_data(inode)) { +- up_write(&EXT4_I(inode)->xattr_sem); +- goto out; +- } +- +- error = ext4_convert_inline_data_nolock(handle, inode, &iloc); +- up_write(&EXT4_I(inode)->xattr_sem); +-out: ++ ext4_write_lock_xattr(inode, &no_expand); ++ if (ext4_has_inline_data(inode)) ++ error = ext4_convert_inline_data_nolock(handle, inode, &iloc); ++ ext4_write_unlock_xattr(inode, &no_expand); + ext4_journal_stop(handle); + out_free: + brelse(iloc.bh); +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c +index 33a509c..1d4f5fa 100644 +--- a/fs/ext4/inode.c ++++ b/fs/ext4/inode.c +@@ -1324,8 +1324,11 @@ static int ext4_write_end(struct file *file, + if (ext4_has_inline_data(inode)) { + ret = ext4_write_inline_data_end(inode, pos, len, + copied, page); +- if (ret < 0) ++ if (ret < 0) { ++ unlock_page(page); ++ put_page(page); + goto errout; ++ } + copied = ret; + } else + copied = block_write_end(file, mapping, pos, +@@ -1379,7 +1382,9 @@ static int ext4_write_end(struct file *file, + * set the buffer to be dirty, since in data=journalled mode we need + * to call ext4_handle_dirty_metadata() instead. 
+ */ +-static void zero_new_buffers(struct page *page, unsigned from, unsigned to) ++static void ext4_journalled_zero_new_buffers(handle_t *handle, ++ struct page *page, ++ unsigned from, unsigned to) + { + unsigned int block_start = 0, block_end; + struct buffer_head *head, *bh; +@@ -1396,7 +1401,7 @@ static void zero_new_buffers(struct page *page, unsigned from, unsigned to) + size = min(to, block_end) - start; + + zero_user(page, start, size); +- set_buffer_uptodate(bh); ++ write_end_fn(handle, bh); + } + clear_buffer_new(bh); + } +@@ -1425,18 +1430,25 @@ static int ext4_journalled_write_end(struct file *file, + + BUG_ON(!ext4_handle_valid(handle)); + +- if (ext4_has_inline_data(inode)) +- copied = ext4_write_inline_data_end(inode, pos, len, +- copied, page); +- else { +- if (copied < len) { +- if (!PageUptodate(page)) +- copied = 0; +- zero_new_buffers(page, from+copied, to); ++ if (ext4_has_inline_data(inode)) { ++ ret = ext4_write_inline_data_end(inode, pos, len, ++ copied, page); ++ if (ret < 0) { ++ unlock_page(page); ++ put_page(page); ++ goto errout; + } +- ++ copied = ret; ++ } else if (unlikely(copied < len) && !PageUptodate(page)) { ++ copied = 0; ++ ext4_journalled_zero_new_buffers(handle, page, from, to); ++ } else { ++ if (unlikely(copied < len)) ++ ext4_journalled_zero_new_buffers(handle, page, ++ from + copied, to); + ret = ext4_walk_page_buffers(handle, page_buffers(page), from, +- to, &partial, write_end_fn); ++ from + copied, &partial, ++ write_end_fn); + if (!partial) + SetPageUptodate(page); + } +@@ -1462,6 +1474,7 @@ static int ext4_journalled_write_end(struct file *file, + */ + ext4_orphan_add(handle, inode); + ++errout: + ret2 = ext4_journal_stop(handle); + if (!ret) + ret = ret2; +diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c +index 7ae43c5..2e9fc7a 100644 +--- a/fs/ext4/mballoc.c ++++ b/fs/ext4/mballoc.c +@@ -3123,6 +3123,13 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac, + if (ar->pright && start + size - 1 >= ar->lright) + size -= start + size - ar->lright; + ++ /* ++ * Trim allocation request for filesystems with artificially small ++ * groups. 
++ */ ++ if (size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb)) ++ size = EXT4_BLOCKS_PER_GROUP(ac->ac_sb); ++ + end = start + size; + + /* check we don't cross already preallocated blocks */ +diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c +index 104f8bf..c4a389a 100644 +--- a/fs/ext4/namei.c ++++ b/fs/ext4/namei.c +@@ -1616,13 +1616,15 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsi + !fscrypt_has_permitted_context(dir, inode)) { + int nokey = ext4_encrypted_inode(inode) && + !fscrypt_has_encryption_key(inode); +- iput(inode); +- if (nokey) ++ if (nokey) { ++ iput(inode); + return ERR_PTR(-ENOKEY); ++ } + ext4_warning(inode->i_sb, + "Inconsistent encryption contexts: %lu/%lu", + (unsigned long) dir->i_ino, + (unsigned long) inode->i_ino); ++ iput(inode); + return ERR_PTR(-EPERM); + } + } +diff --git a/fs/ext4/super.c b/fs/ext4/super.c +index bbc316d..afe29ba 100644 +--- a/fs/ext4/super.c ++++ b/fs/ext4/super.c +@@ -825,6 +825,7 @@ static void ext4_put_super(struct super_block *sb) + { + struct ext4_sb_info *sbi = EXT4_SB(sb); + struct ext4_super_block *es = sbi->s_es; ++ int aborted = 0; + int i, err; + + ext4_unregister_li_request(sb); +@@ -834,9 +835,10 @@ static void ext4_put_super(struct super_block *sb) + destroy_workqueue(sbi->rsv_conversion_wq); + + if (sbi->s_journal) { ++ aborted = is_journal_aborted(sbi->s_journal); + err = jbd2_journal_destroy(sbi->s_journal); + sbi->s_journal = NULL; +- if (err < 0) ++ if ((err < 0) && !aborted) + ext4_abort(sb, "Couldn't clean up the journal"); + } + +@@ -847,7 +849,7 @@ static void ext4_put_super(struct super_block *sb) + ext4_mb_release(sb); + ext4_ext_release(sb); + +- if (!(sb->s_flags & MS_RDONLY)) { ++ if (!(sb->s_flags & MS_RDONLY) && !aborted) { + ext4_clear_feature_journal_needs_recovery(sb); + es->s_state = cpu_to_le16(sbi->s_mount_state); + } +@@ -3911,7 +3913,8 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) + * root first: it may be modified in the journal! 
+ */ + if (!test_opt(sb, NOLOAD) && ext4_has_feature_journal(sb)) { +- if (ext4_load_journal(sb, es, journal_devnum)) ++ err = ext4_load_journal(sb, es, journal_devnum); ++ if (err) + goto failed_mount3a; + } else if (test_opt(sb, NOLOAD) && !(sb->s_flags & MS_RDONLY) && + ext4_has_feature_journal_needs_recovery(sb)) { +diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c +index d77be9e..4448ed3 100644 +--- a/fs/ext4/xattr.c ++++ b/fs/ext4/xattr.c +@@ -1174,16 +1174,14 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index, + struct ext4_xattr_block_find bs = { + .s = { .not_found = -ENODATA, }, + }; +- unsigned long no_expand; ++ int no_expand; + int error; + + if (!name) + return -EINVAL; + if (strlen(name) > 255) + return -ERANGE; +- down_write(&EXT4_I(inode)->xattr_sem); +- no_expand = ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND); +- ext4_set_inode_state(inode, EXT4_STATE_NO_EXPAND); ++ ext4_write_lock_xattr(inode, &no_expand); + + error = ext4_reserve_inode_write(handle, inode, &is.iloc); + if (error) +@@ -1251,7 +1249,7 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index, + ext4_xattr_update_super_block(handle, inode->i_sb); + inode->i_ctime = ext4_current_time(inode); + if (!value) +- ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND); ++ no_expand = 0; + error = ext4_mark_iloc_dirty(handle, inode, &is.iloc); + /* + * The bh is consumed by ext4_mark_iloc_dirty, even with +@@ -1265,9 +1263,7 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index, + cleanup: + brelse(is.iloc.bh); + brelse(bs.bh); +- if (no_expand == 0) +- ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND); +- up_write(&EXT4_I(inode)->xattr_sem); ++ ext4_write_unlock_xattr(inode, &no_expand); + return error; + } + +@@ -1484,12 +1480,11 @@ int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize, + int error = 0, tried_min_extra_isize = 0; + int s_min_extra_isize = le16_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_min_extra_isize); + int isize_diff; /* How much do we need to grow i_extra_isize */ ++ int no_expand; ++ ++ if (ext4_write_trylock_xattr(inode, &no_expand) == 0) ++ return 0; + +- down_write(&EXT4_I(inode)->xattr_sem); +- /* +- * Set EXT4_STATE_NO_EXPAND to avoid recursion when marking inode dirty +- */ +- ext4_set_inode_state(inode, EXT4_STATE_NO_EXPAND); + retry: + isize_diff = new_extra_isize - EXT4_I(inode)->i_extra_isize; + if (EXT4_I(inode)->i_extra_isize >= new_extra_isize) +@@ -1571,17 +1566,16 @@ int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize, + EXT4_I(inode)->i_extra_isize = new_extra_isize; + brelse(bh); + out: +- ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND); +- up_write(&EXT4_I(inode)->xattr_sem); ++ ext4_write_unlock_xattr(inode, &no_expand); + return 0; + + cleanup: + brelse(bh); + /* +- * We deliberately leave EXT4_STATE_NO_EXPAND set here since inode +- * size expansion failed. ++ * Inode size expansion failed; don't try again + */ +- up_write(&EXT4_I(inode)->xattr_sem); ++ no_expand = 1; ++ ext4_write_unlock_xattr(inode, &no_expand); + return error; + } + +diff --git a/fs/ext4/xattr.h b/fs/ext4/xattr.h +index a92e783..099c8b6 100644 +--- a/fs/ext4/xattr.h ++++ b/fs/ext4/xattr.h +@@ -102,6 +102,38 @@ extern const struct xattr_handler ext4_xattr_security_handler; + + #define EXT4_XATTR_NAME_ENCRYPTION_CONTEXT "c" + ++/* ++ * The EXT4_STATE_NO_EXPAND is overloaded and used for two purposes. 
++ * The first is to signal that there the inline xattrs and data are ++ * taking up so much space that we might as well not keep trying to ++ * expand it. The second is that xattr_sem is taken for writing, so ++ * we shouldn't try to recurse into the inode expansion. For this ++ * second case, we need to make sure that we take save and restore the ++ * NO_EXPAND state flag appropriately. ++ */ ++static inline void ext4_write_lock_xattr(struct inode *inode, int *save) ++{ ++ down_write(&EXT4_I(inode)->xattr_sem); ++ *save = ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND); ++ ext4_set_inode_state(inode, EXT4_STATE_NO_EXPAND); ++} ++ ++static inline int ext4_write_trylock_xattr(struct inode *inode, int *save) ++{ ++ if (down_write_trylock(&EXT4_I(inode)->xattr_sem) == 0) ++ return 0; ++ *save = ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND); ++ ext4_set_inode_state(inode, EXT4_STATE_NO_EXPAND); ++ return 1; ++} ++ ++static inline void ext4_write_unlock_xattr(struct inode *inode, int *save) ++{ ++ if (*save == 0) ++ ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND); ++ up_write(&EXT4_I(inode)->xattr_sem); ++} ++ + extern ssize_t ext4_listxattr(struct dentry *, char *, size_t); + + extern int ext4_xattr_get(struct inode *, int, const char *, void *, size_t); +diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c +index 369f451..ebdc90f 100644 +--- a/fs/f2fs/dir.c ++++ b/fs/f2fs/dir.c +@@ -207,9 +207,13 @@ static struct f2fs_dir_entry *find_in_level(struct inode *dir, + f2fs_put_page(dentry_page, 0); + } + +- if (!de && room && F2FS_I(dir)->chash != namehash) { +- F2FS_I(dir)->chash = namehash; +- F2FS_I(dir)->clevel = level; ++ /* This is to increase the speed of f2fs_create */ ++ if (!de && room) { ++ F2FS_I(dir)->task = current; ++ if (F2FS_I(dir)->chash != namehash) { ++ F2FS_I(dir)->chash = namehash; ++ F2FS_I(dir)->clevel = level; ++ } + } + + return de; +@@ -643,14 +647,34 @@ int __f2fs_add_link(struct inode *dir, const struct qstr *name, + struct inode *inode, nid_t ino, umode_t mode) + { + struct fscrypt_name fname; ++ struct page *page = NULL; ++ struct f2fs_dir_entry *de = NULL; + int err; + + err = fscrypt_setup_filename(dir, name, 0, &fname); + if (err) + return err; + +- err = __f2fs_do_add_link(dir, &fname, inode, ino, mode); +- ++ /* ++ * An immature stakable filesystem shows a race condition between lookup ++ * and create. If we have same task when doing lookup and create, it's ++ * definitely fine as expected by VFS normally. Otherwise, let's just ++ * verify on-disk dentry one more time, which guarantees filesystem ++ * consistency more. 
++ */ ++ if (current != F2FS_I(dir)->task) { ++ de = __f2fs_find_entry(dir, &fname, &page); ++ F2FS_I(dir)->task = NULL; ++ } ++ if (de) { ++ f2fs_dentry_kunmap(dir, page); ++ f2fs_put_page(page, 0); ++ err = -EEXIST; ++ } else if (IS_ERR(page)) { ++ err = PTR_ERR(page); ++ } else { ++ err = __f2fs_do_add_link(dir, &fname, inode, ino, mode); ++ } + fscrypt_free_filename(&fname); + return err; + } +diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c +index 2b06d4f..7b32ce9 100644 +--- a/fs/f2fs/extent_cache.c ++++ b/fs/f2fs/extent_cache.c +@@ -352,11 +352,12 @@ static struct extent_node *__try_merge_extent_node(struct inode *inode, + } + + if (next_ex && __is_front_mergeable(ei, &next_ex->ei)) { +- if (en) +- __release_extent_node(sbi, et, prev_ex); + next_ex->ei.fofs = ei->fofs; + next_ex->ei.blk = ei->blk; + next_ex->ei.len += ei->len; ++ if (en) ++ __release_extent_node(sbi, et, prev_ex); ++ + en = next_ex; + } + +diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h +index 506af45..3a1640b 100644 +--- a/fs/f2fs/f2fs.h ++++ b/fs/f2fs/f2fs.h +@@ -431,6 +431,7 @@ struct f2fs_inode_info { + atomic_t dirty_pages; /* # of dirty pages */ + f2fs_hash_t chash; /* hash value of given file name */ + unsigned int clevel; /* maximum level of given file name */ ++ struct task_struct *task; /* lookup and create consistency */ + nid_t i_xattr_nid; /* node id that contains xattrs */ + unsigned long long xattr_ver; /* cp version of xattr modification */ + loff_t last_disk_size; /* lastly written file size */ +@@ -833,6 +834,9 @@ struct f2fs_sb_info { + struct f2fs_gc_kthread *gc_thread; /* GC thread */ + unsigned int cur_victim_sec; /* current victim section num */ + ++ /* threshold for converting bg victims for fg */ ++ u64 fggc_threshold; ++ + /* maximum # of trials to find a victim segment for SSR and GC */ + unsigned int max_victim_search; + +diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c +index 6f14ee9..34a69e7 100644 +--- a/fs/f2fs/gc.c ++++ b/fs/f2fs/gc.c +@@ -166,7 +166,8 @@ static void select_policy(struct f2fs_sb_info *sbi, int gc_type, + p->ofs_unit = sbi->segs_per_sec; + } + +- if (p->max_search > sbi->max_victim_search) ++ /* we need to check every dirty segments in the FG_GC case */ ++ if (gc_type != FG_GC && p->max_search > sbi->max_victim_search) + p->max_search = sbi->max_victim_search; + + p->offset = sbi->last_victim[p->gc_mode]; +@@ -199,6 +200,10 @@ static unsigned int check_bg_victims(struct f2fs_sb_info *sbi) + for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) { + if (sec_usage_check(sbi, secno)) + continue; ++ ++ if (no_fggc_candidate(sbi, secno)) ++ continue; ++ + clear_bit(secno, dirty_i->victim_secmap); + return secno * sbi->segs_per_sec; + } +@@ -322,13 +327,15 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi, + nsearched++; + } + +- + secno = GET_SECNO(sbi, segno); + + if (sec_usage_check(sbi, secno)) + goto next; + if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap)) + goto next; ++ if (gc_type == FG_GC && p.alloc_mode == LFS && ++ no_fggc_candidate(sbi, secno)) ++ goto next; + + cost = get_gc_cost(sbi, segno, &p); + +@@ -972,5 +979,16 @@ int f2fs_gc(struct f2fs_sb_info *sbi, bool sync) + + void build_gc_manager(struct f2fs_sb_info *sbi) + { ++ u64 main_count, resv_count, ovp_count, blocks_per_sec; ++ + DIRTY_I(sbi)->v_ops = &default_v_ops; ++ ++ /* threshold of # of valid blocks in a section for victims of FG_GC */ ++ main_count = SM_I(sbi)->main_segments << sbi->log_blocks_per_seg; ++ resv_count = SM_I(sbi)->reserved_segments << 
sbi->log_blocks_per_seg; ++ ovp_count = SM_I(sbi)->ovp_segments << sbi->log_blocks_per_seg; ++ blocks_per_sec = sbi->blocks_per_seg * sbi->segs_per_sec; ++ ++ sbi->fggc_threshold = div_u64((main_count - ovp_count) * blocks_per_sec, ++ (main_count - resv_count)); + } +diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c +index fc886f0..a7943f86 100644 +--- a/fs/f2fs/segment.c ++++ b/fs/f2fs/segment.c +@@ -813,6 +813,8 @@ void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc) + start = start_segno + sbi->segs_per_sec; + if (start < end) + goto next; ++ else ++ end = start - 1; + } + mutex_unlock(&dirty_i->seglist_lock); + +diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h +index fecb856..b164f83 100644 +--- a/fs/f2fs/segment.h ++++ b/fs/f2fs/segment.h +@@ -688,6 +688,15 @@ static inline block_t sum_blk_addr(struct f2fs_sb_info *sbi, int base, int type) + - (base + 1) + type; + } + ++static inline bool no_fggc_candidate(struct f2fs_sb_info *sbi, ++ unsigned int secno) ++{ ++ if (get_valid_blocks(sbi, secno, sbi->segs_per_sec) >= ++ sbi->fggc_threshold) ++ return true; ++ return false; ++} ++ + static inline bool sec_usage_check(struct f2fs_sb_info *sbi, unsigned int secno) + { + if (IS_CURSEC(sbi, secno) || (sbi->cur_victim_sec == secno)) +diff --git a/fs/fuse/file.c b/fs/fuse/file.c +index 2401c5d..5ec5870 100644 +--- a/fs/fuse/file.c ++++ b/fs/fuse/file.c +@@ -100,6 +100,7 @@ static void fuse_file_put(struct fuse_file *ff, bool sync) + iput(req->misc.release.inode); + fuse_put_request(ff->fc, req); + } else if (sync) { ++ __set_bit(FR_FORCE, &req->flags); + __clear_bit(FR_BACKGROUND, &req->flags); + fuse_request_send(ff->fc, req); + iput(req->misc.release.inode); +diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c +index 14cbf60..133f322 100644 +--- a/fs/gfs2/glock.c ++++ b/fs/gfs2/glock.c +@@ -658,9 +658,11 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number, + struct kmem_cache *cachep; + int ret, tries = 0; + ++ rcu_read_lock(); + gl = rhashtable_lookup_fast(&gl_hash_table, &name, ht_parms); + if (gl && !lockref_get_not_dead(&gl->gl_lockref)) + gl = NULL; ++ rcu_read_unlock(); + + *glp = gl; + if (gl) +@@ -728,15 +730,18 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number, + + if (ret == -EEXIST) { + ret = 0; ++ rcu_read_lock(); + tmp = rhashtable_lookup_fast(&gl_hash_table, &name, ht_parms); + if (tmp == NULL || !lockref_get_not_dead(&tmp->gl_lockref)) { + if (++tries < 100) { ++ rcu_read_unlock(); + cond_resched(); + goto again; + } + tmp = NULL; + ret = -ENOMEM; + } ++ rcu_read_unlock(); + } else { + WARN_ON_ONCE(ret); + } +diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c +index e165266..5e659ee 100644 +--- a/fs/jbd2/transaction.c ++++ b/fs/jbd2/transaction.c +@@ -1863,7 +1863,9 @@ static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh) + + __blist_del_buffer(list, jh); + jh->b_jlist = BJ_None; +- if (test_clear_buffer_jbddirty(bh)) ++ if (transaction && is_journal_aborted(transaction->t_journal)) ++ clear_buffer_jbddirty(bh); ++ else if (test_clear_buffer_jbddirty(bh)) + mark_buffer_dirty(bh); /* Expose it to the VM */ + } + +diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c +index a5c3888..13abd60 100644 +--- a/fs/nfs/flexfilelayout/flexfilelayout.c ++++ b/fs/nfs/flexfilelayout/flexfilelayout.c +@@ -1073,9 +1073,6 @@ static int ff_layout_async_handle_error_v4(struct rpc_task *task, + struct nfs_client *mds_client = mds_server->nfs_client; + struct nfs4_slot_table *tbl = 
&clp->cl_session->fc_slot_table; + +- if (task->tk_status >= 0) +- return 0; +- + switch (task->tk_status) { + /* MDS state errors */ + case -NFS4ERR_DELEG_REVOKED: +@@ -1176,9 +1173,6 @@ static int ff_layout_async_handle_error_v3(struct rpc_task *task, + { + struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx); + +- if (task->tk_status >= 0) +- return 0; +- + switch (task->tk_status) { + /* File access problems. Don't mark the device as unavailable */ + case -EACCES: +@@ -1213,6 +1207,13 @@ static int ff_layout_async_handle_error(struct rpc_task *task, + { + int vers = clp->cl_nfs_mod->rpc_vers->number; + ++ if (task->tk_status >= 0) ++ return 0; ++ ++ /* Handle the case of an invalid layout segment */ ++ if (!pnfs_is_valid_lseg(lseg)) ++ return -NFS4ERR_RESET_TO_PNFS; ++ + switch (vers) { + case 3: + return ff_layout_async_handle_error_v3(task, lseg, idx); +diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c +index 6085019..5cda392 100644 +--- a/fs/nfs/nfs42proc.c ++++ b/fs/nfs/nfs42proc.c +@@ -128,30 +128,26 @@ int nfs42_proc_deallocate(struct file *filep, loff_t offset, loff_t len) + return err; + } + +-static ssize_t _nfs42_proc_copy(struct file *src, loff_t pos_src, ++static ssize_t _nfs42_proc_copy(struct file *src, + struct nfs_lock_context *src_lock, +- struct file *dst, loff_t pos_dst, ++ struct file *dst, + struct nfs_lock_context *dst_lock, +- size_t count) ++ struct nfs42_copy_args *args, ++ struct nfs42_copy_res *res) + { +- struct nfs42_copy_args args = { +- .src_fh = NFS_FH(file_inode(src)), +- .src_pos = pos_src, +- .dst_fh = NFS_FH(file_inode(dst)), +- .dst_pos = pos_dst, +- .count = count, +- }; +- struct nfs42_copy_res res; + struct rpc_message msg = { + .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COPY], +- .rpc_argp = &args, +- .rpc_resp = &res, ++ .rpc_argp = args, ++ .rpc_resp = res, + }; + struct inode *dst_inode = file_inode(dst); + struct nfs_server *server = NFS_SERVER(dst_inode); ++ loff_t pos_src = args->src_pos; ++ loff_t pos_dst = args->dst_pos; ++ size_t count = args->count; + int status; + +- status = nfs4_set_rw_stateid(&args.src_stateid, src_lock->open_context, ++ status = nfs4_set_rw_stateid(&args->src_stateid, src_lock->open_context, + src_lock, FMODE_READ); + if (status) + return status; +@@ -161,7 +157,7 @@ static ssize_t _nfs42_proc_copy(struct file *src, loff_t pos_src, + if (status) + return status; + +- status = nfs4_set_rw_stateid(&args.dst_stateid, dst_lock->open_context, ++ status = nfs4_set_rw_stateid(&args->dst_stateid, dst_lock->open_context, + dst_lock, FMODE_WRITE); + if (status) + return status; +@@ -171,22 +167,22 @@ static ssize_t _nfs42_proc_copy(struct file *src, loff_t pos_src, + return status; + + status = nfs4_call_sync(server->client, server, &msg, +- &args.seq_args, &res.seq_res, 0); ++ &args->seq_args, &res->seq_res, 0); + if (status == -ENOTSUPP) + server->caps &= ~NFS_CAP_COPY; + if (status) + return status; + +- if (res.write_res.verifier.committed != NFS_FILE_SYNC) { +- status = nfs_commit_file(dst, &res.write_res.verifier.verifier); ++ if (res->write_res.verifier.committed != NFS_FILE_SYNC) { ++ status = nfs_commit_file(dst, &res->write_res.verifier.verifier); + if (status) + return status; + } + + truncate_pagecache_range(dst_inode, pos_dst, +- pos_dst + res.write_res.count); ++ pos_dst + res->write_res.count); + +- return res.write_res.count; ++ return res->write_res.count; + } + + ssize_t nfs42_proc_copy(struct file *src, loff_t pos_src, +@@ -196,8 +192,22 @@ ssize_t nfs42_proc_copy(struct file *src, loff_t 
pos_src, + struct nfs_server *server = NFS_SERVER(file_inode(dst)); + struct nfs_lock_context *src_lock; + struct nfs_lock_context *dst_lock; +- struct nfs4_exception src_exception = { }; +- struct nfs4_exception dst_exception = { }; ++ struct nfs42_copy_args args = { ++ .src_fh = NFS_FH(file_inode(src)), ++ .src_pos = pos_src, ++ .dst_fh = NFS_FH(file_inode(dst)), ++ .dst_pos = pos_dst, ++ .count = count, ++ }; ++ struct nfs42_copy_res res; ++ struct nfs4_exception src_exception = { ++ .inode = file_inode(src), ++ .stateid = &args.src_stateid, ++ }; ++ struct nfs4_exception dst_exception = { ++ .inode = file_inode(dst), ++ .stateid = &args.dst_stateid, ++ }; + ssize_t err, err2; + + if (!nfs_server_capable(file_inode(dst), NFS_CAP_COPY)) +@@ -207,7 +217,6 @@ ssize_t nfs42_proc_copy(struct file *src, loff_t pos_src, + if (IS_ERR(src_lock)) + return PTR_ERR(src_lock); + +- src_exception.inode = file_inode(src); + src_exception.state = src_lock->open_context->state; + + dst_lock = nfs_get_lock_context(nfs_file_open_context(dst)); +@@ -216,15 +225,17 @@ ssize_t nfs42_proc_copy(struct file *src, loff_t pos_src, + goto out_put_src_lock; + } + +- dst_exception.inode = file_inode(dst); + dst_exception.state = dst_lock->open_context->state; + + do { + inode_lock(file_inode(dst)); +- err = _nfs42_proc_copy(src, pos_src, src_lock, +- dst, pos_dst, dst_lock, count); ++ err = _nfs42_proc_copy(src, src_lock, ++ dst, dst_lock, ++ &args, &res); + inode_unlock(file_inode(dst)); + ++ if (err >= 0) ++ break; + if (err == -ENOTSUPP) { + err = -EOPNOTSUPP; + break; +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c +index 78ff8b6..609840d 100644 +--- a/fs/nfs/nfs4proc.c ++++ b/fs/nfs/nfs4proc.c +@@ -2708,6 +2708,7 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata, + ret = PTR_ERR(state); + if (IS_ERR(state)) + goto out; ++ ctx->state = state; + if (server->caps & NFS_CAP_POSIX_LOCK) + set_bit(NFS_STATE_POSIX_LOCKS, &state->flags); + if (opendata->o_res.rflags & NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK) +@@ -2733,7 +2734,6 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata, + if (ret != 0) + goto out; + +- ctx->state = state; + if (d_inode(dentry) == state->inode) { + nfs_inode_attach_open_context(ctx); + if (read_seqcount_retry(&sp->so_reclaim_seqcount, seq)) +@@ -4990,7 +4990,7 @@ static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size + */ + static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen) + { +- struct page *pages[NFS4ACL_MAXPAGES] = {NULL, }; ++ struct page *pages[NFS4ACL_MAXPAGES + 1] = {NULL, }; + struct nfs_getaclargs args = { + .fh = NFS_FH(inode), + .acl_pages = pages, +@@ -5004,13 +5004,9 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu + .rpc_argp = &args, + .rpc_resp = &res, + }; +- unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE); ++ unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE) + 1; + int ret = -ENOMEM, i; + +- /* As long as we're doing a round trip to the server anyway, +- * let's be prepared for a page of acl data. 
*/ +- if (npages == 0) +- npages = 1; + if (npages > ARRAY_SIZE(pages)) + return -ERANGE; + +diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c +index fc89e5e..c9c4d985 100644 +--- a/fs/nfs/nfs4xdr.c ++++ b/fs/nfs/nfs4xdr.c +@@ -2492,7 +2492,7 @@ static void nfs4_xdr_enc_getacl(struct rpc_rqst *req, struct xdr_stream *xdr, + encode_compound_hdr(xdr, req, &hdr); + encode_sequence(xdr, &args->seq_args, &hdr); + encode_putfh(xdr, args->fh, &hdr); +- replen = hdr.replen + op_decode_hdr_maxsz + 1; ++ replen = hdr.replen + op_decode_hdr_maxsz; + encode_getattr_two(xdr, FATTR4_WORD0_ACL, 0, &hdr); + + xdr_inline_pages(&req->rq_rcv_buf, replen << 2, +diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c +index 8ca642f..b829cc9 100644 +--- a/fs/nfsd/vfs.c ++++ b/fs/nfsd/vfs.c +@@ -377,7 +377,7 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap, + __be32 err; + int host_err; + bool get_write_count; +- int size_change = 0; ++ bool size_change = (iap->ia_valid & ATTR_SIZE); + + if (iap->ia_valid & (ATTR_ATIME | ATTR_MTIME | ATTR_SIZE)) + accmode |= NFSD_MAY_WRITE|NFSD_MAY_OWNER_OVERRIDE; +@@ -390,11 +390,11 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap, + /* Get inode */ + err = fh_verify(rqstp, fhp, ftype, accmode); + if (err) +- goto out; ++ return err; + if (get_write_count) { + host_err = fh_want_write(fhp); + if (host_err) +- return nfserrno(host_err); ++ goto out; + } + + dentry = fhp->fh_dentry; +@@ -405,20 +405,28 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap, + iap->ia_valid &= ~ATTR_MODE; + + if (!iap->ia_valid) +- goto out; ++ return 0; + + nfsd_sanitize_attrs(inode, iap); + ++ if (check_guard && guardtime != inode->i_ctime.tv_sec) ++ return nfserr_notsync; ++ + /* + * The size case is special, it changes the file in addition to the +- * attributes. ++ * attributes, and file systems don't expect it to be mixed with ++ * "random" attribute changes. We thus split out the size change ++ * into a separate call to ->setattr, and do the rest as a separate ++ * setattr call. + */ +- if (iap->ia_valid & ATTR_SIZE) { ++ if (size_change) { + err = nfsd_get_write_access(rqstp, fhp, iap); + if (err) +- goto out; +- size_change = 1; ++ return err; ++ } + ++ fh_lock(fhp); ++ if (size_change) { + /* + * RFC5661, Section 18.30.4: + * Changing the size of a file with SETATTR indirectly +@@ -426,29 +434,36 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap, + * + * (and similar for the older RFCs) + */ +- if (iap->ia_size != i_size_read(inode)) +- iap->ia_valid |= ATTR_MTIME; +- } ++ struct iattr size_attr = { ++ .ia_valid = ATTR_SIZE | ATTR_CTIME | ATTR_MTIME, ++ .ia_size = iap->ia_size, ++ }; + +- iap->ia_valid |= ATTR_CTIME; ++ host_err = notify_change(dentry, &size_attr, NULL); ++ if (host_err) ++ goto out_unlock; ++ iap->ia_valid &= ~ATTR_SIZE; + +- if (check_guard && guardtime != inode->i_ctime.tv_sec) { +- err = nfserr_notsync; +- goto out_put_write_access; ++ /* ++ * Avoid the additional setattr call below if the only other ++ * attribute that the client sends is the mtime, as we update ++ * it as part of the size change above. 
++ */ ++ if ((iap->ia_valid & ~ATTR_MTIME) == 0) ++ goto out_unlock; + } + +- fh_lock(fhp); ++ iap->ia_valid |= ATTR_CTIME; + host_err = notify_change(dentry, iap, NULL); +- fh_unlock(fhp); +- err = nfserrno(host_err); + +-out_put_write_access: ++out_unlock: ++ fh_unlock(fhp); + if (size_change) + put_write_access(inode); +- if (!err) +- err = nfserrno(commit_metadata(fhp)); + out: +- return err; ++ if (!host_err) ++ host_err = commit_metadata(fhp); ++ return nfserrno(host_err); + } + + #if defined(CONFIG_NFSD_V4) +diff --git a/include/linux/compat.h b/include/linux/compat.h +index 6360939..d8535a4 100644 +--- a/include/linux/compat.h ++++ b/include/linux/compat.h +@@ -711,8 +711,10 @@ int __compat_save_altstack(compat_stack_t __user *, unsigned long); + compat_stack_t __user *__uss = uss; \ + struct task_struct *t = current; \ + put_user_ex(ptr_to_compat((void __user *)t->sas_ss_sp), &__uss->ss_sp); \ +- put_user_ex(sas_ss_flags(sp), &__uss->ss_flags); \ ++ put_user_ex(t->sas_ss_flags, &__uss->ss_flags); \ + put_user_ex(t->sas_ss_size, &__uss->ss_size); \ ++ if (t->sas_ss_flags & SS_AUTODISARM) \ ++ sas_ss_reset(t); \ + } while (0); + + asmlinkage long compat_sys_sched_rr_get_interval(compat_pid_t pid, +diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h +index 2de4e2e..e0acb0e 100644 +--- a/include/linux/devfreq.h ++++ b/include/linux/devfreq.h +@@ -104,6 +104,8 @@ struct devfreq_dev_profile { + * struct devfreq_governor - Devfreq policy governor + * @node: list node - contains registered devfreq governors + * @name: Governor's name ++ * @immutable: Immutable flag for governor. If the value is 1, ++ * this govenror is never changeable to other governor. + * @get_target_freq: Returns desired operating frequency for the device. + * Basically, get_target_freq will run + * devfreq_dev_profile.get_dev_status() to get the +@@ -121,6 +123,7 @@ struct devfreq_governor { + struct list_head node; + + const char name[DEVFREQ_NAME_LEN]; ++ const unsigned int immutable; + int (*get_target_freq)(struct devfreq *this, unsigned long *freq); + int (*event_handler)(struct devfreq *devfreq, + unsigned int event, void *data); +diff --git a/include/linux/fsl_ifc.h b/include/linux/fsl_ifc.h +index 3f9778c..c332f0a 100644 +--- a/include/linux/fsl_ifc.h ++++ b/include/linux/fsl_ifc.h +@@ -733,8 +733,12 @@ struct fsl_ifc_nand { + __be32 nand_erattr1; + u32 res19[0x10]; + __be32 nand_fsr; +- u32 res20[0x3]; +- __be32 nand_eccstat[6]; ++ u32 res20; ++ /* The V1 nand_eccstat is actually 4 words that overlaps the ++ * V2 nand_eccstat. 
++ */ ++ __be32 v1_nand_eccstat[2]; ++ __be32 v2_nand_eccstat[6]; + u32 res21[0x1c]; + __be32 nanndcr; + u32 res22[0x2]; +diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h +index c92a083..192eef2f 100644 +--- a/include/linux/hyperv.h ++++ b/include/linux/hyperv.h +@@ -641,6 +641,7 @@ struct vmbus_channel_msginfo { + + /* Synchronize the request/response if needed */ + struct completion waitevent; ++ struct vmbus_channel *waiting_channel; + union { + struct vmbus_channel_version_supported version_supported; + struct vmbus_channel_open_result open_result; +diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h +index d49e26c..23e129e 100644 +--- a/include/linux/intel-iommu.h ++++ b/include/linux/intel-iommu.h +@@ -153,8 +153,8 @@ static inline void dmar_writeq(void __iomem *addr, u64 val) + #define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60) + #define DMA_TLB_DSI_FLUSH (((u64)2) << 60) + #define DMA_TLB_PSI_FLUSH (((u64)3) << 60) +-#define DMA_TLB_IIRG(type) ((type >> 60) & 7) +-#define DMA_TLB_IAIG(val) (((val) >> 57) & 7) ++#define DMA_TLB_IIRG(type) ((type >> 60) & 3) ++#define DMA_TLB_IAIG(val) (((val) >> 57) & 3) + #define DMA_TLB_READ_DRAIN (((u64)1) << 49) + #define DMA_TLB_WRITE_DRAIN (((u64)1) << 48) + #define DMA_TLB_DID(id) (((u64)((id) & 0xffff)) << 32) +@@ -164,9 +164,9 @@ static inline void dmar_writeq(void __iomem *addr, u64 val) + + /* INVALID_DESC */ + #define DMA_CCMD_INVL_GRANU_OFFSET 61 +-#define DMA_ID_TLB_GLOBAL_FLUSH (((u64)1) << 3) +-#define DMA_ID_TLB_DSI_FLUSH (((u64)2) << 3) +-#define DMA_ID_TLB_PSI_FLUSH (((u64)3) << 3) ++#define DMA_ID_TLB_GLOBAL_FLUSH (((u64)1) << 4) ++#define DMA_ID_TLB_DSI_FLUSH (((u64)2) << 4) ++#define DMA_ID_TLB_PSI_FLUSH (((u64)3) << 4) + #define DMA_ID_TLB_READ_DRAIN (((u64)1) << 7) + #define DMA_ID_TLB_WRITE_DRAIN (((u64)1) << 6) + #define DMA_ID_TLB_DID(id) (((u64)((id & 0xffff) << 16))) +@@ -316,8 +316,8 @@ enum { + #define QI_DEV_EIOTLB_SIZE (((u64)1) << 11) + #define QI_DEV_EIOTLB_GLOB(g) ((u64)g) + #define QI_DEV_EIOTLB_PASID(p) (((u64)p) << 32) +-#define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 32) +-#define QI_DEV_EIOTLB_QDEP(qd) (((qd) & 0x1f) << 16) ++#define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 16) ++#define QI_DEV_EIOTLB_QDEP(qd) ((u64)((qd) & 0x1f) << 4) + #define QI_DEV_EIOTLB_MAX_INVS 32 + + #define QI_PGRP_IDX(idx) (((u64)(idx)) << 55) +diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h +index f99c993..7e273e2 100644 +--- a/include/linux/mmzone.h ++++ b/include/linux/mmzone.h +@@ -779,7 +779,7 @@ static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec) + #endif + } + +-extern unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru); ++extern unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx); + + #ifdef CONFIG_HAVE_MEMORY_PRESENT + void memory_present(int nid, unsigned long start, unsigned long end); +diff --git a/include/rdma/ib_sa.h b/include/rdma/ib_sa.h +index 5ee7aab..fd0e532 100644 +--- a/include/rdma/ib_sa.h ++++ b/include/rdma/ib_sa.h +@@ -153,12 +153,12 @@ struct ib_sa_path_rec { + union ib_gid sgid; + __be16 dlid; + __be16 slid; +- int raw_traffic; ++ u8 raw_traffic; + /* reserved */ + __be32 flow_label; + u8 hop_limit; + u8 traffic_class; +- int reversible; ++ u8 reversible; + u8 numb_path; + __be16 pkey; + __be16 qos_class; +@@ -220,7 +220,7 @@ struct ib_sa_mcmember_rec { + u8 hop_limit; + u8 scope; + u8 join_state; +- int proxy_join; ++ u8 proxy_join; + }; + + /* Service Record Component Mask Sec 
15.2.5.14 Ver 1.1 */ +diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h +index 8a95631..b9ec493 100644 +--- a/include/scsi/scsi_device.h ++++ b/include/scsi/scsi_device.h +@@ -315,6 +315,7 @@ extern void scsi_remove_device(struct scsi_device *); + extern int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh); + void scsi_attach_vpd(struct scsi_device *sdev); + ++extern struct scsi_device *scsi_device_from_queue(struct request_queue *q); + extern int scsi_device_get(struct scsi_device *); + extern void scsi_device_put(struct scsi_device *); + extern struct scsi_device *scsi_device_lookup(struct Scsi_Host *, +diff --git a/include/soc/at91/at91sam9_ddrsdr.h b/include/soc/at91/at91sam9_ddrsdr.h +index dc10c52..393362b 100644 +--- a/include/soc/at91/at91sam9_ddrsdr.h ++++ b/include/soc/at91/at91sam9_ddrsdr.h +@@ -81,6 +81,7 @@ + #define AT91_DDRSDRC_LPCB_POWER_DOWN 2 + #define AT91_DDRSDRC_LPCB_DEEP_POWER_DOWN 3 + #define AT91_DDRSDRC_CLKFR (1 << 2) /* Clock Frozen */ ++#define AT91_DDRSDRC_LPDDR2_PWOFF (1 << 3) /* LPDDR Power Off */ + #define AT91_DDRSDRC_PASR (7 << 4) /* Partial Array Self Refresh */ + #define AT91_DDRSDRC_TCSR (3 << 8) /* Temperature Compensated Self Refresh */ + #define AT91_DDRSDRC_DS (3 << 10) /* Drive Strength */ +@@ -96,7 +97,9 @@ + #define AT91_DDRSDRC_MD_SDR 0 + #define AT91_DDRSDRC_MD_LOW_POWER_SDR 1 + #define AT91_DDRSDRC_MD_LOW_POWER_DDR 3 ++#define AT91_DDRSDRC_MD_LPDDR3 5 + #define AT91_DDRSDRC_MD_DDR2 6 /* [SAM9 Only] */ ++#define AT91_DDRSDRC_MD_LPDDR2 7 + #define AT91_DDRSDRC_DBW (1 << 4) /* Data Bus Width */ + #define AT91_DDRSDRC_DBW_32BITS (0 << 4) + #define AT91_DDRSDRC_DBW_16BITS (1 << 4) +diff --git a/ipc/shm.c b/ipc/shm.c +index dbac886..e2072ae 100644 +--- a/ipc/shm.c ++++ b/ipc/shm.c +@@ -1085,8 +1085,8 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf) + * "raddr" thing points to kernel space, and there has to be a wrapper around + * this. + */ +-long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr, +- unsigned long shmlba) ++long do_shmat(int shmid, char __user *shmaddr, int shmflg, ++ ulong *raddr, unsigned long shmlba) + { + struct shmid_kernel *shp; + unsigned long addr; +@@ -1107,8 +1107,13 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr, + goto out; + else if ((addr = (ulong)shmaddr)) { + if (addr & (shmlba - 1)) { +- if (shmflg & SHM_RND) +- addr &= ~(shmlba - 1); /* round down */ ++ /* ++ * Round down to the nearest multiple of shmlba. ++ * For sane do_mmap_pgoff() parameters, avoid ++ * round downs that trigger nil-page and MAP_FIXED. ++ */ ++ if ((shmflg & SHM_RND) && addr >= shmlba) ++ addr &= ~(shmlba - 1); + else + #ifndef __ARCH_FORCE_SHMLBA + if (addr & ~PAGE_MASK) +diff --git a/kernel/membarrier.c b/kernel/membarrier.c +index 536c727..9f9284f 100644 +--- a/kernel/membarrier.c ++++ b/kernel/membarrier.c +@@ -16,6 +16,7 @@ + + #include <linux/syscalls.h> + #include <linux/membarrier.h> ++#include <linux/tick.h> + + /* + * Bitmask made from a "or" of all commands within enum membarrier_cmd, +@@ -51,6 +52,9 @@ + */ + SYSCALL_DEFINE2(membarrier, int, cmd, int, flags) + { ++ /* MEMBARRIER_CMD_SHARED is not compatible with nohz_full. 
*/ ++ if (tick_nohz_full_enabled()) ++ return -ENOSYS; + if (unlikely(flags)) + return -EINVAL; + switch (cmd) { +diff --git a/kernel/memremap.c b/kernel/memremap.c +index 9ecedc2..0612323 100644 +--- a/kernel/memremap.c ++++ b/kernel/memremap.c +@@ -246,9 +246,13 @@ static void devm_memremap_pages_release(struct device *dev, void *data) + /* pages are dead and unused, undo the arch mapping */ + align_start = res->start & ~(SECTION_SIZE - 1); + align_size = ALIGN(resource_size(res), SECTION_SIZE); ++ ++ lock_device_hotplug(); + mem_hotplug_begin(); + arch_remove_memory(align_start, align_size); + mem_hotplug_done(); ++ unlock_device_hotplug(); ++ + untrack_pfn(NULL, PHYS_PFN(align_start), align_size); + pgmap_radix_release(res); + dev_WARN_ONCE(dev, pgmap->altmap && pgmap->altmap->alloc, +@@ -360,9 +364,11 @@ void *devm_memremap_pages(struct device *dev, struct resource *res, + if (error) + goto err_pfn_remap; + ++ lock_device_hotplug(); + mem_hotplug_begin(); + error = arch_add_memory(nid, align_start, align_size, true); + mem_hotplug_done(); ++ unlock_device_hotplug(); + if (error) + goto err_add_memory; + +diff --git a/kernel/signal.c b/kernel/signal.c +index 75761ac..0b14157 100644 +--- a/kernel/signal.c ++++ b/kernel/signal.c +@@ -3226,10 +3226,17 @@ int compat_restore_altstack(const compat_stack_t __user *uss) + + int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp) + { ++ int err; + struct task_struct *t = current; +- return __put_user(ptr_to_compat((void __user *)t->sas_ss_sp), &uss->ss_sp) | +- __put_user(sas_ss_flags(sp), &uss->ss_flags) | ++ err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp), ++ &uss->ss_sp) | ++ __put_user(t->sas_ss_flags, &uss->ss_flags) | + __put_user(t->sas_ss_size, &uss->ss_size); ++ if (err) ++ return err; ++ if (t->sas_ss_flags & SS_AUTODISARM) ++ sas_ss_reset(t); ++ return 0; + } + #endif + +diff --git a/mm/filemap.c b/mm/filemap.c +index d8d7df8..edfb90e 100644 +--- a/mm/filemap.c ++++ b/mm/filemap.c +@@ -910,9 +910,12 @@ void page_endio(struct page *page, bool is_write, int err) + unlock_page(page); + } else { + if (err) { ++ struct address_space *mapping; ++ + SetPageError(page); +- if (page->mapping) +- mapping_set_error(page->mapping, err); ++ mapping = page_mapping(page); ++ if (mapping) ++ mapping_set_error(mapping, err); + } + end_page_writeback(page); + } +diff --git a/mm/page_alloc.c b/mm/page_alloc.c +index f4a02e2..1460e6a 100644 +--- a/mm/page_alloc.c ++++ b/mm/page_alloc.c +@@ -2858,7 +2858,7 @@ bool zone_watermark_ok_safe(struct zone *z, unsigned int order, + #ifdef CONFIG_NUMA + static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) + { +- return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) < ++ return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <= + RECLAIM_DISTANCE; + } + #else /* CONFIG_NUMA */ +diff --git a/mm/vmpressure.c b/mm/vmpressure.c +index 149fdf6..6063581 100644 +--- a/mm/vmpressure.c ++++ b/mm/vmpressure.c +@@ -112,9 +112,16 @@ static enum vmpressure_levels vmpressure_calc_level(unsigned long scanned, + unsigned long reclaimed) + { + unsigned long scale = scanned + reclaimed; +- unsigned long pressure; ++ unsigned long pressure = 0; + + /* ++ * reclaimed can be greater than scanned in cases ++ * like THP, where the scanned is 1 and reclaimed ++ * could be 512 ++ */ ++ if (reclaimed >= scanned) ++ goto out; ++ /* + * We calculate the ratio (in percents) of how many pages were + * scanned vs. reclaimed in a given time frame (window). 
Note that + * time is in VM reclaimer's "ticks", i.e. number of pages +@@ -124,6 +131,7 @@ static enum vmpressure_levels vmpressure_calc_level(unsigned long scanned, + pressure = scale - (reclaimed * scale / scanned); + pressure = pressure * 100 / scale; + ++out: + pr_debug("%s: %3lu (s: %lu r: %lu)\n", __func__, pressure, + scanned, reclaimed); + +diff --git a/mm/vmscan.c b/mm/vmscan.c +index fa30010..30a88b9 100644 +--- a/mm/vmscan.c ++++ b/mm/vmscan.c +@@ -234,22 +234,39 @@ bool pgdat_reclaimable(struct pglist_data *pgdat) + pgdat_reclaimable_pages(pgdat) * 6; + } + +-unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru) ++/** ++ * lruvec_lru_size - Returns the number of pages on the given LRU list. ++ * @lruvec: lru vector ++ * @lru: lru to use ++ * @zone_idx: zones to consider (use MAX_NR_ZONES for the whole LRU list) ++ */ ++unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx) + { ++ unsigned long lru_size; ++ int zid; ++ + if (!mem_cgroup_disabled()) +- return mem_cgroup_get_lru_size(lruvec, lru); ++ lru_size = mem_cgroup_get_lru_size(lruvec, lru); ++ else ++ lru_size = node_page_state(lruvec_pgdat(lruvec), NR_LRU_BASE + lru); + +- return node_page_state(lruvec_pgdat(lruvec), NR_LRU_BASE + lru); +-} ++ for (zid = zone_idx + 1; zid < MAX_NR_ZONES; zid++) { ++ struct zone *zone = &lruvec_pgdat(lruvec)->node_zones[zid]; ++ unsigned long size; + +-unsigned long lruvec_zone_lru_size(struct lruvec *lruvec, enum lru_list lru, +- int zone_idx) +-{ +- if (!mem_cgroup_disabled()) +- return mem_cgroup_get_zone_lru_size(lruvec, lru, zone_idx); ++ if (!managed_zone(zone)) ++ continue; ++ ++ if (!mem_cgroup_disabled()) ++ size = mem_cgroup_get_zone_lru_size(lruvec, lru, zid); ++ else ++ size = zone_page_state(&lruvec_pgdat(lruvec)->node_zones[zid], ++ NR_ZONE_LRU_BASE + lru); ++ lru_size -= min(size, lru_size); ++ } ++ ++ return lru_size; + +- return zone_page_state(&lruvec_pgdat(lruvec)->node_zones[zone_idx], +- NR_ZONE_LRU_BASE + lru); + } + + /* +@@ -2028,11 +2045,10 @@ static bool inactive_list_is_low(struct lruvec *lruvec, bool file, + struct scan_control *sc) + { + unsigned long inactive_ratio; +- unsigned long inactive; +- unsigned long active; ++ unsigned long inactive, active; ++ enum lru_list inactive_lru = file * LRU_FILE; ++ enum lru_list active_lru = file * LRU_FILE + LRU_ACTIVE; + unsigned long gb; +- struct pglist_data *pgdat = lruvec_pgdat(lruvec); +- int zid; + + /* + * If we don't have swap space, anonymous page deactivation +@@ -2041,27 +2057,8 @@ static bool inactive_list_is_low(struct lruvec *lruvec, bool file, + if (!file && !total_swap_pages) + return false; + +- inactive = lruvec_lru_size(lruvec, file * LRU_FILE); +- active = lruvec_lru_size(lruvec, file * LRU_FILE + LRU_ACTIVE); +- +- /* +- * For zone-constrained allocations, it is necessary to check if +- * deactivations are required for lowmem to be reclaimed. This +- * calculates the inactive/active pages available in eligible zones. 
+- */ +- for (zid = sc->reclaim_idx + 1; zid < MAX_NR_ZONES; zid++) { +- struct zone *zone = &pgdat->node_zones[zid]; +- unsigned long inactive_zone, active_zone; +- +- if (!managed_zone(zone)) +- continue; +- +- inactive_zone = lruvec_zone_lru_size(lruvec, file * LRU_FILE, zid); +- active_zone = lruvec_zone_lru_size(lruvec, (file * LRU_FILE) + LRU_ACTIVE, zid); +- +- inactive -= min(inactive, inactive_zone); +- active -= min(active, active_zone); +- } ++ inactive = lruvec_lru_size(lruvec, inactive_lru, sc->reclaim_idx); ++ active = lruvec_lru_size(lruvec, active_lru, sc->reclaim_idx); + + gb = (inactive + active) >> (30 - PAGE_SHIFT); + if (gb) +@@ -2208,7 +2205,7 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg, + * system is under heavy pressure. + */ + if (!inactive_list_is_low(lruvec, true, sc) && +- lruvec_lru_size(lruvec, LRU_INACTIVE_FILE) >> sc->priority) { ++ lruvec_lru_size(lruvec, LRU_INACTIVE_FILE, sc->reclaim_idx) >> sc->priority) { + scan_balance = SCAN_FILE; + goto out; + } +@@ -2234,10 +2231,10 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg, + * anon in [0], file in [1] + */ + +- anon = lruvec_lru_size(lruvec, LRU_ACTIVE_ANON) + +- lruvec_lru_size(lruvec, LRU_INACTIVE_ANON); +- file = lruvec_lru_size(lruvec, LRU_ACTIVE_FILE) + +- lruvec_lru_size(lruvec, LRU_INACTIVE_FILE); ++ anon = lruvec_lru_size(lruvec, LRU_ACTIVE_ANON, MAX_NR_ZONES) + ++ lruvec_lru_size(lruvec, LRU_INACTIVE_ANON, MAX_NR_ZONES); ++ file = lruvec_lru_size(lruvec, LRU_ACTIVE_FILE, MAX_NR_ZONES) + ++ lruvec_lru_size(lruvec, LRU_INACTIVE_FILE, MAX_NR_ZONES); + + spin_lock_irq(&pgdat->lru_lock); + if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) { +@@ -2275,7 +2272,7 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg, + unsigned long size; + unsigned long scan; + +- size = lruvec_lru_size(lruvec, lru); ++ size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx); + scan = size >> sc->priority; + + if (!scan && pass && force_scan) +diff --git a/mm/workingset.c b/mm/workingset.c +index fb1f918..33f6f4d 100644 +--- a/mm/workingset.c ++++ b/mm/workingset.c +@@ -266,7 +266,7 @@ bool workingset_refault(void *shadow) + } + lruvec = mem_cgroup_lruvec(pgdat, memcg); + refault = atomic_long_read(&lruvec->inactive_age); +- active_file = lruvec_lru_size(lruvec, LRU_ACTIVE_FILE); ++ active_file = lruvec_lru_size(lruvec, LRU_ACTIVE_FILE, MAX_NR_ZONES); + rcu_read_unlock(); + + /* +diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c +index e6ae15b..0ffeb60 100644 +--- a/net/ceph/osd_client.c ++++ b/net/ceph/osd_client.c +@@ -672,7 +672,8 @@ void osd_req_op_extent_update(struct ceph_osd_request *osd_req, + BUG_ON(length > previous); + + op->extent.length = length; +- op->indata_len -= previous - length; ++ if (op->op == CEPH_OSD_OP_WRITE || op->op == CEPH_OSD_OP_WRITEFULL) ++ op->indata_len -= previous - length; + } + EXPORT_SYMBOL(osd_req_op_extent_update); + +diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c +index 0f87e5d..6bd1508 100644 +--- a/net/netfilter/nf_conntrack_core.c ++++ b/net/netfilter/nf_conntrack_core.c +@@ -85,11 +85,11 @@ static __read_mostly DEFINE_SPINLOCK(nf_conntrack_locks_all_lock); + static __read_mostly bool nf_conntrack_locks_all; + + /* every gc cycle scans at most 1/GC_MAX_BUCKETS_DIV part of table */ +-#define GC_MAX_BUCKETS_DIV 64u +-/* upper bound of scan intervals */ +-#define GC_INTERVAL_MAX (2 * HZ) +-/* maximum conntracks to evict per gc run */ +-#define 
GC_MAX_EVICTS 256u ++#define GC_MAX_BUCKETS_DIV 128u ++/* upper bound of full table scan */ ++#define GC_MAX_SCAN_JIFFIES (16u * HZ) ++/* desired ratio of entries found to be expired */ ++#define GC_EVICT_RATIO 50u + + static struct conntrack_gc_work conntrack_gc_work; + +@@ -938,6 +938,7 @@ static noinline int early_drop(struct net *net, unsigned int _hash) + + static void gc_worker(struct work_struct *work) + { ++ unsigned int min_interval = max(HZ / GC_MAX_BUCKETS_DIV, 1u); + unsigned int i, goal, buckets = 0, expired_count = 0; + struct conntrack_gc_work *gc_work; + unsigned int ratio, scanned = 0; +@@ -979,8 +980,7 @@ static void gc_worker(struct work_struct *work) + */ + rcu_read_unlock(); + cond_resched_rcu_qs(); +- } while (++buckets < goal && +- expired_count < GC_MAX_EVICTS); ++ } while (++buckets < goal); + + if (gc_work->exiting) + return; +@@ -997,27 +997,25 @@ static void gc_worker(struct work_struct *work) + * 1. Minimize time until we notice a stale entry + * 2. Maximize scan intervals to not waste cycles + * +- * Normally, expired_count will be 0, this increases the next_run time +- * to priorize 2) above. ++ * Normally, expire ratio will be close to 0. + * +- * As soon as a timed-out entry is found, move towards 1) and increase +- * the scan frequency. +- * In case we have lots of evictions next scan is done immediately. ++ * As soon as a sizeable fraction of the entries have expired ++ * increase scan frequency. + */ + ratio = scanned ? expired_count * 100 / scanned : 0; +- if (ratio >= 90 || expired_count == GC_MAX_EVICTS) { +- gc_work->next_gc_run = 0; +- next_run = 0; +- } else if (expired_count) { +- gc_work->next_gc_run /= 2U; +- next_run = msecs_to_jiffies(1); ++ if (ratio > GC_EVICT_RATIO) { ++ gc_work->next_gc_run = min_interval; + } else { +- if (gc_work->next_gc_run < GC_INTERVAL_MAX) +- gc_work->next_gc_run += msecs_to_jiffies(1); ++ unsigned int max = GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV; + +- next_run = gc_work->next_gc_run; ++ BUILD_BUG_ON((GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV) == 0); ++ ++ gc_work->next_gc_run += min_interval; ++ if (gc_work->next_gc_run > max) ++ gc_work->next_gc_run = max; + } + ++ next_run = gc_work->next_gc_run; + gc_work->last_bucket = i; + queue_delayed_work(system_long_wq, &gc_work->dwork, next_run); + } +@@ -1025,7 +1023,7 @@ static void gc_worker(struct work_struct *work) + static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work) + { + INIT_DELAYED_WORK(&gc_work->dwork, gc_worker); +- gc_work->next_gc_run = GC_INTERVAL_MAX; ++ gc_work->next_gc_run = HZ; + gc_work->exiting = false; + } + +@@ -1918,7 +1916,7 @@ int nf_conntrack_init_start(void) + nf_ct_untracked_status_or(IPS_CONFIRMED | IPS_UNTRACKED); + + conntrack_gc_work_init(&conntrack_gc_work); +- queue_delayed_work(system_long_wq, &conntrack_gc_work.dwork, GC_INTERVAL_MAX); ++ queue_delayed_work(system_long_wq, &conntrack_gc_work.dwork, HZ); + + return 0; + +diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c +index d987c2d..f57c9f0 100644 +--- a/net/sunrpc/xprtrdma/rpc_rdma.c ++++ b/net/sunrpc/xprtrdma/rpc_rdma.c +@@ -125,14 +125,34 @@ void rpcrdma_set_max_header_sizes(struct rpcrdma_xprt *r_xprt) + /* The client can send a request inline as long as the RPCRDMA header + * plus the RPC call fit under the transport's inline limit. If the + * combined call message size exceeds that limit, the client must use +- * the read chunk list for this operation. ++ * a Read chunk for this operation. 
++ * ++ * A Read chunk is also required if sending the RPC call inline would ++ * exceed this device's max_sge limit. + */ + static bool rpcrdma_args_inline(struct rpcrdma_xprt *r_xprt, + struct rpc_rqst *rqst) + { +- struct rpcrdma_ia *ia = &r_xprt->rx_ia; ++ struct xdr_buf *xdr = &rqst->rq_snd_buf; ++ unsigned int count, remaining, offset; ++ ++ if (xdr->len > r_xprt->rx_ia.ri_max_inline_write) ++ return false; ++ ++ if (xdr->page_len) { ++ remaining = xdr->page_len; ++ offset = xdr->page_base & ~PAGE_MASK; ++ count = 0; ++ while (remaining) { ++ remaining -= min_t(unsigned int, ++ PAGE_SIZE - offset, remaining); ++ offset = 0; ++ if (++count > r_xprt->rx_ia.ri_max_send_sges) ++ return false; ++ } ++ } + +- return rqst->rq_snd_buf.len <= ia->ri_max_inline_write; ++ return true; + } + + /* The client can't know how large the actual reply will be. Thus it +@@ -186,9 +206,9 @@ rpcrdma_convert_kvec(struct kvec *vec, struct rpcrdma_mr_seg *seg, int n) + */ + + static int +-rpcrdma_convert_iovs(struct xdr_buf *xdrbuf, unsigned int pos, +- enum rpcrdma_chunktype type, struct rpcrdma_mr_seg *seg, +- bool reminv_expected) ++rpcrdma_convert_iovs(struct rpcrdma_xprt *r_xprt, struct xdr_buf *xdrbuf, ++ unsigned int pos, enum rpcrdma_chunktype type, ++ struct rpcrdma_mr_seg *seg) + { + int len, n, p, page_base; + struct page **ppages; +@@ -226,22 +246,21 @@ rpcrdma_convert_iovs(struct xdr_buf *xdrbuf, unsigned int pos, + if (len && n == RPCRDMA_MAX_SEGS) + goto out_overflow; + +- /* When encoding the read list, the tail is always sent inline */ +- if (type == rpcrdma_readch) ++ /* When encoding a Read chunk, the tail iovec contains an ++ * XDR pad and may be omitted. ++ */ ++ if (type == rpcrdma_readch && r_xprt->rx_ia.ri_implicit_roundup) + return n; + +- /* When encoding the Write list, some servers need to see an extra +- * segment for odd-length Write chunks. The upper layer provides +- * space in the tail iovec for this purpose. ++ /* When encoding a Write chunk, some servers need to see an ++ * extra segment for non-XDR-aligned Write chunks. The upper ++ * layer provides space in the tail iovec that may be used ++ * for this purpose. + */ +- if (type == rpcrdma_writech && reminv_expected) ++ if (type == rpcrdma_writech && r_xprt->rx_ia.ri_implicit_roundup) + return n; + + if (xdrbuf->tail[0].iov_len) { +- /* the rpcrdma protocol allows us to omit any trailing +- * xdr pad bytes, saving the server an RDMA operation. 
*/ +- if (xdrbuf->tail[0].iov_len < 4 && xprt_rdma_pad_optimize) +- return n; + n = rpcrdma_convert_kvec(&xdrbuf->tail[0], seg, n); + if (n == RPCRDMA_MAX_SEGS) + goto out_overflow; +@@ -293,7 +312,8 @@ rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt, + if (rtype == rpcrdma_areadch) + pos = 0; + seg = req->rl_segments; +- nsegs = rpcrdma_convert_iovs(&rqst->rq_snd_buf, pos, rtype, seg, false); ++ nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_snd_buf, pos, ++ rtype, seg); + if (nsegs < 0) + return ERR_PTR(nsegs); + +@@ -355,10 +375,9 @@ rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req, + } + + seg = req->rl_segments; +- nsegs = rpcrdma_convert_iovs(&rqst->rq_rcv_buf, ++ nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf, + rqst->rq_rcv_buf.head[0].iov_len, +- wtype, seg, +- r_xprt->rx_ia.ri_reminv_expected); ++ wtype, seg); + if (nsegs < 0) + return ERR_PTR(nsegs); + +@@ -423,8 +442,7 @@ rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt, + } + + seg = req->rl_segments; +- nsegs = rpcrdma_convert_iovs(&rqst->rq_rcv_buf, 0, wtype, seg, +- r_xprt->rx_ia.ri_reminv_expected); ++ nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf, 0, wtype, seg); + if (nsegs < 0) + return ERR_PTR(nsegs); + +diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c +index ed5e285..fa324fe 100644 +--- a/net/sunrpc/xprtrdma/transport.c ++++ b/net/sunrpc/xprtrdma/transport.c +@@ -67,7 +67,7 @@ unsigned int xprt_rdma_max_inline_read = RPCRDMA_DEF_INLINE; + static unsigned int xprt_rdma_max_inline_write = RPCRDMA_DEF_INLINE; + static unsigned int xprt_rdma_inline_write_padding; + static unsigned int xprt_rdma_memreg_strategy = RPCRDMA_FRMR; +- int xprt_rdma_pad_optimize = 1; ++ int xprt_rdma_pad_optimize = 0; + + #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) + +diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c +index 8da7f6a..e2c37061 100644 +--- a/net/sunrpc/xprtrdma/verbs.c ++++ b/net/sunrpc/xprtrdma/verbs.c +@@ -208,6 +208,7 @@ rpcrdma_update_connect_private(struct rpcrdma_xprt *r_xprt, + + /* Default settings for RPC-over-RDMA Version One */ + r_xprt->rx_ia.ri_reminv_expected = false; ++ r_xprt->rx_ia.ri_implicit_roundup = xprt_rdma_pad_optimize; + rsize = RPCRDMA_V1_DEF_INLINE_SIZE; + wsize = RPCRDMA_V1_DEF_INLINE_SIZE; + +@@ -215,6 +216,7 @@ rpcrdma_update_connect_private(struct rpcrdma_xprt *r_xprt, + pmsg->cp_magic == rpcrdma_cmp_magic && + pmsg->cp_version == RPCRDMA_CMP_VERSION) { + r_xprt->rx_ia.ri_reminv_expected = true; ++ r_xprt->rx_ia.ri_implicit_roundup = true; + rsize = rpcrdma_decode_buffer_size(pmsg->cp_send_size); + wsize = rpcrdma_decode_buffer_size(pmsg->cp_recv_size); + } +@@ -477,18 +479,19 @@ rpcrdma_ia_close(struct rpcrdma_ia *ia) + */ + int + rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia, +- struct rpcrdma_create_data_internal *cdata) ++ struct rpcrdma_create_data_internal *cdata) + { + struct rpcrdma_connect_private *pmsg = &ep->rep_cm_private; ++ unsigned int max_qp_wr, max_sge; + struct ib_cq *sendcq, *recvcq; +- unsigned int max_qp_wr; + int rc; + +- if (ia->ri_device->attrs.max_sge < RPCRDMA_MAX_SEND_SGES) { +- dprintk("RPC: %s: insufficient sge's available\n", +- __func__); ++ max_sge = min(ia->ri_device->attrs.max_sge, RPCRDMA_MAX_SEND_SGES); ++ if (max_sge < RPCRDMA_MIN_SEND_SGES) { ++ pr_warn("rpcrdma: HCA provides only %d send SGEs\n", max_sge); + return -ENOMEM; + } ++ ia->ri_max_send_sges = max_sge - RPCRDMA_MIN_SEND_SGES; + + if (ia->ri_device->attrs.max_qp_wr <= 
RPCRDMA_BACKWARD_WRS) { + dprintk("RPC: %s: insufficient wqe's available\n", +@@ -513,7 +516,7 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia, + ep->rep_attr.cap.max_recv_wr = cdata->max_requests; + ep->rep_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS; + ep->rep_attr.cap.max_recv_wr += 1; /* drain cqe */ +- ep->rep_attr.cap.max_send_sge = RPCRDMA_MAX_SEND_SGES; ++ ep->rep_attr.cap.max_send_sge = max_sge; + ep->rep_attr.cap.max_recv_sge = 1; + ep->rep_attr.cap.max_inline_data = 0; + ep->rep_attr.sq_sig_type = IB_SIGNAL_REQ_WR; +diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h +index f6ae1b2..48989d5 100644 +--- a/net/sunrpc/xprtrdma/xprt_rdma.h ++++ b/net/sunrpc/xprtrdma/xprt_rdma.h +@@ -74,7 +74,9 @@ struct rpcrdma_ia { + unsigned int ri_max_frmr_depth; + unsigned int ri_max_inline_write; + unsigned int ri_max_inline_read; ++ unsigned int ri_max_send_sges; + bool ri_reminv_expected; ++ bool ri_implicit_roundup; + struct ib_qp_attr ri_qp_attr; + struct ib_qp_init_attr ri_qp_init_attr; + }; +@@ -309,6 +311,7 @@ struct rpcrdma_mr_seg { /* chunk descriptors */ + * - xdr_buf tail iovec + */ + enum { ++ RPCRDMA_MIN_SEND_SGES = 3, + RPCRDMA_MAX_SEND_PAGES = PAGE_SIZE + RPCRDMA_MAX_INLINE - 1, + RPCRDMA_MAX_PAGE_SGES = (RPCRDMA_MAX_SEND_PAGES >> PAGE_SHIFT) + 1, + RPCRDMA_MAX_SEND_SGES = 1 + 1 + RPCRDMA_MAX_PAGE_SGES + 1, +diff --git a/samples/seccomp/bpf-helper.h b/samples/seccomp/bpf-helper.h +index 38ee70f..1d8de9e 100644 +--- a/samples/seccomp/bpf-helper.h ++++ b/samples/seccomp/bpf-helper.h +@@ -138,7 +138,7 @@ union arg64 { + #define ARG_32(idx) \ + BPF_STMT(BPF_LD+BPF_W+BPF_ABS, LO_ARG(idx)) + +-/* Loads hi into A and lo in X */ ++/* Loads lo into M[0] and hi into M[1] and A */ + #define ARG_64(idx) \ + BPF_STMT(BPF_LD+BPF_W+BPF_ABS, LO_ARG(idx)), \ + BPF_STMT(BPF_ST, 0), /* lo -> M[0] */ \ +@@ -153,88 +153,107 @@ union arg64 { + BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (value), 1, 0), \ + jt + +-/* Checks the lo, then swaps to check the hi. A=lo,X=hi */ ++#define JA32(value, jt) \ ++ BPF_JUMP(BPF_JMP+BPF_JSET+BPF_K, (value), 0, 1), \ ++ jt ++ ++#define JGE32(value, jt) \ ++ BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (value), 0, 1), \ ++ jt ++ ++#define JGT32(value, jt) \ ++ BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (value), 0, 1), \ ++ jt ++ ++#define JLE32(value, jt) \ ++ BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (value), 1, 0), \ ++ jt ++ ++#define JLT32(value, jt) \ ++ BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (value), 1, 0), \ ++ jt ++ ++/* ++ * All the JXX64 checks assume lo is saved in M[0] and hi is saved in both ++ * A and M[1]. This invariant is kept by restoring A if necessary. 
++ */ + #define JEQ64(lo, hi, jt) \ ++ /* if (hi != arg.hi) goto NOMATCH; */ \ + BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \ + BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \ ++ /* if (lo != arg.lo) goto NOMATCH; */ \ + BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (lo), 0, 2), \ +- BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \ ++ BPF_STMT(BPF_LD+BPF_MEM, 1), \ + jt, \ +- BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */ ++ BPF_STMT(BPF_LD+BPF_MEM, 1) + + #define JNE64(lo, hi, jt) \ +- BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 5, 0), \ +- BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \ ++ /* if (hi != arg.hi) goto MATCH; */ \ ++ BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 3), \ ++ BPF_STMT(BPF_LD+BPF_MEM, 0), \ ++ /* if (lo != arg.lo) goto MATCH; */ \ + BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (lo), 2, 0), \ +- BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \ ++ BPF_STMT(BPF_LD+BPF_MEM, 1), \ + jt, \ +- BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */ +- +-#define JA32(value, jt) \ +- BPF_JUMP(BPF_JMP+BPF_JSET+BPF_K, (value), 0, 1), \ +- jt ++ BPF_STMT(BPF_LD+BPF_MEM, 1) + + #define JA64(lo, hi, jt) \ ++ /* if (hi & arg.hi) goto MATCH; */ \ + BPF_JUMP(BPF_JMP+BPF_JSET+BPF_K, (hi), 3, 0), \ +- BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \ ++ BPF_STMT(BPF_LD+BPF_MEM, 0), \ ++ /* if (lo & arg.lo) goto MATCH; */ \ + BPF_JUMP(BPF_JMP+BPF_JSET+BPF_K, (lo), 0, 2), \ +- BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \ ++ BPF_STMT(BPF_LD+BPF_MEM, 1), \ + jt, \ +- BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */ ++ BPF_STMT(BPF_LD+BPF_MEM, 1) + +-#define JGE32(value, jt) \ +- BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (value), 0, 1), \ +- jt +- +-#define JLT32(value, jt) \ +- BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (value), 1, 0), \ +- jt +- +-/* Shortcut checking if hi > arg.hi. 
*/ + #define JGE64(lo, hi, jt) \ ++ /* if (hi > arg.hi) goto MATCH; */ \ + BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (hi), 4, 0), \ ++ /* if (hi != arg.hi) goto NOMATCH; */ \ + BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \ +- BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \ ++ BPF_STMT(BPF_LD+BPF_MEM, 0), \ ++ /* if (lo >= arg.lo) goto MATCH; */ \ + BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (lo), 0, 2), \ +- BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \ +- jt, \ +- BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */ +- +-#define JLT64(lo, hi, jt) \ +- BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (hi), 0, 4), \ +- BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \ +- BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \ +- BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (lo), 2, 0), \ +- BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \ ++ BPF_STMT(BPF_LD+BPF_MEM, 1), \ + jt, \ +- BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */ ++ BPF_STMT(BPF_LD+BPF_MEM, 1) + +-#define JGT32(value, jt) \ +- BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (value), 0, 1), \ +- jt +- +-#define JLE32(value, jt) \ +- BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (value), 1, 0), \ +- jt +- +-/* Check hi > args.hi first, then do the GE checking */ + #define JGT64(lo, hi, jt) \ ++ /* if (hi > arg.hi) goto MATCH; */ \ + BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (hi), 4, 0), \ ++ /* if (hi != arg.hi) goto NOMATCH; */ \ + BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \ +- BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \ ++ BPF_STMT(BPF_LD+BPF_MEM, 0), \ ++ /* if (lo > arg.lo) goto MATCH; */ \ + BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (lo), 0, 2), \ +- BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \ ++ BPF_STMT(BPF_LD+BPF_MEM, 1), \ + jt, \ +- BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */ ++ BPF_STMT(BPF_LD+BPF_MEM, 1) + + #define JLE64(lo, hi, jt) \ +- BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (hi), 6, 0), \ +- BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 3), \ +- BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \ ++ /* if (hi < arg.hi) goto MATCH; */ \ ++ BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (hi), 0, 4), \ ++ /* if (hi != arg.hi) goto NOMATCH; */ \ ++ BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \ ++ BPF_STMT(BPF_LD+BPF_MEM, 0), \ ++ /* if (lo <= arg.lo) goto MATCH; */ \ + BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (lo), 2, 0), \ +- BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \ ++ BPF_STMT(BPF_LD+BPF_MEM, 1), \ ++ jt, \ ++ BPF_STMT(BPF_LD+BPF_MEM, 1) ++ ++#define JLT64(lo, hi, jt) \ ++ /* if (hi < arg.hi) goto MATCH; */ \ ++ BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (hi), 0, 4), \ ++ /* if (hi != arg.hi) goto NOMATCH; */ \ ++ BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \ ++ BPF_STMT(BPF_LD+BPF_MEM, 0), \ ++ /* if (lo < arg.lo) goto MATCH; */ \ ++ BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (lo), 2, 0), \ ++ BPF_STMT(BPF_LD+BPF_MEM, 1), \ + jt, \ +- BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */ ++ BPF_STMT(BPF_LD+BPF_MEM, 1) + + #define LOAD_SYSCALL_NR \ + BPF_STMT(BPF_LD+BPF_W+BPF_ABS, \ +diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h +index db25f54..df7834a 100644 +--- a/security/integrity/ima/ima.h ++++ b/security/integrity/ima/ima.h +@@ -173,7 +173,7 @@ int ima_store_template(struct ima_template_entry *entry, int violation, + struct inode *inode, + const unsigned char *filename, int pcr); + void ima_free_template_entry(struct ima_template_entry *entry); +-const char *ima_d_path(const struct path *path, char **pathbuf); ++const char *ima_d_path(const struct path *path, char **pathbuf, char *filename); + + /* IMA policy related functions */ + int 
ima_match_policy(struct inode *inode, enum ima_hooks func, int mask, +diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c +index 9df26a2..d01a52f 100644 +--- a/security/integrity/ima/ima_api.c ++++ b/security/integrity/ima/ima_api.c +@@ -318,7 +318,17 @@ void ima_audit_measurement(struct integrity_iint_cache *iint, + iint->flags |= IMA_AUDITED; + } + +-const char *ima_d_path(const struct path *path, char **pathbuf) ++/* ++ * ima_d_path - return a pointer to the full pathname ++ * ++ * Attempt to return a pointer to the full pathname for use in the ++ * IMA measurement list, IMA audit records, and auditing logs. ++ * ++ * On failure, return a pointer to a copy of the filename, not dname. ++ * Returning a pointer to dname, could result in using the pointer ++ * after the memory has been freed. ++ */ ++const char *ima_d_path(const struct path *path, char **pathbuf, char *namebuf) + { + char *pathname = NULL; + +@@ -331,5 +341,11 @@ const char *ima_d_path(const struct path *path, char **pathbuf) + pathname = NULL; + } + } +- return pathname ?: (const char *)path->dentry->d_name.name; ++ ++ if (!pathname) { ++ strlcpy(namebuf, path->dentry->d_name.name, NAME_MAX); ++ pathname = namebuf; ++ } ++ ++ return pathname; + } +diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c +index 423d111..0e87629 100644 +--- a/security/integrity/ima/ima_main.c ++++ b/security/integrity/ima/ima_main.c +@@ -83,6 +83,7 @@ static void ima_rdwr_violation_check(struct file *file, + const char **pathname) + { + struct inode *inode = file_inode(file); ++ char filename[NAME_MAX]; + fmode_t mode = file->f_mode; + bool send_tomtou = false, send_writers = false; + +@@ -102,7 +103,7 @@ static void ima_rdwr_violation_check(struct file *file, + if (!send_tomtou && !send_writers) + return; + +- *pathname = ima_d_path(&file->f_path, pathbuf); ++ *pathname = ima_d_path(&file->f_path, pathbuf, filename); + + if (send_tomtou) + ima_add_violation(file, *pathname, iint, +@@ -161,6 +162,7 @@ static int process_measurement(struct file *file, char *buf, loff_t size, + struct integrity_iint_cache *iint = NULL; + struct ima_template_desc *template_desc; + char *pathbuf = NULL; ++ char filename[NAME_MAX]; + const char *pathname = NULL; + int rc = -ENOMEM, action, must_appraise; + int pcr = CONFIG_IMA_MEASURE_PCR_IDX; +@@ -239,8 +241,8 @@ static int process_measurement(struct file *file, char *buf, loff_t size, + goto out_digsig; + } + +- if (!pathname) /* ima_rdwr_violation possibly pre-fetched */ +- pathname = ima_d_path(&file->f_path, &pathbuf); ++ if (!pathbuf) /* ima_rdwr_violation possibly pre-fetched */ ++ pathname = ima_d_path(&file->f_path, &pathbuf, filename); + + if (action & IMA_MEASURE) + ima_store_measurement(iint, file, pathname, +diff --git a/sound/core/seq/seq_fifo.c b/sound/core/seq/seq_fifo.c +index 1d5acbe..86240d0 100644 +--- a/sound/core/seq/seq_fifo.c ++++ b/sound/core/seq/seq_fifo.c +@@ -135,6 +135,7 @@ int snd_seq_fifo_event_in(struct snd_seq_fifo *f, + f->tail = cell; + if (f->head == NULL) + f->head = cell; ++ cell->next = NULL; + f->cells++; + spin_unlock_irqrestore(&f->lock, flags); + +@@ -214,6 +215,8 @@ void snd_seq_fifo_cell_putback(struct snd_seq_fifo *f, + spin_lock_irqsave(&f->lock, flags); + cell->next = f->head; + f->head = cell; ++ if (!f->tail) ++ f->tail = cell; + f->cells++; + spin_unlock_irqrestore(&f->lock, flags); + } +diff --git a/sound/core/timer.c b/sound/core/timer.c +index fc144f4..ad15314 100644 +--- a/sound/core/timer.c ++++ 
b/sound/core/timer.c +@@ -1702,9 +1702,21 @@ static int snd_timer_user_params(struct file *file, + return -EBADFD; + if (copy_from_user(&params, _params, sizeof(params))) + return -EFAULT; +- if (!(t->hw.flags & SNDRV_TIMER_HW_SLAVE) && params.ticks < 1) { +- err = -EINVAL; +- goto _end; ++ if (!(t->hw.flags & SNDRV_TIMER_HW_SLAVE)) { ++ u64 resolution; ++ ++ if (params.ticks < 1) { ++ err = -EINVAL; ++ goto _end; ++ } ++ ++ /* Don't allow resolution less than 1ms */ ++ resolution = snd_timer_resolution(tu->timeri); ++ resolution *= params.ticks; ++ if (resolution < 1000000) { ++ err = -EINVAL; ++ goto _end; ++ } + } + if (params.queue_size > 0 && + (params.queue_size < 32 || params.queue_size > 1024)) { +diff --git a/sound/pci/ctxfi/cthw20k1.c b/sound/pci/ctxfi/cthw20k1.c +index 9667cbf..ab4cdab 100644 +--- a/sound/pci/ctxfi/cthw20k1.c ++++ b/sound/pci/ctxfi/cthw20k1.c +@@ -27,12 +27,6 @@ + #include "cthw20k1.h" + #include "ct20k1reg.h" + +-#if BITS_PER_LONG == 32 +-#define CT_XFI_DMA_MASK DMA_BIT_MASK(32) /* 32 bit PTE */ +-#else +-#define CT_XFI_DMA_MASK DMA_BIT_MASK(64) /* 64 bit PTE */ +-#endif +- + struct hw20k1 { + struct hw hw; + spinlock_t reg_20k1_lock; +@@ -1904,19 +1898,18 @@ static int hw_card_start(struct hw *hw) + { + int err; + struct pci_dev *pci = hw->pci; ++ const unsigned int dma_bits = BITS_PER_LONG; + + err = pci_enable_device(pci); + if (err < 0) + return err; + + /* Set DMA transfer mask */ +- if (dma_set_mask(&pci->dev, CT_XFI_DMA_MASK) < 0 || +- dma_set_coherent_mask(&pci->dev, CT_XFI_DMA_MASK) < 0) { +- dev_err(hw->card->dev, +- "architecture does not support PCI busmaster DMA with mask 0x%llx\n", +- CT_XFI_DMA_MASK); +- err = -ENXIO; +- goto error1; ++ if (!dma_set_mask(&pci->dev, DMA_BIT_MASK(dma_bits))) { ++ dma_set_coherent_mask(&pci->dev, DMA_BIT_MASK(dma_bits)); ++ } else { ++ dma_set_mask(&pci->dev, DMA_BIT_MASK(32)); ++ dma_set_coherent_mask(&pci->dev, DMA_BIT_MASK(32)); + } + + if (!hw->io_base) { +diff --git a/sound/pci/ctxfi/cthw20k2.c b/sound/pci/ctxfi/cthw20k2.c +index 6414ecf..18ee776 100644 +--- a/sound/pci/ctxfi/cthw20k2.c ++++ b/sound/pci/ctxfi/cthw20k2.c +@@ -26,12 +26,6 @@ + #include "cthw20k2.h" + #include "ct20k2reg.h" + +-#if BITS_PER_LONG == 32 +-#define CT_XFI_DMA_MASK DMA_BIT_MASK(32) /* 32 bit PTE */ +-#else +-#define CT_XFI_DMA_MASK DMA_BIT_MASK(64) /* 64 bit PTE */ +-#endif +- + struct hw20k2 { + struct hw hw; + /* for i2c */ +@@ -2029,19 +2023,18 @@ static int hw_card_start(struct hw *hw) + int err = 0; + struct pci_dev *pci = hw->pci; + unsigned int gctl; ++ const unsigned int dma_bits = BITS_PER_LONG; + + err = pci_enable_device(pci); + if (err < 0) + return err; + + /* Set DMA transfer mask */ +- if (dma_set_mask(&pci->dev, CT_XFI_DMA_MASK) < 0 || +- dma_set_coherent_mask(&pci->dev, CT_XFI_DMA_MASK) < 0) { +- dev_err(hw->card->dev, +- "architecture does not support PCI busmaster DMA with mask 0x%llx\n", +- CT_XFI_DMA_MASK); +- err = -ENXIO; +- goto error1; ++ if (!dma_set_mask(&pci->dev, DMA_BIT_MASK(dma_bits))) { ++ dma_set_coherent_mask(&pci->dev, DMA_BIT_MASK(dma_bits)); ++ } else { ++ dma_set_mask(&pci->dev, DMA_BIT_MASK(32)); ++ dma_set_coherent_mask(&pci->dev, DMA_BIT_MASK(32)); + } + + if (!hw->io_base) { +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c +index c64d986..bc44626 100644 +--- a/sound/pci/hda/hda_intel.c ++++ b/sound/pci/hda/hda_intel.c +@@ -2197,9 +2197,9 @@ static const struct pci_device_id azx_ids[] = { + .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH }, + /* Lewisburg */ + { 
PCI_DEVICE(0x8086, 0xa1f0), +- .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH }, ++ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE }, + { PCI_DEVICE(0x8086, 0xa270), +- .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH }, ++ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE }, + /* Lynx Point-LP */ + { PCI_DEVICE(0x8086, 0x9c20), + .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH }, +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c +index 758ac86..0c62b1d 100644 +--- a/sound/pci/hda/patch_realtek.c ++++ b/sound/pci/hda/patch_realtek.c +@@ -5577,6 +5577,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE), + SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE), + SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME), ++ SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER), + SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2), +@@ -5692,6 +5693,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC292_FIXUP_TPT460), + SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), + SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), ++ SND_PCI_QUIRK(0x17aa, 0x3112, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), + SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI), + SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC), + SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP), +@@ -6065,6 +6067,12 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { + SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE, + ALC298_STANDARD_PINS, + {0x17, 0x90170150}), ++ SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_SPK_VOLUME, ++ {0x12, 0xb7a60140}, ++ {0x13, 0xb7a60150}, ++ {0x17, 0x90170110}, ++ {0x1a, 0x03011020}, ++ {0x21, 0x03211030}), + {} + }; + +diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c +index ae58b49..ecf6236 100644 +--- a/tools/perf/util/callchain.c ++++ b/tools/perf/util/callchain.c +@@ -437,7 +437,7 @@ fill_node(struct callchain_node *node, struct callchain_cursor *cursor) + } + call->ip = cursor_node->ip; + call->ms.sym = cursor_node->sym; +- call->ms.map = cursor_node->map; ++ call->ms.map = map__get(cursor_node->map); + list_add_tail(&call->list, &node->val); + + callchain_cursor_advance(cursor); +@@ -462,6 +462,7 @@ add_child(struct callchain_node *parent, + + list_for_each_entry_safe(call, tmp, &new->val, list) { + list_del(&call->list); ++ map__zput(call->ms.map); + free(call); + } + free(new); +@@ -730,6 +731,7 @@ merge_chain_branch(struct callchain_cursor *cursor, + callchain_cursor_append(cursor, list->ip, + list->ms.map, list->ms.sym); + list_del(&list->list); ++ map__zput(list->ms.map); + free(list); + } + +@@ -778,7 +780,8 @@ int callchain_cursor_append(struct callchain_cursor *cursor, + } + + node->ip = ip; +- node->map = map; ++ map__zput(node->map); ++ node->map = map__get(map); + node->sym = sym; + + cursor->nr++; +@@ -945,11 +948,13 @@ static void free_callchain_node(struct callchain_node *node) + + 
list_for_each_entry_safe(list, tmp, &node->parent_val, list) { + list_del(&list->list); ++ map__zput(list->ms.map); + free(list); + } + + list_for_each_entry_safe(list, tmp, &node->val, list) { + list_del(&list->list); ++ map__zput(list->ms.map); + free(list); + } + +@@ -1013,6 +1018,7 @@ int callchain_node__make_parent_list(struct callchain_node *node) + goto out; + *new = *chain; + new->has_children = false; ++ map__get(new->ms.map); + list_add_tail(&new->list, &head); + } + parent = parent->parent; +@@ -1033,6 +1039,7 @@ int callchain_node__make_parent_list(struct callchain_node *node) + out: + list_for_each_entry_safe(chain, new, &head, list) { + list_del(&chain->list); ++ map__zput(chain->ms.map); + free(chain); + } + return -ENOMEM; +diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h +index 47cfd10..b7cbabb 100644 +--- a/tools/perf/util/callchain.h ++++ b/tools/perf/util/callchain.h +@@ -5,6 +5,7 @@ + #include <linux/list.h> + #include <linux/rbtree.h> + #include "event.h" ++#include "map.h" + #include "symbol.h" + + #define HELP_PAD "\t\t\t\t" +@@ -174,8 +175,13 @@ int callchain_merge(struct callchain_cursor *cursor, + */ + static inline void callchain_cursor_reset(struct callchain_cursor *cursor) + { ++ struct callchain_cursor_node *node; ++ + cursor->nr = 0; + cursor->last = &cursor->first; ++ ++ for (node = cursor->first; node != NULL; node = node->next) ++ map__zput(node->map); + } + + int callchain_cursor_append(struct callchain_cursor *cursor, u64 ip, +diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c +index a69f027..10849a0 100644 +--- a/tools/perf/util/hist.c ++++ b/tools/perf/util/hist.c +@@ -1,6 +1,7 @@ + #include "util.h" + #include "build-id.h" + #include "hist.h" ++#include "map.h" + #include "session.h" + #include "sort.h" + #include "evlist.h" +@@ -1019,6 +1020,10 @@ int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al, + int max_stack_depth, void *arg) + { + int err, err2; ++ struct map *alm = NULL; ++ ++ if (al && al->map) ++ alm = map__get(al->map); + + err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent, + iter->evsel, al, max_stack_depth); +@@ -1058,6 +1063,8 @@ int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al, + if (!err) + err = err2; + ++ map__put(alm); ++ + return err; + } + +diff --git a/virt/kvm/arm/vgic/vgic-irqfd.c b/virt/kvm/arm/vgic/vgic-irqfd.c +index d918dcf..f138ed2 100644 +--- a/virt/kvm/arm/vgic/vgic-irqfd.c ++++ b/virt/kvm/arm/vgic/vgic-irqfd.c +@@ -99,6 +99,9 @@ int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, + if (!vgic_has_its(kvm)) + return -ENODEV; + ++ if (!level) ++ return -1; ++ + return vgic_its_inject_msi(kvm, &msi); + } + diff --git a/4.9.13/4420_grsecurity-3.1-4.9.13-201703052141.patch b/4.9.14/4420_grsecurity-3.1-4.9.14-201703121245.patch index 66274b3..907f037 100644 --- a/4.9.13/4420_grsecurity-3.1-4.9.13-201703052141.patch +++ b/4.9.14/4420_grsecurity-3.1-4.9.14-201703121245.patch @@ -419,7 +419,7 @@ index 3d0ae15..84e5412 100644 cmd_syscalls = $(CONFIG_SHELL) $< $(CC) $(c_flags) $(missing_syscalls_flags) diff --git a/Makefile b/Makefile -index 14dc275..3ff2e6b 100644 +index 5e7706e..e0bcf96 100644 --- a/Makefile +++ b/Makefile @@ -302,7 +302,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \ @@ -7328,10 +7328,10 @@ index 7cf653e..7df52f6 100644 /* Run the generated entry code */ diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c -index 9514e5f..a3fc550 100644 +index 
1652f36..0e22377 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c -@@ -545,18 +545,6 @@ unsigned long get_wchan(struct task_struct *task) +@@ -590,18 +590,6 @@ unsigned long get_wchan(struct task_struct *task) return pc; } @@ -50430,10 +50430,10 @@ index bf99e11..a44361c 100644 mutex_init(&accel_dev->vf.vf2pf_lock); return 0; diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c -index 712592c..d7a18b2 100644 +index 7309c08..857ee48 100644 --- a/drivers/devfreq/devfreq.c +++ b/drivers/devfreq/devfreq.c -@@ -813,7 +813,7 @@ int devfreq_add_governor(struct devfreq_governor *governor) +@@ -814,7 +814,7 @@ int devfreq_add_governor(struct devfreq_governor *governor) goto err_out; } @@ -50442,7 +50442,7 @@ index 712592c..d7a18b2 100644 list_for_each_entry(devfreq, &devfreq_list, node) { int ret = 0; -@@ -901,7 +901,7 @@ int devfreq_remove_governor(struct devfreq_governor *governor) +@@ -902,7 +902,7 @@ int devfreq_remove_governor(struct devfreq_governor *governor) } } @@ -50452,10 +50452,10 @@ index 712592c..d7a18b2 100644 mutex_unlock(&devfreq_list_lock); diff --git a/drivers/devfreq/governor_passive.c b/drivers/devfreq/governor_passive.c -index 9ef46e2..775fc75 100644 +index 5be96b2..c5f2db5 100644 --- a/drivers/devfreq/governor_passive.c +++ b/drivers/devfreq/governor_passive.c -@@ -151,7 +151,7 @@ static int devfreq_passive_event_handler(struct devfreq *devfreq, +@@ -156,7 +156,7 @@ static int devfreq_passive_event_handler(struct devfreq *devfreq, struct devfreq_passive_data *p_data = (struct devfreq_passive_data *)devfreq->data; struct devfreq *parent = (struct devfreq *)p_data->parent; @@ -55328,10 +55328,10 @@ index c13fb5b..55a3802 100644 *off += size; diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c -index 5fb4c6d..29316a6 100644 +index be34547..df73ac5 100644 --- a/drivers/hv/channel.c +++ b/drivers/hv/channel.c -@@ -398,7 +398,7 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer, +@@ -404,7 +404,7 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer, int ret = 0; next_gpadl_handle = @@ -55340,7 +55340,7 @@ index 5fb4c6d..29316a6 100644 ret = create_gpadl_header(kbuffer, size, &msginfo); if (ret) -@@ -715,9 +715,7 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel, +@@ -734,9 +734,7 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel, * Adjust the size down since vmbus_channel_packet_page_buffer is the * largest size we support */ @@ -55352,7 +55352,7 @@ index 5fb4c6d..29316a6 100644 packetlen_aligned = ALIGN(packetlen, sizeof(u64)); diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c -index 60dbd6c..80ce7a1 100644 +index 6e49a4d..e4b9aa2 100644 --- a/drivers/hv/hv.c +++ b/drivers/hv/hv.c @@ -183,6 +183,7 @@ static struct clocksource hyperv_cs_tsc = { @@ -55508,7 +55508,7 @@ index fdf8da9..d3fefc5 100644 cap_msg.caps.cap_bits.balloon = 1; cap_msg.caps.cap_bits.hot_add = 1; diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h -index 2b13f2a..e150466 100644 +index 8d7f865..86be1c8 100644 --- a/drivers/hv/hyperv_vmbus.h +++ b/drivers/hv/hyperv_vmbus.h @@ -567,7 +567,7 @@ enum vmbus_connect_state { @@ -61414,7 +61414,7 @@ index 2d82692..3507386 100644 seq_printf(seq, "\n"); diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c -index 59b2c50..60bca53 100644 +index c817627..ec8c021 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -118,7 +118,7 @@ static void iot_io_end(struct io_tracker *iot, sector_t len) @@ 
-61694,7 +61694,7 @@ index 59b2c50..60bca53 100644 if (r) { if (r < 0) @@ -3553,12 +3557,12 @@ static void cache_status(struct dm_target *ti, status_type_t type, - cache->sectors_per_block, + (unsigned long long)cache->sectors_per_block, (unsigned long long) from_cblock(residency), (unsigned long long) from_cblock(cache->cache_size), - (unsigned) atomic_read(&cache->stats.read_hit), @@ -61799,7 +61799,7 @@ index e477af8..a5b1fce 100644 DMEMIT("%u ", test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) + (m->pg_init_retries > 0) * 2 + diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c -index af2d79b..d879687 100644 +index 15daa36..d19c604 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -3196,7 +3196,7 @@ static void raid_status(struct dm_target *ti, status_type_t type, @@ -61897,10 +61897,10 @@ index 9a8b710..ae1bf13 100644 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' : diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c -index 38b05f2..4f99595 100644 +index 0250e7e..ac07762 100644 --- a/drivers/md/dm-stats.c +++ b/drivers/md/dm-stats.c -@@ -435,7 +435,7 @@ static int dm_stats_delete(struct dm_stats *stats, int id) +@@ -436,7 +436,7 @@ static int dm_stats_delete(struct dm_stats *stats, int id) synchronize_rcu_expedited(); dm_stat_free(&s->rcu_head); } else { @@ -61909,7 +61909,7 @@ index 38b05f2..4f99595 100644 call_rcu(&s->rcu_head, dm_stat_free); } return 0; -@@ -647,8 +647,8 @@ void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw, +@@ -648,8 +648,8 @@ void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw, ((bi_rw == WRITE) == (ACCESS_ONCE(last->last_rw) == WRITE)) )); @@ -62664,7 +62664,7 @@ index 113b094..c9424e6 100644 return DVBFE_ALGO_HW; } diff --git a/drivers/media/dvb-frontends/cxd2820r_core.c b/drivers/media/dvb-frontends/cxd2820r_core.c -index 95267c6..479fdb5 100644 +index f6ebbb4..3e0d8cb 100644 --- a/drivers/media/dvb-frontends/cxd2820r_core.c +++ b/drivers/media/dvb-frontends/cxd2820r_core.c @@ -403,7 +403,7 @@ static enum dvbfe_search cxd2820r_search(struct dvb_frontend *fe) @@ -62799,7 +62799,7 @@ index b5e3d90..bd00dc6 100644 return DVBFE_ALGO_HW; } diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c -index 2783531..e80f3f4 100644 +index 4462d8c..11b1da8 100644 --- a/drivers/media/media-device.c +++ b/drivers/media/media-device.c @@ -58,9 +58,10 @@ static int media_device_close(struct file *filp) @@ -70648,10 +70648,10 @@ index 9cbca12..eae7c79 100644 struct ath_nf_limits { s16 max; diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c -index e9f32b5..d394d93 100644 +index b868f02..95db87b 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c -@@ -2622,16 +2622,18 @@ void ath9k_fill_chanctx_ops(void) +@@ -2617,16 +2617,18 @@ void ath9k_fill_chanctx_ops(void) if (!ath9k_is_chanctx_enabled()) return; @@ -76412,7 +76412,7 @@ index 302e626..12579af 100644 da->attr.name = info->pin_config[i].name; da->attr.mode = 0644; diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c -index 5c1519b..eb73d91 100644 +index 9faccfc..8e0e4af 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -3916,7 +3916,7 @@ regulator_register(const struct regulator_desc *regulator_desc, @@ -79810,7 +79810,7 @@ index cf04a36..54dd630 100644 !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) { char b[120]; diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c -index e64eae4..465011a 100644 +index 
d8099c7..1c6eb29 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -1514,7 +1514,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q) @@ -84743,6 +84743,291 @@ index 54cab59..3c05ac4 100644 tty_port_tty_set(port, tty); dlci->modem_rx = 0; +diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c +index a7fa016..6d1e2f7 100644 +--- a/drivers/tty/n_hdlc.c ++++ b/drivers/tty/n_hdlc.c +@@ -114,7 +114,7 @@ + #define DEFAULT_TX_BUF_COUNT 3 + + struct n_hdlc_buf { +- struct n_hdlc_buf *link; ++ struct list_head list_item; + int count; + char buf[1]; + }; +@@ -122,8 +122,7 @@ struct n_hdlc_buf { + #define N_HDLC_BUF_SIZE (sizeof(struct n_hdlc_buf) + maxframe) + + struct n_hdlc_buf_list { +- struct n_hdlc_buf *head; +- struct n_hdlc_buf *tail; ++ struct list_head list; + int count; + spinlock_t spinlock; + }; +@@ -136,7 +135,6 @@ struct n_hdlc_buf_list { + * @backup_tty - TTY to use if tty gets closed + * @tbusy - reentrancy flag for tx wakeup code + * @woke_up - FIXME: describe this field +- * @tbuf - currently transmitting tx buffer + * @tx_buf_list - list of pending transmit frame buffers + * @rx_buf_list - list of received frame buffers + * @tx_free_buf_list - list unused transmit frame buffers +@@ -149,7 +147,6 @@ struct n_hdlc { + struct tty_struct *backup_tty; + int tbusy; + int woke_up; +- struct n_hdlc_buf *tbuf; + struct n_hdlc_buf_list tx_buf_list; + struct n_hdlc_buf_list rx_buf_list; + struct n_hdlc_buf_list tx_free_buf_list; +@@ -159,6 +156,8 @@ struct n_hdlc { + /* + * HDLC buffer list manipulation functions + */ ++static void n_hdlc_buf_return(struct n_hdlc_buf_list *buf_list, ++ struct n_hdlc_buf *buf); + static void n_hdlc_buf_put(struct n_hdlc_buf_list *list, + struct n_hdlc_buf *buf); + static struct n_hdlc_buf *n_hdlc_buf_get(struct n_hdlc_buf_list *list); +@@ -208,16 +207,9 @@ static void flush_tx_queue(struct tty_struct *tty) + { + struct n_hdlc *n_hdlc = tty2n_hdlc(tty); + struct n_hdlc_buf *buf; +- unsigned long flags; + + while ((buf = n_hdlc_buf_get(&n_hdlc->tx_buf_list))) + n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, buf); +- spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock, flags); +- if (n_hdlc->tbuf) { +- n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, n_hdlc->tbuf); +- n_hdlc->tbuf = NULL; +- } +- spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock, flags); + } + + static struct tty_ldisc_ops n_hdlc_ldisc = { +@@ -283,7 +275,6 @@ static void n_hdlc_release(struct n_hdlc *n_hdlc) + } else + break; + } +- kfree(n_hdlc->tbuf); + kfree(n_hdlc); + + } /* end of n_hdlc_release() */ +@@ -402,13 +393,7 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty) + n_hdlc->woke_up = 0; + spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock, flags); + +- /* get current transmit buffer or get new transmit */ +- /* buffer from list of pending transmit buffers */ +- +- tbuf = n_hdlc->tbuf; +- if (!tbuf) +- tbuf = n_hdlc_buf_get(&n_hdlc->tx_buf_list); +- ++ tbuf = n_hdlc_buf_get(&n_hdlc->tx_buf_list); + while (tbuf) { + if (debuglevel >= DEBUG_LEVEL_INFO) + printk("%s(%d)sending frame %p, count=%d\n", +@@ -420,7 +405,7 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty) + + /* rollback was possible and has been done */ + if (actual == -ERESTARTSYS) { +- n_hdlc->tbuf = tbuf; ++ n_hdlc_buf_return(&n_hdlc->tx_buf_list, tbuf); + break; + } + /* if transmit error, throw frame away by */ +@@ -435,10 +420,7 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty) + + /* free 
current transmit buffer */ + n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, tbuf); +- +- /* this tx buffer is done */ +- n_hdlc->tbuf = NULL; +- ++ + /* wait up sleeping writers */ + wake_up_interruptible(&tty->write_wait); + +@@ -448,10 +430,12 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty) + if (debuglevel >= DEBUG_LEVEL_INFO) + printk("%s(%d)frame %p pending\n", + __FILE__,__LINE__,tbuf); +- +- /* buffer not accepted by driver */ +- /* set this buffer as pending buffer */ +- n_hdlc->tbuf = tbuf; ++ ++ /* ++ * the buffer was not accepted by driver, ++ * return it back into tx queue ++ */ ++ n_hdlc_buf_return(&n_hdlc->tx_buf_list, tbuf); + break; + } + } +@@ -749,7 +733,8 @@ static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file, + int error = 0; + int count; + unsigned long flags; +- ++ struct n_hdlc_buf *buf = NULL; ++ + if (debuglevel >= DEBUG_LEVEL_INFO) + printk("%s(%d)n_hdlc_tty_ioctl() called %d\n", + __FILE__,__LINE__,cmd); +@@ -763,8 +748,10 @@ static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file, + /* report count of read data available */ + /* in next available frame (if any) */ + spin_lock_irqsave(&n_hdlc->rx_buf_list.spinlock,flags); +- if (n_hdlc->rx_buf_list.head) +- count = n_hdlc->rx_buf_list.head->count; ++ buf = list_first_entry_or_null(&n_hdlc->rx_buf_list.list, ++ struct n_hdlc_buf, list_item); ++ if (buf) ++ count = buf->count; + else + count = 0; + spin_unlock_irqrestore(&n_hdlc->rx_buf_list.spinlock,flags); +@@ -776,8 +763,10 @@ static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file, + count = tty_chars_in_buffer(tty); + /* add size of next output frame in queue */ + spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock,flags); +- if (n_hdlc->tx_buf_list.head) +- count += n_hdlc->tx_buf_list.head->count; ++ buf = list_first_entry_or_null(&n_hdlc->tx_buf_list.list, ++ struct n_hdlc_buf, list_item); ++ if (buf) ++ count += buf->count; + spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock,flags); + error = put_user(count, (int __user *)arg); + break; +@@ -825,14 +814,14 @@ static unsigned int n_hdlc_tty_poll(struct tty_struct *tty, struct file *filp, + poll_wait(filp, &tty->write_wait, wait); + + /* set bits for operations that won't block */ +- if (n_hdlc->rx_buf_list.head) ++ if (!list_empty(&n_hdlc->rx_buf_list.list)) + mask |= POLLIN | POLLRDNORM; /* readable */ + if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) + mask |= POLLHUP; + if (tty_hung_up_p(filp)) + mask |= POLLHUP; + if (!tty_is_writelocked(tty) && +- n_hdlc->tx_free_buf_list.head) ++ !list_empty(&n_hdlc->tx_free_buf_list.list)) + mask |= POLLOUT | POLLWRNORM; /* writable */ + } + return mask; +@@ -856,7 +845,12 @@ static struct n_hdlc *n_hdlc_alloc(void) + spin_lock_init(&n_hdlc->tx_free_buf_list.spinlock); + spin_lock_init(&n_hdlc->rx_buf_list.spinlock); + spin_lock_init(&n_hdlc->tx_buf_list.spinlock); +- ++ ++ INIT_LIST_HEAD(&n_hdlc->rx_free_buf_list.list); ++ INIT_LIST_HEAD(&n_hdlc->tx_free_buf_list.list); ++ INIT_LIST_HEAD(&n_hdlc->rx_buf_list.list); ++ INIT_LIST_HEAD(&n_hdlc->tx_buf_list.list); ++ + /* allocate free rx buffer list */ + for(i=0;i<DEFAULT_RX_BUF_COUNT;i++) { + buf = kmalloc(N_HDLC_BUF_SIZE, GFP_KERNEL); +@@ -884,53 +878,65 @@ static struct n_hdlc *n_hdlc_alloc(void) + } /* end of n_hdlc_alloc() */ + + /** ++ * n_hdlc_buf_return - put the HDLC buffer after the head of the specified list ++ * @buf_list - pointer to the buffer list ++ * @buf - pointer to the buffer ++ */ ++static void n_hdlc_buf_return(struct 
n_hdlc_buf_list *buf_list, ++ struct n_hdlc_buf *buf) ++{ ++ unsigned long flags; ++ ++ spin_lock_irqsave(&buf_list->spinlock, flags); ++ ++ list_add(&buf->list_item, &buf_list->list); ++ buf_list->count++; ++ ++ spin_unlock_irqrestore(&buf_list->spinlock, flags); ++} ++ ++/** + * n_hdlc_buf_put - add specified HDLC buffer to tail of specified list +- * @list - pointer to buffer list ++ * @buf_list - pointer to buffer list + * @buf - pointer to buffer + */ +-static void n_hdlc_buf_put(struct n_hdlc_buf_list *list, ++static void n_hdlc_buf_put(struct n_hdlc_buf_list *buf_list, + struct n_hdlc_buf *buf) + { + unsigned long flags; +- spin_lock_irqsave(&list->spinlock,flags); +- +- buf->link=NULL; +- if (list->tail) +- list->tail->link = buf; +- else +- list->head = buf; +- list->tail = buf; +- (list->count)++; +- +- spin_unlock_irqrestore(&list->spinlock,flags); +- ++ ++ spin_lock_irqsave(&buf_list->spinlock, flags); ++ ++ list_add_tail(&buf->list_item, &buf_list->list); ++ buf_list->count++; ++ ++ spin_unlock_irqrestore(&buf_list->spinlock, flags); + } /* end of n_hdlc_buf_put() */ + + /** + * n_hdlc_buf_get - remove and return an HDLC buffer from list +- * @list - pointer to HDLC buffer list ++ * @buf_list - pointer to HDLC buffer list + * + * Remove and return an HDLC buffer from the head of the specified HDLC buffer + * list. + * Returns a pointer to HDLC buffer if available, otherwise %NULL. + */ +-static struct n_hdlc_buf* n_hdlc_buf_get(struct n_hdlc_buf_list *list) ++static struct n_hdlc_buf *n_hdlc_buf_get(struct n_hdlc_buf_list *buf_list) + { + unsigned long flags; + struct n_hdlc_buf *buf; +- spin_lock_irqsave(&list->spinlock,flags); +- +- buf = list->head; ++ ++ spin_lock_irqsave(&buf_list->spinlock, flags); ++ ++ buf = list_first_entry_or_null(&buf_list->list, ++ struct n_hdlc_buf, list_item); + if (buf) { +- list->head = buf->link; +- (list->count)--; ++ list_del(&buf->list_item); ++ buf_list->count--; + } +- if (!list->head) +- list->tail = NULL; +- +- spin_unlock_irqrestore(&list->spinlock,flags); ++ ++ spin_unlock_irqrestore(&buf_list->spinlock, flags); + return buf; +- + } /* end of n_hdlc_buf_get() */ + + static char hdlc_banner[] __initdata = diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index bdf0e6e..ea92f7e 100644 --- a/drivers/tty/n_tty.c @@ -105544,7 +105829,7 @@ index 203287f..f1b702c 100644 GLOBAL_EXTERN atomic_t smBufAllocCount; GLOBAL_EXTERN atomic_t midCount; diff --git a/fs/cifs/file.c b/fs/cifs/file.c -index 18a1e1d..18caa3d 100644 +index 1cd0e2e..50411b0 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -1431,7 +1431,7 @@ cifs_free_llist(struct list_head *llist) @@ -107520,7 +107805,7 @@ index a8a750f..cd13d3f 100644 /* locality groups */ diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c -index c930a01..39ab8a4 100644 +index 9fbf92c..b0cab2a 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -876,7 +876,7 @@ ext4_find_extent(struct inode *inode, ext4_lblk_t block, @@ -107533,7 +107818,7 @@ index c930a01..39ab8a4 100644 eh = ext_inode_hdr(inode); diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c -index 7ae43c5..d417c85 100644 +index 2e9fc7a..90aa5cc 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -1921,7 +1921,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac, @@ -107590,7 +107875,7 @@ index 7ae43c5..d417c85 100644 } free_percpu(sbi->s_locality_groups); -@@ -3222,16 +3222,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac) +@@ -3229,16 +3229,16 @@ static void ext4_mb_collect_stats(struct 
ext4_allocation_context *ac) struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) { @@ -107613,7 +107898,7 @@ index 7ae43c5..d417c85 100644 } if (ac->ac_op == EXT4_MB_HISTORY_ALLOC) -@@ -3658,7 +3658,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac) +@@ -3665,7 +3665,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac) trace_ext4_mb_new_inode_pa(ac, pa); ext4_mb_use_inode_pa(ac, pa); @@ -107622,7 +107907,7 @@ index 7ae43c5..d417c85 100644 ei = EXT4_I(ac->ac_inode); grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group); -@@ -3718,7 +3718,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac) +@@ -3725,7 +3725,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac) trace_ext4_mb_new_group_pa(ac, pa); ext4_mb_use_group_pa(ac, pa); @@ -107631,7 +107916,7 @@ index 7ae43c5..d417c85 100644 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group); lg = ac->ac_lg; -@@ -3807,7 +3807,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh, +@@ -3814,7 +3814,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh, * from the bitmap and continue. */ } @@ -107640,7 +107925,7 @@ index 7ae43c5..d417c85 100644 return err; } -@@ -3825,7 +3825,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b, +@@ -3832,7 +3832,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b, ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); BUG_ON(group != e4b->bd_group && pa->pa_len != 0); mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len); @@ -107692,10 +107977,10 @@ index cf68100..f96c5c0 100644 err = ext4_handle_dirty_metadata(handle, NULL, bh); if (unlikely(err)) diff --git a/fs/ext4/super.c b/fs/ext4/super.c -index bbc316d..1dc79da 100644 +index afe29ba..6032d48 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c -@@ -992,10 +992,12 @@ static void init_once(void *foo) +@@ -994,10 +994,12 @@ static void init_once(void *foo) static int __init init_inodecache(void) { @@ -107709,7 +107994,7 @@ index bbc316d..1dc79da 100644 init_once); if (ext4_inode_cachep == NULL) return -ENOMEM; -@@ -1391,7 +1393,7 @@ static ext4_fsblk_t get_sb_block(void **data) +@@ -1393,7 +1395,7 @@ static ext4_fsblk_t get_sb_block(void **data) } #define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3)) @@ -107732,7 +108017,7 @@ index 42145be..1f1db90 100644 static ssize_t session_write_kbytes_show(struct ext4_attr *a, struct ext4_sb_info *sbi, char *buf) diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c -index d77be9e..744be29 100644 +index 4448ed3..523c675 100644 --- a/fs/ext4/xattr.c +++ b/fs/ext4/xattr.c @@ -414,7 +414,7 @@ static int @@ -107757,7 +108042,7 @@ index d77be9e..744be29 100644 static int diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h -index 506af45..17c9a0d 100644 +index 3a1640b..0bb94f2 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -52,7 +52,7 @@ enum { @@ -107769,7 +108054,7 @@ index 506af45..17c9a0d 100644 unsigned int inject_rate; unsigned int inject_type; }; -@@ -891,9 +891,9 @@ static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type) +@@ -895,9 +895,9 @@ static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type) if (!IS_FAULT_SET(ffi, type)) return false; @@ -109561,10 +109846,10 @@ index c5b6b71..527e347 100644 cuse_class = class_create(THIS_MODULE, "cuse"); if (IS_ERR(cuse_class)) diff --git a/fs/fuse/file.c b/fs/fuse/file.c -index 2401c5d..36b3ad7 100644 +index 5ec5870..ba44fba 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c -@@ -838,9 +838,9 @@ struct 
fuse_fill_data { +@@ -839,9 +839,9 @@ struct fuse_fill_data { unsigned nr_pages; }; @@ -109625,7 +109910,7 @@ index e23ff70..b17b736 100644 for (tmp = max_data; tmp > sdp->sd_diptrs;) { tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs); diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c -index 14cbf60..4c6225b 100644 +index 133f322..7ecda6d 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c @@ -324,9 +324,9 @@ static void state_change(struct gfs2_glock *gl, unsigned int new_state) @@ -109652,7 +109937,7 @@ index 14cbf60..4c6225b 100644 return; out_unlock: -@@ -690,7 +690,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number, +@@ -692,7 +692,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number, gl->gl_node.next = NULL; gl->gl_flags = 0; gl->gl_name = name; @@ -109661,7 +109946,7 @@ index 14cbf60..4c6225b 100644 gl->gl_state = LM_ST_UNLOCKED; gl->gl_target = LM_ST_UNLOCKED; gl->gl_demote_state = LM_ST_EXCLUSIVE; -@@ -979,9 +979,9 @@ int gfs2_glock_nq(struct gfs2_holder *gh) +@@ -984,9 +984,9 @@ int gfs2_glock_nq(struct gfs2_holder *gh) if (unlikely((LM_FLAG_NOEXP & gh->gh_flags) && test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))) { set_bit(GLF_REPLY_PENDING, &gl->gl_flags); @@ -109673,7 +109958,7 @@ index 14cbf60..4c6225b 100644 } run_queue(gl, 1); spin_unlock(&gl->gl_lockref.lock); -@@ -1286,7 +1286,7 @@ void gfs2_glock_complete(struct gfs2_glock *gl, int ret) +@@ -1291,7 +1291,7 @@ void gfs2_glock_complete(struct gfs2_glock *gl, int ret) } } @@ -109682,7 +109967,7 @@ index 14cbf60..4c6225b 100644 set_bit(GLF_REPLY_PENDING, &gl->gl_flags); spin_unlock(&gl->gl_lockref.lock); -@@ -1345,12 +1345,12 @@ __acquires(&lru_lock) +@@ -1350,12 +1350,12 @@ __acquires(&lru_lock) goto add_back_to_lru; } clear_bit(GLF_LRU, &gl->gl_flags); @@ -109697,7 +109982,7 @@ index 14cbf60..4c6225b 100644 spin_unlock(&gl->gl_lockref.lock); cond_resched_lock(&lru_lock); } -@@ -1677,7 +1677,7 @@ void gfs2_dump_glock(struct seq_file *seq, const struct gfs2_glock *gl) +@@ -1682,7 +1682,7 @@ void gfs2_dump_glock(struct seq_file *seq, const struct gfs2_glock *gl) state2str(gl->gl_demote_state), dtime, atomic_read(&gl->gl_ail_count), atomic_read(&gl->gl_revokes), @@ -109847,7 +110132,7 @@ index 31f8ca0..0319008 100644 commit_transaction->t_tid, &stats.run); stats.ts_requested = (commit_transaction->t_requested) ? 
1 : 0; diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c -index e165266..0799fc5 100644 +index 5e659ee..43133d6 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -91,7 +91,7 @@ jbd2_get_transaction(journal_t *journal, transaction_t *transaction) @@ -113651,7 +113936,7 @@ index 8b26058..b31170f 100644 int status; diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c -index fc89e5e..0e7fc08 100644 +index c9c4d985..5134393 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -2081,9 +2081,10 @@ static u32 nfs4_xdr_minorversion(const struct nfs4_sequence_args *args) @@ -119762,10 +120047,10 @@ index 41b468a..44e3e32 100644 return 1; } diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c -index 8ca642f..ff8523c 100644 +index b829cc9..584ba4f 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c -@@ -886,7 +886,7 @@ __be32 nfsd_readv(struct file *file, loff_t offset, struct kvec *vec, int vlen, +@@ -901,7 +901,7 @@ __be32 nfsd_readv(struct file *file, loff_t offset, struct kvec *vec, int vlen, oldfs = get_fs(); set_fs(KERNEL_DS); @@ -119774,7 +120059,7 @@ index 8ca642f..ff8523c 100644 set_fs(oldfs); return nfsd_finish_read(file, count, host_err); } -@@ -976,7 +976,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file, +@@ -991,7 +991,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file, /* Write the data. */ oldfs = get_fs(); set_fs(KERNEL_DS); @@ -119783,7 +120068,7 @@ index 8ca642f..ff8523c 100644 set_fs(oldfs); if (host_err < 0) goto out_nfserr; -@@ -1475,7 +1475,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp) +@@ -1490,7 +1490,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp) */ oldfs = get_fs(); set_fs(KERNEL_DS); @@ -137442,7 +137727,7 @@ index a428aec..24e7490 100644 /** * struct clk_init_data - holds init data that's common to all clocks and is diff --git a/include/linux/compat.h b/include/linux/compat.h -index 6360939..40d0088 100644 +index d8535a4..b81d592 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -30,31 +30,83 @@ @@ -138314,10 +138599,10 @@ index 7925bf0..d5143d2 100644 #define large_malloc(a) vmalloc(a) diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h -index 2de4e2e..510a09b8 100644 +index e0acb0e..7627e4b 100644 --- a/include/linux/devfreq.h +++ b/include/linux/devfreq.h -@@ -124,7 +124,7 @@ struct devfreq_governor { +@@ -127,7 +127,7 @@ struct devfreq_governor { int (*get_target_freq)(struct devfreq *this, unsigned long *freq); int (*event_handler)(struct devfreq *devfreq, unsigned int event, void *data); @@ -141435,7 +141720,7 @@ index 3ba327a..85cd5ce 100644 } diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h -index f99c993..224b9e3 100644 +index 7e273e2..bd5c3aef 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -491,7 +491,7 @@ struct zone { @@ -146428,7 +146713,7 @@ index 7428a53..9d6aaef 100644 u8 qfull; u16 vlan; diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h -index 8a95631..bd0f3e5 100644 +index b9ec493..7222efe 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h @@ -193,9 +193,9 @@ struct scsi_device { @@ -147727,7 +148012,7 @@ index 10b94bc..136c0ae 100644 /* diff --git a/ipc/shm.c b/ipc/shm.c -index dbac886..ef5e42d 100644 +index e2072ae..a0da911 100644 --- a/ipc/shm.c +++ b/ipc/shm.c @@ -72,9 +72,17 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp); @@ -147759,7 +148044,7 @@ index dbac886..ef5e42d 
100644 shp->shm_segsz = size; shp->shm_nattch = 0; shp->shm_file = file; -@@ -1133,6 +1144,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr, +@@ -1138,6 +1149,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, f_mode = FMODE_READ | FMODE_WRITE; } if (shmflg & SHM_EXEC) { @@ -147772,7 +148057,7 @@ index dbac886..ef5e42d 100644 prot |= PROT_EXEC; acc_mode |= S_IXUGO; } -@@ -1157,6 +1174,15 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr, +@@ -1162,6 +1179,15 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, if (err) goto out_unlock; @@ -147788,7 +148073,7 @@ index dbac886..ef5e42d 100644 ipc_lock_object(&shp->shm_perm); /* check if shm_destroy() is tearing down shp */ -@@ -1169,6 +1195,9 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr, +@@ -1174,6 +1200,9 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, path = shp->shm_file->f_path; path_get(&path); shp->shm_nattch++; @@ -147798,7 +148083,7 @@ index dbac886..ef5e42d 100644 size = i_size_read(d_inode(path.dentry)); ipc_unlock_object(&shp->shm_perm); rcu_read_unlock(); -@@ -1372,7 +1401,8 @@ SYSCALL_DEFINE1(shmdt, char __user *, shmaddr) +@@ -1377,7 +1406,8 @@ SYSCALL_DEFINE1(shmdt, char __user *, shmaddr) static int sysvipc_shm_proc_show(struct seq_file *s, void *it) { struct user_namespace *user_ns = seq_user_ns(s); @@ -152924,7 +153209,7 @@ index 0db7c8a..5e67101 100644 unlikely(current->ptrace & PT_SUSPEND_SECCOMP)) return 0; diff --git a/kernel/signal.c b/kernel/signal.c -index 75761ac..5d44ce3 100644 +index 0b14157..3a99520 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -53,12 +53,12 @@ static struct kmem_cache *sigqueue_cachep; @@ -153118,7 +153403,7 @@ index 75761ac..5d44ce3 100644 compat_user_stack_pointer()); set_fs(seg); if (ret >= 0 && uoss_ptr) { -@@ -3488,7 +3523,7 @@ SYSCALL_DEFINE1(ssetmask, int, newmask) +@@ -3495,7 +3530,7 @@ SYSCALL_DEFINE1(ssetmask, int, newmask) SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler) { struct k_sigaction new_sa, old_sa; @@ -153127,7 +153412,7 @@ index 75761ac..5d44ce3 100644 new_sa.sa.sa_handler = handler; new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK; -@@ -3496,7 +3531,7 @@ SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler) +@@ -3503,7 +3538,7 @@ SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler) ret = do_sigaction(sig, &new_sa, &old_sa); @@ -156699,10 +156984,10 @@ index 6c707bf..c8d0529 100644 return sys_fadvise64_64(fd, offset, len, advice); } diff --git a/mm/filemap.c b/mm/filemap.c -index d8d7df8..56aaf83 100644 +index edfb90e..b6f95cc 100644 --- a/mm/filemap.c +++ b/mm/filemap.c -@@ -2342,7 +2342,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma) +@@ -2345,7 +2345,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma) struct address_space *mapping = file->f_mapping; if (!mapping->a_ops->readpage) @@ -156711,7 +156996,7 @@ index d8d7df8..56aaf83 100644 file_accessed(file); vma->vm_ops = &generic_file_vm_ops; return 0; -@@ -2385,7 +2385,7 @@ static struct page *wait_on_page_read(struct page *page) +@@ -2388,7 +2388,7 @@ static struct page *wait_on_page_read(struct page *page) static struct page *do_read_cache_page(struct address_space *mapping, pgoff_t index, @@ -156720,7 +157005,7 @@ index d8d7df8..56aaf83 100644 void *data, gfp_t gfp) { -@@ -2492,7 +2492,7 @@ static struct page *do_read_cache_page(struct address_space *mapping, +@@ -2495,7 +2495,7 @@ static struct page 
*do_read_cache_page(struct address_space *mapping, */ struct page *read_cache_page(struct address_space *mapping, pgoff_t index, @@ -156729,7 +157014,7 @@ index d8d7df8..56aaf83 100644 void *data) { return do_read_cache_page(mapping, index, filler, data, mapping_gfp_mask(mapping)); -@@ -2514,7 +2514,7 @@ struct page *read_cache_page_gfp(struct address_space *mapping, +@@ -2517,7 +2517,7 @@ struct page *read_cache_page_gfp(struct address_space *mapping, pgoff_t index, gfp_t gfp) { @@ -156738,7 +157023,7 @@ index d8d7df8..56aaf83 100644 return do_read_cache_page(mapping, index, filler, NULL, gfp); } -@@ -2544,6 +2544,7 @@ inline ssize_t generic_write_checks(struct kiocb *iocb, struct iov_iter *from) +@@ -2547,6 +2547,7 @@ inline ssize_t generic_write_checks(struct kiocb *iocb, struct iov_iter *from) pos = iocb->ki_pos; if (limit != RLIM_INFINITY) { @@ -159919,7 +160204,7 @@ index 439cc63..d342d3b 100644 struct bdi_writeback *wb = dtc->wb; unsigned long write_bw = wb->avg_write_bandwidth; diff --git a/mm/page_alloc.c b/mm/page_alloc.c -index f4a02e2..59c5bf35 100644 +index 1460e6a..154adc1f 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -64,6 +64,7 @@ @@ -169029,10 +169314,10 @@ index 45da11a..ef3e5dc 100644 table = kmemdup(acct_sysctl_table, sizeof(acct_sysctl_table), GFP_KERNEL); diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c -index 0f87e5d..76343a8 100644 +index 6bd1508..079b587 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c -@@ -1790,7 +1790,7 @@ int nf_conntrack_hash_resize(unsigned int hashsize) +@@ -1788,7 +1788,7 @@ int nf_conntrack_hash_resize(unsigned int hashsize) return 0; } @@ -209408,10 +209693,10 @@ index 0000000..0928e48 +size_mei_msg_data_65529_fields size mei_msg_data 0 65529 NULL diff --git a/scripts/gcc-plugins/size_overflow_plugin/e_fns.data b/scripts/gcc-plugins/size_overflow_plugin/e_fns.data new file mode 100644 -index 0000000..103f4c7 +index 0000000..6dbe9b1 --- /dev/null +++ b/scripts/gcc-plugins/size_overflow_plugin/e_fns.data -@@ -0,0 +1,5033 @@ +@@ -0,0 +1,5035 @@ +logi_dj_recv_query_paired_devices_fndecl_13_fns logi_dj_recv_query_paired_devices fndecl 0 13 NULL +response_length_ib_uverbs_ex_destroy_wq_resp_15_fns response_length ib_uverbs_ex_destroy_wq_resp 0 15 NULL +kfd_wait_on_events_fndecl_19_fns kfd_wait_on_events fndecl 2 19 NULL @@ -209620,6 +209905,7 @@ index 0000000..103f4c7 +rx_pkt_map_sz_tg3_3106_fns rx_pkt_map_sz tg3 0 3106 NULL +mt76_init_sband_fndecl_3112_fns mt76_init_sband fndecl 6 3112 NULL +compat_filldir64_fndecl_3119_fns compat_filldir64 fndecl 3 3119 NULL ++ds9490r_write_block_fndecl_3142_fns ds9490r_write_block fndecl 3 3142 NULL +sys_sendto_fndecl_3162_fns sys_sendto fndecl 6 3162 NULL +fill_elf_header_fndecl_3182_fns fill_elf_header fndecl 2 3182 NULL +wl1271_format_buffer_fndecl_3185_fns wl1271_format_buffer fndecl 2 3185 NULL @@ -212262,6 +212548,7 @@ index 0000000..103f4c7 +pwr_rcvd_beacons_read_fndecl_37751_fns pwr_rcvd_beacons_read fndecl 3 37751 NULL +cache_read_procfs_fndecl_37761_fns cache_read_procfs fndecl 3 37761 NULL +kvm_kvzalloc_fndecl_37773_fns kvm_kvzalloc fndecl 1 37773 NULL ++ds9490r_read_block_fndecl_37799_fns ds9490r_read_block fndecl 3 37799 NULL +ifc_len_ifconf_37839_fns ifc_len ifconf 0 37839 NULL +__kfifo_skip_r_fndecl_37849_fns __kfifo_skip_r fndecl 2 37849 NULL +sctp_init_cause_fixed_fndecl_37853_fns sctp_init_cause_fixed fndecl 3 37853 NULL @@ -223863,7 +224150,7 @@ index 8df676f..77e2cb5 100644 if (bprm->cap_effective) return 1; 
diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h -index db25f54..f303f44 100644 +index df7834a..be87c8d 100644 --- a/security/integrity/ima/ima.h +++ b/security/integrity/ima/ima.h @@ -130,8 +130,8 @@ int ima_init_template(void); @@ -223878,7 +224165,7 @@ index db25f54..f303f44 100644 }; extern struct ima_h_table ima_htable; diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c -index 9df26a2..2c6fc00 100644 +index d01a52f..512130e 100644 --- a/security/integrity/ima/ima_api.c +++ b/security/integrity/ima/ima_api.c @@ -138,7 +138,7 @@ void ima_add_violation(struct file *file, const unsigned char *filename, @@ -224557,7 +224844,7 @@ index 4c93520..e4032f9 100644 } #endif diff --git a/sound/core/seq/seq_fifo.c b/sound/core/seq/seq_fifo.c -index 1d5acbe..5f55223 100644 +index 86240d0..08b468d 100644 --- a/sound/core/seq/seq_fifo.c +++ b/sound/core/seq/seq_fifo.c @@ -50,7 +50,7 @@ struct snd_seq_fifo *snd_seq_fifo_new(int poolsize) diff --git a/4.9.13/4425_grsec_remove_EI_PAX.patch b/4.9.14/4425_grsec_remove_EI_PAX.patch index 594598a..594598a 100644 --- a/4.9.13/4425_grsec_remove_EI_PAX.patch +++ b/4.9.14/4425_grsec_remove_EI_PAX.patch diff --git a/4.9.13/4426_default_XATTR_PAX_FLAGS.patch b/4.9.14/4426_default_XATTR_PAX_FLAGS.patch index f7e97b5..f7e97b5 100644 --- a/4.9.13/4426_default_XATTR_PAX_FLAGS.patch +++ b/4.9.14/4426_default_XATTR_PAX_FLAGS.patch diff --git a/4.9.13/4427_force_XATTR_PAX_tmpfs.patch b/4.9.14/4427_force_XATTR_PAX_tmpfs.patch index 3871139..3871139 100644 --- a/4.9.13/4427_force_XATTR_PAX_tmpfs.patch +++ b/4.9.14/4427_force_XATTR_PAX_tmpfs.patch diff --git a/4.9.13/4430_grsec-remove-localversion-grsec.patch b/4.9.14/4430_grsec-remove-localversion-grsec.patch index 31cf878..31cf878 100644 --- a/4.9.13/4430_grsec-remove-localversion-grsec.patch +++ b/4.9.14/4430_grsec-remove-localversion-grsec.patch diff --git a/4.9.13/4435_grsec-mute-warnings.patch b/4.9.14/4435_grsec-mute-warnings.patch index 8929222..8929222 100644 --- a/4.9.13/4435_grsec-mute-warnings.patch +++ b/4.9.14/4435_grsec-mute-warnings.patch diff --git a/4.9.13/4440_grsec-remove-protected-paths.patch b/4.9.14/4440_grsec-remove-protected-paths.patch index 741546d..741546d 100644 --- a/4.9.13/4440_grsec-remove-protected-paths.patch +++ b/4.9.14/4440_grsec-remove-protected-paths.patch diff --git a/4.9.13/4450_grsec-kconfig-default-gids.patch b/4.9.14/4450_grsec-kconfig-default-gids.patch index cee6e27..cee6e27 100644 --- a/4.9.13/4450_grsec-kconfig-default-gids.patch +++ b/4.9.14/4450_grsec-kconfig-default-gids.patch diff --git a/4.9.13/4465_selinux-avc_audit-log-curr_ip.patch b/4.9.14/4465_selinux-avc_audit-log-curr_ip.patch index 06a5294..06a5294 100644 --- a/4.9.13/4465_selinux-avc_audit-log-curr_ip.patch +++ b/4.9.14/4465_selinux-avc_audit-log-curr_ip.patch diff --git a/4.9.13/4470_disable-compat_vdso.patch b/4.9.14/4470_disable-compat_vdso.patch index a1401d8..a1401d8 100644 --- a/4.9.13/4470_disable-compat_vdso.patch +++ b/4.9.14/4470_disable-compat_vdso.patch diff --git a/4.9.13/4475_emutramp_default_on.patch b/4.9.14/4475_emutramp_default_on.patch index feb8c7b..feb8c7b 100644 --- a/4.9.13/4475_emutramp_default_on.patch +++ b/4.9.14/4475_emutramp_default_on.patch |