-rw-r--r-- | 4.4.4/1003_linux-4.4.4.patch | 13326
-rw-r--r-- | 4.4.5/0000_README (renamed from 4.4.4/0000_README) | 6
-rw-r--r-- | 4.4.5/1004_linux-4.4.5.patch | 3396
-rw-r--r-- | 4.4.5/4420_grsecurity-3.1-4.4.5-201603102309.patch (renamed from 4.4.4/4420_grsecurity-3.1-4.4.4-201603032158.patch) | 644
-rw-r--r-- | 4.4.5/4425_grsec_remove_EI_PAX.patch (renamed from 4.4.4/4425_grsec_remove_EI_PAX.patch) | 0
-rw-r--r-- | 4.4.5/4427_force_XATTR_PAX_tmpfs.patch (renamed from 4.4.4/4427_force_XATTR_PAX_tmpfs.patch) | 0
-rw-r--r-- | 4.4.5/4430_grsec-remove-localversion-grsec.patch (renamed from 4.4.4/4430_grsec-remove-localversion-grsec.patch) | 0
-rw-r--r-- | 4.4.5/4435_grsec-mute-warnings.patch (renamed from 4.4.4/4435_grsec-mute-warnings.patch) | 0
-rw-r--r-- | 4.4.5/4440_grsec-remove-protected-paths.patch (renamed from 4.4.4/4440_grsec-remove-protected-paths.patch) | 0
-rw-r--r-- | 4.4.5/4450_grsec-kconfig-default-gids.patch (renamed from 4.4.4/4450_grsec-kconfig-default-gids.patch) | 0
-rw-r--r-- | 4.4.5/4465_selinux-avc_audit-log-curr_ip.patch (renamed from 4.4.4/4465_selinux-avc_audit-log-curr_ip.patch) | 0
-rw-r--r-- | 4.4.5/4470_disable-compat_vdso.patch (renamed from 4.4.4/4470_disable-compat_vdso.patch) | 0
-rw-r--r-- | 4.4.5/4475_emutramp_default_on.patch (renamed from 4.4.4/4475_emutramp_default_on.patch) | 0
13 files changed, 3760 insertions, 13612 deletions
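The listing above shows the per-version layout these hardened patch sets use: an incremental upstream patch, the matching grsecurity patch, and the 44xx fixup patches. A minimal sketch of applying them with plain patch(1), assuming a kernel tree already at the matching base version and the conventional ascending numeric order (the authoritative order is documented in 0000_README, not in this diff):

    cd linux-4.4.x                                                      # tree assumed to be at the matching base version
    patch -p1 < ../4.4.5/1004_linux-4.4.5.patch                         # incremental upstream update to 4.4.5
    patch -p1 < ../4.4.5/4420_grsecurity-3.1-4.4.5-201603102309.patch   # grsecurity/PaX patch
    patch -p1 < ../4.4.5/4425_grsec_remove_EI_PAX.patch
    patch -p1 < ../4.4.5/4427_force_XATTR_PAX_tmpfs.patch
    # ... remaining 44xx patches from the listing, applied in ascending numeric order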
diff --git a/4.4.4/1003_linux-4.4.4.patch b/4.4.4/1003_linux-4.4.4.patch deleted file mode 100644 index 57fd383..0000000 --- a/4.4.4/1003_linux-4.4.4.patch +++ /dev/null @@ -1,13326 +0,0 @@ -diff --git a/Documentation/filesystems/efivarfs.txt b/Documentation/filesystems/efivarfs.txt -index c477af0..686a64b 100644 ---- a/Documentation/filesystems/efivarfs.txt -+++ b/Documentation/filesystems/efivarfs.txt -@@ -14,3 +14,10 @@ filesystem. - efivarfs is typically mounted like this, - - mount -t efivarfs none /sys/firmware/efi/efivars -+ -+Due to the presence of numerous firmware bugs where removing non-standard -+UEFI variables causes the system firmware to fail to POST, efivarfs -+files that are not well-known standardized variables are created -+as immutable files. This doesn't prevent removal - "chattr -i" will work - -+but it does prevent this kind of failure from being accomplished -+accidentally. -diff --git a/Makefile b/Makefile -index 802be10..344bc6f 100644 ---- a/Makefile -+++ b/Makefile -@@ -1,6 +1,6 @@ - VERSION = 4 - PATCHLEVEL = 4 --SUBLEVEL = 3 -+SUBLEVEL = 4 - EXTRAVERSION = - NAME = Blurry Fish Butt - -diff --git a/arch/arc/include/asm/irqflags-arcv2.h b/arch/arc/include/asm/irqflags-arcv2.h -index 258b0e5..68b60923 100644 ---- a/arch/arc/include/asm/irqflags-arcv2.h -+++ b/arch/arc/include/asm/irqflags-arcv2.h -@@ -22,6 +22,7 @@ - #define AUX_IRQ_CTRL 0x00E - #define AUX_IRQ_ACT 0x043 /* Active Intr across all levels */ - #define AUX_IRQ_LVL_PEND 0x200 /* Pending Intr across all levels */ -+#define AUX_IRQ_HINT 0x201 /* For generating Soft Interrupts */ - #define AUX_IRQ_PRIORITY 0x206 - #define ICAUSE 0x40a - #define AUX_IRQ_SELECT 0x40b -@@ -112,6 +113,16 @@ static inline int arch_irqs_disabled(void) - return arch_irqs_disabled_flags(arch_local_save_flags()); - } - -+static inline void arc_softirq_trigger(int irq) -+{ -+ write_aux_reg(AUX_IRQ_HINT, irq); -+} -+ -+static inline void arc_softirq_clear(int irq) -+{ -+ write_aux_reg(AUX_IRQ_HINT, 0); -+} -+ - #else - - .macro IRQ_DISABLE scratch -diff --git a/arch/arc/kernel/entry-arcv2.S b/arch/arc/kernel/entry-arcv2.S -index cbfec79..c126460 100644 ---- a/arch/arc/kernel/entry-arcv2.S -+++ b/arch/arc/kernel/entry-arcv2.S -@@ -45,11 +45,12 @@ VECTOR reserved ; Reserved slots - VECTOR handle_interrupt ; (16) Timer0 - VECTOR handle_interrupt ; unused (Timer1) - VECTOR handle_interrupt ; unused (WDT) --VECTOR handle_interrupt ; (19) ICI (inter core interrupt) --VECTOR handle_interrupt --VECTOR handle_interrupt --VECTOR handle_interrupt --VECTOR handle_interrupt ; (23) End of fixed IRQs -+VECTOR handle_interrupt ; (19) Inter core Interrupt (IPI) -+VECTOR handle_interrupt ; (20) perf Interrupt -+VECTOR handle_interrupt ; (21) Software Triggered Intr (Self IPI) -+VECTOR handle_interrupt ; unused -+VECTOR handle_interrupt ; (23) unused -+# End of fixed IRQs - - .rept CONFIG_ARC_NUMBER_OF_INTERRUPTS - 8 - VECTOR handle_interrupt -@@ -211,7 +212,11 @@ debug_marker_syscall: - ; (since IRQ NOT allowed in DS in ARCv2, this can only happen if orig - ; entry was via Exception in DS which got preempted in kernel). 
- ; --; IRQ RTIE won't reliably restore DE bit and/or BTA, needs handling -+; IRQ RTIE won't reliably restore DE bit and/or BTA, needs workaround -+; -+; Solution is return from Intr w/o any delay slot quirks into a kernel trampoline -+; and from pure kernel mode return to delay slot which handles DS bit/BTA correctly -+ - .Lintr_ret_to_delay_slot: - debug_marker_ds: - -@@ -222,18 +227,23 @@ debug_marker_ds: - ld r2, [sp, PT_ret] - ld r3, [sp, PT_status32] - -+ ; STAT32 for Int return created from scratch -+ ; (No delay dlot, disable Further intr in trampoline) -+ - bic r0, r3, STATUS_U_MASK|STATUS_DE_MASK|STATUS_IE_MASK|STATUS_L_MASK - st r0, [sp, PT_status32] - - mov r1, .Lintr_ret_to_delay_slot_2 - st r1, [sp, PT_ret] - -+ ; Orig exception PC/STAT32 safekept @orig_r0 and @event stack slots - st r2, [sp, 0] - st r3, [sp, 4] - - b .Lisr_ret_fast_path - - .Lintr_ret_to_delay_slot_2: -+ ; Trampoline to restore orig exception PC/STAT32/BTA/AUX_USER_SP - sub sp, sp, SZ_PT_REGS - st r9, [sp, -4] - -@@ -243,11 +253,19 @@ debug_marker_ds: - ld r9, [sp, 4] - sr r9, [erstatus] - -+ ; restore AUX_USER_SP if returning to U mode -+ bbit0 r9, STATUS_U_BIT, 1f -+ ld r9, [sp, PT_sp] -+ sr r9, [AUX_USER_SP] -+ -+1: - ld r9, [sp, 8] - sr r9, [erbta] - - ld r9, [sp, -4] - add sp, sp, SZ_PT_REGS -+ -+ ; return from pure kernel mode to delay slot - rtie - - END(ret_from_exception) -diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c -index bd237ac..30d806c 100644 ---- a/arch/arc/kernel/mcip.c -+++ b/arch/arc/kernel/mcip.c -@@ -11,9 +11,12 @@ - #include <linux/smp.h> - #include <linux/irq.h> - #include <linux/spinlock.h> -+#include <asm/irqflags-arcv2.h> - #include <asm/mcip.h> - #include <asm/setup.h> - -+#define SOFTIRQ_IRQ 21 -+ - static char smp_cpuinfo_buf[128]; - static int idu_detected; - -@@ -22,6 +25,7 @@ static DEFINE_RAW_SPINLOCK(mcip_lock); - static void mcip_setup_per_cpu(int cpu) - { - smp_ipi_irq_setup(cpu, IPI_IRQ); -+ smp_ipi_irq_setup(cpu, SOFTIRQ_IRQ); - } - - static void mcip_ipi_send(int cpu) -@@ -29,6 +33,12 @@ static void mcip_ipi_send(int cpu) - unsigned long flags; - int ipi_was_pending; - -+ /* ARConnect can only send IPI to others */ -+ if (unlikely(cpu == raw_smp_processor_id())) { -+ arc_softirq_trigger(SOFTIRQ_IRQ); -+ return; -+ } -+ - /* - * NOTE: We must spin here if the other cpu hasn't yet - * serviced a previous message. This can burn lots -@@ -63,6 +73,11 @@ static void mcip_ipi_clear(int irq) - unsigned long flags; - unsigned int __maybe_unused copy; - -+ if (unlikely(irq == SOFTIRQ_IRQ)) { -+ arc_softirq_clear(irq); -+ return; -+ } -+ - raw_spin_lock_irqsave(&mcip_lock, flags); - - /* Who sent the IPI */ -diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug -index 259c0ca..ddbb361 100644 ---- a/arch/arm/Kconfig.debug -+++ b/arch/arm/Kconfig.debug -@@ -162,10 +162,9 @@ choice - mobile SoCs in the Kona family of chips (e.g. bcm28155, - bcm11351, etc...) 
- -- config DEBUG_BCM63XX -+ config DEBUG_BCM63XX_UART - bool "Kernel low-level debugging on BCM63XX UART" - depends on ARCH_BCM_63XX -- select DEBUG_UART_BCM63XX - - config DEBUG_BERLIN_UART - bool "Marvell Berlin SoC Debug UART" -@@ -1348,7 +1347,7 @@ config DEBUG_LL_INCLUDE - default "debug/vf.S" if DEBUG_VF_UART - default "debug/vt8500.S" if DEBUG_VT8500_UART0 - default "debug/zynq.S" if DEBUG_ZYNQ_UART0 || DEBUG_ZYNQ_UART1 -- default "debug/bcm63xx.S" if DEBUG_UART_BCM63XX -+ default "debug/bcm63xx.S" if DEBUG_BCM63XX_UART - default "debug/digicolor.S" if DEBUG_DIGICOLOR_UA0 - default "mach/debug-macro.S" - -@@ -1364,10 +1363,6 @@ config DEBUG_UART_8250 - ARCH_IOP33X || ARCH_IXP4XX || \ - ARCH_LPC32XX || ARCH_MV78XX0 || ARCH_ORION5X || ARCH_RPC - --# Compatibility options for BCM63xx --config DEBUG_UART_BCM63XX -- def_bool ARCH_BCM_63XX -- - config DEBUG_UART_PHYS - hex "Physical base address of debug UART" - default 0x00100a00 if DEBUG_NETX_UART -@@ -1462,7 +1457,7 @@ config DEBUG_UART_PHYS - default 0xfffb0000 if DEBUG_OMAP1UART1 || DEBUG_OMAP7XXUART1 - default 0xfffb0800 if DEBUG_OMAP1UART2 || DEBUG_OMAP7XXUART2 - default 0xfffb9800 if DEBUG_OMAP1UART3 || DEBUG_OMAP7XXUART3 -- default 0xfffe8600 if DEBUG_UART_BCM63XX -+ default 0xfffe8600 if DEBUG_BCM63XX_UART - default 0xfffff700 if ARCH_IOP33X - depends on ARCH_EP93XX || \ - DEBUG_LL_UART_8250 || DEBUG_LL_UART_PL01X || \ -@@ -1474,7 +1469,7 @@ config DEBUG_UART_PHYS - DEBUG_RCAR_GEN2_SCIF0 || DEBUG_RCAR_GEN2_SCIF2 || \ - DEBUG_RMOBILE_SCIFA0 || DEBUG_RMOBILE_SCIFA1 || \ - DEBUG_RMOBILE_SCIFA4 || DEBUG_S3C24XX_UART || \ -- DEBUG_UART_BCM63XX || DEBUG_ASM9260_UART || \ -+ DEBUG_BCM63XX_UART || DEBUG_ASM9260_UART || \ - DEBUG_SIRFSOC_UART || DEBUG_DIGICOLOR_UA0 || \ - DEBUG_AT91_UART - -@@ -1515,7 +1510,7 @@ config DEBUG_UART_VIRT - default 0xfb10c000 if DEBUG_REALVIEW_PB1176_PORT - default 0xfc40ab00 if DEBUG_BRCMSTB_UART - default 0xfc705000 if DEBUG_ZTE_ZX -- default 0xfcfe8600 if DEBUG_UART_BCM63XX -+ default 0xfcfe8600 if DEBUG_BCM63XX_UART - default 0xfd000000 if ARCH_SPEAR3XX || ARCH_SPEAR6XX - default 0xfd000000 if ARCH_SPEAR13XX - default 0xfd012000 if ARCH_MV78XX0 -@@ -1566,7 +1561,7 @@ config DEBUG_UART_VIRT - DEBUG_UART_8250 || DEBUG_UART_PL01X || DEBUG_MESON_UARTAO || \ - DEBUG_NETX_UART || \ - DEBUG_QCOM_UARTDM || DEBUG_S3C24XX_UART || \ -- DEBUG_UART_BCM63XX || DEBUG_ASM9260_UART || \ -+ DEBUG_BCM63XX_UART || DEBUG_ASM9260_UART || \ - DEBUG_SIRFSOC_UART || DEBUG_DIGICOLOR_UA0 - - config DEBUG_UART_8250_SHIFT -diff --git a/arch/arm/boot/dts/sama5d2-pinfunc.h b/arch/arm/boot/dts/sama5d2-pinfunc.h -index 1afe246..b0c912fe 100644 ---- a/arch/arm/boot/dts/sama5d2-pinfunc.h -+++ b/arch/arm/boot/dts/sama5d2-pinfunc.h -@@ -90,7 +90,7 @@ - #define PIN_PA14__I2SC1_MCK PINMUX_PIN(PIN_PA14, 4, 2) - #define PIN_PA14__FLEXCOM3_IO2 PINMUX_PIN(PIN_PA14, 5, 1) - #define PIN_PA14__D9 PINMUX_PIN(PIN_PA14, 6, 2) --#define PIN_PA15 14 -+#define PIN_PA15 15 - #define PIN_PA15__GPIO PINMUX_PIN(PIN_PA15, 0, 0) - #define PIN_PA15__SPI0_MOSI PINMUX_PIN(PIN_PA15, 1, 1) - #define PIN_PA15__TF1 PINMUX_PIN(PIN_PA15, 2, 1) -diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h -index 68ee3ce..b4c6d99 100644 ---- a/arch/arm/include/asm/psci.h -+++ b/arch/arm/include/asm/psci.h -@@ -16,7 +16,7 @@ - - extern struct smp_operations psci_smp_ops; - --#ifdef CONFIG_ARM_PSCI -+#if defined(CONFIG_SMP) && defined(CONFIG_ARM_PSCI) - bool psci_smp_available(void); - #else - static inline bool psci_smp_available(void) { return false; } -diff 
--git a/arch/arm/include/asm/xen/page-coherent.h b/arch/arm/include/asm/xen/page-coherent.h -index 0375c8c..9408a99 100644 ---- a/arch/arm/include/asm/xen/page-coherent.h -+++ b/arch/arm/include/asm/xen/page-coherent.h -@@ -35,14 +35,21 @@ static inline void xen_dma_map_page(struct device *hwdev, struct page *page, - dma_addr_t dev_addr, unsigned long offset, size_t size, - enum dma_data_direction dir, struct dma_attrs *attrs) - { -- bool local = XEN_PFN_DOWN(dev_addr) == page_to_xen_pfn(page); -+ unsigned long page_pfn = page_to_xen_pfn(page); -+ unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr); -+ unsigned long compound_pages = -+ (1<<compound_order(page)) * XEN_PFN_PER_PAGE; -+ bool local = (page_pfn <= dev_pfn) && -+ (dev_pfn - page_pfn < compound_pages); -+ - /* -- * Dom0 is mapped 1:1, while the Linux page can be spanned accross -- * multiple Xen page, it's not possible to have a mix of local and -- * foreign Xen page. So if the first xen_pfn == mfn the page is local -- * otherwise it's a foreign page grant-mapped in dom0. If the page is -- * local we can safely call the native dma_ops function, otherwise we -- * call the xen specific function. -+ * Dom0 is mapped 1:1, while the Linux page can span across -+ * multiple Xen pages, it's not possible for it to contain a -+ * mix of local and foreign Xen pages. So if the first xen_pfn -+ * == mfn the page is local otherwise it's a foreign page -+ * grant-mapped in dom0. If the page is local we can safely -+ * call the native dma_ops function, otherwise we call the xen -+ * specific function. - */ - if (local) - __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs); -diff --git a/arch/arm/mach-omap2/gpmc-onenand.c b/arch/arm/mach-omap2/gpmc-onenand.c -index 7b76ce0..8633c70 100644 ---- a/arch/arm/mach-omap2/gpmc-onenand.c -+++ b/arch/arm/mach-omap2/gpmc-onenand.c -@@ -101,10 +101,8 @@ static void omap2_onenand_set_async_mode(void __iomem *onenand_base) - - static void set_onenand_cfg(void __iomem *onenand_base) - { -- u32 reg; -+ u32 reg = ONENAND_SYS_CFG1_RDY | ONENAND_SYS_CFG1_INT; - -- reg = readw(onenand_base + ONENAND_REG_SYS_CFG1); -- reg &= ~((0x7 << ONENAND_SYS_CFG1_BRL_SHIFT) | (0x7 << 9)); - reg |= (latency << ONENAND_SYS_CFG1_BRL_SHIFT) | - ONENAND_SYS_CFG1_BL_16; - if (onenand_flags & ONENAND_FLAG_SYNCREAD) -@@ -123,6 +121,7 @@ static void set_onenand_cfg(void __iomem *onenand_base) - reg |= ONENAND_SYS_CFG1_VHF; - else - reg &= ~ONENAND_SYS_CFG1_VHF; -+ - writew(reg, onenand_base + ONENAND_REG_SYS_CFG1); - } - -@@ -289,6 +288,7 @@ static int omap2_onenand_setup_async(void __iomem *onenand_base) - } - } - -+ onenand_async.sync_write = true; - omap2_onenand_calc_async_timings(&t); - - ret = gpmc_cs_program_settings(gpmc_onenand_data->cs, &onenand_async); -diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile -index cd822d8..b6c90e5 100644 ---- a/arch/arm64/Makefile -+++ b/arch/arm64/Makefile -@@ -27,6 +27,7 @@ $(warning LSE atomics not supported by binutils) - endif - - KBUILD_CFLAGS += -mgeneral-regs-only $(lseinstr) -+KBUILD_CFLAGS += $(call cc-option, -mpc-relative-literal-loads) - KBUILD_AFLAGS += $(lseinstr) - - ifeq ($(CONFIG_CPU_BIG_ENDIAN), y) -diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h -index 2046c02..21ed715 100644 ---- a/arch/mips/include/asm/page.h -+++ b/arch/mips/include/asm/page.h -@@ -33,7 +33,7 @@ - #define PAGE_SHIFT 16 - #endif - #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT) --#define PAGE_MASK (~(PAGE_SIZE - 1)) -+#define PAGE_MASK (~((1 << PAGE_SHIFT) - 1)) - - 
/* - * This is used for calculating the real page sizes -diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h -index 8957f15..18826aa 100644 ---- a/arch/mips/include/asm/pgtable.h -+++ b/arch/mips/include/asm/pgtable.h -@@ -353,7 +353,7 @@ static inline pte_t pte_mkdirty(pte_t pte) - static inline pte_t pte_mkyoung(pte_t pte) - { - pte_val(pte) |= _PAGE_ACCESSED; --#ifdef CONFIG_CPU_MIPSR2 -+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) - if (!(pte_val(pte) & _PAGE_NO_READ)) - pte_val(pte) |= _PAGE_SILENT_READ; - else -@@ -560,7 +560,7 @@ static inline pmd_t pmd_mkyoung(pmd_t pmd) - { - pmd_val(pmd) |= _PAGE_ACCESSED; - --#ifdef CONFIG_CPU_MIPSR2 -+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) - if (!(pmd_val(pmd) & _PAGE_NO_READ)) - pmd_val(pmd) |= _PAGE_SILENT_READ; - else -diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h -index 6499d93..47bc45a 100644 ---- a/arch/mips/include/asm/syscall.h -+++ b/arch/mips/include/asm/syscall.h -@@ -101,10 +101,8 @@ static inline void syscall_get_arguments(struct task_struct *task, - /* O32 ABI syscall() - Either 64-bit with O32 or 32-bit */ - if ((config_enabled(CONFIG_32BIT) || - test_tsk_thread_flag(task, TIF_32BIT_REGS)) && -- (regs->regs[2] == __NR_syscall)) { -+ (regs->regs[2] == __NR_syscall)) - i++; -- n++; -- } - - while (n--) - ret |= mips_get_syscall_arg(args++, task, regs, i++); -diff --git a/arch/mips/loongson64/loongson-3/hpet.c b/arch/mips/loongson64/loongson-3/hpet.c -index bf9f1a7..a2631a5 100644 ---- a/arch/mips/loongson64/loongson-3/hpet.c -+++ b/arch/mips/loongson64/loongson-3/hpet.c -@@ -13,6 +13,9 @@ - #define SMBUS_PCI_REG64 0x64 - #define SMBUS_PCI_REGB4 0xb4 - -+#define HPET_MIN_CYCLES 64 -+#define HPET_MIN_PROG_DELTA (HPET_MIN_CYCLES + (HPET_MIN_CYCLES >> 1)) -+ - static DEFINE_SPINLOCK(hpet_lock); - DEFINE_PER_CPU(struct clock_event_device, hpet_clockevent_device); - -@@ -161,8 +164,9 @@ static int hpet_next_event(unsigned long delta, - cnt += delta; - hpet_write(HPET_T0_CMP, cnt); - -- res = ((int)(hpet_read(HPET_COUNTER) - cnt) > 0) ? -ETIME : 0; -- return res; -+ res = (int)(cnt - hpet_read(HPET_COUNTER)); -+ -+ return res < HPET_MIN_CYCLES ? 
-ETIME : 0; - } - - static irqreturn_t hpet_irq_handler(int irq, void *data) -@@ -237,7 +241,7 @@ void __init setup_hpet_timer(void) - cd->cpumask = cpumask_of(cpu); - clockevent_set_clock(cd, HPET_FREQ); - cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd); -- cd->min_delta_ns = 5000; -+ cd->min_delta_ns = clockevent_delta2ns(HPET_MIN_PROG_DELTA, cd); - - clockevents_register_device(cd); - setup_irq(HPET_T0_IRQ, &hpet_irq); -diff --git a/arch/mips/loongson64/loongson-3/smp.c b/arch/mips/loongson64/loongson-3/smp.c -index 1a4738a..509832a9 100644 ---- a/arch/mips/loongson64/loongson-3/smp.c -+++ b/arch/mips/loongson64/loongson-3/smp.c -@@ -30,13 +30,13 @@ - #include "smp.h" - - DEFINE_PER_CPU(int, cpu_state); --DEFINE_PER_CPU(uint32_t, core0_c0count); - - static void *ipi_set0_regs[16]; - static void *ipi_clear0_regs[16]; - static void *ipi_status0_regs[16]; - static void *ipi_en0_regs[16]; - static void *ipi_mailbox_buf[16]; -+static uint32_t core0_c0count[NR_CPUS]; - - /* read a 32bit value from ipi register */ - #define loongson3_ipi_read32(addr) readl(addr) -@@ -275,12 +275,14 @@ void loongson3_ipi_interrupt(struct pt_regs *regs) - if (action & SMP_ASK_C0COUNT) { - BUG_ON(cpu != 0); - c0count = read_c0_count(); -- for (i = 1; i < num_possible_cpus(); i++) -- per_cpu(core0_c0count, i) = c0count; -+ c0count = c0count ? c0count : 1; -+ for (i = 1; i < nr_cpu_ids; i++) -+ core0_c0count[i] = c0count; -+ __wbflush(); /* Let others see the result ASAP */ - } - } - --#define MAX_LOOPS 1111 -+#define MAX_LOOPS 800 - /* - * SMP init and finish on secondary CPUs - */ -@@ -305,16 +307,20 @@ static void loongson3_init_secondary(void) - cpu_logical_map(cpu) / loongson_sysconf.cores_per_package; - - i = 0; -- __this_cpu_write(core0_c0count, 0); -+ core0_c0count[cpu] = 0; - loongson3_send_ipi_single(0, SMP_ASK_C0COUNT); -- while (!__this_cpu_read(core0_c0count)) { -+ while (!core0_c0count[cpu]) { - i++; - cpu_relax(); - } - - if (i > MAX_LOOPS) - i = MAX_LOOPS; -- initcount = __this_cpu_read(core0_c0count) + i; -+ if (cpu_data[cpu].package) -+ initcount = core0_c0count[cpu] + i; -+ else /* Local access is faster for loops */ -+ initcount = core0_c0count[cpu] + i/2; -+ - write_c0_count(initcount); - } - -diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c -index 32e0be2..29f73e0 100644 ---- a/arch/mips/mm/tlbex.c -+++ b/arch/mips/mm/tlbex.c -@@ -242,7 +242,7 @@ static void output_pgtable_bits_defines(void) - pr_define("_PAGE_HUGE_SHIFT %d\n", _PAGE_HUGE_SHIFT); - pr_define("_PAGE_SPLITTING_SHIFT %d\n", _PAGE_SPLITTING_SHIFT); - #endif --#ifdef CONFIG_CPU_MIPSR2 -+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) - if (cpu_has_rixi) { - #ifdef _PAGE_NO_EXEC_SHIFT - pr_define("_PAGE_NO_EXEC_SHIFT %d\n", _PAGE_NO_EXEC_SHIFT); -diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c -index f69ecaa..52c1e27 100644 ---- a/arch/powerpc/kernel/eeh_driver.c -+++ b/arch/powerpc/kernel/eeh_driver.c -@@ -418,8 +418,7 @@ static void *eeh_rmv_device(void *data, void *userdata) - eeh_pcid_put(dev); - if (driver->err_handler && - driver->err_handler->error_detected && -- driver->err_handler->slot_reset && -- driver->err_handler->resume) -+ driver->err_handler->slot_reset) - return NULL; - } - -diff --git a/arch/s390/include/asm/fpu/internal.h b/arch/s390/include/asm/fpu/internal.h -index 2559b16..17d9dcd 100644 ---- a/arch/s390/include/asm/fpu/internal.h -+++ b/arch/s390/include/asm/fpu/internal.h -@@ -48,6 +48,7 @@ static inline void convert_fp_to_vx(__vector128 *vxrs, 
freg_t *fprs) - static inline void fpregs_store(_s390_fp_regs *fpregs, struct fpu *fpu) - { - fpregs->pad = 0; -+ fpregs->fpc = fpu->fpc; - if (MACHINE_HAS_VX) - convert_vx_to_fp((freg_t *)&fpregs->fprs, fpu->vxrs); - else -@@ -57,6 +58,7 @@ static inline void fpregs_store(_s390_fp_regs *fpregs, struct fpu *fpu) - - static inline void fpregs_load(_s390_fp_regs *fpregs, struct fpu *fpu) - { -+ fpu->fpc = fpregs->fpc; - if (MACHINE_HAS_VX) - convert_fp_to_vx(fpu->vxrs, (freg_t *)&fpregs->fprs); - else -diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h -index efaac2c..e9a983f 100644 ---- a/arch/s390/include/asm/kvm_host.h -+++ b/arch/s390/include/asm/kvm_host.h -@@ -506,7 +506,6 @@ struct kvm_vcpu_arch { - struct kvm_s390_sie_block *sie_block; - unsigned int host_acrs[NUM_ACRS]; - struct fpu host_fpregs; -- struct fpu guest_fpregs; - struct kvm_s390_local_interrupt local_int; - struct hrtimer ckc_timer; - struct kvm_s390_pgm_info pgm; -diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c -index 9cd248f..dc6c9c6 100644 ---- a/arch/s390/kernel/asm-offsets.c -+++ b/arch/s390/kernel/asm-offsets.c -@@ -181,6 +181,7 @@ int main(void) - OFFSET(__LC_PSW_SAVE_AREA, _lowcore, psw_save_area); - OFFSET(__LC_PREFIX_SAVE_AREA, _lowcore, prefixreg_save_area); - OFFSET(__LC_FP_CREG_SAVE_AREA, _lowcore, fpt_creg_save_area); -+ OFFSET(__LC_TOD_PROGREG_SAVE_AREA, _lowcore, tod_progreg_save_area); - OFFSET(__LC_CPU_TIMER_SAVE_AREA, _lowcore, cpu_timer_save_area); - OFFSET(__LC_CLOCK_COMP_SAVE_AREA, _lowcore, clock_comp_save_area); - OFFSET(__LC_AREGS_SAVE_AREA, _lowcore, access_regs_save_area); -diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c -index 66c9441..4af6037 100644 ---- a/arch/s390/kernel/compat_signal.c -+++ b/arch/s390/kernel/compat_signal.c -@@ -271,7 +271,7 @@ static int restore_sigregs_ext32(struct pt_regs *regs, - - /* Restore high gprs from signal stack */ - if (__copy_from_user(&gprs_high, &sregs_ext->gprs_high, -- sizeof(&sregs_ext->gprs_high))) -+ sizeof(sregs_ext->gprs_high))) - return -EFAULT; - for (i = 0; i < NUM_GPRS; i++) - *(__u32 *)®s->gprs[i] = gprs_high[i]; -diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c -index 8465892..a08d0af 100644 ---- a/arch/s390/kvm/kvm-s390.c -+++ b/arch/s390/kvm/kvm-s390.c -@@ -1268,44 +1268,18 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) - return 0; - } - --/* -- * Backs up the current FP/VX register save area on a particular -- * destination. Used to switch between different register save -- * areas. -- */ --static inline void save_fpu_to(struct fpu *dst) --{ -- dst->fpc = current->thread.fpu.fpc; -- dst->regs = current->thread.fpu.regs; --} -- --/* -- * Switches the FP/VX register save area from which to lazy -- * restore register contents. 
-- */ --static inline void load_fpu_from(struct fpu *from) --{ -- current->thread.fpu.fpc = from->fpc; -- current->thread.fpu.regs = from->regs; --} -- - void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) - { - /* Save host register state */ - save_fpu_regs(); -- save_fpu_to(&vcpu->arch.host_fpregs); -- -- if (test_kvm_facility(vcpu->kvm, 129)) { -- current->thread.fpu.fpc = vcpu->run->s.regs.fpc; -- /* -- * Use the register save area in the SIE-control block -- * for register restore and save in kvm_arch_vcpu_put() -- */ -- current->thread.fpu.vxrs = -- (__vector128 *)&vcpu->run->s.regs.vrs; -- } else -- load_fpu_from(&vcpu->arch.guest_fpregs); -+ vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc; -+ vcpu->arch.host_fpregs.regs = current->thread.fpu.regs; - -+ /* Depending on MACHINE_HAS_VX, data stored to vrs either -+ * has vector register or floating point register format. -+ */ -+ current->thread.fpu.regs = vcpu->run->s.regs.vrs; -+ current->thread.fpu.fpc = vcpu->run->s.regs.fpc; - if (test_fp_ctl(current->thread.fpu.fpc)) - /* User space provided an invalid FPC, let's clear it */ - current->thread.fpu.fpc = 0; -@@ -1321,19 +1295,13 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) - atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); - gmap_disable(vcpu->arch.gmap); - -+ /* Save guest register state */ - save_fpu_regs(); -+ vcpu->run->s.regs.fpc = current->thread.fpu.fpc; - -- if (test_kvm_facility(vcpu->kvm, 129)) -- /* -- * kvm_arch_vcpu_load() set up the register save area to -- * the &vcpu->run->s.regs.vrs and, thus, the vector registers -- * are already saved. Only the floating-point control must be -- * copied. -- */ -- vcpu->run->s.regs.fpc = current->thread.fpu.fpc; -- else -- save_fpu_to(&vcpu->arch.guest_fpregs); -- load_fpu_from(&vcpu->arch.host_fpregs); -+ /* Restore host register state */ -+ current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc; -+ current->thread.fpu.regs = vcpu->arch.host_fpregs.regs; - - save_access_regs(vcpu->run->s.regs.acrs); - restore_access_regs(vcpu->arch.host_acrs); -@@ -1351,8 +1319,9 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu) - memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64)); - vcpu->arch.sie_block->gcr[0] = 0xE0UL; - vcpu->arch.sie_block->gcr[14] = 0xC2000000UL; -- vcpu->arch.guest_fpregs.fpc = 0; -- asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc)); -+ /* make sure the new fpc will be lazily loaded */ -+ save_fpu_regs(); -+ current->thread.fpu.fpc = 0; - vcpu->arch.sie_block->gbea = 1; - vcpu->arch.sie_block->pp = 0; - vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID; -@@ -1501,19 +1470,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, - vcpu->arch.local_int.wq = &vcpu->wq; - vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags; - -- /* -- * Allocate a save area for floating-point registers. If the vector -- * extension is available, register contents are saved in the SIE -- * control block. The allocated save area is still required in -- * particular places, for example, in kvm_s390_vcpu_store_status(). 
-- */ -- vcpu->arch.guest_fpregs.fprs = kzalloc(sizeof(freg_t) * __NUM_FPRS, -- GFP_KERNEL); -- if (!vcpu->arch.guest_fpregs.fprs) { -- rc = -ENOMEM; -- goto out_free_sie_block; -- } -- - rc = kvm_vcpu_init(vcpu, kvm, id); - if (rc) - goto out_free_sie_block; -@@ -1734,19 +1690,27 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, - - int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) - { -+ /* make sure the new values will be lazily loaded */ -+ save_fpu_regs(); - if (test_fp_ctl(fpu->fpc)) - return -EINVAL; -- memcpy(vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs)); -- vcpu->arch.guest_fpregs.fpc = fpu->fpc; -- save_fpu_regs(); -- load_fpu_from(&vcpu->arch.guest_fpregs); -+ current->thread.fpu.fpc = fpu->fpc; -+ if (MACHINE_HAS_VX) -+ convert_fp_to_vx(current->thread.fpu.vxrs, (freg_t *)fpu->fprs); -+ else -+ memcpy(current->thread.fpu.fprs, &fpu->fprs, sizeof(fpu->fprs)); - return 0; - } - - int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) - { -- memcpy(&fpu->fprs, vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs)); -- fpu->fpc = vcpu->arch.guest_fpregs.fpc; -+ /* make sure we have the latest values */ -+ save_fpu_regs(); -+ if (MACHINE_HAS_VX) -+ convert_vx_to_fp((freg_t *)fpu->fprs, current->thread.fpu.vxrs); -+ else -+ memcpy(fpu->fprs, current->thread.fpu.fprs, sizeof(fpu->fprs)); -+ fpu->fpc = current->thread.fpu.fpc; - return 0; - } - -@@ -2266,41 +2230,50 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) - int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa) - { - unsigned char archmode = 1; -+ freg_t fprs[NUM_FPRS]; - unsigned int px; - u64 clkcomp; - int rc; - -+ px = kvm_s390_get_prefix(vcpu); - if (gpa == KVM_S390_STORE_STATUS_NOADDR) { - if (write_guest_abs(vcpu, 163, &archmode, 1)) - return -EFAULT; -- gpa = SAVE_AREA_BASE; -+ gpa = 0; - } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) { - if (write_guest_real(vcpu, 163, &archmode, 1)) - return -EFAULT; -- gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE); -+ gpa = px; -+ } else -+ gpa -= __LC_FPREGS_SAVE_AREA; -+ -+ /* manually convert vector registers if necessary */ -+ if (MACHINE_HAS_VX) { -+ convert_vx_to_fp(fprs, current->thread.fpu.vxrs); -+ rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA, -+ fprs, 128); -+ } else { -+ rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA, -+ vcpu->run->s.regs.vrs, 128); - } -- rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs), -- vcpu->arch.guest_fpregs.fprs, 128); -- rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs), -+ rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA, - vcpu->run->s.regs.gprs, 128); -- rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw), -+ rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA, - &vcpu->arch.sie_block->gpsw, 16); -- px = kvm_s390_get_prefix(vcpu); -- rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg), -+ rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA, - &px, 4); -- rc |= write_guest_abs(vcpu, -- gpa + offsetof(struct save_area, fp_ctrl_reg), -- &vcpu->arch.guest_fpregs.fpc, 4); -- rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg), -+ rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA, -+ &vcpu->run->s.regs.fpc, 4); -+ rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA, - &vcpu->arch.sie_block->todpr, 4); -- rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer), -+ rc |= 
write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA, - &vcpu->arch.sie_block->cputm, 8); - clkcomp = vcpu->arch.sie_block->ckc >> 8; -- rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp), -+ rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA, - &clkcomp, 8); -- rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs), -+ rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA, - &vcpu->run->s.regs.acrs, 64); -- rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs), -+ rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA, - &vcpu->arch.sie_block->gcr, 128); - return rc ? -EFAULT : 0; - } -@@ -2313,19 +2286,7 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr) - * it into the save area - */ - save_fpu_regs(); -- if (test_kvm_facility(vcpu->kvm, 129)) { -- /* -- * If the vector extension is available, the vector registers -- * which overlaps with floating-point registers are saved in -- * the SIE-control block. Hence, extract the floating-point -- * registers and the FPC value and store them in the -- * guest_fpregs structure. -- */ -- vcpu->arch.guest_fpregs.fpc = current->thread.fpu.fpc; -- convert_vx_to_fp(vcpu->arch.guest_fpregs.fprs, -- current->thread.fpu.vxrs); -- } else -- save_fpu_to(&vcpu->arch.guest_fpregs); -+ vcpu->run->s.regs.fpc = current->thread.fpu.fpc; - save_access_regs(vcpu->run->s.regs.acrs); - - return kvm_s390_store_status_unloaded(vcpu, addr); -diff --git a/arch/s390/mm/extable.c b/arch/s390/mm/extable.c -index 4d1ee88..18c8b81 100644 ---- a/arch/s390/mm/extable.c -+++ b/arch/s390/mm/extable.c -@@ -52,12 +52,16 @@ void sort_extable(struct exception_table_entry *start, - int i; - - /* Normalize entries to being relative to the start of the section */ -- for (p = start, i = 0; p < finish; p++, i += 8) -+ for (p = start, i = 0; p < finish; p++, i += 8) { - p->insn += i; -+ p->fixup += i + 4; -+ } - sort(start, finish - start, sizeof(*start), cmp_ex, NULL); - /* Denormalize all entries */ -- for (p = start, i = 0; p < finish; p++, i += 8) -+ for (p = start, i = 0; p < finish; p++, i += 8) { - p->insn -= i; -+ p->fixup -= i + 4; -+ } - } - - #ifdef CONFIG_MODULES -diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c -index 30e7ddb..c690c8e 100644 ---- a/arch/sparc/kernel/sys_sparc_64.c -+++ b/arch/sparc/kernel/sys_sparc_64.c -@@ -413,7 +413,7 @@ out: - - SYSCALL_DEFINE1(sparc64_personality, unsigned long, personality) - { -- int ret; -+ long ret; - - if (personality(current->personality) == PER_LINUX32 && - personality(personality) == PER_LINUX) -diff --git a/arch/um/os-Linux/start_up.c b/arch/um/os-Linux/start_up.c -index 47f1ff0..22a358e 100644 ---- a/arch/um/os-Linux/start_up.c -+++ b/arch/um/os-Linux/start_up.c -@@ -94,6 +94,8 @@ static int start_ptraced_child(void) - { - int pid, n, status; - -+ fflush(stdout); -+ - pid = fork(); - if (pid == 0) - ptrace_child(); -diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S -index 6a1ae37..15cfeba 100644 ---- a/arch/x86/entry/entry_64_compat.S -+++ b/arch/x86/entry/entry_64_compat.S -@@ -267,6 +267,7 @@ ENTRY(entry_INT80_compat) - * Interrupts are off on entry. 
- */ - PARAVIRT_ADJUST_EXCEPTION_FRAME -+ ASM_CLAC /* Do this early to minimize exposure */ - SWAPGS - - /* -diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h -index 881b476..e7de5c9 100644 ---- a/arch/x86/include/asm/irq.h -+++ b/arch/x86/include/asm/irq.h -@@ -23,11 +23,13 @@ extern void irq_ctx_init(int cpu); - - #define __ARCH_HAS_DO_SOFTIRQ - -+struct irq_desc; -+ - #ifdef CONFIG_HOTPLUG_CPU - #include <linux/cpumask.h> - extern int check_irq_vectors_for_cpu_disable(void); - extern void fixup_irqs(void); --extern void irq_force_complete_move(int); -+extern void irq_force_complete_move(struct irq_desc *desc); - #endif - - #ifdef CONFIG_HAVE_KVM -@@ -37,7 +39,6 @@ extern void kvm_set_posted_intr_wakeup_handler(void (*handler)(void)); - extern void (*x86_platform_ipi_callback)(void); - extern void native_init_IRQ(void); - --struct irq_desc; - extern bool handle_irq(struct irq_desc *desc, struct pt_regs *regs); - - extern __visible unsigned int do_IRQ(struct pt_regs *regs); -diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c -index f253218..fdb0fbf 100644 ---- a/arch/x86/kernel/apic/io_apic.c -+++ b/arch/x86/kernel/apic/io_apic.c -@@ -2521,6 +2521,7 @@ void __init setup_ioapic_dest(void) - { - int pin, ioapic, irq, irq_entry; - const struct cpumask *mask; -+ struct irq_desc *desc; - struct irq_data *idata; - struct irq_chip *chip; - -@@ -2536,7 +2537,9 @@ void __init setup_ioapic_dest(void) - if (irq < 0 || !mp_init_irq_at_boot(ioapic, irq)) - continue; - -- idata = irq_get_irq_data(irq); -+ desc = irq_to_desc(irq); -+ raw_spin_lock_irq(&desc->lock); -+ idata = irq_desc_get_irq_data(desc); - - /* - * Honour affinities which have been set in early boot -@@ -2550,6 +2553,7 @@ void __init setup_ioapic_dest(void) - /* Might be lapic_chip for irq 0 */ - if (chip->irq_set_affinity) - chip->irq_set_affinity(idata, mask, false); -+ raw_spin_unlock_irq(&desc->lock); - } - } - #endif -diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c -index 861bc59..a35f6b5 100644 ---- a/arch/x86/kernel/apic/vector.c -+++ b/arch/x86/kernel/apic/vector.c -@@ -30,7 +30,7 @@ struct apic_chip_data { - - struct irq_domain *x86_vector_domain; - static DEFINE_RAW_SPINLOCK(vector_lock); --static cpumask_var_t vector_cpumask; -+static cpumask_var_t vector_cpumask, vector_searchmask, searched_cpumask; - static struct irq_chip lapic_controller; - #ifdef CONFIG_X86_IO_APIC - static struct apic_chip_data *legacy_irq_data[NR_IRQS_LEGACY]; -@@ -116,35 +116,47 @@ static int __assign_irq_vector(int irq, struct apic_chip_data *d, - */ - static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START; - static int current_offset = VECTOR_OFFSET_START % 16; -- int cpu, err; -+ int cpu, vector; - -- if (d->move_in_progress) -+ /* -+ * If there is still a move in progress or the previous move has not -+ * been cleaned up completely, tell the caller to come back later. 
-+ */ -+ if (d->move_in_progress || -+ cpumask_intersects(d->old_domain, cpu_online_mask)) - return -EBUSY; - - /* Only try and allocate irqs on cpus that are present */ -- err = -ENOSPC; - cpumask_clear(d->old_domain); -+ cpumask_clear(searched_cpumask); - cpu = cpumask_first_and(mask, cpu_online_mask); - while (cpu < nr_cpu_ids) { -- int new_cpu, vector, offset; -+ int new_cpu, offset; - -+ /* Get the possible target cpus for @mask/@cpu from the apic */ - apic->vector_allocation_domain(cpu, vector_cpumask, mask); - -+ /* -+ * Clear the offline cpus from @vector_cpumask for searching -+ * and verify whether the result overlaps with @mask. If true, -+ * then the call to apic->cpu_mask_to_apicid_and() will -+ * succeed as well. If not, no point in trying to find a -+ * vector in this mask. -+ */ -+ cpumask_and(vector_searchmask, vector_cpumask, cpu_online_mask); -+ if (!cpumask_intersects(vector_searchmask, mask)) -+ goto next_cpu; -+ - if (cpumask_subset(vector_cpumask, d->domain)) { -- err = 0; - if (cpumask_equal(vector_cpumask, d->domain)) -- break; -+ goto success; - /* -- * New cpumask using the vector is a proper subset of -- * the current in use mask. So cleanup the vector -- * allocation for the members that are not used anymore. -+ * Mark the cpus which are not longer in the mask for -+ * cleanup. - */ -- cpumask_andnot(d->old_domain, d->domain, -- vector_cpumask); -- d->move_in_progress = -- cpumask_intersects(d->old_domain, cpu_online_mask); -- cpumask_and(d->domain, d->domain, vector_cpumask); -- break; -+ cpumask_andnot(d->old_domain, d->domain, vector_cpumask); -+ vector = d->cfg.vector; -+ goto update; - } - - vector = current_vector; -@@ -156,45 +168,60 @@ next: - vector = FIRST_EXTERNAL_VECTOR + offset; - } - -- if (unlikely(current_vector == vector)) { -- cpumask_or(d->old_domain, d->old_domain, -- vector_cpumask); -- cpumask_andnot(vector_cpumask, mask, d->old_domain); -- cpu = cpumask_first_and(vector_cpumask, -- cpu_online_mask); -- continue; -- } -+ /* If the search wrapped around, try the next cpu */ -+ if (unlikely(current_vector == vector)) -+ goto next_cpu; - - if (test_bit(vector, used_vectors)) - goto next; - -- for_each_cpu_and(new_cpu, vector_cpumask, cpu_online_mask) { -+ for_each_cpu(new_cpu, vector_searchmask) { - if (!IS_ERR_OR_NULL(per_cpu(vector_irq, new_cpu)[vector])) - goto next; - } - /* Found one! */ - current_vector = vector; - current_offset = offset; -- if (d->cfg.vector) { -+ /* Schedule the old vector for cleanup on all cpus */ -+ if (d->cfg.vector) - cpumask_copy(d->old_domain, d->domain); -- d->move_in_progress = -- cpumask_intersects(d->old_domain, cpu_online_mask); -- } -- for_each_cpu_and(new_cpu, vector_cpumask, cpu_online_mask) -+ for_each_cpu(new_cpu, vector_searchmask) - per_cpu(vector_irq, new_cpu)[vector] = irq_to_desc(irq); -- d->cfg.vector = vector; -- cpumask_copy(d->domain, vector_cpumask); -- err = 0; -- break; -- } -+ goto update; - -- if (!err) { -- /* cache destination APIC IDs into cfg->dest_apicid */ -- err = apic->cpu_mask_to_apicid_and(mask, d->domain, -- &d->cfg.dest_apicid); -+next_cpu: -+ /* -+ * We exclude the current @vector_cpumask from the requested -+ * @mask and try again with the next online cpu in the -+ * result. We cannot modify @mask, so we use @vector_cpumask -+ * as a temporary buffer here as it will be reassigned when -+ * calling apic->vector_allocation_domain() above. 
-+ */ -+ cpumask_or(searched_cpumask, searched_cpumask, vector_cpumask); -+ cpumask_andnot(vector_cpumask, mask, searched_cpumask); -+ cpu = cpumask_first_and(vector_cpumask, cpu_online_mask); -+ continue; - } -+ return -ENOSPC; - -- return err; -+update: -+ /* -+ * Exclude offline cpus from the cleanup mask and set the -+ * move_in_progress flag when the result is not empty. -+ */ -+ cpumask_and(d->old_domain, d->old_domain, cpu_online_mask); -+ d->move_in_progress = !cpumask_empty(d->old_domain); -+ d->cfg.vector = vector; -+ cpumask_copy(d->domain, vector_cpumask); -+success: -+ /* -+ * Cache destination APIC IDs into cfg->dest_apicid. This cannot fail -+ * as we already established, that mask & d->domain & cpu_online_mask -+ * is not empty. -+ */ -+ BUG_ON(apic->cpu_mask_to_apicid_and(mask, d->domain, -+ &d->cfg.dest_apicid)); -+ return 0; - } - - static int assign_irq_vector(int irq, struct apic_chip_data *data, -@@ -224,10 +251,8 @@ static int assign_irq_vector_policy(int irq, int node, - static void clear_irq_vector(int irq, struct apic_chip_data *data) - { - struct irq_desc *desc; -- unsigned long flags; - int cpu, vector; - -- raw_spin_lock_irqsave(&vector_lock, flags); - BUG_ON(!data->cfg.vector); - - vector = data->cfg.vector; -@@ -237,10 +262,13 @@ static void clear_irq_vector(int irq, struct apic_chip_data *data) - data->cfg.vector = 0; - cpumask_clear(data->domain); - -- if (likely(!data->move_in_progress)) { -- raw_spin_unlock_irqrestore(&vector_lock, flags); -+ /* -+ * If move is in progress or the old_domain mask is not empty, -+ * i.e. the cleanup IPI has not been processed yet, we need to remove -+ * the old references to desc from all cpus vector tables. -+ */ -+ if (!data->move_in_progress && cpumask_empty(data->old_domain)) - return; -- } - - desc = irq_to_desc(irq); - for_each_cpu_and(cpu, data->old_domain, cpu_online_mask) { -@@ -253,7 +281,6 @@ static void clear_irq_vector(int irq, struct apic_chip_data *data) - } - } - data->move_in_progress = 0; -- raw_spin_unlock_irqrestore(&vector_lock, flags); - } - - void init_irq_alloc_info(struct irq_alloc_info *info, -@@ -274,19 +301,24 @@ void copy_irq_alloc_info(struct irq_alloc_info *dst, struct irq_alloc_info *src) - static void x86_vector_free_irqs(struct irq_domain *domain, - unsigned int virq, unsigned int nr_irqs) - { -+ struct apic_chip_data *apic_data; - struct irq_data *irq_data; -+ unsigned long flags; - int i; - - for (i = 0; i < nr_irqs; i++) { - irq_data = irq_domain_get_irq_data(x86_vector_domain, virq + i); - if (irq_data && irq_data->chip_data) { -+ raw_spin_lock_irqsave(&vector_lock, flags); - clear_irq_vector(virq + i, irq_data->chip_data); -- free_apic_chip_data(irq_data->chip_data); -+ apic_data = irq_data->chip_data; -+ irq_domain_reset_irq_data(irq_data); -+ raw_spin_unlock_irqrestore(&vector_lock, flags); -+ free_apic_chip_data(apic_data); - #ifdef CONFIG_X86_IO_APIC - if (virq + i < nr_legacy_irqs()) - legacy_irq_data[virq + i] = NULL; - #endif -- irq_domain_reset_irq_data(irq_data); - } - } - } -@@ -404,6 +436,8 @@ int __init arch_early_irq_init(void) - arch_init_htirq_domain(x86_vector_domain); - - BUG_ON(!alloc_cpumask_var(&vector_cpumask, GFP_KERNEL)); -+ BUG_ON(!alloc_cpumask_var(&vector_searchmask, GFP_KERNEL)); -+ BUG_ON(!alloc_cpumask_var(&searched_cpumask, GFP_KERNEL)); - - return arch_early_ioapic_init(); - } -@@ -492,14 +526,7 @@ static int apic_set_affinity(struct irq_data *irq_data, - return -EINVAL; - - err = assign_irq_vector(irq, data, dest); -- if (err) { -- if 
(assign_irq_vector(irq, data, -- irq_data_get_affinity_mask(irq_data))) -- pr_err("Failed to recover vector for irq %d\n", irq); -- return err; -- } -- -- return IRQ_SET_MASK_OK; -+ return err ? err : IRQ_SET_MASK_OK; - } - - static struct irq_chip lapic_controller = { -@@ -511,20 +538,12 @@ static struct irq_chip lapic_controller = { - #ifdef CONFIG_SMP - static void __send_cleanup_vector(struct apic_chip_data *data) - { -- cpumask_var_t cleanup_mask; -- -- if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) { -- unsigned int i; -- -- for_each_cpu_and(i, data->old_domain, cpu_online_mask) -- apic->send_IPI_mask(cpumask_of(i), -- IRQ_MOVE_CLEANUP_VECTOR); -- } else { -- cpumask_and(cleanup_mask, data->old_domain, cpu_online_mask); -- apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR); -- free_cpumask_var(cleanup_mask); -- } -+ raw_spin_lock(&vector_lock); -+ cpumask_and(data->old_domain, data->old_domain, cpu_online_mask); - data->move_in_progress = 0; -+ if (!cpumask_empty(data->old_domain)) -+ apic->send_IPI_mask(data->old_domain, IRQ_MOVE_CLEANUP_VECTOR); -+ raw_spin_unlock(&vector_lock); - } - - void send_cleanup_vector(struct irq_cfg *cfg) -@@ -568,12 +587,25 @@ asmlinkage __visible void smp_irq_move_cleanup_interrupt(void) - goto unlock; - - /* -- * Check if the irq migration is in progress. If so, we -- * haven't received the cleanup request yet for this irq. -+ * Nothing to cleanup if irq migration is in progress -+ * or this cpu is not set in the cleanup mask. - */ -- if (data->move_in_progress) -+ if (data->move_in_progress || -+ !cpumask_test_cpu(me, data->old_domain)) - goto unlock; - -+ /* -+ * We have two cases to handle here: -+ * 1) vector is unchanged but the target mask got reduced -+ * 2) vector and the target mask has changed -+ * -+ * #1 is obvious, but in #2 we have two vectors with the same -+ * irq descriptor: the old and the new vector. So we need to -+ * make sure that we only cleanup the old vector. The new -+ * vector has the current @vector number in the config and -+ * this cpu is part of the target mask. We better leave that -+ * one alone. -+ */ - if (vector == data->cfg.vector && - cpumask_test_cpu(me, data->domain)) - goto unlock; -@@ -591,6 +623,7 @@ asmlinkage __visible void smp_irq_move_cleanup_interrupt(void) - goto unlock; - } - __this_cpu_write(vector_irq[vector], VECTOR_UNUSED); -+ cpumask_clear_cpu(me, data->old_domain); - unlock: - raw_spin_unlock(&desc->lock); - } -@@ -619,12 +652,48 @@ void irq_complete_move(struct irq_cfg *cfg) - __irq_complete_move(cfg, ~get_irq_regs()->orig_ax); - } - --void irq_force_complete_move(int irq) -+/* -+ * Called with @desc->lock held and interrupts disabled. -+ */ -+void irq_force_complete_move(struct irq_desc *desc) - { -- struct irq_cfg *cfg = irq_cfg(irq); -+ struct irq_data *irqdata = irq_desc_get_irq_data(desc); -+ struct apic_chip_data *data = apic_chip_data(irqdata); -+ struct irq_cfg *cfg = data ? &data->cfg : NULL; - -- if (cfg) -- __irq_complete_move(cfg, cfg->vector); -+ if (!cfg) -+ return; -+ -+ __irq_complete_move(cfg, cfg->vector); -+ -+ /* -+ * This is tricky. If the cleanup of @data->old_domain has not been -+ * done yet, then the following setaffinity call will fail with -+ * -EBUSY. This can leave the interrupt in a stale state. -+ * -+ * The cleanup cannot make progress because we hold @desc->lock. So in -+ * case @data->old_domain is not yet cleaned up, we need to drop the -+ * lock and acquire it again. 
@desc cannot go away, because the -+ * hotplug code holds the sparse irq lock. -+ */ -+ raw_spin_lock(&vector_lock); -+ /* Clean out all offline cpus (including ourself) first. */ -+ cpumask_and(data->old_domain, data->old_domain, cpu_online_mask); -+ while (!cpumask_empty(data->old_domain)) { -+ raw_spin_unlock(&vector_lock); -+ raw_spin_unlock(&desc->lock); -+ cpu_relax(); -+ raw_spin_lock(&desc->lock); -+ /* -+ * Reevaluate apic_chip_data. It might have been cleared after -+ * we dropped @desc->lock. -+ */ -+ data = apic_chip_data(irqdata); -+ if (!data) -+ return; -+ raw_spin_lock(&vector_lock); -+ } -+ raw_spin_unlock(&vector_lock); - } - #endif - -diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c -index f8062aa..61521dc 100644 ---- a/arch/x86/kernel/irq.c -+++ b/arch/x86/kernel/irq.c -@@ -462,7 +462,7 @@ void fixup_irqs(void) - * non intr-remapping case, we can't wait till this interrupt - * arrives at this cpu before completing the irq move. - */ -- irq_force_complete_move(irq); -+ irq_force_complete_move(desc); - - if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) { - break_affinity = 1; -@@ -470,6 +470,15 @@ void fixup_irqs(void) - } - - chip = irq_data_get_irq_chip(data); -+ /* -+ * The interrupt descriptor might have been cleaned up -+ * already, but it is not yet removed from the radix tree -+ */ -+ if (!chip) { -+ raw_spin_unlock(&desc->lock); -+ continue; -+ } -+ - if (!irqd_can_move_in_process_context(data) && chip->irq_mask) - chip->irq_mask(data); - -diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c -index 1505587..b9b09fe 100644 ---- a/arch/x86/kvm/emulate.c -+++ b/arch/x86/kvm/emulate.c -@@ -650,10 +650,10 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt, - u16 sel; - - la = seg_base(ctxt, addr.seg) + addr.ea; -- *linear = la; - *max_size = 0; - switch (mode) { - case X86EMUL_MODE_PROT64: -+ *linear = la; - if (is_noncanonical_address(la)) - goto bad; - -@@ -662,6 +662,7 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt, - goto bad; - break; - default: -+ *linear = la = (u32)la; - usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL, - addr.seg); - if (!usable) -@@ -689,7 +690,6 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt, - if (size > *max_size) - goto bad; - } -- la &= (u32)-1; - break; - } - if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0)) -diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h -index 3058a22..7be8a25 100644 ---- a/arch/x86/kvm/paging_tmpl.h -+++ b/arch/x86/kvm/paging_tmpl.h -@@ -249,7 +249,7 @@ static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu, - return ret; - - kvm_vcpu_mark_page_dirty(vcpu, table_gfn); -- walker->ptes[level] = pte; -+ walker->ptes[level - 1] = pte; - } - return 0; - } -diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c -index 9a2ed89..6ef3856 100644 ---- a/arch/x86/kvm/x86.c -+++ b/arch/x86/kvm/x86.c -@@ -2736,6 +2736,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) - } - - kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); -+ vcpu->arch.switch_db_regs |= KVM_DEBUGREG_RELOAD; - } - - void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) -diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c -index b2fd67d..ef05755 100644 ---- a/arch/x86/mm/mpx.c -+++ b/arch/x86/mm/mpx.c -@@ -123,7 +123,7 @@ static int get_reg_offset(struct insn *insn, struct pt_regs *regs, - break; - } - -- if (regno > nr_registers) { -+ if (regno >= nr_registers) { - WARN_ONCE(1, "decoded an instruction with an invalid 
register"); - return -EINVAL; - } -diff --git a/block/bio.c b/block/bio.c -index 4f184d9..d4d1443 100644 ---- a/block/bio.c -+++ b/block/bio.c -@@ -1090,9 +1090,12 @@ int bio_uncopy_user(struct bio *bio) - if (!bio_flagged(bio, BIO_NULL_MAPPED)) { - /* - * if we're in a workqueue, the request is orphaned, so -- * don't copy into a random user address space, just free. -+ * don't copy into a random user address space, just free -+ * and return -EINTR so user space doesn't expect any data. - */ -- if (current->mm && bio_data_dir(bio) == READ) -+ if (!current->mm) -+ ret = -EINTR; -+ else if (bio_data_dir(bio) == READ) - ret = bio_copy_to_iter(bio, bmd->iter); - if (bmd->is_our_pages) - bio_free_pages(bio); -diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c -index 3405f7a..5fdac39 100644 ---- a/drivers/acpi/acpi_video.c -+++ b/drivers/acpi/acpi_video.c -@@ -465,6 +465,15 @@ static struct dmi_system_id video_dmi_table[] = { - * as brightness control does not work. - */ - { -+ /* https://bugzilla.kernel.org/show_bug.cgi?id=21012 */ -+ .callback = video_disable_backlight_sysfs_if, -+ .ident = "Toshiba Portege R700", -+ .matches = { -+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), -+ DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE R700"), -+ }, -+ }, -+ { - /* https://bugs.freedesktop.org/show_bug.cgi?id=82634 */ - .callback = video_disable_backlight_sysfs_if, - .ident = "Toshiba Portege R830", -@@ -473,6 +482,15 @@ static struct dmi_system_id video_dmi_table[] = { - DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE R830"), - }, - }, -+ { -+ /* https://bugzilla.kernel.org/show_bug.cgi?id=21012 */ -+ .callback = video_disable_backlight_sysfs_if, -+ .ident = "Toshiba Satellite R830", -+ .matches = { -+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), -+ DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE R830"), -+ }, -+ }, - /* - * Some machine's _DOD IDs don't have bit 31(Device ID Scheme) set - * but the IDs actually follow the Device ID Scheme. -diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c -index aa45d48..11d8209 100644 ---- a/drivers/acpi/nfit.c -+++ b/drivers/acpi/nfit.c -@@ -468,37 +468,16 @@ static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc, - nfit_mem->bdw = NULL; - } - --static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc, -+static void nfit_mem_init_bdw(struct acpi_nfit_desc *acpi_desc, - struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa) - { - u16 dcr = __to_nfit_memdev(nfit_mem)->region_index; - struct nfit_memdev *nfit_memdev; - struct nfit_flush *nfit_flush; -- struct nfit_dcr *nfit_dcr; - struct nfit_bdw *nfit_bdw; - struct nfit_idt *nfit_idt; - u16 idt_idx, range_index; - -- list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) { -- if (nfit_dcr->dcr->region_index != dcr) -- continue; -- nfit_mem->dcr = nfit_dcr->dcr; -- break; -- } -- -- if (!nfit_mem->dcr) { -- dev_dbg(acpi_desc->dev, "SPA %d missing:%s%s\n", -- spa->range_index, __to_nfit_memdev(nfit_mem) -- ? "" : " MEMDEV", nfit_mem->dcr ? 
"" : " DCR"); -- return -ENODEV; -- } -- -- /* -- * We've found enough to create an nvdimm, optionally -- * find an associated BDW -- */ -- list_add(&nfit_mem->list, &acpi_desc->dimms); -- - list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) { - if (nfit_bdw->bdw->region_index != dcr) - continue; -@@ -507,12 +486,12 @@ static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc, - } - - if (!nfit_mem->bdw) -- return 0; -+ return; - - nfit_mem_find_spa_bdw(acpi_desc, nfit_mem); - - if (!nfit_mem->spa_bdw) -- return 0; -+ return; - - range_index = nfit_mem->spa_bdw->range_index; - list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { -@@ -537,8 +516,6 @@ static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc, - } - break; - } -- -- return 0; - } - - static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc, -@@ -547,7 +524,6 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc, - struct nfit_mem *nfit_mem, *found; - struct nfit_memdev *nfit_memdev; - int type = nfit_spa_type(spa); -- u16 dcr; - - switch (type) { - case NFIT_SPA_DCR: -@@ -558,14 +534,18 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc, - } - - list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { -- int rc; -+ struct nfit_dcr *nfit_dcr; -+ u32 device_handle; -+ u16 dcr; - - if (nfit_memdev->memdev->range_index != spa->range_index) - continue; - found = NULL; - dcr = nfit_memdev->memdev->region_index; -+ device_handle = nfit_memdev->memdev->device_handle; - list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) -- if (__to_nfit_memdev(nfit_mem)->region_index == dcr) { -+ if (__to_nfit_memdev(nfit_mem)->device_handle -+ == device_handle) { - found = nfit_mem; - break; - } -@@ -578,6 +558,31 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc, - if (!nfit_mem) - return -ENOMEM; - INIT_LIST_HEAD(&nfit_mem->list); -+ list_add(&nfit_mem->list, &acpi_desc->dimms); -+ } -+ -+ list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) { -+ if (nfit_dcr->dcr->region_index != dcr) -+ continue; -+ /* -+ * Record the control region for the dimm. For -+ * the ACPI 6.1 case, where there are separate -+ * control regions for the pmem vs blk -+ * interfaces, be sure to record the extended -+ * blk details. 
-+ */ -+ if (!nfit_mem->dcr) -+ nfit_mem->dcr = nfit_dcr->dcr; -+ else if (nfit_mem->dcr->windows == 0 -+ && nfit_dcr->dcr->windows) -+ nfit_mem->dcr = nfit_dcr->dcr; -+ break; -+ } -+ -+ if (dcr && !nfit_mem->dcr) { -+ dev_err(acpi_desc->dev, "SPA %d missing DCR %d\n", -+ spa->range_index, dcr); -+ return -ENODEV; - } - - if (type == NFIT_SPA_DCR) { -@@ -594,6 +599,7 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc, - nfit_mem->idt_dcr = nfit_idt->idt; - break; - } -+ nfit_mem_init_bdw(acpi_desc, nfit_mem, spa); - } else { - /* - * A single dimm may belong to multiple SPA-PM -@@ -602,13 +608,6 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc, - */ - nfit_mem->memdev_pmem = nfit_memdev->memdev; - } -- -- if (found) -- continue; -- -- rc = nfit_mem_add(acpi_desc, nfit_mem, spa); -- if (rc) -- return rc; - } - - return 0; -diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c -index daaf1c4..80e55cb 100644 ---- a/drivers/acpi/video_detect.c -+++ b/drivers/acpi/video_detect.c -@@ -135,14 +135,6 @@ static const struct dmi_system_id video_detect_dmi_table[] = { - DMI_MATCH(DMI_PRODUCT_NAME, "UL30A"), - }, - }, -- { -- .callback = video_detect_force_vendor, -- .ident = "Dell Inspiron 5737", -- .matches = { -- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), -- DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5737"), -- }, -- }, - - /* - * These models have a working acpi_video backlight control, and using -diff --git a/drivers/android/binder.c b/drivers/android/binder.c -index a39e85f..7d00b7a 100644 ---- a/drivers/android/binder.c -+++ b/drivers/android/binder.c -@@ -2074,7 +2074,7 @@ static int binder_thread_write(struct binder_proc *proc, - if (get_user(cookie, (binder_uintptr_t __user *)ptr)) - return -EFAULT; - -- ptr += sizeof(void *); -+ ptr += sizeof(cookie); - list_for_each_entry(w, &proc->delivered_death, entry) { - struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work); - -diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c -index cdf6215..7dbba38 100644 ---- a/drivers/ata/libata-sff.c -+++ b/drivers/ata/libata-sff.c -@@ -997,12 +997,9 @@ static inline int ata_hsm_ok_in_wq(struct ata_port *ap, - static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq) - { - struct ata_port *ap = qc->ap; -- unsigned long flags; - - if (ap->ops->error_handler) { - if (in_wq) { -- spin_lock_irqsave(ap->lock, flags); -- - /* EH might have kicked in while host lock is - * released. - */ -@@ -1014,8 +1011,6 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq) - } else - ata_port_freeze(ap); - } -- -- spin_unlock_irqrestore(ap->lock, flags); - } else { - if (likely(!(qc->err_mask & AC_ERR_HSM))) - ata_qc_complete(qc); -@@ -1024,10 +1019,8 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq) - } - } else { - if (in_wq) { -- spin_lock_irqsave(ap->lock, flags); - ata_sff_irq_on(ap); - ata_qc_complete(qc); -- spin_unlock_irqrestore(ap->lock, flags); - } else - ata_qc_complete(qc); - } -@@ -1048,9 +1041,10 @@ int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc, - { - struct ata_link *link = qc->dev->link; - struct ata_eh_info *ehi = &link->eh_info; -- unsigned long flags = 0; - int poll_next; - -+ lockdep_assert_held(ap->lock); -+ - WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0); - - /* Make sure ata_sff_qc_issue() does not throw things -@@ -1112,14 +1106,6 @@ fsm_start: - } - } - -- /* Send the CDB (atapi) or the first data block (ata pio out). 
-- * During the state transition, interrupt handler shouldn't -- * be invoked before the data transfer is complete and -- * hsm_task_state is changed. Hence, the following locking. -- */ -- if (in_wq) -- spin_lock_irqsave(ap->lock, flags); -- - if (qc->tf.protocol == ATA_PROT_PIO) { - /* PIO data out protocol. - * send first data block. -@@ -1135,9 +1121,6 @@ fsm_start: - /* send CDB */ - atapi_send_cdb(ap, qc); - -- if (in_wq) -- spin_unlock_irqrestore(ap->lock, flags); -- - /* if polling, ata_sff_pio_task() handles the rest. - * otherwise, interrupt handler takes over from here. - */ -@@ -1361,12 +1344,14 @@ static void ata_sff_pio_task(struct work_struct *work) - u8 status; - int poll_next; - -+ spin_lock_irq(ap->lock); -+ - BUG_ON(ap->sff_pio_task_link == NULL); - /* qc can be NULL if timeout occurred */ - qc = ata_qc_from_tag(ap, link->active_tag); - if (!qc) { - ap->sff_pio_task_link = NULL; -- return; -+ goto out_unlock; - } - - fsm_start: -@@ -1381,11 +1366,14 @@ fsm_start: - */ - status = ata_sff_busy_wait(ap, ATA_BUSY, 5); - if (status & ATA_BUSY) { -+ spin_unlock_irq(ap->lock); - ata_msleep(ap, 2); -+ spin_lock_irq(ap->lock); -+ - status = ata_sff_busy_wait(ap, ATA_BUSY, 10); - if (status & ATA_BUSY) { - ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE); -- return; -+ goto out_unlock; - } - } - -@@ -1402,6 +1390,8 @@ fsm_start: - */ - if (poll_next) - goto fsm_start; -+out_unlock: -+ spin_unlock_irq(ap->lock); - } - - /** -diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c -index 92f0ee3..9688971 100644 ---- a/drivers/bluetooth/btusb.c -+++ b/drivers/bluetooth/btusb.c -@@ -153,6 +153,10 @@ static const struct usb_device_id btusb_table[] = { - { USB_VENDOR_AND_INTERFACE_INFO(0x13d3, 0xff, 0x01, 0x01), - .driver_info = BTUSB_BCM_PATCHRAM }, - -+ /* Toshiba Corp - Broadcom based */ -+ { USB_VENDOR_AND_INTERFACE_INFO(0x0930, 0xff, 0x01, 0x01), -+ .driver_info = BTUSB_BCM_PATCHRAM }, -+ - /* Intel Bluetooth USB Bootloader (RAM module) */ - { USB_DEVICE(0x8087, 0x0a5a), - .driver_info = BTUSB_INTEL_BOOT | BTUSB_BROKEN_ISOC }, -diff --git a/drivers/clk/samsung/clk-cpu.c b/drivers/clk/samsung/clk-cpu.c -index 2fe37f7..813003d 100644 ---- a/drivers/clk/samsung/clk-cpu.c -+++ b/drivers/clk/samsung/clk-cpu.c -@@ -148,6 +148,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata, - unsigned long alt_prate = clk_get_rate(cpuclk->alt_parent); - unsigned long alt_div = 0, alt_div_mask = DIV_MASK; - unsigned long div0, div1 = 0, mux_reg; -+ unsigned long flags; - - /* find out the divider values to use for clock data */ - while ((cfg_data->prate * 1000) != ndata->new_rate) { -@@ -156,7 +157,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata, - cfg_data++; - } - -- spin_lock(cpuclk->lock); -+ spin_lock_irqsave(cpuclk->lock, flags); - - /* - * For the selected PLL clock frequency, get the pre-defined divider -@@ -212,7 +213,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata, - DIV_MASK_ALL); - } - -- spin_unlock(cpuclk->lock); -+ spin_unlock_irqrestore(cpuclk->lock, flags); - return 0; - } - -@@ -223,6 +224,7 @@ static int exynos_cpuclk_post_rate_change(struct clk_notifier_data *ndata, - const struct exynos_cpuclk_cfg_data *cfg_data = cpuclk->cfg; - unsigned long div = 0, div_mask = DIV_MASK; - unsigned long mux_reg; -+ unsigned long flags; - - /* find out the divider values to use for clock data */ - if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) { -@@ -233,7 +235,7 @@ static int 
exynos_cpuclk_post_rate_change(struct clk_notifier_data *ndata, - } - } - -- spin_lock(cpuclk->lock); -+ spin_lock_irqsave(cpuclk->lock, flags); - - /* select mout_apll as the alternate parent */ - mux_reg = readl(base + E4210_SRC_CPU); -@@ -246,7 +248,7 @@ static int exynos_cpuclk_post_rate_change(struct clk_notifier_data *ndata, - } - - exynos_set_safe_div(base, div, div_mask); -- spin_unlock(cpuclk->lock); -+ spin_unlock_irqrestore(cpuclk->lock, flags); - return 0; - } - -diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c -index 6ee9140..4da2af9 100644 ---- a/drivers/clocksource/tcb_clksrc.c -+++ b/drivers/clocksource/tcb_clksrc.c -@@ -98,7 +98,8 @@ static int tc_shutdown(struct clock_event_device *d) - - __raw_writel(0xff, regs + ATMEL_TC_REG(2, IDR)); - __raw_writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR)); -- clk_disable(tcd->clk); -+ if (!clockevent_state_detached(d)) -+ clk_disable(tcd->clk); - - return 0; - } -diff --git a/drivers/clocksource/vt8500_timer.c b/drivers/clocksource/vt8500_timer.c -index a92e94b..dfc3bb4 100644 ---- a/drivers/clocksource/vt8500_timer.c -+++ b/drivers/clocksource/vt8500_timer.c -@@ -50,6 +50,8 @@ - - #define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t) - -+#define MIN_OSCR_DELTA 16 -+ - static void __iomem *regbase; - - static cycle_t vt8500_timer_read(struct clocksource *cs) -@@ -80,7 +82,7 @@ static int vt8500_timer_set_next_event(unsigned long cycles, - cpu_relax(); - writel((unsigned long)alarm, regbase + TIMER_MATCH_VAL); - -- if ((signed)(alarm - clocksource.read(&clocksource)) <= 16) -+ if ((signed)(alarm - clocksource.read(&clocksource)) <= MIN_OSCR_DELTA) - return -ETIME; - - writel(1, regbase + TIMER_IER_VAL); -@@ -151,7 +153,7 @@ static void __init vt8500_timer_init(struct device_node *np) - pr_err("%s: setup_irq failed for %s\n", __func__, - clockevent.name); - clockevents_config_and_register(&clockevent, VT8500_TIMER_HZ, -- 4, 0xf0000000); -+ MIN_OSCR_DELTA * 2, 0xf0000000); - } - - CLOCKSOURCE_OF_DECLARE(vt8500, "via,vt8500-timer", vt8500_timer_init); -diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c -index b260576..d994b0f 100644 ---- a/drivers/cpufreq/cpufreq_governor.c -+++ b/drivers/cpufreq/cpufreq_governor.c -@@ -356,16 +356,18 @@ static int cpufreq_governor_init(struct cpufreq_policy *policy, - if (!have_governor_per_policy()) - cdata->gdbs_data = dbs_data; - -+ policy->governor_data = dbs_data; -+ - ret = sysfs_create_group(get_governor_parent_kobj(policy), - get_sysfs_attr(dbs_data)); - if (ret) - goto reset_gdbs_data; - -- policy->governor_data = dbs_data; -- - return 0; - - reset_gdbs_data: -+ policy->governor_data = NULL; -+ - if (!have_governor_per_policy()) - cdata->gdbs_data = NULL; - cdata->exit(dbs_data, !policy->governor->initialized); -@@ -386,16 +388,19 @@ static int cpufreq_governor_exit(struct cpufreq_policy *policy, - if (!cdbs->shared || cdbs->shared->policy) - return -EBUSY; - -- policy->governor_data = NULL; - if (!--dbs_data->usage_count) { - sysfs_remove_group(get_governor_parent_kobj(policy), - get_sysfs_attr(dbs_data)); - -+ policy->governor_data = NULL; -+ - if (!have_governor_per_policy()) - cdata->gdbs_data = NULL; - - cdata->exit(dbs_data, policy->governor->initialized == 1); - kfree(dbs_data); -+ } else { -+ policy->governor_data = NULL; - } - - free_common_dbs_info(policy, cdata); -diff --git a/drivers/cpufreq/pxa2xx-cpufreq.c b/drivers/cpufreq/pxa2xx-cpufreq.c -index 1d99c97..0963772 100644 ---- 
a/drivers/cpufreq/pxa2xx-cpufreq.c -+++ b/drivers/cpufreq/pxa2xx-cpufreq.c -@@ -202,7 +202,7 @@ static void __init pxa_cpufreq_init_voltages(void) - } - } - #else --static int pxa_cpufreq_change_voltage(struct pxa_freqs *pxa_freq) -+static int pxa_cpufreq_change_voltage(const struct pxa_freqs *pxa_freq) - { - return 0; - } -diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c -index 370c661..fa00f3a 100644 ---- a/drivers/dma/at_xdmac.c -+++ b/drivers/dma/at_xdmac.c -@@ -1688,6 +1688,7 @@ static int at_xdmac_device_terminate_all(struct dma_chan *chan) - list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node) - at_xdmac_remove_xfer(atchan, desc); - -+ clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status); - clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status); - spin_unlock_irqrestore(&atchan->lock, flags); - -@@ -1820,6 +1821,8 @@ static int atmel_xdmac_resume(struct device *dev) - atchan = to_at_xdmac_chan(chan); - at_xdmac_chan_write(atchan, AT_XDMAC_CC, atchan->save_cc); - if (at_xdmac_chan_is_cyclic(atchan)) { -+ if (at_xdmac_chan_is_paused(atchan)) -+ at_xdmac_device_resume(chan); - at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, atchan->save_cnda); - at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, atchan->save_cndc); - at_xdmac_chan_write(atchan, AT_XDMAC_CIE, atchan->save_cim); -diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c -index 7067b6d..4f099ea 100644 ---- a/drivers/dma/dw/core.c -+++ b/drivers/dma/dw/core.c -@@ -536,16 +536,17 @@ EXPORT_SYMBOL(dw_dma_get_dst_addr); - - /* Called with dwc->lock held and all DMAC interrupts disabled */ - static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc, -- u32 status_err, u32 status_xfer) -+ u32 status_block, u32 status_err, u32 status_xfer) - { - unsigned long flags; - -- if (dwc->mask) { -+ if (status_block & dwc->mask) { - void (*callback)(void *param); - void *callback_param; - - dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n", - channel_readl(dwc, LLP)); -+ dma_writel(dw, CLEAR.BLOCK, dwc->mask); - - callback = dwc->cdesc->period_callback; - callback_param = dwc->cdesc->period_callback_param; -@@ -577,6 +578,7 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc, - channel_writel(dwc, CTL_LO, 0); - channel_writel(dwc, CTL_HI, 0); - -+ dma_writel(dw, CLEAR.BLOCK, dwc->mask); - dma_writel(dw, CLEAR.ERROR, dwc->mask); - dma_writel(dw, CLEAR.XFER, dwc->mask); - -@@ -585,6 +587,9 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc, - - spin_unlock_irqrestore(&dwc->lock, flags); - } -+ -+ /* Re-enable interrupts */ -+ channel_set_bit(dw, MASK.BLOCK, dwc->mask); - } - - /* ------------------------------------------------------------------------- */ -@@ -593,10 +598,12 @@ static void dw_dma_tasklet(unsigned long data) - { - struct dw_dma *dw = (struct dw_dma *)data; - struct dw_dma_chan *dwc; -+ u32 status_block; - u32 status_xfer; - u32 status_err; - int i; - -+ status_block = dma_readl(dw, RAW.BLOCK); - status_xfer = dma_readl(dw, RAW.XFER); - status_err = dma_readl(dw, RAW.ERROR); - -@@ -605,16 +612,15 @@ static void dw_dma_tasklet(unsigned long data) - for (i = 0; i < dw->dma.chancnt; i++) { - dwc = &dw->chan[i]; - if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) -- dwc_handle_cyclic(dw, dwc, status_err, status_xfer); -+ dwc_handle_cyclic(dw, dwc, status_block, status_err, -+ status_xfer); - else if (status_err & (1 << i)) - dwc_handle_error(dw, dwc); - else if (status_xfer & (1 << i)) - dwc_scan_descriptors(dw, dwc); - } - -- /* -- * Re-enable 
interrupts. -- */ -+ /* Re-enable interrupts */ - channel_set_bit(dw, MASK.XFER, dw->all_chan_mask); - channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask); - } -@@ -635,6 +641,7 @@ static irqreturn_t dw_dma_interrupt(int irq, void *dev_id) - * softirq handler. - */ - channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); -+ channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask); - channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask); - - status = dma_readl(dw, STATUS_INT); -@@ -645,6 +652,7 @@ static irqreturn_t dw_dma_interrupt(int irq, void *dev_id) - - /* Try to recover */ - channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1); -+ channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1); - channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1); - channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1); - channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1); -@@ -1111,6 +1119,7 @@ static void dw_dma_off(struct dw_dma *dw) - dma_writel(dw, CFG, 0); - - channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); -+ channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask); - channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask); - channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask); - channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask); -@@ -1216,6 +1225,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan) - - /* Disable interrupts */ - channel_clear_bit(dw, MASK.XFER, dwc->mask); -+ channel_clear_bit(dw, MASK.BLOCK, dwc->mask); - channel_clear_bit(dw, MASK.ERROR, dwc->mask); - - spin_unlock_irqrestore(&dwc->lock, flags); -@@ -1245,7 +1255,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan) - int dw_dma_cyclic_start(struct dma_chan *chan) - { - struct dw_dma_chan *dwc = to_dw_dma_chan(chan); -- struct dw_dma *dw = to_dw_dma(dwc->chan.device); -+ struct dw_dma *dw = to_dw_dma(chan->device); - unsigned long flags; - - if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) { -@@ -1255,25 +1265,10 @@ int dw_dma_cyclic_start(struct dma_chan *chan) - - spin_lock_irqsave(&dwc->lock, flags); - -- /* Assert channel is idle */ -- if (dma_readl(dw, CH_EN) & dwc->mask) { -- dev_err(chan2dev(&dwc->chan), -- "%s: BUG: Attempted to start non-idle channel\n", -- __func__); -- dwc_dump_chan_regs(dwc); -- spin_unlock_irqrestore(&dwc->lock, flags); -- return -EBUSY; -- } -- -- dma_writel(dw, CLEAR.ERROR, dwc->mask); -- dma_writel(dw, CLEAR.XFER, dwc->mask); -+ /* Enable interrupts to perform cyclic transfer */ -+ channel_set_bit(dw, MASK.BLOCK, dwc->mask); - -- /* Setup DMAC channel registers */ -- channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys); -- channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN); -- channel_writel(dwc, CTL_HI, 0); -- -- channel_set_bit(dw, CH_EN, dwc->mask); -+ dwc_dostart(dwc, dwc->cdesc->desc[0]); - - spin_unlock_irqrestore(&dwc->lock, flags); - -@@ -1479,6 +1474,7 @@ void dw_dma_cyclic_free(struct dma_chan *chan) - - dwc_chan_disable(dw, dwc); - -+ dma_writel(dw, CLEAR.BLOCK, dwc->mask); - dma_writel(dw, CLEAR.ERROR, dwc->mask); - dma_writel(dw, CLEAR.XFER, dwc->mask); - -@@ -1567,9 +1563,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) - /* Force dma off, just in case */ - dw_dma_off(dw); - -- /* Disable BLOCK interrupts as well */ -- channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask); -- - /* Create a pool of consistent memory blocks for hardware descriptors */ - dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", chip->dev, - sizeof(struct dw_desc), 4, 0); -diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c -index 
592af5f..5358737 100644 ---- a/drivers/edac/edac_device.c -+++ b/drivers/edac/edac_device.c -@@ -435,16 +435,13 @@ void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev, - */ - void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev) - { -- int status; -- - if (!edac_dev->edac_check) - return; - -- status = cancel_delayed_work(&edac_dev->work); -- if (status == 0) { -- /* workq instance might be running, wait for it */ -- flush_workqueue(edac_workqueue); -- } -+ edac_dev->op_state = OP_OFFLINE; -+ -+ cancel_delayed_work_sync(&edac_dev->work); -+ flush_workqueue(edac_workqueue); - } - - /* -diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c -index 77ecd6a..1b2c218 100644 ---- a/drivers/edac/edac_mc.c -+++ b/drivers/edac/edac_mc.c -@@ -586,18 +586,10 @@ static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec, - */ - static void edac_mc_workq_teardown(struct mem_ctl_info *mci) - { -- int status; -- -- if (mci->op_state != OP_RUNNING_POLL) -- return; -- -- status = cancel_delayed_work(&mci->work); -- if (status == 0) { -- edac_dbg(0, "not canceled, flush the queue\n"); -+ mci->op_state = OP_OFFLINE; - -- /* workq instance might be running, wait for it */ -- flush_workqueue(edac_workqueue); -- } -+ cancel_delayed_work_sync(&mci->work); -+ flush_workqueue(edac_workqueue); - } - - /* -diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c -index a75acea..58aed67 100644 ---- a/drivers/edac/edac_mc_sysfs.c -+++ b/drivers/edac/edac_mc_sysfs.c -@@ -880,21 +880,26 @@ static struct device_type mci_attr_type = { - int edac_create_sysfs_mci_device(struct mem_ctl_info *mci, - const struct attribute_group **groups) - { -+ char *name; - int i, err; - - /* - * The memory controller needs its own bus, in order to avoid - * namespace conflicts at /sys/bus/edac. 
- */ -- mci->bus->name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx); -- if (!mci->bus->name) -+ name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx); -+ if (!name) - return -ENOMEM; - -+ mci->bus->name = name; -+ - edac_dbg(0, "creating bus %s\n", mci->bus->name); - - err = bus_register(mci->bus); -- if (err < 0) -- goto fail_free_name; -+ if (err < 0) { -+ kfree(name); -+ return err; -+ } - - /* get the /sys/devices/system/edac subsys reference */ - mci->dev.type = &mci_attr_type; -@@ -961,8 +966,8 @@ fail_unregister_dimm: - device_unregister(&mci->dev); - fail_unregister_bus: - bus_unregister(mci->bus); --fail_free_name: -- kfree(mci->bus->name); -+ kfree(name); -+ - return err; - } - -@@ -993,10 +998,12 @@ void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci) - - void edac_unregister_sysfs(struct mem_ctl_info *mci) - { -+ const char *name = mci->bus->name; -+ - edac_dbg(1, "Unregistering device %s\n", dev_name(&mci->dev)); - device_unregister(&mci->dev); - bus_unregister(mci->bus); -- kfree(mci->bus->name); -+ kfree(name); - } - - static void mc_attr_release(struct device *dev) -diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c -index 2cf44b4d..b4b3860 100644 ---- a/drivers/edac/edac_pci.c -+++ b/drivers/edac/edac_pci.c -@@ -274,13 +274,12 @@ static void edac_pci_workq_setup(struct edac_pci_ctl_info *pci, - */ - static void edac_pci_workq_teardown(struct edac_pci_ctl_info *pci) - { -- int status; -- - edac_dbg(0, "\n"); - -- status = cancel_delayed_work(&pci->work); -- if (status == 0) -- flush_workqueue(edac_workqueue); -+ pci->op_state = OP_OFFLINE; -+ -+ cancel_delayed_work_sync(&pci->work); -+ flush_workqueue(edac_workqueue); - } - - /* -diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c -index 756eca8..10e6774 100644 ---- a/drivers/firmware/efi/efivars.c -+++ b/drivers/firmware/efi/efivars.c -@@ -221,7 +221,7 @@ sanity_check(struct efi_variable *var, efi_char16_t *name, efi_guid_t vendor, - } - - if ((attributes & ~EFI_VARIABLE_MASK) != 0 || -- efivar_validate(name, data, size) == false) { -+ efivar_validate(vendor, name, data, size) == false) { - printk(KERN_ERR "efivars: Malformed variable content\n"); - return -EINVAL; - } -@@ -447,7 +447,8 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj, - } - - if ((attributes & ~EFI_VARIABLE_MASK) != 0 || -- efivar_validate(name, data, size) == false) { -+ efivar_validate(new_var->VendorGuid, name, data, -+ size) == false) { - printk(KERN_ERR "efivars: Malformed variable content\n"); - return -EINVAL; - } -@@ -540,38 +541,30 @@ static ssize_t efivar_delete(struct file *filp, struct kobject *kobj, - static int - efivar_create_sysfs_entry(struct efivar_entry *new_var) - { -- int i, short_name_size; -+ int short_name_size; - char *short_name; -- unsigned long variable_name_size; -- efi_char16_t *variable_name; -+ unsigned long utf8_name_size; -+ efi_char16_t *variable_name = new_var->var.VariableName; - int ret; - -- variable_name = new_var->var.VariableName; -- variable_name_size = ucs2_strlen(variable_name) * sizeof(efi_char16_t); -- - /* -- * Length of the variable bytes in ASCII, plus the '-' separator, -+ * Length of the variable bytes in UTF8, plus the '-' separator, - * plus the GUID, plus trailing NUL - */ -- short_name_size = variable_name_size / sizeof(efi_char16_t) -- + 1 + EFI_VARIABLE_GUID_LEN + 1; -- -- short_name = kzalloc(short_name_size, GFP_KERNEL); -+ utf8_name_size = ucs2_utf8size(variable_name); -+ short_name_size = utf8_name_size + 1 + EFI_VARIABLE_GUID_LEN + 
1; - -+ short_name = kmalloc(short_name_size, GFP_KERNEL); - if (!short_name) - return -ENOMEM; - -- /* Convert Unicode to normal chars (assume top bits are 0), -- ala UTF-8 */ -- for (i=0; i < (int)(variable_name_size / sizeof(efi_char16_t)); i++) { -- short_name[i] = variable_name[i] & 0xFF; -- } -+ ucs2_as_utf8(short_name, variable_name, short_name_size); -+ - /* This is ugly, but necessary to separate one vendor's - private variables from another's. */ -- -- *(short_name + strlen(short_name)) = '-'; -+ short_name[utf8_name_size] = '-'; - efi_guid_to_str(&new_var->var.VendorGuid, -- short_name + strlen(short_name)); -+ short_name + utf8_name_size + 1); - - new_var->kobj.kset = efivars_kset; - -diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c -index 70a0fb1..7f2ea21 100644 ---- a/drivers/firmware/efi/vars.c -+++ b/drivers/firmware/efi/vars.c -@@ -165,67 +165,133 @@ validate_ascii_string(efi_char16_t *var_name, int match, u8 *buffer, - } - - struct variable_validate { -+ efi_guid_t vendor; - char *name; - bool (*validate)(efi_char16_t *var_name, int match, u8 *data, - unsigned long len); - }; - -+/* -+ * This is the list of variables we need to validate, as well as the -+ * whitelist for what we think is safe not to default to immutable. -+ * -+ * If it has a validate() method that's not NULL, it'll go into the -+ * validation routine. If not, it is assumed valid, but still used for -+ * whitelisting. -+ * -+ * Note that it's sorted by {vendor,name}, but globbed names must come after -+ * any other name with the same prefix. -+ */ - static const struct variable_validate variable_validate[] = { -- { "BootNext", validate_uint16 }, -- { "BootOrder", validate_boot_order }, -- { "DriverOrder", validate_boot_order }, -- { "Boot*", validate_load_option }, -- { "Driver*", validate_load_option }, -- { "ConIn", validate_device_path }, -- { "ConInDev", validate_device_path }, -- { "ConOut", validate_device_path }, -- { "ConOutDev", validate_device_path }, -- { "ErrOut", validate_device_path }, -- { "ErrOutDev", validate_device_path }, -- { "Timeout", validate_uint16 }, -- { "Lang", validate_ascii_string }, -- { "PlatformLang", validate_ascii_string }, -- { "", NULL }, -+ { EFI_GLOBAL_VARIABLE_GUID, "BootNext", validate_uint16 }, -+ { EFI_GLOBAL_VARIABLE_GUID, "BootOrder", validate_boot_order }, -+ { EFI_GLOBAL_VARIABLE_GUID, "Boot*", validate_load_option }, -+ { EFI_GLOBAL_VARIABLE_GUID, "DriverOrder", validate_boot_order }, -+ { EFI_GLOBAL_VARIABLE_GUID, "Driver*", validate_load_option }, -+ { EFI_GLOBAL_VARIABLE_GUID, "ConIn", validate_device_path }, -+ { EFI_GLOBAL_VARIABLE_GUID, "ConInDev", validate_device_path }, -+ { EFI_GLOBAL_VARIABLE_GUID, "ConOut", validate_device_path }, -+ { EFI_GLOBAL_VARIABLE_GUID, "ConOutDev", validate_device_path }, -+ { EFI_GLOBAL_VARIABLE_GUID, "ErrOut", validate_device_path }, -+ { EFI_GLOBAL_VARIABLE_GUID, "ErrOutDev", validate_device_path }, -+ { EFI_GLOBAL_VARIABLE_GUID, "Lang", validate_ascii_string }, -+ { EFI_GLOBAL_VARIABLE_GUID, "OsIndications", NULL }, -+ { EFI_GLOBAL_VARIABLE_GUID, "PlatformLang", validate_ascii_string }, -+ { EFI_GLOBAL_VARIABLE_GUID, "Timeout", validate_uint16 }, -+ { LINUX_EFI_CRASH_GUID, "*", NULL }, -+ { NULL_GUID, "", NULL }, - }; - -+static bool -+variable_matches(const char *var_name, size_t len, const char *match_name, -+ int *match) -+{ -+ for (*match = 0; ; (*match)++) { -+ char c = match_name[*match]; -+ char u = var_name[*match]; -+ -+ /* Wildcard in the matching name means we've matched */ -+ if (c == 
'*') -+ return true; -+ -+ /* Case sensitive match */ -+ if (!c && *match == len) -+ return true; -+ -+ if (c != u) -+ return false; -+ -+ if (!c) -+ return true; -+ } -+ return true; -+} -+ - bool --efivar_validate(efi_char16_t *var_name, u8 *data, unsigned long len) -+efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data, -+ unsigned long data_size) - { - int i; -- u16 *unicode_name = var_name; -+ unsigned long utf8_size; -+ u8 *utf8_name; - -- for (i = 0; variable_validate[i].validate != NULL; i++) { -- const char *name = variable_validate[i].name; -- int match; -+ utf8_size = ucs2_utf8size(var_name); -+ utf8_name = kmalloc(utf8_size + 1, GFP_KERNEL); -+ if (!utf8_name) -+ return false; - -- for (match = 0; ; match++) { -- char c = name[match]; -- u16 u = unicode_name[match]; -+ ucs2_as_utf8(utf8_name, var_name, utf8_size); -+ utf8_name[utf8_size] = '\0'; - -- /* All special variables are plain ascii */ -- if (u > 127) -- return true; -+ for (i = 0; variable_validate[i].name[0] != '\0'; i++) { -+ const char *name = variable_validate[i].name; -+ int match = 0; - -- /* Wildcard in the matching name means we've matched */ -- if (c == '*') -- return variable_validate[i].validate(var_name, -- match, data, len); -+ if (efi_guidcmp(vendor, variable_validate[i].vendor)) -+ continue; - -- /* Case sensitive match */ -- if (c != u) -+ if (variable_matches(utf8_name, utf8_size+1, name, &match)) { -+ if (variable_validate[i].validate == NULL) - break; -- -- /* Reached the end of the string while matching */ -- if (!c) -- return variable_validate[i].validate(var_name, -- match, data, len); -+ kfree(utf8_name); -+ return variable_validate[i].validate(var_name, match, -+ data, data_size); - } - } -- -+ kfree(utf8_name); - return true; - } - EXPORT_SYMBOL_GPL(efivar_validate); - -+bool -+efivar_variable_is_removable(efi_guid_t vendor, const char *var_name, -+ size_t len) -+{ -+ int i; -+ bool found = false; -+ int match = 0; -+ -+ /* -+ * Check if our variable is in the validated variables list -+ */ -+ for (i = 0; variable_validate[i].name[0] != '\0'; i++) { -+ if (efi_guidcmp(variable_validate[i].vendor, vendor)) -+ continue; -+ -+ if (variable_matches(var_name, len, -+ variable_validate[i].name, &match)) { -+ found = true; -+ break; -+ } -+ } -+ -+ /* -+ * If it's in our list, it is removable. 
-+ */ -+ return found; -+} -+EXPORT_SYMBOL_GPL(efivar_variable_is_removable); -+ - static efi_status_t - check_var_size(u32 attributes, unsigned long size) - { -@@ -852,7 +918,7 @@ int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes, - - *set = false; - -- if (efivar_validate(name, data, *size) == false) -+ if (efivar_validate(*vendor, name, data, *size) == false) - return -EINVAL; - - /* -diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile -index 04c2707..ca06601 100644 ---- a/drivers/gpu/drm/amd/amdgpu/Makefile -+++ b/drivers/gpu/drm/amd/amdgpu/Makefile -@@ -22,7 +22,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \ - amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o - - # add asic specific block --amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o gmc_v7_0.o cik_ih.o kv_smc.o kv_dpm.o \ -+amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \ - ci_smc.o ci_dpm.o dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o \ - amdgpu_amdkfd_gfx_v7.o - -@@ -31,6 +31,7 @@ amdgpu-y += \ - - # add GMC block - amdgpu-y += \ -+ gmc_v7_0.o \ - gmc_v8_0.o - - # add IH block -diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h -index 048cfe0..bb1099c 100644 ---- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h -+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h -@@ -604,8 +604,6 @@ struct amdgpu_sa_manager { - uint32_t align; - }; - --struct amdgpu_sa_bo; -- - /* sub-allocation buffer */ - struct amdgpu_sa_bo { - struct list_head olist; -@@ -2314,6 +2312,8 @@ bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo); - int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr, - uint32_t flags); - bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm); -+bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start, -+ unsigned long end); - bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm); - uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, - struct ttm_mem_reg *mem); -diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c -index d5b4213..c961fe0 100644 ---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c -+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c -@@ -1744,15 +1744,20 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon) - } - - /* post card */ -- amdgpu_atom_asic_init(adev->mode_info.atom_context); -+ if (!amdgpu_card_posted(adev)) -+ amdgpu_atom_asic_init(adev->mode_info.atom_context); - - r = amdgpu_resume(adev); -+ if (r) -+ DRM_ERROR("amdgpu_resume failed (%d).\n", r); - - amdgpu_fence_driver_resume(adev); - -- r = amdgpu_ib_ring_tests(adev); -- if (r) -- DRM_ERROR("ib ring test failed (%d).\n", r); -+ if (resume) { -+ r = amdgpu_ib_ring_tests(adev); -+ if (r) -+ DRM_ERROR("ib ring test failed (%d).\n", r); -+ } - - r = amdgpu_late_init(adev); - if (r) -@@ -1788,6 +1793,7 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon) - } - - drm_kms_helper_poll_enable(dev); -+ drm_helper_hpd_irq_event(dev); - - if (fbcon) { - amdgpu_fbdev_set_suspend(adev, 0); -diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c -index 5580d34..0c713a9 100644 ---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c -+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c -@@ -72,8 +72,8 @@ static void amdgpu_flip_work_func(struct work_struct *__work) - - struct drm_crtc *crtc = &amdgpuCrtc->base; - unsigned long flags; -- unsigned i; -- int vpos, hpos, stat, 
min_udelay; -+ unsigned i, repcnt = 4; -+ int vpos, hpos, stat, min_udelay = 0; - struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id]; - - amdgpu_flip_wait_fence(adev, &work->excl); -@@ -96,7 +96,7 @@ static void amdgpu_flip_work_func(struct work_struct *__work) - * In practice this won't execute very often unless on very fast - * machines because the time window for this to happen is very small. - */ -- for (;;) { -+ while (amdgpuCrtc->enabled && repcnt--) { - /* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank - * start in hpos, and to the "fudged earlier" vblank start in - * vpos. -@@ -114,10 +114,22 @@ static void amdgpu_flip_work_func(struct work_struct *__work) - /* Sleep at least until estimated real start of hw vblank */ - spin_unlock_irqrestore(&crtc->dev->event_lock, flags); - min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5); -+ if (min_udelay > vblank->framedur_ns / 2000) { -+ /* Don't wait ridiculously long - something is wrong */ -+ repcnt = 0; -+ break; -+ } - usleep_range(min_udelay, 2 * min_udelay); - spin_lock_irqsave(&crtc->dev->event_lock, flags); - }; - -+ if (!repcnt) -+ DRM_DEBUG_DRIVER("Delay problem on crtc %d: min_udelay %d, " -+ "framedur %d, linedur %d, stat %d, vpos %d, " -+ "hpos %d\n", work->crtc_id, min_udelay, -+ vblank->framedur_ns / 1000, -+ vblank->linedur_ns / 1000, stat, vpos, hpos); -+ - /* do the flip (mmio) */ - adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base); - /* set the flip status */ -diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c -index 0508c5c..8d6668c 100644 ---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c -+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c -@@ -250,11 +250,11 @@ static struct pci_device_id pciidlist[] = { - {0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, - #endif - /* topaz */ -- {0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT}, -- {0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT}, -- {0x1002, 0x6902, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT}, -- {0x1002, 0x6903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT}, -- {0x1002, 0x6907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT}, -+ {0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ}, -+ {0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ}, -+ {0x1002, 0x6902, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ}, -+ {0x1002, 0x6903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ}, -+ {0x1002, 0x6907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ}, - /* tonga */ - {0x1002, 0x6920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA}, - {0x1002, 0x6921, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA}, -diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c -index b1969f2..d4e2780 100644 ---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c -+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c -@@ -142,7 +142,8 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn, - - list_for_each_entry(bo, &node->bos, mn_list) { - -- if (!bo->tbo.ttm || bo->tbo.ttm->state != tt_bound) -+ if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, -+ end)) - continue; - - r = amdgpu_bo_reserve(bo, true); -diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c -index c3ce103..a2a16ac 100644 ---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c -+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c -@@ -399,7 +399,8 
@@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, - } - if (fpfn > bo->placements[i].fpfn) - bo->placements[i].fpfn = fpfn; -- if (lpfn && lpfn < bo->placements[i].lpfn) -+ if (!bo->placements[i].lpfn || -+ (lpfn && lpfn < bo->placements[i].lpfn)) - bo->placements[i].lpfn = lpfn; - bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT; - } -diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c -index 22a8c7d..03fe251 100644 ---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c -+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c -@@ -595,8 +595,6 @@ force: - - /* update display watermarks based on new power state */ - amdgpu_display_bandwidth_update(adev); -- /* update displays */ -- amdgpu_dpm_display_configuration_changed(adev); - - adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs; - adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count; -@@ -616,6 +614,9 @@ force: - - amdgpu_dpm_post_set_power_state(adev); - -+ /* update displays */ -+ amdgpu_dpm_display_configuration_changed(adev); -+ - if (adev->pm.funcs->force_performance_level) { - if (adev->pm.dpm.thermal_active) { - enum amdgpu_dpm_forced_level level = adev->pm.dpm.forced_level; -diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c -index 8b88edb..ca72a2e 100644 ---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c -+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c -@@ -354,12 +354,15 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager, - - for (i = 0, count = 0; i < AMDGPU_MAX_RINGS; ++i) - if (fences[i]) -- fences[count++] = fences[i]; -+ fences[count++] = fence_get(fences[i]); - - if (count) { - spin_unlock(&sa_manager->wq.lock); - t = fence_wait_any_timeout(fences, count, false, - MAX_SCHEDULE_TIMEOUT); -+ for (i = 0; i < count; ++i) -+ fence_put(fences[i]); -+ - r = (t > 0) ? 
0 : t; - spin_lock(&sa_manager->wq.lock); - } else { -diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c -index dd005c3..181ce39 100644 ---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c -+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c -@@ -293,7 +293,8 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync, - fence = to_amdgpu_fence(sync->sync_to[i]); - - /* check if we really need to sync */ -- if (!amdgpu_fence_need_sync(fence, ring)) -+ if (!amdgpu_enable_scheduler && -+ !amdgpu_fence_need_sync(fence, ring)) - continue; - - /* prevent GPU deadlocks */ -@@ -303,7 +304,7 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync, - } - - if (amdgpu_enable_scheduler || !amdgpu_enable_semaphores) { -- r = fence_wait(&fence->base, true); -+ r = fence_wait(sync->sync_to[i], true); - if (r) - return r; - continue; -diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c -index 8a1752f..1cbb16e 100644 ---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c -+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c -@@ -712,7 +712,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm) - 0, PAGE_SIZE, - PCI_DMA_BIDIRECTIONAL); - if (pci_dma_mapping_error(adev->pdev, gtt->ttm.dma_address[i])) { -- while (--i) { -+ while (i--) { - pci_unmap_page(adev->pdev, gtt->ttm.dma_address[i], - PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); - gtt->ttm.dma_address[i] = 0; -@@ -783,6 +783,25 @@ bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm) - return !!gtt->userptr; - } - -+bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start, -+ unsigned long end) -+{ -+ struct amdgpu_ttm_tt *gtt = (void *)ttm; -+ unsigned long size; -+ -+ if (gtt == NULL) -+ return false; -+ -+ if (gtt->ttm.ttm.state != tt_bound || !gtt->userptr) -+ return false; -+ -+ size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE; -+ if (gtt->userptr > end || gtt->userptr + size <= start) -+ return false; -+ -+ return true; -+} -+ - bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm) - { - struct amdgpu_ttm_tt *gtt = (void *)ttm; -@@ -808,7 +827,7 @@ uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, - flags |= AMDGPU_PTE_SNOOPED; - } - -- if (adev->asic_type >= CHIP_TOPAZ) -+ if (adev->asic_type >= CHIP_TONGA) - flags |= AMDGPU_PTE_EXECUTABLE; - - flags |= AMDGPU_PTE_READABLE; -diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c -index b53d273..39adbb6 100644 ---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c -+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c -@@ -1010,13 +1010,13 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, - return -EINVAL; - - /* make sure object fit at this offset */ -- eaddr = saddr + size; -+ eaddr = saddr + size - 1; - if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo))) - return -EINVAL; - - last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE; -- if (last_pfn > adev->vm_manager.max_pfn) { -- dev_err(adev->dev, "va above limit (0x%08X > 0x%08X)\n", -+ if (last_pfn >= adev->vm_manager.max_pfn) { -+ dev_err(adev->dev, "va above limit (0x%08X >= 0x%08X)\n", - last_pfn, adev->vm_manager.max_pfn); - return -EINVAL; - } -@@ -1025,7 +1025,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, - eaddr /= AMDGPU_GPU_PAGE_SIZE; - - spin_lock(&vm->it_lock); -- it = interval_tree_iter_first(&vm->va, saddr, eaddr - 1); -+ it = interval_tree_iter_first(&vm->va, saddr, eaddr); - spin_unlock(&vm->it_lock); - if (it) { - struct amdgpu_bo_va_mapping *tmp; -@@ -1046,7 +1046,7 @@ int amdgpu_vm_bo_map(struct 
amdgpu_device *adev, - - INIT_LIST_HEAD(&mapping->list); - mapping->it.start = saddr; -- mapping->it.last = eaddr - 1; -+ mapping->it.last = eaddr; - mapping->offset = offset; - mapping->flags = flags; - -diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c -index e1dcab9..4cb45f4 100644 ---- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c -+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c -@@ -90,7 +90,6 @@ MODULE_FIRMWARE("amdgpu/topaz_ce.bin"); - MODULE_FIRMWARE("amdgpu/topaz_pfp.bin"); - MODULE_FIRMWARE("amdgpu/topaz_me.bin"); - MODULE_FIRMWARE("amdgpu/topaz_mec.bin"); --MODULE_FIRMWARE("amdgpu/topaz_mec2.bin"); - MODULE_FIRMWARE("amdgpu/topaz_rlc.bin"); - - MODULE_FIRMWARE("amdgpu/fiji_ce.bin"); -@@ -807,7 +806,8 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev) - adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version); - adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version); - -- if (adev->asic_type != CHIP_STONEY) { -+ if ((adev->asic_type != CHIP_STONEY) && -+ (adev->asic_type != CHIP_TOPAZ)) { - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name); - err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev); - if (!err) { -diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c -index ed8abb5..272110c 100644 ---- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c -+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c -@@ -42,9 +42,39 @@ static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev); - - MODULE_FIRMWARE("radeon/bonaire_mc.bin"); - MODULE_FIRMWARE("radeon/hawaii_mc.bin"); -+MODULE_FIRMWARE("amdgpu/topaz_mc.bin"); -+ -+static const u32 golden_settings_iceland_a11[] = -+{ -+ mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff, -+ mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff, -+ mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff, -+ mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff -+}; -+ -+static const u32 iceland_mgcg_cgcg_init[] = -+{ -+ mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104 -+}; -+ -+static void gmc_v7_0_init_golden_registers(struct amdgpu_device *adev) -+{ -+ switch (adev->asic_type) { -+ case CHIP_TOPAZ: -+ amdgpu_program_register_sequence(adev, -+ iceland_mgcg_cgcg_init, -+ (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init)); -+ amdgpu_program_register_sequence(adev, -+ golden_settings_iceland_a11, -+ (const u32)ARRAY_SIZE(golden_settings_iceland_a11)); -+ break; -+ default: -+ break; -+ } -+} - - /** -- * gmc8_mc_wait_for_idle - wait for MC idle callback. -+ * gmc7_mc_wait_for_idle - wait for MC idle callback. 
- * - * @adev: amdgpu_device pointer - * -@@ -132,13 +162,20 @@ static int gmc_v7_0_init_microcode(struct amdgpu_device *adev) - case CHIP_HAWAII: - chip_name = "hawaii"; - break; -+ case CHIP_TOPAZ: -+ chip_name = "topaz"; -+ break; - case CHIP_KAVERI: - case CHIP_KABINI: - return 0; - default: BUG(); - } - -- snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); -+ if (adev->asic_type == CHIP_TOPAZ) -+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name); -+ else -+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); -+ - err = request_firmware(&adev->mc.fw, fw_name, adev->dev); - if (err) - goto out; -@@ -980,6 +1017,8 @@ static int gmc_v7_0_hw_init(void *handle) - int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - -+ gmc_v7_0_init_golden_registers(adev); -+ - gmc_v7_0_mc_program(adev); - - if (!(adev->flags & AMD_IS_APU)) { -diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c -index d390284..ba4ad00 100644 ---- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c -+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c -@@ -42,9 +42,7 @@ - static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev); - static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev); - --MODULE_FIRMWARE("amdgpu/topaz_mc.bin"); - MODULE_FIRMWARE("amdgpu/tonga_mc.bin"); --MODULE_FIRMWARE("amdgpu/fiji_mc.bin"); - - static const u32 golden_settings_tonga_a11[] = - { -@@ -75,19 +73,6 @@ static const u32 fiji_mgcg_cgcg_init[] = - mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104 - }; - --static const u32 golden_settings_iceland_a11[] = --{ -- mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff, -- mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff, -- mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff, -- mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff --}; -- --static const u32 iceland_mgcg_cgcg_init[] = --{ -- mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104 --}; -- - static const u32 cz_mgcg_cgcg_init[] = - { - mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104 -@@ -102,14 +87,6 @@ static const u32 stoney_mgcg_cgcg_init[] = - static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev) - { - switch (adev->asic_type) { -- case CHIP_TOPAZ: -- amdgpu_program_register_sequence(adev, -- iceland_mgcg_cgcg_init, -- (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init)); -- amdgpu_program_register_sequence(adev, -- golden_settings_iceland_a11, -- (const u32)ARRAY_SIZE(golden_settings_iceland_a11)); -- break; - case CHIP_FIJI: - amdgpu_program_register_sequence(adev, - fiji_mgcg_cgcg_init, -@@ -229,15 +206,10 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev) - DRM_DEBUG("\n"); - - switch (adev->asic_type) { -- case CHIP_TOPAZ: -- chip_name = "topaz"; -- break; - case CHIP_TONGA: - chip_name = "tonga"; - break; - case CHIP_FIJI: -- chip_name = "fiji"; -- break; - case CHIP_CARRIZO: - case CHIP_STONEY: - return 0; -@@ -1003,7 +975,7 @@ static int gmc_v8_0_hw_init(void *handle) - - gmc_v8_0_mc_program(adev); - -- if (!(adev->flags & AMD_IS_APU)) { -+ if (adev->asic_type == CHIP_TONGA) { - r = gmc_v8_0_mc_load_microcode(adev); - if (r) { - DRM_ERROR("Failed to load MC firmware!\n"); -diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c -index 966d4b2..090486c 100644 ---- a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c -+++ b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c -@@ -432,7 +432,7 @@ static uint32_t iceland_smu_get_mask_for_fw_type(uint32_t fw_type) - case AMDGPU_UCODE_ID_CP_ME: - return 
UCODE_ID_CP_ME_MASK; - case AMDGPU_UCODE_ID_CP_MEC1: -- return UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT1_MASK | UCODE_ID_CP_MEC_JT2_MASK; -+ return UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT1_MASK; - case AMDGPU_UCODE_ID_CP_MEC2: - return UCODE_ID_CP_MEC_MASK; - case AMDGPU_UCODE_ID_RLC_G: -@@ -522,12 +522,6 @@ static int iceland_smu_request_load_fw(struct amdgpu_device *adev) - return -EINVAL; - } - -- if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT2, -- &toc->entry[toc->num_entries++])) { -- DRM_ERROR("Failed to get firmware entry for MEC_JT2\n"); -- return -EINVAL; -- } -- - if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA0, - &toc->entry[toc->num_entries++])) { - DRM_ERROR("Failed to get firmware entry for SDMA0\n"); -@@ -550,8 +544,8 @@ static int iceland_smu_request_load_fw(struct amdgpu_device *adev) - UCODE_ID_CP_ME_MASK | - UCODE_ID_CP_PFP_MASK | - UCODE_ID_CP_MEC_MASK | -- UCODE_ID_CP_MEC_JT1_MASK | -- UCODE_ID_CP_MEC_JT2_MASK; -+ UCODE_ID_CP_MEC_JT1_MASK; -+ - - if (iceland_send_msg_to_smc_with_parameter_without_waiting(adev, PPSMC_MSG_LoadUcodes, fw_to_load)) { - DRM_ERROR("Fail to request SMU load ucode\n"); -diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c b/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c -index 2049038..63d6cb3 100644 ---- a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c -+++ b/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c -@@ -122,25 +122,12 @@ static int tonga_dpm_hw_fini(void *handle) - - static int tonga_dpm_suspend(void *handle) - { -- return 0; -+ return tonga_dpm_hw_fini(handle); - } - - static int tonga_dpm_resume(void *handle) - { -- int ret; -- struct amdgpu_device *adev = (struct amdgpu_device *)handle; -- -- mutex_lock(&adev->pm.mutex); -- -- ret = tonga_smu_start(adev); -- if (ret) { -- DRM_ERROR("SMU start failed\n"); -- goto fail; -- } -- --fail: -- mutex_unlock(&adev->pm.mutex); -- return ret; -+ return tonga_dpm_hw_init(handle); - } - - static int tonga_dpm_set_clockgating_state(void *handle, -diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c -index 2adc1c8..7628eb4 100644 ---- a/drivers/gpu/drm/amd/amdgpu/vi.c -+++ b/drivers/gpu/drm/amd/amdgpu/vi.c -@@ -60,6 +60,7 @@ - #include "vi.h" - #include "vi_dpm.h" - #include "gmc_v8_0.h" -+#include "gmc_v7_0.h" - #include "gfx_v8_0.h" - #include "sdma_v2_4.h" - #include "sdma_v3_0.h" -@@ -1128,10 +1129,10 @@ static const struct amdgpu_ip_block_version tonga_ip_blocks[] = - }, - { - .type = AMD_IP_BLOCK_TYPE_GMC, -- .major = 8, -- .minor = 0, -+ .major = 7, -+ .minor = 4, - .rev = 0, -- .funcs = &gmc_v8_0_ip_funcs, -+ .funcs = &gmc_v7_0_ip_funcs, - }, - { - .type = AMD_IP_BLOCK_TYPE_IH, -diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c -index 809959d..39d7e2e 100644 ---- a/drivers/gpu/drm/drm_dp_mst_topology.c -+++ b/drivers/gpu/drm/drm_dp_mst_topology.c -@@ -798,6 +798,18 @@ static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad) - return mstb; - } - -+static void drm_dp_free_mst_port(struct kref *kref); -+ -+static void drm_dp_free_mst_branch_device(struct kref *kref) -+{ -+ struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref); -+ if (mstb->port_parent) { -+ if (list_empty(&mstb->port_parent->next)) -+ kref_put(&mstb->port_parent->kref, drm_dp_free_mst_port); -+ } -+ kfree(mstb); -+} -+ - static void drm_dp_destroy_mst_branch_device(struct kref *kref) - { - struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref); -@@ -805,6 
+817,15 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref) - bool wake_tx = false; - - /* -+ * init kref again to be used by ports to remove mst branch when it is -+ * not needed anymore -+ */ -+ kref_init(kref); -+ -+ if (mstb->port_parent && list_empty(&mstb->port_parent->next)) -+ kref_get(&mstb->port_parent->kref); -+ -+ /* - * destroy all ports - don't need lock - * as there are no more references to the mst branch - * device at this point. -@@ -830,7 +851,8 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref) - - if (wake_tx) - wake_up(&mstb->mgr->tx_waitq); -- kfree(mstb); -+ -+ kref_put(kref, drm_dp_free_mst_branch_device); - } - - static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb) -@@ -878,6 +900,7 @@ static void drm_dp_destroy_port(struct kref *kref) - * from an EDID retrieval */ - - mutex_lock(&mgr->destroy_connector_lock); -+ kref_get(&port->parent->kref); - list_add(&port->next, &mgr->destroy_connector_list); - mutex_unlock(&mgr->destroy_connector_lock); - schedule_work(&mgr->destroy_connector_work); -@@ -973,17 +996,17 @@ static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u - static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port, - u8 *rad) - { -- int lct = port->parent->lct; -+ int parent_lct = port->parent->lct; - int shift = 4; -- int idx = lct / 2; -- if (lct > 1) { -- memcpy(rad, port->parent->rad, idx); -- shift = (lct % 2) ? 4 : 0; -+ int idx = (parent_lct - 1) / 2; -+ if (parent_lct > 1) { -+ memcpy(rad, port->parent->rad, idx + 1); -+ shift = (parent_lct % 2) ? 4 : 0; - } else - rad[0] = 0; - - rad[idx] |= port->port_num << shift; -- return lct + 1; -+ return parent_lct + 1; - } - - /* -@@ -1013,18 +1036,27 @@ static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port) - return send_link; - } - --static void drm_dp_check_port_guid(struct drm_dp_mst_branch *mstb, -- struct drm_dp_mst_port *port) -+static void drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid) - { - int ret; -- if (port->dpcd_rev >= 0x12) { -- port->guid_valid = drm_dp_validate_guid(mstb->mgr, port->guid); -- if (!port->guid_valid) { -- ret = drm_dp_send_dpcd_write(mstb->mgr, -- port, -- DP_GUID, -- 16, port->guid); -- port->guid_valid = true; -+ -+ memcpy(mstb->guid, guid, 16); -+ -+ if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) { -+ if (mstb->port_parent) { -+ ret = drm_dp_send_dpcd_write( -+ mstb->mgr, -+ mstb->port_parent, -+ DP_GUID, -+ 16, -+ mstb->guid); -+ } else { -+ -+ ret = drm_dp_dpcd_write( -+ mstb->mgr->aux, -+ DP_GUID, -+ mstb->guid, -+ 16); - } - } - } -@@ -1039,7 +1071,7 @@ static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb, - snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id); - for (i = 0; i < (mstb->lct - 1); i++) { - int shift = (i % 2) ? 
0 : 4; -- int port_num = mstb->rad[i / 2] >> shift; -+ int port_num = (mstb->rad[i / 2] >> shift) & 0xf; - snprintf(temp, sizeof(temp), "-%d", port_num); - strlcat(proppath, temp, proppath_size); - } -@@ -1081,7 +1113,6 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb, - port->dpcd_rev = port_msg->dpcd_revision; - port->num_sdp_streams = port_msg->num_sdp_streams; - port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks; -- memcpy(port->guid, port_msg->peer_guid, 16); - - /* manage mstb port lists with mgr lock - take a reference - for this list */ -@@ -1094,11 +1125,9 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb, - - if (old_ddps != port->ddps) { - if (port->ddps) { -- drm_dp_check_port_guid(mstb, port); - if (!port->input) - drm_dp_send_enum_path_resources(mstb->mgr, mstb, port); - } else { -- port->guid_valid = false; - port->available_pbn = 0; - } - } -@@ -1157,10 +1186,8 @@ static void drm_dp_update_port(struct drm_dp_mst_branch *mstb, - - if (old_ddps != port->ddps) { - if (port->ddps) { -- drm_dp_check_port_guid(mstb, port); - dowork = true; - } else { -- port->guid_valid = false; - port->available_pbn = 0; - } - } -@@ -1190,7 +1217,7 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_ - - for (i = 0; i < lct - 1; i++) { - int shift = (i % 2) ? 0 : 4; -- int port_num = rad[i / 2] >> shift; -+ int port_num = (rad[i / 2] >> shift) & 0xf; - - list_for_each_entry(port, &mstb->ports, next) { - if (port->port_num == port_num) { -@@ -1210,6 +1237,48 @@ out: - return mstb; - } - -+static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper( -+ struct drm_dp_mst_branch *mstb, -+ uint8_t *guid) -+{ -+ struct drm_dp_mst_branch *found_mstb; -+ struct drm_dp_mst_port *port; -+ -+ if (memcmp(mstb->guid, guid, 16) == 0) -+ return mstb; -+ -+ -+ list_for_each_entry(port, &mstb->ports, next) { -+ if (!port->mstb) -+ continue; -+ -+ found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid); -+ -+ if (found_mstb) -+ return found_mstb; -+ } -+ -+ return NULL; -+} -+ -+static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device_by_guid( -+ struct drm_dp_mst_topology_mgr *mgr, -+ uint8_t *guid) -+{ -+ struct drm_dp_mst_branch *mstb; -+ -+ /* find the port by iterating down */ -+ mutex_lock(&mgr->lock); -+ -+ mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid); -+ -+ if (mstb) -+ kref_get(&mstb->kref); -+ -+ mutex_unlock(&mgr->lock); -+ return mstb; -+} -+ - static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr, - struct drm_dp_mst_branch *mstb) - { -@@ -1320,6 +1389,7 @@ static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr, - struct drm_dp_sideband_msg_tx *txmsg) - { - struct drm_dp_mst_branch *mstb = txmsg->dst; -+ u8 req_type; - - /* both msg slots are full */ - if (txmsg->seqno == -1) { -@@ -1336,7 +1406,13 @@ static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr, - txmsg->seqno = 1; - mstb->tx_slots[txmsg->seqno] = txmsg; - } -- hdr->broadcast = 0; -+ -+ req_type = txmsg->msg[0] & 0x7f; -+ if (req_type == DP_CONNECTION_STATUS_NOTIFY || -+ req_type == DP_RESOURCE_STATUS_NOTIFY) -+ hdr->broadcast = 1; -+ else -+ hdr->broadcast = 0; - hdr->path_msg = txmsg->path_msg; - hdr->lct = mstb->lct; - hdr->lcr = mstb->lct - 1; -@@ -1438,26 +1514,18 @@ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr) - } - - /* called holding qlock */ --static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr) -+static 
void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr, -+ struct drm_dp_sideband_msg_tx *txmsg) - { -- struct drm_dp_sideband_msg_tx *txmsg; - int ret; - - /* construct a chunk from the first msg in the tx_msg queue */ -- if (list_empty(&mgr->tx_msg_upq)) { -- mgr->tx_up_in_progress = false; -- return; -- } -- -- txmsg = list_first_entry(&mgr->tx_msg_upq, struct drm_dp_sideband_msg_tx, next); - ret = process_single_tx_qlock(mgr, txmsg, true); -- if (ret == 1) { -- /* up txmsgs aren't put in slots - so free after we send it */ -- list_del(&txmsg->next); -- kfree(txmsg); -- } else if (ret) -+ -+ if (ret != 1) - DRM_DEBUG_KMS("failed to send msg in q %d\n", ret); -- mgr->tx_up_in_progress = true; -+ -+ txmsg->dst->tx_slots[txmsg->seqno] = NULL; - } - - static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr, -@@ -1507,6 +1575,9 @@ static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, - txmsg->reply.u.link_addr.ports[i].num_sdp_streams, - txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks); - } -+ -+ drm_dp_check_mstb_guid(mstb, txmsg->reply.u.link_addr.guid); -+ - for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) { - drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]); - } -@@ -1554,6 +1625,37 @@ static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr, - return 0; - } - -+static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb) -+{ -+ if (!mstb->port_parent) -+ return NULL; -+ -+ if (mstb->port_parent->mstb != mstb) -+ return mstb->port_parent; -+ -+ return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent); -+} -+ -+static struct drm_dp_mst_branch *drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr, -+ struct drm_dp_mst_branch *mstb, -+ int *port_num) -+{ -+ struct drm_dp_mst_branch *rmstb = NULL; -+ struct drm_dp_mst_port *found_port; -+ mutex_lock(&mgr->lock); -+ if (mgr->mst_primary) { -+ found_port = drm_dp_get_last_connected_port_to_mstb(mstb); -+ -+ if (found_port) { -+ rmstb = found_port->parent; -+ kref_get(&rmstb->kref); -+ *port_num = found_port->port_num; -+ } -+ } -+ mutex_unlock(&mgr->lock); -+ return rmstb; -+} -+ - static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr, - struct drm_dp_mst_port *port, - int id, -@@ -1561,11 +1663,16 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr, - { - struct drm_dp_sideband_msg_tx *txmsg; - struct drm_dp_mst_branch *mstb; -- int len, ret; -+ int len, ret, port_num; - -+ port_num = port->port_num; - mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent); -- if (!mstb) -- return -EINVAL; -+ if (!mstb) { -+ mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num); -+ -+ if (!mstb) -+ return -EINVAL; -+ } - - txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); - if (!txmsg) { -@@ -1574,7 +1681,7 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr, - } - - txmsg->dst = mstb; -- len = build_allocate_payload(txmsg, port->port_num, -+ len = build_allocate_payload(txmsg, port_num, - id, - pbn); - -@@ -1844,11 +1951,12 @@ static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr, - drm_dp_encode_up_ack_reply(txmsg, req_type); - - mutex_lock(&mgr->qlock); -- list_add_tail(&txmsg->next, &mgr->tx_msg_upq); -- if (!mgr->tx_up_in_progress) { -- process_single_up_tx_qlock(mgr); -- } -+ -+ process_single_up_tx_qlock(mgr, txmsg); -+ - mutex_unlock(&mgr->qlock); -+ -+ kfree(txmsg); - return 0; 
- } - -@@ -1927,31 +2035,17 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms - mgr->mst_primary = mstb; - kref_get(&mgr->mst_primary->kref); - -- { -- struct drm_dp_payload reset_pay; -- reset_pay.start_slot = 0; -- reset_pay.num_slots = 0x3f; -- drm_dp_dpcd_write_payload(mgr, 0, &reset_pay); -- } -- - ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, -- DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC); -+ DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC); - if (ret < 0) { - goto out_unlock; - } - -- -- /* sort out guid */ -- ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, mgr->guid, 16); -- if (ret != 16) { -- DRM_DEBUG_KMS("failed to read DP GUID %d\n", ret); -- goto out_unlock; -- } -- -- mgr->guid_valid = drm_dp_validate_guid(mgr, mgr->guid); -- if (!mgr->guid_valid) { -- ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, mgr->guid, 16); -- mgr->guid_valid = true; -+ { -+ struct drm_dp_payload reset_pay; -+ reset_pay.start_slot = 0; -+ reset_pay.num_slots = 0x3f; -+ drm_dp_dpcd_write_payload(mgr, 0, &reset_pay); - } - - queue_work(system_long_wq, &mgr->work); -@@ -2145,28 +2239,51 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr) - - if (mgr->up_req_recv.have_eomt) { - struct drm_dp_sideband_msg_req_body msg; -- struct drm_dp_mst_branch *mstb; -+ struct drm_dp_mst_branch *mstb = NULL; - bool seqno; -- mstb = drm_dp_get_mst_branch_device(mgr, -- mgr->up_req_recv.initial_hdr.lct, -- mgr->up_req_recv.initial_hdr.rad); -- if (!mstb) { -- DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct); -- memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); -- return 0; -+ -+ if (!mgr->up_req_recv.initial_hdr.broadcast) { -+ mstb = drm_dp_get_mst_branch_device(mgr, -+ mgr->up_req_recv.initial_hdr.lct, -+ mgr->up_req_recv.initial_hdr.rad); -+ if (!mstb) { -+ DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct); -+ memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); -+ return 0; -+ } - } - - seqno = mgr->up_req_recv.initial_hdr.seqno; - drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg); - - if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) { -- drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false); -+ drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false); -+ -+ if (!mstb) -+ mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.conn_stat.guid); -+ -+ if (!mstb) { -+ DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct); -+ memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); -+ return 0; -+ } -+ - drm_dp_update_port(mstb, &msg.u.conn_stat); -+ - DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type); - (*mgr->cbs->hotplug)(mgr); - - } else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) { -- drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false); -+ drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false); -+ if (!mstb) -+ mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.resource_stat.guid); -+ -+ if (!mstb) { -+ DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct); -+ memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); -+ return 0; -+ } -+ - 
DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn); - } - -@@ -2346,6 +2463,7 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp - DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", port->vcpi.vcpi, port->vcpi.pbn, pbn); - if (pbn == port->vcpi.pbn) { - *slots = port->vcpi.num_slots; -+ drm_dp_put_port(port); - return true; - } - } -@@ -2505,32 +2623,31 @@ EXPORT_SYMBOL(drm_dp_check_act_status); - */ - int drm_dp_calc_pbn_mode(int clock, int bpp) - { -- fixed20_12 pix_bw; -- fixed20_12 fbpp; -- fixed20_12 result; -- fixed20_12 margin, tmp; -- u32 res; -- -- pix_bw.full = dfixed_const(clock); -- fbpp.full = dfixed_const(bpp); -- tmp.full = dfixed_const(8); -- fbpp.full = dfixed_div(fbpp, tmp); -- -- result.full = dfixed_mul(pix_bw, fbpp); -- margin.full = dfixed_const(54); -- tmp.full = dfixed_const(64); -- margin.full = dfixed_div(margin, tmp); -- result.full = dfixed_div(result, margin); -- -- margin.full = dfixed_const(1006); -- tmp.full = dfixed_const(1000); -- margin.full = dfixed_div(margin, tmp); -- result.full = dfixed_mul(result, margin); -- -- result.full = dfixed_div(result, tmp); -- result.full = dfixed_ceil(result); -- res = dfixed_trunc(result); -- return res; -+ u64 kbps; -+ s64 peak_kbps; -+ u32 numerator; -+ u32 denominator; -+ -+ kbps = clock * bpp; -+ -+ /* -+ * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006 -+ * The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on -+ * common multiplier to render an integer PBN for all link rate/lane -+ * counts combinations -+ * calculate -+ * peak_kbps *= (1006/1000) -+ * peak_kbps *= (64/54) -+ * peak_kbps *= 8 convert to bytes -+ */ -+ -+ numerator = 64 * 1006; -+ denominator = 54 * 8 * 1000 * 1000; -+ -+ kbps *= numerator; -+ peak_kbps = drm_fixp_from_fraction(kbps, denominator); -+ -+ return drm_fixp2int_ceil(peak_kbps); - } - EXPORT_SYMBOL(drm_dp_calc_pbn_mode); - -@@ -2538,11 +2655,23 @@ static int test_calc_pbn_mode(void) - { - int ret; - ret = drm_dp_calc_pbn_mode(154000, 30); -- if (ret != 689) -+ if (ret != 689) { -+ DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n", -+ 154000, 30, 689, ret); - return -EINVAL; -+ } - ret = drm_dp_calc_pbn_mode(234000, 30); -- if (ret != 1047) -+ if (ret != 1047) { -+ DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n", -+ 234000, 30, 1047, ret); -+ return -EINVAL; -+ } -+ ret = drm_dp_calc_pbn_mode(297000, 24); -+ if (ret != 1063) { -+ DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n", -+ 297000, 24, 1063, ret); - return -EINVAL; -+ } - return 0; - } - -@@ -2683,6 +2812,13 @@ static void drm_dp_tx_work(struct work_struct *work) - mutex_unlock(&mgr->qlock); - } - -+static void drm_dp_free_mst_port(struct kref *kref) -+{ -+ struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref); -+ kref_put(&port->parent->kref, drm_dp_free_mst_branch_device); -+ kfree(port); -+} -+ - static void drm_dp_destroy_connector_work(struct work_struct *work) - { - struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work); -@@ -2703,13 +2839,22 @@ static void drm_dp_destroy_connector_work(struct work_struct *work) - list_del(&port->next); - mutex_unlock(&mgr->destroy_connector_lock); - -+ kref_init(&port->kref); -+ INIT_LIST_HEAD(&port->next); -+ - 
mgr->cbs->destroy_connector(mgr, port->connector); - - drm_dp_port_teardown_pdt(port, port->pdt); - -- if (!port->input && port->vcpi.vcpi > 0) -- drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi); -- kfree(port); -+ if (!port->input && port->vcpi.vcpi > 0) { -+ if (mgr->mst_state) { -+ drm_dp_mst_reset_vcpi_slots(mgr, port); -+ drm_dp_update_payload_part1(mgr); -+ drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi); -+ } -+ } -+ -+ kref_put(&port->kref, drm_dp_free_mst_port); - send_hotplug = true; - } - if (send_hotplug) -@@ -2736,7 +2881,6 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, - mutex_init(&mgr->qlock); - mutex_init(&mgr->payload_lock); - mutex_init(&mgr->destroy_connector_lock); -- INIT_LIST_HEAD(&mgr->tx_msg_upq); - INIT_LIST_HEAD(&mgr->tx_msg_downq); - INIT_LIST_HEAD(&mgr->destroy_connector_list); - INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work); -diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c -index 607f493..8090989 100644 ---- a/drivers/gpu/drm/drm_irq.c -+++ b/drivers/gpu/drm/drm_irq.c -@@ -221,6 +221,64 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe, - diff = (flags & DRM_CALLED_FROM_VBLIRQ) != 0; - } - -+ /* -+ * Within a drm_vblank_pre_modeset - drm_vblank_post_modeset -+ * interval? If so then vblank irqs keep running and it will likely -+ * happen that the hardware vblank counter is not trustworthy as it -+ * might reset at some point in that interval and vblank timestamps -+ * are not trustworthy either in that interval. Iow. this can result -+ * in a bogus diff >> 1 which must be avoided as it would cause -+ * random large forward jumps of the software vblank counter. -+ */ -+ if (diff > 1 && (vblank->inmodeset & 0x2)) { -+ DRM_DEBUG_VBL("clamping vblank bump to 1 on crtc %u: diffr=%u" -+ " due to pre-modeset.\n", pipe, diff); -+ diff = 1; -+ } -+ -+ /* -+ * FIMXE: Need to replace this hack with proper seqlocks. -+ * -+ * Restrict the bump of the software vblank counter to a safe maximum -+ * value of +1 whenever there is the possibility that concurrent readers -+ * of vblank timestamps could be active at the moment, as the current -+ * implementation of the timestamp caching and updating is not safe -+ * against concurrent readers for calls to store_vblank() with a bump -+ * of anything but +1. A bump != 1 would very likely return corrupted -+ * timestamps to userspace, because the same slot in the cache could -+ * be concurrently written by store_vblank() and read by one of those -+ * readers without the read-retry logic detecting the collision. -+ * -+ * Concurrent readers can exist when we are called from the -+ * drm_vblank_off() or drm_vblank_on() functions and other non-vblank- -+ * irq callers. However, all those calls to us are happening with the -+ * vbl_lock locked to prevent drm_vblank_get(), so the vblank refcount -+ * can't increase while we are executing. Therefore a zero refcount at -+ * this point is safe for arbitrary counter bumps if we are called -+ * outside vblank irq, a non-zero count is not 100% safe. Unfortunately -+ * we must also accept a refcount of 1, as whenever we are called from -+ * drm_vblank_get() -> drm_vblank_enable() the refcount will be 1 and -+ * we must let that one pass through in order to not lose vblank counts -+ * during vblank irq off - which would completely defeat the whole -+ * point of this routine. 
-+ * -+ * Whenever we are called from vblank irq, we have to assume concurrent -+ * readers exist or can show up any time during our execution, even if -+ * the refcount is currently zero, as vblank irqs are usually only -+ * enabled due to the presence of readers, and because when we are called -+ * from vblank irq we can't hold the vbl_lock to protect us from sudden -+ * bumps in vblank refcount. Therefore also restrict bumps to +1 when -+ * called from vblank irq. -+ */ -+ if ((diff > 1) && (atomic_read(&vblank->refcount) > 1 || -+ (flags & DRM_CALLED_FROM_VBLIRQ))) { -+ DRM_DEBUG_VBL("clamping vblank bump to 1 on crtc %u: diffr=%u " -+ "refcount %u, vblirq %u\n", pipe, diff, -+ atomic_read(&vblank->refcount), -+ (flags & DRM_CALLED_FROM_VBLIRQ) != 0); -+ diff = 1; -+ } -+ - DRM_DEBUG_VBL("updating vblank count on crtc %u:" - " current=%u, diff=%u, hw=%u hw_last=%u\n", - pipe, vblank->count, diff, cur_vblank, vblank->last); -@@ -1313,7 +1371,13 @@ void drm_vblank_off(struct drm_device *dev, unsigned int pipe) - spin_lock_irqsave(&dev->event_lock, irqflags); - - spin_lock(&dev->vbl_lock); -- vblank_disable_and_save(dev, pipe); -+ DRM_DEBUG_VBL("crtc %d, vblank enabled %d, inmodeset %d\n", -+ pipe, vblank->enabled, vblank->inmodeset); -+ -+ /* Avoid redundant vblank disables without previous drm_vblank_on(). */ -+ if (drm_core_check_feature(dev, DRIVER_ATOMIC) || !vblank->inmodeset) -+ vblank_disable_and_save(dev, pipe); -+ - wake_up(&vblank->queue); - - /* -@@ -1415,6 +1479,9 @@ void drm_vblank_on(struct drm_device *dev, unsigned int pipe) - return; - - spin_lock_irqsave(&dev->vbl_lock, irqflags); -+ DRM_DEBUG_VBL("crtc %d, vblank enabled %d, inmodeset %d\n", -+ pipe, vblank->enabled, vblank->inmodeset); -+ - /* Drop our private "prevent drm_vblank_get" refcount */ - if (vblank->inmodeset) { - atomic_dec(&vblank->refcount); -@@ -1427,8 +1494,7 @@ void drm_vblank_on(struct drm_device *dev, unsigned int pipe) - * re-enable interrupts if there are users left, or the - * user wishes vblank interrupts to be enabled all the time. 
- */ -- if (atomic_read(&vblank->refcount) != 0 || -- (!dev->vblank_disable_immediate && drm_vblank_offdelay == 0)) -+ if (atomic_read(&vblank->refcount) != 0 || drm_vblank_offdelay == 0) - WARN_ON(drm_vblank_enable(dev, pipe)); - spin_unlock_irqrestore(&dev->vbl_lock, irqflags); - } -@@ -1523,6 +1589,7 @@ void drm_vblank_post_modeset(struct drm_device *dev, unsigned int pipe) - if (vblank->inmodeset) { - spin_lock_irqsave(&dev->vbl_lock, irqflags); - dev->vblank_disable_allowed = true; -+ drm_reset_vblank_timestamp(dev, pipe); - spin_unlock_irqrestore(&dev->vbl_lock, irqflags); - - if (vblank->inmodeset & 0x2) -diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c -index c707fa6..e3bdc8b 100644 ---- a/drivers/gpu/drm/gma500/gem.c -+++ b/drivers/gpu/drm/gma500/gem.c -@@ -130,7 +130,7 @@ int psb_gem_create(struct drm_file *file, struct drm_device *dev, u64 size, - return ret; - } - /* We have the initial and handle reference but need only one now */ -- drm_gem_object_unreference(&r->gem); -+ drm_gem_object_unreference_unlocked(&r->gem); - *handlep = handle; - return 0; - } -diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c -index b4741d1..61fcb3b 100644 ---- a/drivers/gpu/drm/i915/i915_dma.c -+++ b/drivers/gpu/drm/i915/i915_dma.c -@@ -402,6 +402,8 @@ static int i915_load_modeset_init(struct drm_device *dev) - if (ret) - goto cleanup_gem_stolen; - -+ intel_setup_gmbus(dev); -+ - /* Important: The output setup functions called by modeset_init need - * working irqs for e.g. gmbus and dp aux transfers. */ - intel_modeset_init(dev); -@@ -451,6 +453,7 @@ cleanup_gem: - cleanup_irq: - intel_guc_ucode_fini(dev); - drm_irq_uninstall(dev); -+ intel_teardown_gmbus(dev); - cleanup_gem_stolen: - i915_gem_cleanup_stolen(dev); - cleanup_vga_switcheroo: -@@ -1028,7 +1031,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) - - /* Try to make sure MCHBAR is enabled before poking at it */ - intel_setup_mchbar(dev); -- intel_setup_gmbus(dev); - intel_opregion_setup(dev); - - i915_gem_load(dev); -@@ -1099,7 +1101,6 @@ out_gem_unload: - if (dev->pdev->msi_enabled) - pci_disable_msi(dev->pdev); - -- intel_teardown_gmbus(dev); - intel_teardown_mchbar(dev); - pm_qos_remove_request(&dev_priv->pm_qos); - destroy_workqueue(dev_priv->gpu_error.hangcheck_wq); -@@ -1198,7 +1199,6 @@ int i915_driver_unload(struct drm_device *dev) - - intel_csr_ucode_fini(dev); - -- intel_teardown_gmbus(dev); - intel_teardown_mchbar(dev); - - destroy_workqueue(dev_priv->hotplug.dp_wq); -diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c -index 02ceb7a..0433d25 100644 ---- a/drivers/gpu/drm/i915/i915_gem_context.c -+++ b/drivers/gpu/drm/i915/i915_gem_context.c -@@ -340,6 +340,10 @@ void i915_gem_context_reset(struct drm_device *dev) - i915_gem_context_unreference(lctx); - ring->last_context = NULL; - } -+ -+ /* Force the GPU state to be reinitialised on enabling */ -+ if (ring->default_context) -+ ring->default_context->legacy_hw_ctx.initialized = false; - } - } - -@@ -708,7 +712,7 @@ static int do_switch(struct drm_i915_gem_request *req) - if (ret) - goto unpin_out; - -- if (!to->legacy_hw_ctx.initialized) { -+ if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to)) { - hw_flags |= MI_RESTORE_INHIBIT; - /* NB: If we inhibit the restore, the context is not allowed to - * die because future work may end up depending on valid address -diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c -index 
0d228f9..0f42a27 100644 ---- a/drivers/gpu/drm/i915/i915_irq.c -+++ b/drivers/gpu/drm/i915/i915_irq.c -@@ -2354,9 +2354,13 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) - spt_irq_handler(dev, pch_iir); - else - cpt_irq_handler(dev, pch_iir); -- } else -- DRM_ERROR("The master control interrupt lied (SDE)!\n"); -- -+ } else { -+ /* -+ * Like on previous PCH there seems to be something -+ * fishy going on with forwarding PCH interrupts. -+ */ -+ DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n"); -+ } - } - - I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); -diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c -index a6752a6..7e6158b 100644 ---- a/drivers/gpu/drm/i915/intel_ddi.c -+++ b/drivers/gpu/drm/i915/intel_ddi.c -@@ -1582,7 +1582,8 @@ skl_ddi_pll_select(struct intel_crtc *intel_crtc, - DPLL_CFGCR2_KDIV(wrpll_params.kdiv) | - DPLL_CFGCR2_PDIV(wrpll_params.pdiv) | - wrpll_params.central_freq; -- } else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) { -+ } else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT || -+ intel_encoder->type == INTEL_OUTPUT_DP_MST) { - switch (crtc_state->port_clock / 2) { - case 81000: - ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0); -diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c -index 32cf973..f859a5b 100644 ---- a/drivers/gpu/drm/i915/intel_display.c -+++ b/drivers/gpu/drm/i915/intel_display.c -@@ -11930,11 +11930,21 @@ connected_sink_compute_bpp(struct intel_connector *connector, - pipe_config->pipe_bpp = connector->base.display_info.bpc*3; - } - -- /* Clamp bpp to 8 on screens without EDID 1.4 */ -- if (connector->base.display_info.bpc == 0 && bpp > 24) { -- DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n", -- bpp); -- pipe_config->pipe_bpp = 24; -+ /* Clamp bpp to default limit on screens without EDID 1.4 */ -+ if (connector->base.display_info.bpc == 0) { -+ int type = connector->base.connector_type; -+ int clamp_bpp = 24; -+ -+ /* Fall back to 18 bpp when DP sink capability is unknown. 
*/ -+ if (type == DRM_MODE_CONNECTOR_DisplayPort || -+ type == DRM_MODE_CONNECTOR_eDP) -+ clamp_bpp = 18; -+ -+ if (bpp > clamp_bpp) { -+ DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of %d\n", -+ bpp, clamp_bpp); -+ pipe_config->pipe_bpp = clamp_bpp; -+ } - } - } - -@@ -13537,11 +13547,12 @@ intel_check_primary_plane(struct drm_plane *plane, - int max_scale = DRM_PLANE_HELPER_NO_SCALING; - bool can_position = false; - -- /* use scaler when colorkey is not required */ -- if (INTEL_INFO(plane->dev)->gen >= 9 && -- state->ckey.flags == I915_SET_COLORKEY_NONE) { -- min_scale = 1; -- max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state); -+ if (INTEL_INFO(plane->dev)->gen >= 9) { -+ /* use scaler when colorkey is not required */ -+ if (state->ckey.flags == I915_SET_COLORKEY_NONE) { -+ min_scale = 1; -+ max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state); -+ } - can_position = true; - } - -@@ -15565,6 +15576,8 @@ void intel_modeset_cleanup(struct drm_device *dev) - mutex_lock(&dev->struct_mutex); - intel_cleanup_gt_powersave(dev); - mutex_unlock(&dev->struct_mutex); -+ -+ intel_teardown_gmbus(dev); - } - - /* -diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c -index a5e99ac..a8912ae 100644 ---- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c -+++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c -@@ -207,7 +207,12 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data) - gpio = *data++; - - /* pull up/down */ -- action = *data++; -+ action = *data++ & 1; -+ -+ if (gpio >= ARRAY_SIZE(gtable)) { -+ DRM_DEBUG_KMS("unknown gpio %u\n", gpio); -+ goto out; -+ } - - function = gtable[gpio].function_reg; - pad = gtable[gpio].pad_reg; -@@ -226,6 +231,7 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data) - vlv_gpio_nc_write(dev_priv, pad, val); - mutex_unlock(&dev_priv->sb_lock); - -+out: - return data; - } - -diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c -index b177857..d7a6437 100644 ---- a/drivers/gpu/drm/i915/intel_hotplug.c -+++ b/drivers/gpu/drm/i915/intel_hotplug.c -@@ -468,9 +468,14 @@ void intel_hpd_init(struct drm_i915_private *dev_priv) - list_for_each_entry(connector, &mode_config->connector_list, head) { - struct intel_connector *intel_connector = to_intel_connector(connector); - connector->polled = intel_connector->polled; -- if (connector->encoder && !connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE) -- connector->polled = DRM_CONNECTOR_POLL_HPD; -+ -+ /* MST has a dynamic intel_connector->encoder and it's reprobing -+ * is all handled by the MST helpers. 
*/ - if (intel_connector->mst_port) -+ continue; -+ -+ if (!connector->polled && I915_HAS_HOTPLUG(dev) && -+ intel_connector->encoder->hpd_pin > HPD_NONE) - connector->polled = DRM_CONNECTOR_POLL_HPD; - } - -diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c -index 8324654..f3bee54 100644 ---- a/drivers/gpu/drm/i915/intel_i2c.c -+++ b/drivers/gpu/drm/i915/intel_i2c.c -@@ -675,7 +675,7 @@ int intel_setup_gmbus(struct drm_device *dev) - return 0; - - err: -- while (--pin) { -+ while (pin--) { - if (!intel_gmbus_is_valid_pin(dev_priv, pin)) - continue; - -diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c -index 88e12bd..d69547a 100644 ---- a/drivers/gpu/drm/i915/intel_lrc.c -+++ b/drivers/gpu/drm/i915/intel_lrc.c -@@ -1706,6 +1706,7 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request, - if (flush_domains) { - flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; - flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; -+ flags |= PIPE_CONTROL_DC_FLUSH_ENABLE; - flags |= PIPE_CONTROL_FLUSH_ENABLE; - } - -diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c -index 9461a23..f6b2a81 100644 ---- a/drivers/gpu/drm/i915/intel_ringbuffer.c -+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c -@@ -347,6 +347,7 @@ gen7_render_ring_flush(struct drm_i915_gem_request *req, - if (flush_domains) { - flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; - flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; -+ flags |= PIPE_CONTROL_DC_FLUSH_ENABLE; - flags |= PIPE_CONTROL_FLUSH_ENABLE; - } - if (invalidate_domains) { -@@ -419,6 +420,7 @@ gen8_render_ring_flush(struct drm_i915_gem_request *req, - if (flush_domains) { - flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; - flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; -+ flags |= PIPE_CONTROL_DC_FLUSH_ENABLE; - flags |= PIPE_CONTROL_FLUSH_ENABLE; - } - if (invalidate_domains) { -diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c -index 2e7cbe9..2a5ed74 100644 ---- a/drivers/gpu/drm/nouveau/nouveau_connector.c -+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c -@@ -969,10 +969,13 @@ nouveau_connector_hotplug(struct nvif_notify *notify) - - NV_DEBUG(drm, "%splugged %s\n", plugged ? "" : "un", name); - -+ mutex_lock(&drm->dev->mode_config.mutex); - if (plugged) - drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); - else - drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); -+ mutex_unlock(&drm->dev->mode_config.mutex); -+ - drm_helper_hpd_irq_event(connector->dev); - } - -diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c -index 64c8d93..58a3f7c 100644 ---- a/drivers/gpu/drm/nouveau/nouveau_display.c -+++ b/drivers/gpu/drm/nouveau/nouveau_display.c -@@ -634,10 +634,6 @@ nouveau_display_resume(struct drm_device *dev, bool runtime) - nv_crtc->lut.depth = 0; - } - -- /* Make sure that drm and hw vblank irqs get resumed if needed. */ -- for (head = 0; head < dev->mode_config.num_crtc; head++) -- drm_vblank_on(dev, head); -- - /* This should ensure we don't hit a locking problem when someone - * wakes us up via a connector. We should never go into suspend - * while the display is on anyways. -@@ -647,6 +643,10 @@ nouveau_display_resume(struct drm_device *dev, bool runtime) - - drm_helper_resume_force_mode(dev); - -+ /* Make sure that drm and hw vblank irqs get resumed if needed. 
*/ -+ for (head = 0; head < dev->mode_config.num_crtc; head++) -+ drm_vblank_on(dev, head); -+ - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); - -diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c -index 60e32c4..35ecc0d 100644 ---- a/drivers/gpu/drm/nouveau/nouveau_platform.c -+++ b/drivers/gpu/drm/nouveau/nouveau_platform.c -@@ -24,7 +24,7 @@ - static int nouveau_platform_probe(struct platform_device *pdev) - { - const struct nvkm_device_tegra_func *func; -- struct nvkm_device *device; -+ struct nvkm_device *device = NULL; - struct drm_device *drm; - int ret; - -diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c -index 7f8a427..e7e581d 100644 ---- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c -+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c -@@ -252,32 +252,40 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func, - - if (!(tdev = kzalloc(sizeof(*tdev), GFP_KERNEL))) - return -ENOMEM; -- *pdevice = &tdev->device; -+ - tdev->func = func; - tdev->pdev = pdev; - tdev->irq = -1; - - tdev->vdd = devm_regulator_get(&pdev->dev, "vdd"); -- if (IS_ERR(tdev->vdd)) -- return PTR_ERR(tdev->vdd); -+ if (IS_ERR(tdev->vdd)) { -+ ret = PTR_ERR(tdev->vdd); -+ goto free; -+ } - - tdev->rst = devm_reset_control_get(&pdev->dev, "gpu"); -- if (IS_ERR(tdev->rst)) -- return PTR_ERR(tdev->rst); -+ if (IS_ERR(tdev->rst)) { -+ ret = PTR_ERR(tdev->rst); -+ goto free; -+ } - - tdev->clk = devm_clk_get(&pdev->dev, "gpu"); -- if (IS_ERR(tdev->clk)) -- return PTR_ERR(tdev->clk); -+ if (IS_ERR(tdev->clk)) { -+ ret = PTR_ERR(tdev->clk); -+ goto free; -+ } - - tdev->clk_pwr = devm_clk_get(&pdev->dev, "pwr"); -- if (IS_ERR(tdev->clk_pwr)) -- return PTR_ERR(tdev->clk_pwr); -+ if (IS_ERR(tdev->clk_pwr)) { -+ ret = PTR_ERR(tdev->clk_pwr); -+ goto free; -+ } - - nvkm_device_tegra_probe_iommu(tdev); - - ret = nvkm_device_tegra_power_up(tdev); - if (ret) -- return ret; -+ goto remove; - - tdev->gpu_speedo = tegra_sku_info.gpu_speedo_value; - ret = nvkm_device_ctor(&nvkm_device_tegra_func, NULL, &pdev->dev, -@@ -285,9 +293,19 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func, - cfg, dbg, detect, mmio, subdev_mask, - &tdev->device); - if (ret) -- return ret; -+ goto powerdown; -+ -+ *pdevice = &tdev->device; - - return 0; -+ -+powerdown: -+ nvkm_device_tegra_power_down(tdev); -+remove: -+ nvkm_device_tegra_remove_iommu(tdev); -+free: -+ kfree(tdev); -+ return ret; - } - #else - int -diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c -index 74e2f7c..9688970 100644 ---- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c -+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c -@@ -328,6 +328,7 @@ nvkm_dp_train(struct work_struct *w) - .outp = outp, - }, *dp = &_dp; - u32 datarate = 0; -+ u8 pwr; - int ret; - - if (!outp->base.info.location && disp->func->sor.magic) -@@ -355,6 +356,15 @@ nvkm_dp_train(struct work_struct *w) - /* disable link interrupt handling during link training */ - nvkm_notify_put(&outp->irq); - -+ /* ensure sink is not in a low-power state */ -+ if (!nvkm_rdaux(outp->aux, DPCD_SC00, &pwr, 1)) { -+ if ((pwr & DPCD_SC00_SET_POWER) != DPCD_SC00_SET_POWER_D0) { -+ pwr &= ~DPCD_SC00_SET_POWER; -+ pwr |= DPCD_SC00_SET_POWER_D0; -+ nvkm_wraux(outp->aux, DPCD_SC00, &pwr, 1); -+ } -+ } -+ - /* enable down-spreading and execute 
pre-train script from vbios */ - dp_link_train_init(dp, outp->dpcd[3] & 0x01); - -diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h -index 9596290..6e10c5e 100644 ---- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h -+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h -@@ -71,5 +71,11 @@ - #define DPCD_LS0C_LANE1_POST_CURSOR2 0x0c - #define DPCD_LS0C_LANE0_POST_CURSOR2 0x03 - -+/* DPCD Sink Control */ -+#define DPCD_SC00 0x00600 -+#define DPCD_SC00_SET_POWER 0x03 -+#define DPCD_SC00_SET_POWER_D0 0x01 -+#define DPCD_SC00_SET_POWER_D3 0x03 -+ - void nvkm_dp_train(struct work_struct *); - #endif -diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c -index 2ae8577..7c2e782 100644 ---- a/drivers/gpu/drm/qxl/qxl_ioctl.c -+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c -@@ -168,7 +168,8 @@ static int qxl_process_single_command(struct qxl_device *qdev, - cmd->command_size)) - return -EFAULT; - -- reloc_info = kmalloc(sizeof(struct qxl_reloc_info) * cmd->relocs_num, GFP_KERNEL); -+ reloc_info = kmalloc_array(cmd->relocs_num, -+ sizeof(struct qxl_reloc_info), GFP_KERNEL); - if (!reloc_info) - return -ENOMEM; - -diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c -index 7520727..367a916 100644 ---- a/drivers/gpu/drm/radeon/dce6_afmt.c -+++ b/drivers/gpu/drm/radeon/dce6_afmt.c -@@ -301,6 +301,14 @@ void dce6_dp_audio_set_dto(struct radeon_device *rdev, - * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator - */ - if (ASIC_IS_DCE8(rdev)) { -+ unsigned int div = (RREG32(DENTIST_DISPCLK_CNTL) & -+ DENTIST_DPREFCLK_WDIVIDER_MASK) >> -+ DENTIST_DPREFCLK_WDIVIDER_SHIFT; -+ div = radeon_audio_decode_dfs_div(div); -+ -+ if (div) -+ clock = clock * 100 / div; -+ - WREG32(DCE8_DCCG_AUDIO_DTO1_PHASE, 24000); - WREG32(DCE8_DCCG_AUDIO_DTO1_MODULE, clock); - } else { -diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c -index 9953356..3cf04a2 100644 ---- a/drivers/gpu/drm/radeon/evergreen_hdmi.c -+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c -@@ -289,6 +289,16 @@ void dce4_dp_audio_set_dto(struct radeon_device *rdev, - * number (coefficient of two integer numbers. 
DCCG_AUDIO_DTOx_PHASE - * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator - */ -+ if (ASIC_IS_DCE41(rdev)) { -+ unsigned int div = (RREG32(DCE41_DENTIST_DISPCLK_CNTL) & -+ DENTIST_DPREFCLK_WDIVIDER_MASK) >> -+ DENTIST_DPREFCLK_WDIVIDER_SHIFT; -+ div = radeon_audio_decode_dfs_div(div); -+ -+ if (div) -+ clock = 100 * clock / div; -+ } -+ - WREG32(DCCG_AUDIO_DTO1_PHASE, 24000); - WREG32(DCCG_AUDIO_DTO1_MODULE, clock); - } -diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h -index 4aa5f75..13b6029 100644 ---- a/drivers/gpu/drm/radeon/evergreend.h -+++ b/drivers/gpu/drm/radeon/evergreend.h -@@ -511,6 +511,11 @@ - #define DCCG_AUDIO_DTO1_CNTL 0x05cc - # define DCCG_AUDIO_DTO1_USE_512FBR_DTO (1 << 3) - -+#define DCE41_DENTIST_DISPCLK_CNTL 0x049c -+# define DENTIST_DPREFCLK_WDIVIDER(x) (((x) & 0x7f) << 24) -+# define DENTIST_DPREFCLK_WDIVIDER_MASK (0x7f << 24) -+# define DENTIST_DPREFCLK_WDIVIDER_SHIFT 24 -+ - /* DCE 4.0 AFMT */ - #define HDMI_CONTROL 0x7030 - # define HDMI_KEEPOUT_MODE (1 << 0) -diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h -index 87db649..5580568 100644 ---- a/drivers/gpu/drm/radeon/radeon.h -+++ b/drivers/gpu/drm/radeon/radeon.h -@@ -268,6 +268,7 @@ struct radeon_clock { - uint32_t current_dispclk; - uint32_t dp_extclk; - uint32_t max_pixel_clock; -+ uint32_t vco_freq; - }; - - /* -diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c -index 8f28524..de9a2ff 100644 ---- a/drivers/gpu/drm/radeon/radeon_atombios.c -+++ b/drivers/gpu/drm/radeon/radeon_atombios.c -@@ -437,7 +437,9 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, - } - - /* Fujitsu D3003-S2 board lists DVI-I as DVI-D and VGA */ -- if (((dev->pdev->device == 0x9802) || (dev->pdev->device == 0x9806)) && -+ if (((dev->pdev->device == 0x9802) || -+ (dev->pdev->device == 0x9805) || -+ (dev->pdev->device == 0x9806)) && - (dev->pdev->subsystem_vendor == 0x1734) && - (dev->pdev->subsystem_device == 0x11bd)) { - if (*connector_type == DRM_MODE_CONNECTOR_VGA) { -@@ -448,14 +450,6 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, - } - } - -- /* Fujitsu D3003-S2 board lists DVI-I as DVI-I and VGA */ -- if ((dev->pdev->device == 0x9805) && -- (dev->pdev->subsystem_vendor == 0x1734) && -- (dev->pdev->subsystem_device == 0x11bd)) { -- if (*connector_type == DRM_MODE_CONNECTOR_VGA) -- return false; -- } -- - return true; - } - -@@ -1112,6 +1106,31 @@ union firmware_info { - ATOM_FIRMWARE_INFO_V2_2 info_22; - }; - -+union igp_info { -+ struct _ATOM_INTEGRATED_SYSTEM_INFO info; -+ struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2; -+ struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6; -+ struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7; -+ struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8; -+}; -+ -+static void radeon_atombios_get_dentist_vco_freq(struct radeon_device *rdev) -+{ -+ struct radeon_mode_info *mode_info = &rdev->mode_info; -+ int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo); -+ union igp_info *igp_info; -+ u8 frev, crev; -+ u16 data_offset; -+ -+ if (atom_parse_data_header(mode_info->atom_context, index, NULL, -+ &frev, &crev, &data_offset)) { -+ igp_info = (union igp_info *)(mode_info->atom_context->bios + -+ data_offset); -+ rdev->clock.vco_freq = -+ le32_to_cpu(igp_info->info_6.ulDentistVCOFreq); -+ } -+} -+ - bool radeon_atom_get_clock_info(struct drm_device *dev) - { - struct radeon_device *rdev = dev->dev_private; -@@ -1263,20 +1282,25 @@ bool 
radeon_atom_get_clock_info(struct drm_device *dev) - rdev->mode_info.firmware_flags = - le16_to_cpu(firmware_info->info.usFirmwareCapability.susAccess); - -+ if (ASIC_IS_DCE8(rdev)) -+ rdev->clock.vco_freq = -+ le32_to_cpu(firmware_info->info_22.ulGPUPLL_OutputFreq); -+ else if (ASIC_IS_DCE5(rdev)) -+ rdev->clock.vco_freq = rdev->clock.current_dispclk; -+ else if (ASIC_IS_DCE41(rdev)) -+ radeon_atombios_get_dentist_vco_freq(rdev); -+ else -+ rdev->clock.vco_freq = rdev->clock.current_dispclk; -+ -+ if (rdev->clock.vco_freq == 0) -+ rdev->clock.vco_freq = 360000; /* 3.6 GHz */ -+ - return true; - } - - return false; - } - --union igp_info { -- struct _ATOM_INTEGRATED_SYSTEM_INFO info; -- struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2; -- struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6; -- struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7; -- struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8; --}; -- - bool radeon_atombios_sideport_present(struct radeon_device *rdev) - { - struct radeon_mode_info *mode_info = &rdev->mode_info; -diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c -index 2c02e99..b214663 100644 ---- a/drivers/gpu/drm/radeon/radeon_audio.c -+++ b/drivers/gpu/drm/radeon/radeon_audio.c -@@ -739,9 +739,6 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder, - struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; - struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); -- struct radeon_connector *radeon_connector = to_radeon_connector(connector); -- struct radeon_connector_atom_dig *dig_connector = -- radeon_connector->con_priv; - - if (!dig || !dig->afmt) - return; -@@ -753,10 +750,7 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder, - radeon_audio_write_speaker_allocation(encoder); - radeon_audio_write_sad_regs(encoder); - radeon_audio_write_latency_fields(encoder, mode); -- if (rdev->clock.dp_extclk || ASIC_IS_DCE5(rdev)) -- radeon_audio_set_dto(encoder, rdev->clock.default_dispclk * 10); -- else -- radeon_audio_set_dto(encoder, dig_connector->dp_clock); -+ radeon_audio_set_dto(encoder, rdev->clock.vco_freq * 10); - radeon_audio_set_audio_packet(encoder); - radeon_audio_select_pin(encoder); - -@@ -781,3 +775,15 @@ void radeon_audio_dpms(struct drm_encoder *encoder, int mode) - if (radeon_encoder->audio && radeon_encoder->audio->dpms) - radeon_encoder->audio->dpms(encoder, mode == DRM_MODE_DPMS_ON); - } -+ -+unsigned int radeon_audio_decode_dfs_div(unsigned int div) -+{ -+ if (div >= 8 && div < 64) -+ return (div - 8) * 25 + 200; -+ else if (div >= 64 && div < 96) -+ return (div - 64) * 50 + 1600; -+ else if (div >= 96 && div < 128) -+ return (div - 96) * 100 + 3200; -+ else -+ return 0; -+} -diff --git a/drivers/gpu/drm/radeon/radeon_audio.h b/drivers/gpu/drm/radeon/radeon_audio.h -index 059cc30..5c70cce 100644 ---- a/drivers/gpu/drm/radeon/radeon_audio.h -+++ b/drivers/gpu/drm/radeon/radeon_audio.h -@@ -79,5 +79,6 @@ void radeon_audio_fini(struct radeon_device *rdev); - void radeon_audio_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode); - void radeon_audio_dpms(struct drm_encoder *encoder, int mode); -+unsigned int radeon_audio_decode_dfs_div(unsigned int div); - - #endif -diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c -index c566993..d690df5 100644 ---- a/drivers/gpu/drm/radeon/radeon_device.c -+++ b/drivers/gpu/drm/radeon/radeon_device.c -@@ 
-1744,6 +1744,7 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon) - } - - drm_kms_helper_poll_enable(dev); -+ drm_helper_hpd_irq_event(dev); - - /* set the power state here in case we are a PX system or headless */ - if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) -diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c -index 1eca0ac..13767d2 100644 ---- a/drivers/gpu/drm/radeon/radeon_display.c -+++ b/drivers/gpu/drm/radeon/radeon_display.c -@@ -403,7 +403,8 @@ static void radeon_flip_work_func(struct work_struct *__work) - struct drm_crtc *crtc = &radeon_crtc->base; - unsigned long flags; - int r; -- int vpos, hpos, stat, min_udelay; -+ int vpos, hpos, stat, min_udelay = 0; -+ unsigned repcnt = 4; - struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id]; - - down_read(&rdev->exclusive_lock); -@@ -454,7 +455,7 @@ static void radeon_flip_work_func(struct work_struct *__work) - * In practice this won't execute very often unless on very fast - * machines because the time window for this to happen is very small. - */ -- for (;;) { -+ while (radeon_crtc->enabled && repcnt--) { - /* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank - * start in hpos, and to the "fudged earlier" vblank start in - * vpos. -@@ -472,10 +473,22 @@ static void radeon_flip_work_func(struct work_struct *__work) - /* Sleep at least until estimated real start of hw vblank */ - spin_unlock_irqrestore(&crtc->dev->event_lock, flags); - min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5); -+ if (min_udelay > vblank->framedur_ns / 2000) { -+ /* Don't wait ridiculously long - something is wrong */ -+ repcnt = 0; -+ break; -+ } - usleep_range(min_udelay, 2 * min_udelay); - spin_lock_irqsave(&crtc->dev->event_lock, flags); - }; - -+ if (!repcnt) -+ DRM_DEBUG_DRIVER("Delay problem on crtc %d: min_udelay %d, " -+ "framedur %d, linedur %d, stat %d, vpos %d, " -+ "hpos %d\n", work->crtc_id, min_udelay, -+ vblank->framedur_ns / 1000, -+ vblank->linedur_ns / 1000, stat, vpos, hpos); -+ - /* do the flip (mmio) */ - radeon_page_flip(rdev, radeon_crtc->crtc_id, work->base); - -diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c -index 84d4563..fb6ad14 100644 ---- a/drivers/gpu/drm/radeon/radeon_object.c -+++ b/drivers/gpu/drm/radeon/radeon_object.c -@@ -33,6 +33,7 @@ - #include <linux/slab.h> - #include <drm/drmP.h> - #include <drm/radeon_drm.h> -+#include <drm/drm_cache.h> - #include "radeon.h" - #include "radeon_trace.h" - -@@ -245,6 +246,12 @@ int radeon_bo_create(struct radeon_device *rdev, - DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for " - "better performance thanks to write-combining\n"); - bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC); -+#else -+ /* For architectures that don't support WC memory, -+ * mask out the WC flag from the BO -+ */ -+ if (!drm_arch_can_wc_memory()) -+ bo->flags &= ~RADEON_GEM_GTT_WC; - #endif - - radeon_ttm_placement_from_domain(bo, domain); -diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c -index 59abebd..2081a60 100644 ---- a/drivers/gpu/drm/radeon/radeon_pm.c -+++ b/drivers/gpu/drm/radeon/radeon_pm.c -@@ -1075,8 +1075,6 @@ force: - - /* update display watermarks based on new power state */ - radeon_bandwidth_update(rdev); -- /* update displays */ -- radeon_dpm_display_configuration_changed(rdev); - - rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs; - 
rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count; -@@ -1097,6 +1095,9 @@ force: - - radeon_dpm_post_set_power_state(rdev); - -+ /* update displays */ -+ radeon_dpm_display_configuration_changed(rdev); -+ - if (rdev->asic->dpm.force_performance_level) { - if (rdev->pm.dpm.thermal_active) { - enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level; -diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c -index c507896..197b157 100644 ---- a/drivers/gpu/drm/radeon/radeon_sa.c -+++ b/drivers/gpu/drm/radeon/radeon_sa.c -@@ -349,8 +349,13 @@ int radeon_sa_bo_new(struct radeon_device *rdev, - /* see if we can skip over some allocations */ - } while (radeon_sa_bo_next_hole(sa_manager, fences, tries)); - -+ for (i = 0; i < RADEON_NUM_RINGS; ++i) -+ radeon_fence_ref(fences[i]); -+ - spin_unlock(&sa_manager->wq.lock); - r = radeon_fence_wait_any(rdev, fences, false); -+ for (i = 0; i < RADEON_NUM_RINGS; ++i) -+ radeon_fence_unref(&fences[i]); - spin_lock(&sa_manager->wq.lock); - /* if we have nothing to wait for block */ - if (r == -ENOENT) { -diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c -index e343074..e06ac54 100644 ---- a/drivers/gpu/drm/radeon/radeon_ttm.c -+++ b/drivers/gpu/drm/radeon/radeon_ttm.c -@@ -758,7 +758,7 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm) - 0, PAGE_SIZE, - PCI_DMA_BIDIRECTIONAL); - if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) { -- while (--i) { -+ while (i--) { - pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i], - PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); - gtt->ttm.dma_address[i] = 0; -diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c -index 48d97c0..3979632 100644 ---- a/drivers/gpu/drm/radeon/radeon_vm.c -+++ b/drivers/gpu/drm/radeon/radeon_vm.c -@@ -455,15 +455,15 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev, - - if (soffset) { - /* make sure object fit at this offset */ -- eoffset = soffset + size; -+ eoffset = soffset + size - 1; - if (soffset >= eoffset) { - r = -EINVAL; - goto error_unreserve; - } - - last_pfn = eoffset / RADEON_GPU_PAGE_SIZE; -- if (last_pfn > rdev->vm_manager.max_pfn) { -- dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n", -+ if (last_pfn >= rdev->vm_manager.max_pfn) { -+ dev_err(rdev->dev, "va above limit (0x%08X >= 0x%08X)\n", - last_pfn, rdev->vm_manager.max_pfn); - r = -EINVAL; - goto error_unreserve; -@@ -478,7 +478,7 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev, - eoffset /= RADEON_GPU_PAGE_SIZE; - if (soffset || eoffset) { - struct interval_tree_node *it; -- it = interval_tree_iter_first(&vm->va, soffset, eoffset - 1); -+ it = interval_tree_iter_first(&vm->va, soffset, eoffset); - if (it && it != &bo_va->it) { - struct radeon_bo_va *tmp; - tmp = container_of(it, struct radeon_bo_va, it); -@@ -518,7 +518,7 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev, - if (soffset || eoffset) { - spin_lock(&vm->status_lock); - bo_va->it.start = soffset; -- bo_va->it.last = eoffset - 1; -+ bo_va->it.last = eoffset; - list_add(&bo_va->vm_status, &vm->cleared); - spin_unlock(&vm->status_lock); - interval_tree_insert(&bo_va->it, &vm->va); -@@ -888,7 +888,7 @@ static void radeon_vm_fence_pts(struct radeon_vm *vm, - unsigned i; - - start >>= radeon_vm_block_size; -- end >>= radeon_vm_block_size; -+ end = (end - 1) >> radeon_vm_block_size; - - for (i = start; i <= end; ++i) - radeon_bo_fence(vm->page_tables[i].bo, fence, true); -diff --git 
a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h -index 4c4a721..d1a7b58 100644 ---- a/drivers/gpu/drm/radeon/sid.h -+++ b/drivers/gpu/drm/radeon/sid.h -@@ -915,6 +915,11 @@ - #define DCCG_AUDIO_DTO1_PHASE 0x05c0 - #define DCCG_AUDIO_DTO1_MODULE 0x05c4 - -+#define DENTIST_DISPCLK_CNTL 0x0490 -+# define DENTIST_DPREFCLK_WDIVIDER(x) (((x) & 0x7f) << 24) -+# define DENTIST_DPREFCLK_WDIVIDER_MASK (0x7f << 24) -+# define DENTIST_DPREFCLK_WDIVIDER_SHIFT 24 -+ - #define AFMT_AUDIO_SRC_CONTROL 0x713c - #define AFMT_AUDIO_SRC_SELECT(x) (((x) & 7) << 0) - /* AFMT_AUDIO_SRC_SELECT -diff --git a/drivers/gpu/drm/radeon/vce_v1_0.c b/drivers/gpu/drm/radeon/vce_v1_0.c -index 07a0d37..a01efe3 100644 ---- a/drivers/gpu/drm/radeon/vce_v1_0.c -+++ b/drivers/gpu/drm/radeon/vce_v1_0.c -@@ -178,12 +178,12 @@ int vce_v1_0_load_fw(struct radeon_device *rdev, uint32_t *data) - return -EINVAL; - } - -- for (i = 0; i < sign->num; ++i) { -- if (sign->val[i].chip_id == chip_id) -+ for (i = 0; i < le32_to_cpu(sign->num); ++i) { -+ if (le32_to_cpu(sign->val[i].chip_id) == chip_id) - break; - } - -- if (i == sign->num) -+ if (i == le32_to_cpu(sign->num)) - return -EINVAL; - - data += (256 - 64) / 4; -@@ -191,18 +191,18 @@ int vce_v1_0_load_fw(struct radeon_device *rdev, uint32_t *data) - data[1] = sign->val[i].nonce[1]; - data[2] = sign->val[i].nonce[2]; - data[3] = sign->val[i].nonce[3]; -- data[4] = sign->len + 64; -+ data[4] = cpu_to_le32(le32_to_cpu(sign->len) + 64); - - memset(&data[5], 0, 44); - memcpy(&data[16], &sign[1], rdev->vce_fw->size - sizeof(*sign)); - -- data += data[4] / 4; -+ data += le32_to_cpu(data[4]) / 4; - data[0] = sign->val[i].sigval[0]; - data[1] = sign->val[i].sigval[1]; - data[2] = sign->val[i].sigval[2]; - data[3] = sign->val[i].sigval[3]; - -- rdev->vce.keyselect = sign->val[i].keyselect; -+ rdev->vce.keyselect = le32_to_cpu(sign->val[i].keyselect); - - return 0; - } -diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c -index 6377e81..67cebb2 100644 ---- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c -+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c -@@ -247,7 +247,7 @@ static void __vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header) - { - struct vmw_cmdbuf_man *man = header->man; - -- BUG_ON(!spin_is_locked(&man->lock)); -+ lockdep_assert_held_once(&man->lock); - - if (header->inline_space) { - vmw_cmdbuf_header_inline_free(header); -diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c -index c49812b..24fb348 100644 ---- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c -+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c -@@ -25,6 +25,7 @@ - * - **************************************************************************/ - #include <linux/module.h> -+#include <linux/console.h> - - #include <drm/drmP.h> - #include "vmwgfx_drv.h" -@@ -1538,6 +1539,12 @@ static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent) - static int __init vmwgfx_init(void) - { - int ret; -+ -+#ifdef CONFIG_VGA_CONSOLE -+ if (vgacon_text_force()) -+ return -EINVAL; -+#endif -+ - ret = drm_pci_init(&driver, &vmw_pci_driver); - if (ret) - DRM_ERROR("Failed initializing DRM.\n"); -diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c -index 9b4bb9e..7c2e118 100644 ---- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c -+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c -@@ -763,21 +763,25 @@ static int vmw_create_dmabuf_proxy(struct drm_device *dev, - uint32_t format; - struct drm_vmw_size content_base_size; - struct vmw_resource 
*res; -+ unsigned int bytes_pp; - int ret; - - switch (mode_cmd->depth) { - case 32: - case 24: - format = SVGA3D_X8R8G8B8; -+ bytes_pp = 4; - break; - - case 16: - case 15: - format = SVGA3D_R5G6B5; -+ bytes_pp = 2; - break; - - case 8: - format = SVGA3D_P8; -+ bytes_pp = 1; - break; - - default: -@@ -785,7 +789,7 @@ static int vmw_create_dmabuf_proxy(struct drm_device *dev, - return -EINVAL; - } - -- content_base_size.width = mode_cmd->width; -+ content_base_size.width = mode_cmd->pitch / bytes_pp; - content_base_size.height = mode_cmd->height; - content_base_size.depth = 1; - -diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c -index c4dcab0..9098f13 100644 ---- a/drivers/hv/channel.c -+++ b/drivers/hv/channel.c -@@ -630,10 +630,19 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer, - * on the ring. We will not signal if more data is - * to be placed. - * -+ * Based on the channel signal state, we will decide -+ * which signaling policy will be applied. -+ * - * If we cannot write to the ring-buffer; signal the host - * even if we may not have written anything. This is a rare - * enough condition that it should not matter. - */ -+ -+ if (channel->signal_policy) -+ signal = true; -+ else -+ kick_q = true; -+ - if (((ret == 0) && kick_q && signal) || (ret)) - vmbus_setevent(channel); - -@@ -733,10 +742,19 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel, - * on the ring. We will not signal if more data is - * to be placed. - * -+ * Based on the channel signal state, we will decide -+ * which signaling policy will be applied. -+ * - * If we cannot write to the ring-buffer; signal the host - * even if we may not have written anything. This is a rare - * enough condition that it should not matter. - */ -+ -+ if (channel->signal_policy) -+ signal = true; -+ else -+ kick_q = true; -+ - if (((ret == 0) && kick_q && signal) || (ret)) - vmbus_setevent(channel); - -diff --git a/drivers/hwmon/ads1015.c b/drivers/hwmon/ads1015.c -index f155b83..2b3105c 100644 ---- a/drivers/hwmon/ads1015.c -+++ b/drivers/hwmon/ads1015.c -@@ -126,7 +126,7 @@ static int ads1015_reg_to_mv(struct i2c_client *client, unsigned int channel, - struct ads1015_data *data = i2c_get_clientdata(client); - unsigned int pga = data->channel_data[channel].pga; - int fullscale = fullscale_table[pga]; -- const unsigned mask = data->id == ads1115 ? 0x7fff : 0x7ff0; -+ const int mask = data->id == ads1115 ? 0x7fff : 0x7ff0; - - return DIV_ROUND_CLOSEST(reg * fullscale, mask); - } -diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c -index c848789..c43318d 100644 ---- a/drivers/hwmon/dell-smm-hwmon.c -+++ b/drivers/hwmon/dell-smm-hwmon.c -@@ -932,6 +932,17 @@ MODULE_DEVICE_TABLE(dmi, i8k_dmi_table); - static struct dmi_system_id i8k_blacklist_dmi_table[] __initdata = { - { - /* -+ * CPU fan speed going up and down on Dell Studio XPS 8000 -+ * for unknown reasons. -+ */ -+ .ident = "Dell Studio XPS 8000", -+ .matches = { -+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."), -+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Studio XPS 8000"), -+ }, -+ }, -+ { -+ /* - * CPU fan speed going up and down on Dell Studio XPS 8100 - * for unknown reasons. 
- */ -diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c -index 82de3de..685568b 100644 ---- a/drivers/hwmon/gpio-fan.c -+++ b/drivers/hwmon/gpio-fan.c -@@ -406,16 +406,11 @@ static int gpio_fan_get_cur_state(struct thermal_cooling_device *cdev, - unsigned long *state) - { - struct gpio_fan_data *fan_data = cdev->devdata; -- int r; - - if (!fan_data) - return -EINVAL; - -- r = get_fan_speed_index(fan_data); -- if (r < 0) -- return r; -- -- *state = r; -+ *state = fan_data->speed_index; - return 0; - } - -diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c -index e254921..93738df 100644 ---- a/drivers/hwtracing/coresight/coresight.c -+++ b/drivers/hwtracing/coresight/coresight.c -@@ -548,7 +548,7 @@ static int coresight_name_match(struct device *dev, void *data) - to_match = data; - i_csdev = to_coresight_device(dev); - -- if (!strcmp(to_match, dev_name(&i_csdev->dev))) -+ if (to_match && !strcmp(to_match, dev_name(&i_csdev->dev))) - return 1; - - return 0; -diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c -index f62d697..27fa0cb 100644 ---- a/drivers/i2c/busses/i2c-i801.c -+++ b/drivers/i2c/busses/i2c-i801.c -@@ -1271,6 +1271,8 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id) - switch (dev->device) { - case PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_SMBUS: - case PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS: -+ case PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS: -+ case PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS: - case PCI_DEVICE_ID_INTEL_DNV_SMBUS: - priv->features |= FEATURE_I2C_BLOCK_READ; - priv->features |= FEATURE_IRQ; -diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c -index 0a26dd6..d6d2b35 100644 ---- a/drivers/infiniband/core/cm.c -+++ b/drivers/infiniband/core/cm.c -@@ -782,11 +782,11 @@ static void cm_enter_timewait(struct cm_id_private *cm_id_priv) - wait_time = cm_convert_to_ms(cm_id_priv->av.timeout); - - /* Check if the device started its remove_one */ -- spin_lock_irq(&cm.lock); -+ spin_lock_irqsave(&cm.lock, flags); - if (!cm_dev->going_down) - queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work, - msecs_to_jiffies(wait_time)); -- spin_unlock_irq(&cm.lock); -+ spin_unlock_irqrestore(&cm.lock, flags); - - cm_id_priv->timewait_info = NULL; - } -diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c -index 2d762a2..17a15c5 100644 ---- a/drivers/infiniband/core/cma.c -+++ b/drivers/infiniband/core/cma.c -@@ -453,7 +453,7 @@ static inline int cma_validate_port(struct ib_device *device, u8 port, - if ((dev_type != ARPHRD_INFINIBAND) && rdma_protocol_ib(device, port)) - return ret; - -- if (dev_type == ARPHRD_ETHER) -+ if (dev_type == ARPHRD_ETHER && rdma_protocol_roce(device, port)) - ndev = dev_get_by_index(&init_net, bound_if_index); - - ret = ib_find_cached_gid_by_port(device, gid, port, ndev, NULL); -diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c -index cb78b1e..f504ba7 100644 ---- a/drivers/infiniband/hw/cxgb3/iwch_cm.c -+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c -@@ -149,7 +149,7 @@ static int iwch_l2t_send(struct t3cdev *tdev, struct sk_buff *skb, struct l2t_en - error = l2t_send(tdev, skb, l2e); - if (error < 0) - kfree_skb(skb); -- return error; -+ return error < 0 ? 
error : 0; - } - - int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb) -@@ -165,7 +165,7 @@ int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb) - error = cxgb3_ofld_send(tdev, skb); - if (error < 0) - kfree_skb(skb); -- return error; -+ return error < 0 ? error : 0; - } - - static void release_tid(struct t3cdev *tdev, u32 hwtid, struct sk_buff *skb) -diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c -index 7e97cb5..c4e0915 100644 ---- a/drivers/infiniband/hw/mlx5/main.c -+++ b/drivers/infiniband/hw/mlx5/main.c -@@ -275,7 +275,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, - props->max_sge = min(max_rq_sg, max_sq_sg); - props->max_sge_rd = props->max_sge; - props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq); -- props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_eq_sz)) - 1; -+ props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1; - props->max_mr = 1 << MLX5_CAP_GEN(mdev, log_max_mkey); - props->max_pd = 1 << MLX5_CAP_GEN(mdev, log_max_pd); - props->max_qp_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_req_qp); -diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c -index 40f85bb..3eff35c 100644 ---- a/drivers/infiniband/hw/qib/qib_qp.c -+++ b/drivers/infiniband/hw/qib/qib_qp.c -@@ -100,9 +100,10 @@ static u32 credit_table[31] = { - 32768 /* 1E */ - }; - --static void get_map_page(struct qib_qpn_table *qpt, struct qpn_map *map) -+static void get_map_page(struct qib_qpn_table *qpt, struct qpn_map *map, -+ gfp_t gfp) - { -- unsigned long page = get_zeroed_page(GFP_KERNEL); -+ unsigned long page = get_zeroed_page(gfp); - - /* - * Free the page if someone raced with us installing it. -@@ -121,7 +122,7 @@ static void get_map_page(struct qib_qpn_table *qpt, struct qpn_map *map) - * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI. - */ - static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt, -- enum ib_qp_type type, u8 port) -+ enum ib_qp_type type, u8 port, gfp_t gfp) - { - u32 i, offset, max_scan, qpn; - struct qpn_map *map; -@@ -151,7 +152,7 @@ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt, - max_scan = qpt->nmaps - !offset; - for (i = 0;;) { - if (unlikely(!map->page)) { -- get_map_page(qpt, map); -+ get_map_page(qpt, map, gfp); - if (unlikely(!map->page)) - break; - } -@@ -983,13 +984,21 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd, - size_t sz; - size_t sg_list_sz; - struct ib_qp *ret; -+ gfp_t gfp; -+ - - if (init_attr->cap.max_send_sge > ib_qib_max_sges || - init_attr->cap.max_send_wr > ib_qib_max_qp_wrs || -- init_attr->create_flags) { -- ret = ERR_PTR(-EINVAL); -- goto bail; -- } -+ init_attr->create_flags & ~(IB_QP_CREATE_USE_GFP_NOIO)) -+ return ERR_PTR(-EINVAL); -+ -+ /* GFP_NOIO is applicable in RC QPs only */ -+ if (init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO && -+ init_attr->qp_type != IB_QPT_RC) -+ return ERR_PTR(-EINVAL); -+ -+ gfp = init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO ? -+ GFP_NOIO : GFP_KERNEL; - - /* Check receive queue parameters if no SRQ is specified. 
*/ - if (!init_attr->srq) { -@@ -1021,7 +1030,8 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd, - sz = sizeof(struct qib_sge) * - init_attr->cap.max_send_sge + - sizeof(struct qib_swqe); -- swq = vmalloc((init_attr->cap.max_send_wr + 1) * sz); -+ swq = __vmalloc((init_attr->cap.max_send_wr + 1) * sz, -+ gfp, PAGE_KERNEL); - if (swq == NULL) { - ret = ERR_PTR(-ENOMEM); - goto bail; -@@ -1037,13 +1047,13 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd, - } else if (init_attr->cap.max_recv_sge > 1) - sg_list_sz = sizeof(*qp->r_sg_list) * - (init_attr->cap.max_recv_sge - 1); -- qp = kzalloc(sz + sg_list_sz, GFP_KERNEL); -+ qp = kzalloc(sz + sg_list_sz, gfp); - if (!qp) { - ret = ERR_PTR(-ENOMEM); - goto bail_swq; - } - RCU_INIT_POINTER(qp->next, NULL); -- qp->s_hdr = kzalloc(sizeof(*qp->s_hdr), GFP_KERNEL); -+ qp->s_hdr = kzalloc(sizeof(*qp->s_hdr), gfp); - if (!qp->s_hdr) { - ret = ERR_PTR(-ENOMEM); - goto bail_qp; -@@ -1058,8 +1068,16 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd, - qp->r_rq.max_sge = init_attr->cap.max_recv_sge; - sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) + - sizeof(struct qib_rwqe); -- qp->r_rq.wq = vmalloc_user(sizeof(struct qib_rwq) + -- qp->r_rq.size * sz); -+ if (gfp != GFP_NOIO) -+ qp->r_rq.wq = vmalloc_user( -+ sizeof(struct qib_rwq) + -+ qp->r_rq.size * sz); -+ else -+ qp->r_rq.wq = __vmalloc( -+ sizeof(struct qib_rwq) + -+ qp->r_rq.size * sz, -+ gfp, PAGE_KERNEL); -+ - if (!qp->r_rq.wq) { - ret = ERR_PTR(-ENOMEM); - goto bail_qp; -@@ -1090,7 +1108,7 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd, - dev = to_idev(ibpd->device); - dd = dd_from_dev(dev); - err = alloc_qpn(dd, &dev->qpn_table, init_attr->qp_type, -- init_attr->port_num); -+ init_attr->port_num, gfp); - if (err < 0) { - ret = ERR_PTR(err); - vfree(qp->r_rq.wq); -diff --git a/drivers/infiniband/hw/qib/qib_verbs_mcast.c b/drivers/infiniband/hw/qib/qib_verbs_mcast.c -index f8ea069..b2fb528 100644 ---- a/drivers/infiniband/hw/qib/qib_verbs_mcast.c -+++ b/drivers/infiniband/hw/qib/qib_verbs_mcast.c -@@ -286,15 +286,13 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) - struct qib_ibdev *dev = to_idev(ibqp->device); - struct qib_ibport *ibp = to_iport(ibqp->device, qp->port_num); - struct qib_mcast *mcast = NULL; -- struct qib_mcast_qp *p, *tmp; -+ struct qib_mcast_qp *p, *tmp, *delp = NULL; - struct rb_node *n; - int last = 0; - int ret; - -- if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) { -- ret = -EINVAL; -- goto bail; -- } -+ if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) -+ return -EINVAL; - - spin_lock_irq(&ibp->lock); - -@@ -303,8 +301,7 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) - while (1) { - if (n == NULL) { - spin_unlock_irq(&ibp->lock); -- ret = -EINVAL; -- goto bail; -+ return -EINVAL; - } - - mcast = rb_entry(n, struct qib_mcast, rb_node); -@@ -328,6 +325,7 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) - */ - list_del_rcu(&p->list); - mcast->n_attached--; -+ delp = p; - - /* If this was the last attached QP, remove the GID too. */ - if (list_empty(&mcast->qp_list)) { -@@ -338,15 +336,16 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) - } - - spin_unlock_irq(&ibp->lock); -+ /* QP not attached */ -+ if (!delp) -+ return -EINVAL; -+ /* -+ * Wait for any list walkers to finish before freeing the -+ * list element. 
-+ */ -+ wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1); -+ qib_mcast_qp_free(delp); - -- if (p) { -- /* -- * Wait for any list walkers to finish before freeing the -- * list element. -- */ -- wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1); -- qib_mcast_qp_free(p); -- } - if (last) { - atomic_dec(&mcast->refcount); - wait_event(mcast->wait, !atomic_read(&mcast->refcount)); -@@ -355,11 +354,7 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) - dev->n_mcast_grps_allocated--; - spin_unlock_irq(&dev->n_mcast_grps_lock); - } -- -- ret = 0; -- --bail: -- return ret; -+ return 0; - } - - int qib_mcast_tree_empty(struct qib_ibport *ibp) -diff --git a/drivers/irqchip/irq-atmel-aic-common.c b/drivers/irqchip/irq-atmel-aic-common.c -index b12a5d5..37199b9 100644 ---- a/drivers/irqchip/irq-atmel-aic-common.c -+++ b/drivers/irqchip/irq-atmel-aic-common.c -@@ -86,7 +86,7 @@ int aic_common_set_priority(int priority, unsigned *val) - priority > AT91_AIC_IRQ_MAX_PRIORITY) - return -EINVAL; - -- *val &= AT91_AIC_PRIOR; -+ *val &= ~AT91_AIC_PRIOR; - *val |= priority; - - return 0; -diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c -index e23d1d1..a159529f 100644 ---- a/drivers/irqchip/irq-gic-v3-its.c -+++ b/drivers/irqchip/irq-gic-v3-its.c -@@ -597,11 +597,6 @@ static void its_unmask_irq(struct irq_data *d) - lpi_set_config(d, true); - } - --static void its_eoi_irq(struct irq_data *d) --{ -- gic_write_eoir(d->hwirq); --} -- - static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val, - bool force) - { -@@ -638,7 +633,7 @@ static struct irq_chip its_irq_chip = { - .name = "ITS", - .irq_mask = its_mask_irq, - .irq_unmask = its_unmask_irq, -- .irq_eoi = its_eoi_irq, -+ .irq_eoi = irq_chip_eoi_parent, - .irq_set_affinity = its_set_affinity, - .irq_compose_msi_msg = its_irq_compose_msi_msg, - }; -diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c -index c22e2d4..efe5084 100644 ---- a/drivers/irqchip/irq-mxs.c -+++ b/drivers/irqchip/irq-mxs.c -@@ -241,6 +241,7 @@ static int __init asm9260_of_init(struct device_node *np, - writel(0, icoll_priv.intr + i); - - icoll_add_domain(np, ASM9260_NUM_IRQS); -+ set_handle_irq(icoll_handle_irq); - - return 0; - } -diff --git a/drivers/irqchip/irq-omap-intc.c b/drivers/irqchip/irq-omap-intc.c -index 8587d0f..f6cb1b8 100644 ---- a/drivers/irqchip/irq-omap-intc.c -+++ b/drivers/irqchip/irq-omap-intc.c -@@ -47,6 +47,7 @@ - #define INTC_ILR0 0x0100 - - #define ACTIVEIRQ_MASK 0x7f /* omap2/3 active interrupt bits */ -+#define SPURIOUSIRQ_MASK (0x1ffffff << 7) - #define INTCPS_NR_ILR_REGS 128 - #define INTCPS_NR_MIR_REGS 4 - -@@ -330,11 +331,35 @@ static int __init omap_init_irq(u32 base, struct device_node *node) - static asmlinkage void __exception_irq_entry - omap_intc_handle_irq(struct pt_regs *regs) - { -+ extern unsigned long irq_err_count; - u32 irqnr; - - irqnr = intc_readl(INTC_SIR); -+ -+ /* -+ * A spurious IRQ can result if interrupt that triggered the -+ * sorting is no longer active during the sorting (10 INTC -+ * functional clock cycles after interrupt assertion). Or a -+ * change in interrupt mask affected the result during sorting -+ * time. There is no special handling required except ignoring -+ * the SIR register value just read and retrying. 
-+ * See section 6.2.5 of AM335x TRM Literature Number: SPRUH73K -+ * -+ * Many a times, a spurious interrupt situation has been fixed -+ * by adding a flush for the posted write acking the IRQ in -+ * the device driver. Typically, this is going be the device -+ * driver whose interrupt was handled just before the spurious -+ * IRQ occurred. Pay attention to those device drivers if you -+ * run into hitting the spurious IRQ condition below. -+ */ -+ if (unlikely((irqnr & SPURIOUSIRQ_MASK) == SPURIOUSIRQ_MASK)) { -+ pr_err_once("%s: spurious irq!\n", __func__); -+ irq_err_count++; -+ omap_ack_irq(NULL); -+ return; -+ } -+ - irqnr &= ACTIVEIRQ_MASK; -- WARN_ONCE(!irqnr, "Spurious IRQ ?\n"); - handle_domain_irq(domain, irqnr, regs); - } - -diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c -index 83392f8..22b9e34 100644 ---- a/drivers/md/bcache/btree.c -+++ b/drivers/md/bcache/btree.c -@@ -1741,6 +1741,7 @@ static void bch_btree_gc(struct cache_set *c) - do { - ret = btree_root(gc_root, c, &op, &writes, &stats); - closure_sync(&writes); -+ cond_resched(); - - if (ret && ret != -EAGAIN) - pr_warn("gc failed!"); -@@ -2162,8 +2163,10 @@ int bch_btree_insert_check_key(struct btree *b, struct btree_op *op, - rw_lock(true, b, b->level); - - if (b->key.ptr[0] != btree_ptr || -- b->seq != seq + 1) -+ b->seq != seq + 1) { -+ op->lock = b->level; - goto out; -+ } - } - - SET_KEY_PTRS(check_key, 1); -diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c -index 679a093..8d0ead9 100644 ---- a/drivers/md/bcache/super.c -+++ b/drivers/md/bcache/super.c -@@ -685,6 +685,8 @@ static void bcache_device_link(struct bcache_device *d, struct cache_set *c, - WARN(sysfs_create_link(&d->kobj, &c->kobj, "cache") || - sysfs_create_link(&c->kobj, &d->kobj, d->name), - "Couldn't create device <-> cache set symlinks"); -+ -+ clear_bit(BCACHE_DEV_UNLINK_DONE, &d->flags); - } - - static void bcache_device_detach(struct bcache_device *d) -@@ -847,8 +849,11 @@ void bch_cached_dev_run(struct cached_dev *dc) - buf[SB_LABEL_SIZE] = '\0'; - env[2] = kasprintf(GFP_KERNEL, "CACHED_LABEL=%s", buf); - -- if (atomic_xchg(&dc->running, 1)) -+ if (atomic_xchg(&dc->running, 1)) { -+ kfree(env[1]); -+ kfree(env[2]); - return; -+ } - - if (!d->c && - BDEV_STATE(&dc->sb) != BDEV_STATE_NONE) { -@@ -1933,6 +1938,8 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, - else - err = "device busy"; - mutex_unlock(&bch_register_lock); -+ if (attr == &ksysfs_register_quiet) -+ goto out; - } - goto err; - } -@@ -1971,8 +1978,7 @@ out: - err_close: - blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); - err: -- if (attr != &ksysfs_register_quiet) -- pr_info("error opening %s: %s", path, err); -+ pr_info("error opening %s: %s", path, err); - ret = -EINVAL; - goto out; - } -@@ -2066,8 +2072,10 @@ static int __init bcache_init(void) - closure_debug_init(); - - bcache_major = register_blkdev(0, "bcache"); -- if (bcache_major < 0) -+ if (bcache_major < 0) { -+ unregister_reboot_notifier(&reboot); - return bcache_major; -+ } - - if (!(bcache_wq = create_workqueue("bcache")) || - !(bcache_kobj = kobject_create_and_add("bcache", fs_kobj)) || -diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c -index b23f88d..b9346cd 100644 ---- a/drivers/md/bcache/writeback.c -+++ b/drivers/md/bcache/writeback.c -@@ -323,6 +323,10 @@ void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode, - - static bool dirty_pred(struct keybuf *buf, struct bkey *k) - { -+ struct cached_dev 
*dc = container_of(buf, struct cached_dev, writeback_keys); -+ -+ BUG_ON(KEY_INODE(k) != dc->disk.id); -+ - return KEY_DIRTY(k); - } - -@@ -372,11 +376,24 @@ next: - } - } - -+/* -+ * Returns true if we scanned the entire disk -+ */ - static bool refill_dirty(struct cached_dev *dc) - { - struct keybuf *buf = &dc->writeback_keys; -+ struct bkey start = KEY(dc->disk.id, 0, 0); - struct bkey end = KEY(dc->disk.id, MAX_KEY_OFFSET, 0); -- bool searched_from_start = false; -+ struct bkey start_pos; -+ -+ /* -+ * make sure keybuf pos is inside the range for this disk - at bringup -+ * we might not be attached yet so this disk's inode nr isn't -+ * initialized then -+ */ -+ if (bkey_cmp(&buf->last_scanned, &start) < 0 || -+ bkey_cmp(&buf->last_scanned, &end) > 0) -+ buf->last_scanned = start; - - if (dc->partial_stripes_expensive) { - refill_full_stripes(dc); -@@ -384,14 +401,20 @@ static bool refill_dirty(struct cached_dev *dc) - return false; - } - -- if (bkey_cmp(&buf->last_scanned, &end) >= 0) { -- buf->last_scanned = KEY(dc->disk.id, 0, 0); -- searched_from_start = true; -- } -- -+ start_pos = buf->last_scanned; - bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred); - -- return bkey_cmp(&buf->last_scanned, &end) >= 0 && searched_from_start; -+ if (bkey_cmp(&buf->last_scanned, &end) < 0) -+ return false; -+ -+ /* -+ * If we get to the end start scanning again from the beginning, and -+ * only scan up to where we initially started scanning from: -+ */ -+ buf->last_scanned = start; -+ bch_refill_keybuf(dc->disk.c, buf, &start_pos, dirty_pred); -+ -+ return bkey_cmp(&buf->last_scanned, &start_pos) >= 0; - } - - static int bch_writeback_thread(void *arg) -diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h -index 0a9dab1..073a042 100644 ---- a/drivers/md/bcache/writeback.h -+++ b/drivers/md/bcache/writeback.h -@@ -63,7 +63,8 @@ static inline bool should_writeback(struct cached_dev *dc, struct bio *bio, - - static inline void bch_writeback_queue(struct cached_dev *dc) - { -- wake_up_process(dc->writeback_thread); -+ if (!IS_ERR_OR_NULL(dc->writeback_thread)) -+ wake_up_process(dc->writeback_thread); - } - - static inline void bch_writeback_add(struct cached_dev *dc) -diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h -index fae34e7..12b5216 100644 ---- a/drivers/md/dm-exception-store.h -+++ b/drivers/md/dm-exception-store.h -@@ -69,7 +69,7 @@ struct dm_exception_store_type { - * Update the metadata with this exception. 
- */ - void (*commit_exception) (struct dm_exception_store *store, -- struct dm_exception *e, -+ struct dm_exception *e, int valid, - void (*callback) (void *, int success), - void *callback_context); - -diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c -index 3164b8b..4d39093 100644 ---- a/drivers/md/dm-snap-persistent.c -+++ b/drivers/md/dm-snap-persistent.c -@@ -695,7 +695,7 @@ static int persistent_prepare_exception(struct dm_exception_store *store, - } - - static void persistent_commit_exception(struct dm_exception_store *store, -- struct dm_exception *e, -+ struct dm_exception *e, int valid, - void (*callback) (void *, int success), - void *callback_context) - { -@@ -704,6 +704,9 @@ static void persistent_commit_exception(struct dm_exception_store *store, - struct core_exception ce; - struct commit_callback *cb; - -+ if (!valid) -+ ps->valid = 0; -+ - ce.old_chunk = e->old_chunk; - ce.new_chunk = e->new_chunk; - write_exception(ps, ps->current_committed++, &ce); -diff --git a/drivers/md/dm-snap-transient.c b/drivers/md/dm-snap-transient.c -index 9b7c8c8..4d50a12 100644 ---- a/drivers/md/dm-snap-transient.c -+++ b/drivers/md/dm-snap-transient.c -@@ -52,12 +52,12 @@ static int transient_prepare_exception(struct dm_exception_store *store, - } - - static void transient_commit_exception(struct dm_exception_store *store, -- struct dm_exception *e, -+ struct dm_exception *e, int valid, - void (*callback) (void *, int success), - void *callback_context) - { - /* Just succeed */ -- callback(callback_context, 1); -+ callback(callback_context, valid); - } - - static void transient_usage(struct dm_exception_store *store, -diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c -index c06b74e..61f184a 100644 ---- a/drivers/md/dm-snap.c -+++ b/drivers/md/dm-snap.c -@@ -1438,8 +1438,9 @@ static void __invalidate_snapshot(struct dm_snapshot *s, int err) - dm_table_event(s->ti->table); - } - --static void pending_complete(struct dm_snap_pending_exception *pe, int success) -+static void pending_complete(void *context, int success) - { -+ struct dm_snap_pending_exception *pe = context; - struct dm_exception *e; - struct dm_snapshot *s = pe->snap; - struct bio *origin_bios = NULL; -@@ -1509,24 +1510,13 @@ out: - free_pending_exception(pe); - } - --static void commit_callback(void *context, int success) --{ -- struct dm_snap_pending_exception *pe = context; -- -- pending_complete(pe, success); --} -- - static void complete_exception(struct dm_snap_pending_exception *pe) - { - struct dm_snapshot *s = pe->snap; - -- if (unlikely(pe->copy_error)) -- pending_complete(pe, 0); -- -- else -- /* Update the metadata if we are persistent */ -- s->store->type->commit_exception(s->store, &pe->e, -- commit_callback, pe); -+ /* Update the metadata if we are persistent */ -+ s->store->type->commit_exception(s->store, &pe->e, !pe->copy_error, -+ pending_complete, pe); - } - - /* -diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c -index 63903a5..a1cc797 100644 ---- a/drivers/md/dm-thin.c -+++ b/drivers/md/dm-thin.c -@@ -3453,8 +3453,8 @@ static void pool_postsuspend(struct dm_target *ti) - struct pool_c *pt = ti->private; - struct pool *pool = pt->pool; - -- cancel_delayed_work(&pool->waker); -- cancel_delayed_work(&pool->no_space_timeout); -+ cancel_delayed_work_sync(&pool->waker); -+ cancel_delayed_work_sync(&pool->no_space_timeout); - flush_workqueue(pool->wq); - (void) commit(pool); - } -diff --git a/drivers/md/dm.c b/drivers/md/dm.c -index 5df4048..dd83492 100644 ---- 
a/drivers/md/dm.c -+++ b/drivers/md/dm.c -@@ -1191,6 +1191,8 @@ static void dm_unprep_request(struct request *rq) - - if (clone) - free_rq_clone(clone); -+ else if (!tio->md->queue->mq_ops) -+ free_rq_tio(tio); - } - - /* -diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c -index fca6dbc..7e44005 100644 ---- a/drivers/md/persistent-data/dm-space-map-metadata.c -+++ b/drivers/md/persistent-data/dm-space-map-metadata.c -@@ -152,12 +152,9 @@ static int brb_peek(struct bop_ring_buffer *brb, struct block_op *result) - - static int brb_pop(struct bop_ring_buffer *brb) - { -- struct block_op *bop; -- - if (brb_empty(brb)) - return -ENODATA; - -- bop = brb->bops + brb->begin; - brb->begin = brb_next(brb, brb->begin); - - return 0; -diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c -index c38ef1a..e2a3833 100644 ---- a/drivers/media/dvb-core/dvb_frontend.c -+++ b/drivers/media/dvb-core/dvb_frontend.c -@@ -2313,9 +2313,9 @@ static int dvb_frontend_ioctl_legacy(struct file *file, - dev_dbg(fe->dvb->device, "%s: current delivery system on cache: %d, V3 type: %d\n", - __func__, c->delivery_system, fe->ops.info.type); - -- /* Force the CAN_INVERSION_AUTO bit on. If the frontend doesn't -- * do it, it is done for it. */ -- info->caps |= FE_CAN_INVERSION_AUTO; -+ /* Set CAN_INVERSION_AUTO bit on in other than oneshot mode */ -+ if (!(fepriv->tune_mode_flags & FE_TUNE_MODE_ONESHOT)) -+ info->caps |= FE_CAN_INVERSION_AUTO; - err = 0; - break; - } -diff --git a/drivers/media/dvb-frontends/tda1004x.c b/drivers/media/dvb-frontends/tda1004x.c -index 0e209b5..c6abeb4 100644 ---- a/drivers/media/dvb-frontends/tda1004x.c -+++ b/drivers/media/dvb-frontends/tda1004x.c -@@ -903,9 +903,18 @@ static int tda1004x_get_fe(struct dvb_frontend *fe) - { - struct dtv_frontend_properties *fe_params = &fe->dtv_property_cache; - struct tda1004x_state* state = fe->demodulator_priv; -+ int status; - - dprintk("%s\n", __func__); - -+ status = tda1004x_read_byte(state, TDA1004X_STATUS_CD); -+ if (status == -1) -+ return -EIO; -+ -+ /* Only update the properties cache if device is locked */ -+ if (!(status & 8)) -+ return 0; -+ - // inversion status - fe_params->inversion = INVERSION_OFF; - if (tda1004x_read_byte(state, TDA1004X_CONFC1) & 0x20) -diff --git a/drivers/media/rc/sunxi-cir.c b/drivers/media/rc/sunxi-cir.c -index 7830aef..40f7768 100644 ---- a/drivers/media/rc/sunxi-cir.c -+++ b/drivers/media/rc/sunxi-cir.c -@@ -153,6 +153,8 @@ static int sunxi_ir_probe(struct platform_device *pdev) - if (!ir) - return -ENOMEM; - -+ spin_lock_init(&ir->ir_lock); -+ - if (of_device_is_compatible(dn, "allwinner,sun5i-a13-ir")) - ir->fifo_size = 64; - else -diff --git a/drivers/media/tuners/si2157.c b/drivers/media/tuners/si2157.c -index ce157ed..0e1ca2b 100644 ---- a/drivers/media/tuners/si2157.c -+++ b/drivers/media/tuners/si2157.c -@@ -168,6 +168,7 @@ static int si2157_init(struct dvb_frontend *fe) - len = fw->data[fw->size - remaining]; - if (len > SI2157_ARGLEN) { - dev_err(&client->dev, "Bad firmware length\n"); -+ ret = -EINVAL; - goto err_release_firmware; - } - memcpy(cmd.args, &fw->data[(fw->size - remaining) + 1], len); -diff --git a/drivers/media/usb/gspca/ov534.c b/drivers/media/usb/gspca/ov534.c -index 146071b..bfff1d1 100644 ---- a/drivers/media/usb/gspca/ov534.c -+++ b/drivers/media/usb/gspca/ov534.c -@@ -1491,8 +1491,13 @@ static void sd_set_streamparm(struct gspca_dev *gspca_dev, - struct v4l2_fract *tpf = 
&cp->timeperframe; - struct sd *sd = (struct sd *) gspca_dev; - -- /* Set requested framerate */ -- sd->frame_rate = tpf->denominator / tpf->numerator; -+ if (tpf->numerator == 0 || tpf->denominator == 0) -+ /* Set default framerate */ -+ sd->frame_rate = 30; -+ else -+ /* Set requested framerate */ -+ sd->frame_rate = tpf->denominator / tpf->numerator; -+ - if (gspca_dev->streaming) - set_frame_rate(gspca_dev); - -diff --git a/drivers/media/usb/gspca/topro.c b/drivers/media/usb/gspca/topro.c -index c70ff40..c028a5c 100644 ---- a/drivers/media/usb/gspca/topro.c -+++ b/drivers/media/usb/gspca/topro.c -@@ -4802,7 +4802,11 @@ static void sd_set_streamparm(struct gspca_dev *gspca_dev, - struct v4l2_fract *tpf = &cp->timeperframe; - int fr, i; - -- sd->framerate = tpf->denominator / tpf->numerator; -+ if (tpf->numerator == 0 || tpf->denominator == 0) -+ sd->framerate = 30; -+ else -+ sd->framerate = tpf->denominator / tpf->numerator; -+ - if (gspca_dev->streaming) - setframerate(gspca_dev, v4l2_ctrl_g_ctrl(gspca_dev->exposure)); - -diff --git a/drivers/media/v4l2-core/videobuf2-v4l2.c b/drivers/media/v4l2-core/videobuf2-v4l2.c -index 27b4b9e..502984c 100644 ---- a/drivers/media/v4l2-core/videobuf2-v4l2.c -+++ b/drivers/media/v4l2-core/videobuf2-v4l2.c -@@ -822,10 +822,10 @@ unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait) - return res | POLLERR; - - /* -- * For output streams you can write as long as there are fewer buffers -- * queued than there are buffers available. -+ * For output streams you can call write() as long as there are fewer -+ * buffers queued than there are buffers available. - */ -- if (q->is_output && q->queued_count < q->num_buffers) -+ if (q->is_output && q->fileio && q->queued_count < q->num_buffers) - return res | POLLOUT | POLLWRNORM; - - if (list_empty(&q->done_list)) { -diff --git a/drivers/misc/cxl/vphb.c b/drivers/misc/cxl/vphb.c -index c241e15..cbd4331 100644 ---- a/drivers/misc/cxl/vphb.c -+++ b/drivers/misc/cxl/vphb.c -@@ -203,7 +203,7 @@ static int cxl_pcie_write_config(struct pci_bus *bus, unsigned int devfn, - mask <<= shift; - val <<= shift; - -- v = (in_le32(ioaddr) & ~mask) || (val & mask); -+ v = (in_le32(ioaddr) & ~mask) | (val & mask); - - out_le32(ioaddr, v); - return PCIBIOS_SUCCESSFUL; -diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c -index b2f2486..80f9afc 100644 ---- a/drivers/misc/mei/main.c -+++ b/drivers/misc/mei/main.c -@@ -458,7 +458,11 @@ static int mei_ioctl_client_notify_request(struct file *file, u32 request) - { - struct mei_cl *cl = file->private_data; - -- return mei_cl_notify_request(cl, file, request); -+ if (request != MEI_HBM_NOTIFICATION_START && -+ request != MEI_HBM_NOTIFICATION_STOP) -+ return -EINVAL; -+ -+ return mei_cl_notify_request(cl, file, (u8)request); - } - - /** -@@ -657,7 +661,9 @@ out: - * @file: pointer to file structure - * @band: band bitmap - * -- * Return: poll mask -+ * Return: negative on error, -+ * 0 if it did no changes, -+ * and positive a process was added or deleted - */ - static int mei_fasync(int fd, struct file *file, int band) - { -@@ -665,7 +671,7 @@ static int mei_fasync(int fd, struct file *file, int band) - struct mei_cl *cl = file->private_data; - - if (!mei_cl_is_connected(cl)) -- return POLLERR; -+ return -ENODEV; - - return fasync_helper(fd, file, band, &cl->ev_async); - } -diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c -index 3a9a79e..3d5087b 100644 ---- a/drivers/mmc/core/mmc.c -+++ b/drivers/mmc/core/mmc.c -@@ -1076,8 +1076,7 @@ 
static int mmc_select_hs400(struct mmc_card *card) - mmc_set_clock(host, max_dtr); - - /* Switch card to HS mode */ -- val = EXT_CSD_TIMING_HS | -- card->drive_strength << EXT_CSD_DRV_STR_SHIFT; -+ val = EXT_CSD_TIMING_HS; - err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, - EXT_CSD_HS_TIMING, val, - card->ext_csd.generic_cmd6_time, -@@ -1160,8 +1159,7 @@ int mmc_hs400_to_hs200(struct mmc_card *card) - mmc_set_clock(host, max_dtr); - - /* Switch HS400 to HS DDR */ -- val = EXT_CSD_TIMING_HS | -- card->drive_strength << EXT_CSD_DRV_STR_SHIFT; -+ val = EXT_CSD_TIMING_HS; - err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING, - val, card->ext_csd.generic_cmd6_time, - true, send_status, true); -diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c -index 141eaa9..967535d 100644 ---- a/drivers/mmc/core/sd.c -+++ b/drivers/mmc/core/sd.c -@@ -626,9 +626,9 @@ static int mmc_sd_init_uhs_card(struct mmc_card *card) - * SDR104 mode SD-cards. Note that tuning is mandatory for SDR104. - */ - if (!mmc_host_is_spi(card->host) && -- (card->sd_bus_speed == UHS_SDR50_BUS_SPEED || -- card->sd_bus_speed == UHS_DDR50_BUS_SPEED || -- card->sd_bus_speed == UHS_SDR104_BUS_SPEED)) { -+ (card->host->ios.timing == MMC_TIMING_UHS_SDR50 || -+ card->host->ios.timing == MMC_TIMING_UHS_DDR50 || -+ card->host->ios.timing == MMC_TIMING_UHS_SDR104)) { - err = mmc_execute_tuning(card); - - /* -@@ -638,7 +638,7 @@ static int mmc_sd_init_uhs_card(struct mmc_card *card) - * difference between v3.00 and 3.01 spec means that CMD19 - * tuning is also available for DDR50 mode. - */ -- if (err && card->sd_bus_speed == UHS_DDR50_BUS_SPEED) { -+ if (err && card->host->ios.timing == MMC_TIMING_UHS_DDR50) { - pr_warn("%s: ddr50 tuning failed\n", - mmc_hostname(card->host)); - err = 0; -diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c -index 16d838e..467b3cf 100644 ---- a/drivers/mmc/core/sdio.c -+++ b/drivers/mmc/core/sdio.c -@@ -535,8 +535,8 @@ static int mmc_sdio_init_uhs_card(struct mmc_card *card) - * SDR104 mode SD-cards. Note that tuning is mandatory for SDR104. - */ - if (!mmc_host_is_spi(card->host) && -- ((card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR50) || -- (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR104))) -+ ((card->host->ios.timing == MMC_TIMING_UHS_SDR50) || -+ (card->host->ios.timing == MMC_TIMING_UHS_SDR104))) - err = mmc_execute_tuning(card); - out: - return err; -@@ -630,7 +630,7 @@ try_again: - */ - if (!powered_resume && (rocr & ocr & R4_18V_PRESENT)) { - err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180, -- ocr); -+ ocr_card); - if (err == -EAGAIN) { - sdio_reset(host); - mmc_go_idle(host); -diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c -index fb26674..acece32 100644 ---- a/drivers/mmc/host/mmci.c -+++ b/drivers/mmc/host/mmci.c -@@ -1886,7 +1886,7 @@ static struct amba_id mmci_ids[] = { - { - .id = 0x00280180, - .mask = 0x00ffffff, -- .data = &variant_u300, -+ .data = &variant_nomadik, - }, - { - .id = 0x00480180, -diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c -index ce08896..28a057f 100644 ---- a/drivers/mmc/host/pxamci.c -+++ b/drivers/mmc/host/pxamci.c -@@ -804,7 +804,7 @@ static int pxamci_probe(struct platform_device *pdev) - dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_ro); - goto out; - } else { -- mmc->caps |= host->pdata->gpio_card_ro_invert ? -+ mmc->caps2 |= host->pdata->gpio_card_ro_invert ? 
- 0 : MMC_CAP2_RO_ACTIVE_HIGH; - } - -diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c -index f6047fc..a5cda92 100644 ---- a/drivers/mmc/host/sdhci-acpi.c -+++ b/drivers/mmc/host/sdhci-acpi.c -@@ -146,6 +146,33 @@ static const struct sdhci_acpi_chip sdhci_acpi_chip_int = { - .ops = &sdhci_acpi_ops_int, - }; - -+static int bxt_get_cd(struct mmc_host *mmc) -+{ -+ int gpio_cd = mmc_gpio_get_cd(mmc); -+ struct sdhci_host *host = mmc_priv(mmc); -+ unsigned long flags; -+ int ret = 0; -+ -+ if (!gpio_cd) -+ return 0; -+ -+ pm_runtime_get_sync(mmc->parent); -+ -+ spin_lock_irqsave(&host->lock, flags); -+ -+ if (host->flags & SDHCI_DEVICE_DEAD) -+ goto out; -+ -+ ret = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT); -+out: -+ spin_unlock_irqrestore(&host->lock, flags); -+ -+ pm_runtime_mark_last_busy(mmc->parent); -+ pm_runtime_put_autosuspend(mmc->parent); -+ -+ return ret; -+} -+ - static int sdhci_acpi_emmc_probe_slot(struct platform_device *pdev, - const char *hid, const char *uid) - { -@@ -196,6 +223,9 @@ static int sdhci_acpi_sd_probe_slot(struct platform_device *pdev, - - /* Platform specific code during sd probe slot goes here */ - -+ if (hid && !strcmp(hid, "80865ACA")) -+ host->mmc_host_ops.get_cd = bxt_get_cd; -+ - return 0; - } - -diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c -index cf7ad45..45ee07d 100644 ---- a/drivers/mmc/host/sdhci-pci-core.c -+++ b/drivers/mmc/host/sdhci-pci-core.c -@@ -277,7 +277,7 @@ static int spt_select_drive_strength(struct sdhci_host *host, - if (sdhci_pci_spt_drive_strength > 0) - drive_strength = sdhci_pci_spt_drive_strength & 0xf; - else -- drive_strength = 1; /* 33-ohm */ -+ drive_strength = 0; /* Default 50-ohm */ - - if ((mmc_driver_type_mask(drive_strength) & card_drv) == 0) - drive_strength = 0; /* Default 50-ohm */ -@@ -330,6 +330,33 @@ static void spt_read_drive_strength(struct sdhci_host *host) - sdhci_pci_spt_drive_strength = 0x10 | ((val >> 12) & 0xf); - } - -+static int bxt_get_cd(struct mmc_host *mmc) -+{ -+ int gpio_cd = mmc_gpio_get_cd(mmc); -+ struct sdhci_host *host = mmc_priv(mmc); -+ unsigned long flags; -+ int ret = 0; -+ -+ if (!gpio_cd) -+ return 0; -+ -+ pm_runtime_get_sync(mmc->parent); -+ -+ spin_lock_irqsave(&host->lock, flags); -+ -+ if (host->flags & SDHCI_DEVICE_DEAD) -+ goto out; -+ -+ ret = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT); -+out: -+ spin_unlock_irqrestore(&host->lock, flags); -+ -+ pm_runtime_mark_last_busy(mmc->parent); -+ pm_runtime_put_autosuspend(mmc->parent); -+ -+ return ret; -+} -+ - static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot) - { - slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE | -@@ -362,6 +389,10 @@ static int byt_sd_probe_slot(struct sdhci_pci_slot *slot) - slot->cd_con_id = NULL; - slot->cd_idx = 0; - slot->cd_override_level = true; -+ if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXT_SD || -+ slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_APL_SD) -+ slot->host->mmc_host_ops.get_cd = bxt_get_cd; -+ - return 0; - } - -diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c -index b48565e..8814eb6 100644 ---- a/drivers/mmc/host/sdhci.c -+++ b/drivers/mmc/host/sdhci.c -@@ -540,9 +540,12 @@ static int sdhci_adma_table_pre(struct sdhci_host *host, - - BUG_ON(len > 65536); - -- /* tran, valid */ -- sdhci_adma_write_desc(host, desc, addr, len, ADMA2_TRAN_VALID); -- desc += host->desc_sz; -+ if (len) { -+ /* tran, valid */ -+ sdhci_adma_write_desc(host, desc, 
addr, len, -+ ADMA2_TRAN_VALID); -+ desc += host->desc_sz; -+ } - - /* - * If this triggers then we have a calculation bug -@@ -1364,7 +1367,7 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) - sdhci_runtime_pm_get(host); - - /* Firstly check card presence */ -- present = sdhci_do_get_cd(host); -+ present = mmc->ops->get_cd(mmc); - - spin_lock_irqsave(&host->lock, flags); - -@@ -2760,7 +2763,7 @@ static int sdhci_runtime_pm_put(struct sdhci_host *host) - - static void sdhci_runtime_pm_bus_on(struct sdhci_host *host) - { -- if (host->runtime_suspended || host->bus_on) -+ if (host->bus_on) - return; - host->bus_on = true; - pm_runtime_get_noresume(host->mmc->parent); -@@ -2768,7 +2771,7 @@ static void sdhci_runtime_pm_bus_on(struct sdhci_host *host) - - static void sdhci_runtime_pm_bus_off(struct sdhci_host *host) - { -- if (host->runtime_suspended || !host->bus_on) -+ if (!host->bus_on) - return; - host->bus_on = false; - pm_runtime_put_noidle(host->mmc->parent); -@@ -2861,6 +2864,8 @@ struct sdhci_host *sdhci_alloc_host(struct device *dev, - - host = mmc_priv(mmc); - host->mmc = mmc; -+ host->mmc_host_ops = sdhci_ops; -+ mmc->ops = &host->mmc_host_ops; - - return host; - } -@@ -3057,7 +3062,6 @@ int sdhci_add_host(struct sdhci_host *host) - /* - * Set host parameters. - */ -- mmc->ops = &sdhci_ops; - max_clk = host->max_clk; - - if (host->ops->get_min_clock) -diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h -index 9d4aa31..9c331ac 100644 ---- a/drivers/mmc/host/sdhci.h -+++ b/drivers/mmc/host/sdhci.h -@@ -425,6 +425,7 @@ struct sdhci_host { - - /* Internal data */ - struct mmc_host *mmc; /* MMC structure */ -+ struct mmc_host_ops mmc_host_ops; /* MMC host ops */ - u64 dma_mask; /* custom DMA mask */ - - #if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE) -diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c -index 4498e92..b47122d 100644 ---- a/drivers/mmc/host/usdhi6rol0.c -+++ b/drivers/mmc/host/usdhi6rol0.c -@@ -1634,7 +1634,7 @@ static void usdhi6_timeout_work(struct work_struct *work) - struct usdhi6_host *host = container_of(d, struct usdhi6_host, timeout_work); - struct mmc_request *mrq = host->mrq; - struct mmc_data *data = mrq ? mrq->data : NULL; -- struct scatterlist *sg = host->sg ?: data->sg; -+ struct scatterlist *sg; - - dev_warn(mmc_dev(host->mmc), - "%s timeout wait %u CMD%d: IRQ 0x%08x:0x%08x, last IRQ 0x%08x\n", -@@ -1666,6 +1666,7 @@ static void usdhi6_timeout_work(struct work_struct *work) - case USDHI6_WAIT_FOR_MWRITE: - case USDHI6_WAIT_FOR_READ: - case USDHI6_WAIT_FOR_WRITE: -+ sg = host->sg ?: data->sg; - dev_dbg(mmc_dev(host->mmc), - "%c: page #%u @ +0x%zx %ux%u in SG%u. Current SG %u bytes @ %u\n", - data->flags & MMC_DATA_READ ? 
'R' : 'W', host->page_idx, -diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c -index f1692e4..28bbca0 100644 ---- a/drivers/net/bonding/bond_main.c -+++ b/drivers/net/bonding/bond_main.c -@@ -214,6 +214,8 @@ static void bond_uninit(struct net_device *bond_dev); - static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev, - struct rtnl_link_stats64 *stats); - static void bond_slave_arr_handler(struct work_struct *work); -+static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act, -+ int mod); - - /*---------------------------- General routines -----------------------------*/ - -@@ -2418,7 +2420,7 @@ int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond, - struct slave *slave) - { - struct arphdr *arp = (struct arphdr *)skb->data; -- struct slave *curr_active_slave; -+ struct slave *curr_active_slave, *curr_arp_slave; - unsigned char *arp_ptr; - __be32 sip, tip; - int alen, is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP); -@@ -2465,26 +2467,41 @@ int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond, - &sip, &tip); - - curr_active_slave = rcu_dereference(bond->curr_active_slave); -+ curr_arp_slave = rcu_dereference(bond->current_arp_slave); - -- /* Backup slaves won't see the ARP reply, but do come through -- * here for each ARP probe (so we swap the sip/tip to validate -- * the probe). In a "redundant switch, common router" type of -- * configuration, the ARP probe will (hopefully) travel from -- * the active, through one switch, the router, then the other -- * switch before reaching the backup. -+ /* We 'trust' the received ARP enough to validate it if: -+ * -+ * (a) the slave receiving the ARP is active (which includes the -+ * current ARP slave, if any), or -+ * -+ * (b) the receiving slave isn't active, but there is a currently -+ * active slave and it received valid arp reply(s) after it became -+ * the currently active slave, or -+ * -+ * (c) there is an ARP slave that sent an ARP during the prior ARP -+ * interval, and we receive an ARP reply on any slave. We accept -+ * these because switch FDB update delays may deliver the ARP -+ * reply to a slave other than the sender of the ARP request. - * -- * We 'trust' the arp requests if there is an active slave and -- * it received valid arp reply(s) after it became active. This -- * is done to avoid endless looping when we can't reach the -+ * Note: for (b), backup slaves are receiving the broadcast ARP -+ * request, not a reply. This request passes from the sending -+ * slave through the L2 switch(es) to the receiving slave. Since -+ * this is checking the request, sip/tip are swapped for -+ * validation. -+ * -+ * This is done to avoid endless looping when we can't reach the - * arp_ip_target and fool ourselves with our own arp requests. 
- */ -- - if (bond_is_active_slave(slave)) - bond_validate_arp(bond, slave, sip, tip); - else if (curr_active_slave && - time_after(slave_last_rx(bond, curr_active_slave), - curr_active_slave->last_link_up)) - bond_validate_arp(bond, slave, tip, sip); -+ else if (curr_arp_slave && (arp->ar_op == htons(ARPOP_REPLY)) && -+ bond_time_in_interval(bond, -+ dev_trans_start(curr_arp_slave->dev), 1)) -+ bond_validate_arp(bond, slave, sip, tip); - - out_unlock: - if (arp != (struct arphdr *)skb->data) -diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c -index fc5b756..eb7192f 100644 ---- a/drivers/net/can/usb/ems_usb.c -+++ b/drivers/net/can/usb/ems_usb.c -@@ -117,6 +117,9 @@ MODULE_LICENSE("GPL v2"); - */ - #define EMS_USB_ARM7_CLOCK 8000000 - -+#define CPC_TX_QUEUE_TRIGGER_LOW 25 -+#define CPC_TX_QUEUE_TRIGGER_HIGH 35 -+ - /* - * CAN-Message representation in a CPC_MSG. Message object type is - * CPC_MSG_TYPE_CAN_FRAME or CPC_MSG_TYPE_RTR_FRAME or -@@ -278,6 +281,11 @@ static void ems_usb_read_interrupt_callback(struct urb *urb) - switch (urb->status) { - case 0: - dev->free_slots = dev->intr_in_buffer[1]; -+ if(dev->free_slots > CPC_TX_QUEUE_TRIGGER_HIGH){ -+ if (netif_queue_stopped(netdev)){ -+ netif_wake_queue(netdev); -+ } -+ } - break; - - case -ECONNRESET: /* unlink */ -@@ -526,8 +534,6 @@ static void ems_usb_write_bulk_callback(struct urb *urb) - /* Release context */ - context->echo_index = MAX_TX_URBS; - -- if (netif_queue_stopped(netdev)) -- netif_wake_queue(netdev); - } - - /* -@@ -587,7 +593,7 @@ static int ems_usb_start(struct ems_usb *dev) - int err, i; - - dev->intr_in_buffer[0] = 0; -- dev->free_slots = 15; /* initial size */ -+ dev->free_slots = 50; /* initial size */ - - for (i = 0; i < MAX_RX_URBS; i++) { - struct urb *urb = NULL; -@@ -835,7 +841,7 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne - - /* Slow down tx path */ - if (atomic_read(&dev->active_tx_urbs) >= MAX_TX_URBS || -- dev->free_slots < 5) { -+ dev->free_slots < CPC_TX_QUEUE_TRIGGER_LOW) { - netif_stop_queue(netdev); - } - } -diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c -index b06dba0..2dea39b 100644 ---- a/drivers/net/dsa/mv88e6xxx.c -+++ b/drivers/net/dsa/mv88e6xxx.c -@@ -1519,7 +1519,7 @@ int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port, - - /* no PVID with ranges, otherwise it's a bug */ - if (pvid) -- err = _mv88e6xxx_port_pvid_set(ds, port, vid); -+ err = _mv88e6xxx_port_pvid_set(ds, port, vlan->vid_end); - unlock: - mutex_unlock(&ps->smi_mutex); - -diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c -index 79789d8..ca5ac5d 100644 ---- a/drivers/net/ethernet/broadcom/tg3.c -+++ b/drivers/net/ethernet/broadcom/tg3.c -@@ -7833,6 +7833,14 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi, - return ret; - } - -+static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb) -+{ -+ /* Check if we will never have enough descriptors, -+ * as gso_segs can be more than current ring size -+ */ -+ return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3; -+} -+ - static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *); - - /* Use GSO to workaround all TSO packets that meet HW bug conditions -@@ -7936,14 +7944,19 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) - * vlan encapsulated. 
- */ - if (skb->protocol == htons(ETH_P_8021Q) || -- skb->protocol == htons(ETH_P_8021AD)) -- return tg3_tso_bug(tp, tnapi, txq, skb); -+ skb->protocol == htons(ETH_P_8021AD)) { -+ if (tg3_tso_bug_gso_check(tnapi, skb)) -+ return tg3_tso_bug(tp, tnapi, txq, skb); -+ goto drop; -+ } - - if (!skb_is_gso_v6(skb)) { - if (unlikely((ETH_HLEN + hdr_len) > 80) && -- tg3_flag(tp, TSO_BUG)) -- return tg3_tso_bug(tp, tnapi, txq, skb); -- -+ tg3_flag(tp, TSO_BUG)) { -+ if (tg3_tso_bug_gso_check(tnapi, skb)) -+ return tg3_tso_bug(tp, tnapi, txq, skb); -+ goto drop; -+ } - ip_csum = iph->check; - ip_tot_len = iph->tot_len; - iph->check = 0; -@@ -8075,7 +8088,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) - if (would_hit_hwbug) { - tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i); - -- if (mss) { -+ if (mss && tg3_tso_bug_gso_check(tnapi, skb)) { - /* If it's a TSO packet, do GSO instead of - * allocating and copying to a large linear SKB - */ -diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h -index 1671fa3..7ba6d53 100644 ---- a/drivers/net/ethernet/cisco/enic/enic.h -+++ b/drivers/net/ethernet/cisco/enic/enic.h -@@ -33,7 +33,7 @@ - - #define DRV_NAME "enic" - #define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver" --#define DRV_VERSION "2.3.0.12" -+#define DRV_VERSION "2.3.0.20" - #define DRV_COPYRIGHT "Copyright 2008-2013 Cisco Systems, Inc" - - #define ENIC_BARS_MAX 6 -diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c -index 1ffd105..1fdf5fe 100644 ---- a/drivers/net/ethernet/cisco/enic/vnic_dev.c -+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c -@@ -298,7 +298,8 @@ static int _vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, - int wait) - { - struct devcmd2_controller *dc2c = vdev->devcmd2; -- struct devcmd2_result *result = dc2c->result + dc2c->next_result; -+ struct devcmd2_result *result; -+ u8 color; - unsigned int i; - int delay, err; - u32 fetch_index, new_posted; -@@ -336,13 +337,17 @@ static int _vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, - if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT) - return 0; - -+ result = dc2c->result + dc2c->next_result; -+ color = dc2c->color; -+ -+ dc2c->next_result++; -+ if (dc2c->next_result == dc2c->result_size) { -+ dc2c->next_result = 0; -+ dc2c->color = dc2c->color ? 0 : 1; -+ } -+ - for (delay = 0; delay < wait; delay++) { -- if (result->color == dc2c->color) { -- dc2c->next_result++; -- if (dc2c->next_result == dc2c->result_size) { -- dc2c->next_result = 0; -- dc2c->color = dc2c->color ? 0 : 1; -- } -+ if (result->color == color) { - if (result->error) { - err = result->error; - if (err != ERR_ECMDUNKNOWN || -diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c -index 038f9ce..1494997 100644 ---- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c -+++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c -@@ -236,6 +236,24 @@ static const struct ptp_clock_info mlx4_en_ptp_clock_info = { - .enable = mlx4_en_phc_enable, - }; - -+#define MLX4_EN_WRAP_AROUND_SEC 10ULL -+ -+/* This function calculates the max shift that enables the user range -+ * of MLX4_EN_WRAP_AROUND_SEC values in the cycles register. -+ */ -+static u32 freq_to_shift(u16 freq) -+{ -+ u32 freq_khz = freq * 1000; -+ u64 max_val_cycles = freq_khz * 1000 * MLX4_EN_WRAP_AROUND_SEC; -+ u64 max_val_cycles_rounded = is_power_of_2(max_val_cycles + 1) ? 
-+ max_val_cycles : roundup_pow_of_two(max_val_cycles) - 1; -+ /* calculate max possible multiplier in order to fit in 64bit */ -+ u64 max_mul = div_u64(0xffffffffffffffffULL, max_val_cycles_rounded); -+ -+ /* This comes from the reverse of clocksource_khz2mult */ -+ return ilog2(div_u64(max_mul * freq_khz, 1000000)); -+} -+ - void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev) - { - struct mlx4_dev *dev = mdev->dev; -@@ -254,12 +272,7 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev) - memset(&mdev->cycles, 0, sizeof(mdev->cycles)); - mdev->cycles.read = mlx4_en_read_clock; - mdev->cycles.mask = CLOCKSOURCE_MASK(48); -- /* Using shift to make calculation more accurate. Since current HW -- * clock frequency is 427 MHz, and cycles are given using a 48 bits -- * register, the biggest shift when calculating using u64, is 14 -- * (max_cycles * multiplier < 2^64) -- */ -- mdev->cycles.shift = 14; -+ mdev->cycles.shift = freq_to_shift(dev->caps.hca_core_clock); - mdev->cycles.mult = - clocksource_khz2mult(1000 * dev->caps.hca_core_clock, mdev->cycles.shift); - mdev->nominal_c_mult = mdev->cycles.mult; -diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c -index 7869f97..67e9633 100644 ---- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c -+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c -@@ -2381,8 +2381,6 @@ out: - /* set offloads */ - priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM | - NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL; -- priv->dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL; -- priv->dev->features |= NETIF_F_GSO_UDP_TUNNEL; - } - - static void mlx4_en_del_vxlan_offloads(struct work_struct *work) -@@ -2393,8 +2391,6 @@ static void mlx4_en_del_vxlan_offloads(struct work_struct *work) - /* unset offloads */ - priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_RXCSUM | - NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL); -- priv->dev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL; -- priv->dev->features &= ~NETIF_F_GSO_UDP_TUNNEL; - - ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port, - VXLAN_STEER_BY_OUTER_MAC, 0); -@@ -3020,6 +3016,11 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, - priv->rss_hash_fn = ETH_RSS_HASH_TOP; - } - -+ if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) { -+ dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL; -+ dev->features |= NETIF_F_GSO_UDP_TUNNEL; -+ } -+ - mdev->pndev[port] = dev; - mdev->upper[port] = NULL; - -diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c -index ee99e67..3904b5f 100644 ---- a/drivers/net/ethernet/mellanox/mlx4/en_port.c -+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c -@@ -238,11 +238,11 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) - stats->collisions = 0; - stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP); - stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength); -- stats->rx_over_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw); -+ stats->rx_over_errors = 0; - stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC); - stats->rx_frame_errors = 0; - stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw); -- stats->rx_missed_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw); -+ stats->rx_missed_errors = 0; - stats->tx_aborted_errors = 0; - stats->tx_carrier_errors = 0; - stats->tx_fifo_errors = 0; -diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c 
-index 617fb22..7dbeafa 100644 ---- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c -+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c -@@ -45,6 +45,7 @@ - #include <linux/if_bridge.h> - #include <linux/workqueue.h> - #include <linux/jiffies.h> -+#include <linux/rtnetlink.h> - #include <net/switchdev.h> - - #include "spectrum.h" -@@ -812,6 +813,7 @@ static void mlxsw_sp_fdb_notify_work(struct work_struct *work) - - mlxsw_sp = container_of(work, struct mlxsw_sp, fdb_notify.dw.work); - -+ rtnl_lock(); - do { - mlxsw_reg_sfn_pack(sfn_pl); - err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl); -@@ -824,6 +826,7 @@ static void mlxsw_sp_fdb_notify_work(struct work_struct *work) - mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i); - - } while (num_rec); -+ rtnl_unlock(); - - kfree(sfn_pl); - mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp); -diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c -index e9f2349..52ec3d6 100644 ---- a/drivers/net/ethernet/rocker/rocker.c -+++ b/drivers/net/ethernet/rocker/rocker.c -@@ -3531,12 +3531,14 @@ static void rocker_port_fdb_learn_work(struct work_struct *work) - info.addr = lw->addr; - info.vid = lw->vid; - -+ rtnl_lock(); - if (learned && removing) - call_switchdev_notifiers(SWITCHDEV_FDB_DEL, - lw->rocker_port->dev, &info.info); - else if (learned && !removing) - call_switchdev_notifiers(SWITCHDEV_FDB_ADD, - lw->rocker_port->dev, &info.info); -+ rtnl_unlock(); - - rocker_port_kfree(lw->trans, work); - } -diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c -index 47b7117..e6cefd0 100644 ---- a/drivers/net/phy/dp83640.c -+++ b/drivers/net/phy/dp83640.c -@@ -845,6 +845,11 @@ static void decode_rxts(struct dp83640_private *dp83640, - struct skb_shared_hwtstamps *shhwtstamps = NULL; - struct sk_buff *skb; - unsigned long flags; -+ u8 overflow; -+ -+ overflow = (phy_rxts->ns_hi >> 14) & 0x3; -+ if (overflow) -+ pr_debug("rx timestamp queue overflow, count %d\n", overflow); - - spin_lock_irqsave(&dp83640->rx_lock, flags); - -@@ -887,6 +892,7 @@ static void decode_txts(struct dp83640_private *dp83640, - struct skb_shared_hwtstamps shhwtstamps; - struct sk_buff *skb; - u64 ns; -+ u8 overflow; - - /* We must already have the skb that triggered this. 
*/ - -@@ -896,6 +902,17 @@ static void decode_txts(struct dp83640_private *dp83640, - pr_debug("have timestamp but tx_queue empty\n"); - return; - } -+ -+ overflow = (phy_txts->ns_hi >> 14) & 0x3; -+ if (overflow) { -+ pr_debug("tx timestamp queue overflow, count %d\n", overflow); -+ while (skb) { -+ skb_complete_tx_timestamp(skb, NULL); -+ skb = skb_dequeue(&dp83640->tx_queue); -+ } -+ return; -+ } -+ - ns = phy2txts(phy_txts); - memset(&shhwtstamps, 0, sizeof(shhwtstamps)); - shhwtstamps.hwtstamp = ns_to_ktime(ns); -diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c -index 0a37f84..4e0068e7 100644 ---- a/drivers/net/ppp/pppoe.c -+++ b/drivers/net/ppp/pppoe.c -@@ -395,6 +395,8 @@ static int pppoe_rcv_core(struct sock *sk, struct sk_buff *skb) - - if (!__pppoe_xmit(sk_pppox(relay_po), skb)) - goto abort_put; -+ -+ sock_put(sk_pppox(relay_po)); - } else { - if (sock_queue_rcv_skb(sk, skb)) - goto abort_kfree; -diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c -index 597c53e..f7e8c79 100644 ---- a/drivers/net/ppp/pptp.c -+++ b/drivers/net/ppp/pptp.c -@@ -129,24 +129,27 @@ static int lookup_chan_dst(u16 call_id, __be32 d_addr) - return i < MAX_CALLID; - } - --static int add_chan(struct pppox_sock *sock) -+static int add_chan(struct pppox_sock *sock, -+ struct pptp_addr *sa) - { - static int call_id; - - spin_lock(&chan_lock); -- if (!sock->proto.pptp.src_addr.call_id) { -+ if (!sa->call_id) { - call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, call_id + 1); - if (call_id == MAX_CALLID) { - call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, 1); - if (call_id == MAX_CALLID) - goto out_err; - } -- sock->proto.pptp.src_addr.call_id = call_id; -- } else if (test_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap)) -+ sa->call_id = call_id; -+ } else if (test_bit(sa->call_id, callid_bitmap)) { - goto out_err; -+ } - -- set_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap); -- rcu_assign_pointer(callid_sock[sock->proto.pptp.src_addr.call_id], sock); -+ sock->proto.pptp.src_addr = *sa; -+ set_bit(sa->call_id, callid_bitmap); -+ rcu_assign_pointer(callid_sock[sa->call_id], sock); - spin_unlock(&chan_lock); - - return 0; -@@ -416,7 +419,6 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr, - struct sock *sk = sock->sk; - struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr; - struct pppox_sock *po = pppox_sk(sk); -- struct pptp_opt *opt = &po->proto.pptp; - int error = 0; - - if (sockaddr_len < sizeof(struct sockaddr_pppox)) -@@ -424,10 +426,22 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr, - - lock_sock(sk); - -- opt->src_addr = sp->sa_addr.pptp; -- if (add_chan(po)) -+ if (sk->sk_state & PPPOX_DEAD) { -+ error = -EALREADY; -+ goto out; -+ } -+ -+ if (sk->sk_state & PPPOX_BOUND) { - error = -EBUSY; -+ goto out; -+ } -+ -+ if (add_chan(po, &sp->sa_addr.pptp)) -+ error = -EBUSY; -+ else -+ sk->sk_state |= PPPOX_BOUND; - -+out: - release_sock(sk); - return error; - } -@@ -498,7 +512,7 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr, - } - - opt->dst_addr = sp->sa_addr.pptp; -- sk->sk_state = PPPOX_CONNECTED; -+ sk->sk_state |= PPPOX_CONNECTED; - - end: - release_sock(sk); -diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c -index 5fccc5a..982e0acd 100644 ---- a/drivers/net/usb/qmi_wwan.c -+++ b/drivers/net/usb/qmi_wwan.c -@@ -492,6 +492,7 @@ static const struct usb_device_id products[] = { - - /* 3. 
Combined interface devices matching on interface number */ - {QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */ -+ {QMI_FIXED_INTF(0x05c6, 0x6001, 3)}, /* 4G LTE usb-modem U901 */ - {QMI_FIXED_INTF(0x05c6, 0x7000, 0)}, - {QMI_FIXED_INTF(0x05c6, 0x7001, 1)}, - {QMI_FIXED_INTF(0x05c6, 0x7002, 1)}, -diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c -index 405a7b6..e0fcda4 100644 ---- a/drivers/net/vxlan.c -+++ b/drivers/net/vxlan.c -@@ -1984,11 +1984,6 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, - vxlan->cfg.port_max, true); - - if (info) { -- if (info->key.tun_flags & TUNNEL_CSUM) -- flags |= VXLAN_F_UDP_CSUM; -- else -- flags &= ~VXLAN_F_UDP_CSUM; -- - ttl = info->key.ttl; - tos = info->key.tos; - -@@ -2003,8 +1998,15 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, - goto drop; - sk = vxlan->vn4_sock->sock->sk; - -- if (info && (info->key.tun_flags & TUNNEL_DONT_FRAGMENT)) -- df = htons(IP_DF); -+ if (info) { -+ if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT) -+ df = htons(IP_DF); -+ -+ if (info->key.tun_flags & TUNNEL_CSUM) -+ flags |= VXLAN_F_UDP_CSUM; -+ else -+ flags &= ~VXLAN_F_UDP_CSUM; -+ } - - memset(&fl4, 0, sizeof(fl4)); - fl4.flowi4_oif = rdst ? rdst->remote_ifindex : 0; -@@ -2102,6 +2104,13 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, - return; - } - -+ if (info) { -+ if (info->key.tun_flags & TUNNEL_CSUM) -+ flags &= ~VXLAN_F_UDP_ZERO_CSUM6_TX; -+ else -+ flags |= VXLAN_F_UDP_ZERO_CSUM6_TX; -+ } -+ - ttl = ttl ? : ip6_dst_hoplimit(ndst); - err = vxlan6_xmit_skb(ndst, sk, skb, dev, &saddr, &dst->sin6.sin6_addr, - 0, ttl, src_port, dst_port, htonl(vni << 8), md, -diff --git a/drivers/net/wireless/iwlwifi/dvm/lib.c b/drivers/net/wireless/iwlwifi/dvm/lib.c -index e18629a..0961f33 100644 ---- a/drivers/net/wireless/iwlwifi/dvm/lib.c -+++ b/drivers/net/wireless/iwlwifi/dvm/lib.c -@@ -1154,6 +1154,9 @@ int iwlagn_suspend(struct iwl_priv *priv, struct cfg80211_wowlan *wowlan) - - priv->ucode_loaded = false; - iwl_trans_stop_device(priv->trans); -+ ret = iwl_trans_start_hw(priv->trans); -+ if (ret) -+ goto out; - - priv->wowlan = true; - -diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c -index d6e0c1b..8215d74 100644 ---- a/drivers/net/wireless/iwlwifi/mvm/scan.c -+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c -@@ -1267,6 +1267,10 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm, - return -EBUSY; - } - -+ /* we don't support "match all" in the firmware */ -+ if (!req->n_match_sets) -+ return -EOPNOTSUPP; -+ - ret = iwl_mvm_check_running_scans(mvm, type); - if (ret) - return ret; -diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c -index 639761f..d58c094 100644 ---- a/drivers/net/wireless/iwlwifi/pcie/drv.c -+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c -@@ -384,6 +384,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = { - {IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_n_cfg)}, - {IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)}, -+ {IWL_PCI_DEVICE(0x095A, 0x5C10, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x5412, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)}, -@@ -401,10 +402,10 @@ static const struct pci_device_id iwl_hw_card_ids[] = { - {IWL_PCI_DEVICE(0x095A, 0x900A, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)}, - 
{IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)}, -- {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)}, -+ {IWL_PCI_DEVICE(0x095B, 0x9210, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095B, 0x9200, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)}, -- {IWL_PCI_DEVICE(0x095A, 0x9310, iwl7265_2ac_cfg)}, -+ {IWL_PCI_DEVICE(0x095B, 0x9310, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x5020, iwl7265_2n_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x502A, iwl7265_2n_cfg)}, -diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c -index 9028345..8c72047 100644 ---- a/drivers/net/wireless/iwlwifi/pcie/trans.c -+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c -@@ -7,6 +7,7 @@ - * - * Copyright(c) 2007 - 2015 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH -+ * Copyright(c) 2016 Intel Deutschland GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as -@@ -33,6 +34,7 @@ - * - * Copyright(c) 2005 - 2015 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH -+ * Copyright(c) 2016 Intel Deutschland GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without -@@ -924,9 +926,16 @@ monitor: - if (dest->monitor_mode == EXTERNAL_MODE && trans_pcie->fw_mon_size) { - iwl_write_prph(trans, le32_to_cpu(dest->base_reg), - trans_pcie->fw_mon_phys >> dest->base_shift); -- iwl_write_prph(trans, le32_to_cpu(dest->end_reg), -- (trans_pcie->fw_mon_phys + -- trans_pcie->fw_mon_size) >> dest->end_shift); -+ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) -+ iwl_write_prph(trans, le32_to_cpu(dest->end_reg), -+ (trans_pcie->fw_mon_phys + -+ trans_pcie->fw_mon_size - 256) >> -+ dest->end_shift); -+ else -+ iwl_write_prph(trans, le32_to_cpu(dest->end_reg), -+ (trans_pcie->fw_mon_phys + -+ trans_pcie->fw_mon_size) >> -+ dest->end_shift); - } - } - -diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c -index f46c9d7..7f471bf 100644 ---- a/drivers/net/wireless/realtek/rtlwifi/pci.c -+++ b/drivers/net/wireless/realtek/rtlwifi/pci.c -@@ -801,7 +801,9 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw) - hw_queue); - if (rx_remained_cnt == 0) - return; -- -+ buffer_desc = &rtlpci->rx_ring[rxring_idx].buffer_desc[ -+ rtlpci->rx_ring[rxring_idx].idx]; -+ pdesc = (struct rtl_rx_desc *)skb->data; - } else { /* rx descriptor */ - pdesc = &rtlpci->rx_ring[rxring_idx].desc[ - rtlpci->rx_ring[rxring_idx].idx]; -@@ -824,13 +826,6 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw) - new_skb = dev_alloc_skb(rtlpci->rxbuffersize); - if (unlikely(!new_skb)) - goto no_new; -- if (rtlpriv->use_new_trx_flow) { -- buffer_desc = -- &rtlpci->rx_ring[rxring_idx].buffer_desc -- [rtlpci->rx_ring[rxring_idx].idx]; -- /*means rx wifi info*/ -- pdesc = (struct rtl_rx_desc *)skb->data; -- } - memset(&rx_status , 0 , sizeof(rx_status)); - rtlpriv->cfg->ops->query_rx_desc(hw, &stats, - &rx_status, (u8 *)pdesc, skb); -diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c -index 1134412..47e32cb 100644 ---- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c -+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c -@@ -88,8 +88,6 @@ int rtl88e_init_sw_vars(struct ieee80211_hw 
*hw) - u8 tid; - - rtl8188ee_bt_reg_init(hw); -- rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support; -- - rtlpriv->dm.dm_initialgain_enable = 1; - rtlpriv->dm.dm_flag = 0; - rtlpriv->dm.disable_framebursting = 0; -@@ -138,6 +136,11 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw) - rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps; - rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps; - rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps; -+ rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support; -+ rtlpriv->cfg->mod_params->sw_crypto = -+ rtlpriv->cfg->mod_params->sw_crypto; -+ rtlpriv->cfg->mod_params->disable_watchdog = -+ rtlpriv->cfg->mod_params->disable_watchdog; - if (rtlpriv->cfg->mod_params->disable_watchdog) - pr_info("watchdog disabled\n"); - if (!rtlpriv->psc.inactiveps) -diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c -index de6cb6c..4780bdc 100644 ---- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c -+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c -@@ -139,6 +139,8 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw) - rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps; - rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps; - rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps; -+ rtlpriv->cfg->mod_params->sw_crypto = -+ rtlpriv->cfg->mod_params->sw_crypto; - if (!rtlpriv->psc.inactiveps) - pr_info("rtl8192ce: Power Save off (module option)\n"); - if (!rtlpriv->psc.fwctrl_lps) -diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c -index fd4a535..7c6f7f0 100644 ---- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c -+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c -@@ -65,6 +65,8 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw) - rtlpriv->dm.disable_framebursting = false; - rtlpriv->dm.thermalvalue = 0; - rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug; -+ rtlpriv->cfg->mod_params->sw_crypto = -+ rtlpriv->cfg->mod_params->sw_crypto; - - /* for firmware buf */ - rtlpriv->rtlhal.pfirmware = vzalloc(0x4000); -diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c -index b19d039..c6e09a1 100644 ---- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c -+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c -@@ -376,8 +376,8 @@ module_param_named(swlps, rtl92de_mod_params.swctrl_lps, bool, 0444); - module_param_named(fwlps, rtl92de_mod_params.fwctrl_lps, bool, 0444); - MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n"); - MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n"); --MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n"); --MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n"); -+MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 1)\n"); -+MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 0)\n"); - MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)"); - - static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume); -diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c -index e1fd27c..31baca41 100644 ---- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c -+++ 
b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c -@@ -187,6 +187,8 @@ static int rtl92s_init_sw_vars(struct ieee80211_hw *hw) - rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps; - rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps; - rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps; -+ rtlpriv->cfg->mod_params->sw_crypto = -+ rtlpriv->cfg->mod_params->sw_crypto; - if (!rtlpriv->psc.inactiveps) - pr_info("Power Save off (module option)\n"); - if (!rtlpriv->psc.fwctrl_lps) -@@ -425,8 +427,8 @@ module_param_named(swlps, rtl92se_mod_params.swctrl_lps, bool, 0444); - module_param_named(fwlps, rtl92se_mod_params.fwctrl_lps, bool, 0444); - MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n"); - MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n"); --MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n"); --MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n"); -+MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 1)\n"); -+MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 0)\n"); - MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)"); - - static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume); -diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c -index 3859b3e..ff49a8c 100644 ---- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c -+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c -@@ -150,6 +150,11 @@ int rtl8723e_init_sw_vars(struct ieee80211_hw *hw) - rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps; - rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps; - rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps; -+ rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support; -+ rtlpriv->cfg->mod_params->sw_crypto = -+ rtlpriv->cfg->mod_params->sw_crypto; -+ rtlpriv->cfg->mod_params->disable_watchdog = -+ rtlpriv->cfg->mod_params->disable_watchdog; - if (rtlpriv->cfg->mod_params->disable_watchdog) - pr_info("watchdog disabled\n"); - rtlpriv->psc.reg_fwctrl_lps = 3; -@@ -267,6 +272,8 @@ static struct rtl_mod_params rtl8723e_mod_params = { - .swctrl_lps = false, - .fwctrl_lps = true, - .debug = DBG_EMERG, -+ .msi_support = false, -+ .disable_watchdog = false, - }; - - static struct rtl_hal_cfg rtl8723e_hal_cfg = { -@@ -383,12 +390,14 @@ module_param_named(debug, rtl8723e_mod_params.debug, int, 0444); - module_param_named(ips, rtl8723e_mod_params.inactiveps, bool, 0444); - module_param_named(swlps, rtl8723e_mod_params.swctrl_lps, bool, 0444); - module_param_named(fwlps, rtl8723e_mod_params.fwctrl_lps, bool, 0444); -+module_param_named(msi, rtl8723e_mod_params.msi_support, bool, 0444); - module_param_named(disable_watchdog, rtl8723e_mod_params.disable_watchdog, - bool, 0444); - MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n"); - MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n"); - MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n"); - MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n"); -+MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 0)\n"); - MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)"); - MODULE_PARM_DESC(disable_watchdog, "Set to 1 to disable the watchdog (default 0)\n"); - -diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c 
b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c -index d091f1d..a78eaed 100644 ---- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c -+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c -@@ -93,7 +93,6 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw) - struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); - - rtl8723be_bt_reg_init(hw); -- rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support; - rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer(); - - rtlpriv->dm.dm_initialgain_enable = 1; -@@ -151,6 +150,10 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw) - rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps; - rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps; - rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support; -+ rtlpriv->cfg->mod_params->sw_crypto = -+ rtlpriv->cfg->mod_params->sw_crypto; -+ rtlpriv->cfg->mod_params->disable_watchdog = -+ rtlpriv->cfg->mod_params->disable_watchdog; - if (rtlpriv->cfg->mod_params->disable_watchdog) - pr_info("watchdog disabled\n"); - rtlpriv->psc.reg_fwctrl_lps = 3; -@@ -267,6 +270,9 @@ static struct rtl_mod_params rtl8723be_mod_params = { - .inactiveps = true, - .swctrl_lps = false, - .fwctrl_lps = true, -+ .msi_support = false, -+ .disable_watchdog = false, -+ .debug = DBG_EMERG, - }; - - static struct rtl_hal_cfg rtl8723be_hal_cfg = { -diff --git a/drivers/of/irq.c b/drivers/of/irq.c -index 4fa916d..72a2c19 100644 ---- a/drivers/of/irq.c -+++ b/drivers/of/irq.c -@@ -636,6 +636,13 @@ static u32 __of_msi_map_rid(struct device *dev, struct device_node **np, - msi_base = be32_to_cpup(msi_map + 2); - rid_len = be32_to_cpup(msi_map + 3); - -+ if (rid_base & ~map_mask) { -+ dev_err(parent_dev, -+ "Invalid msi-map translation - msi-map-mask (0x%x) ignores rid-base (0x%x)\n", -+ map_mask, rid_base); -+ return rid_out; -+ } -+ - msi_controller_node = of_find_node_by_phandle(phandle); - - matched = (masked_rid >= rid_base && -@@ -655,7 +662,7 @@ static u32 __of_msi_map_rid(struct device *dev, struct device_node **np, - if (!matched) - return rid_out; - -- rid_out = masked_rid + msi_base; -+ rid_out = masked_rid - rid_base + msi_base; - dev_dbg(dev, - "msi-map at: %s, using mask %08x, rid-base: %08x, msi-base: %08x, length: %08x, rid: %08x -> %08x\n", - dev_name(parent_dev), map_mask, rid_base, msi_base, -diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c -index ff53856..0b3e0bf 100644 ---- a/drivers/pci/hotplug/acpiphp_glue.c -+++ b/drivers/pci/hotplug/acpiphp_glue.c -@@ -953,8 +953,10 @@ int acpiphp_enable_slot(struct acpiphp_slot *slot) - { - pci_lock_rescan_remove(); - -- if (slot->flags & SLOT_IS_GOING_AWAY) -+ if (slot->flags & SLOT_IS_GOING_AWAY) { -+ pci_unlock_rescan_remove(); - return -ENODEV; -+ } - - /* configure all functions */ - if (!(slot->flags & SLOT_ENABLED)) -diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c -index 0bf82a2..48d21e0 100644 ---- a/drivers/pci/pcie/aer/aerdrv.c -+++ b/drivers/pci/pcie/aer/aerdrv.c -@@ -262,7 +262,6 @@ static struct aer_rpc *aer_alloc_rpc(struct pcie_device *dev) - rpc->rpd = dev; - INIT_WORK(&rpc->dpc_handler, aer_isr); - mutex_init(&rpc->rpc_mutex); -- init_waitqueue_head(&rpc->wait_release); - - /* Use PCIe bus function to store rpc into PCIe device */ - set_service_data(dev, rpc); -@@ -285,8 +284,7 @@ static void aer_remove(struct pcie_device *dev) - if (rpc->isr) - free_irq(dev->irq, dev); - -- wait_event(rpc->wait_release, rpc->prod_idx == rpc->cons_idx); -- -+ 
flush_work(&rpc->dpc_handler); - aer_disable_rootport(rpc); - kfree(rpc); - set_service_data(dev, NULL); -diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h -index 84420b7..945c939 100644 ---- a/drivers/pci/pcie/aer/aerdrv.h -+++ b/drivers/pci/pcie/aer/aerdrv.h -@@ -72,7 +72,6 @@ struct aer_rpc { - * recovery on the same - * root port hierarchy - */ -- wait_queue_head_t wait_release; - }; - - struct aer_broadcast_data { -diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c -index fba785e..4e14de0 100644 ---- a/drivers/pci/pcie/aer/aerdrv_core.c -+++ b/drivers/pci/pcie/aer/aerdrv_core.c -@@ -811,8 +811,6 @@ void aer_isr(struct work_struct *work) - while (get_e_source(rpc, &e_src)) - aer_isr_one_error(p_device, &e_src); - mutex_unlock(&rpc->rpc_mutex); -- -- wake_up(&rpc->wait_release); - } - - /** -diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c -index c777b97..5f70fee 100644 ---- a/drivers/pci/xen-pcifront.c -+++ b/drivers/pci/xen-pcifront.c -@@ -53,7 +53,7 @@ struct pcifront_device { - }; - - struct pcifront_sd { -- int domain; -+ struct pci_sysdata sd; - struct pcifront_device *pdev; - }; - -@@ -67,7 +67,9 @@ static inline void pcifront_init_sd(struct pcifront_sd *sd, - unsigned int domain, unsigned int bus, - struct pcifront_device *pdev) - { -- sd->domain = domain; -+ /* Because we do not expose that information via XenBus. */ -+ sd->sd.node = first_online_node; -+ sd->sd.domain = domain; - sd->pdev = pdev; - } - -@@ -468,8 +470,8 @@ static int pcifront_scan_root(struct pcifront_device *pdev, - dev_info(&pdev->xdev->dev, "Creating PCI Frontend Bus %04x:%02x\n", - domain, bus); - -- bus_entry = kmalloc(sizeof(*bus_entry), GFP_KERNEL); -- sd = kmalloc(sizeof(*sd), GFP_KERNEL); -+ bus_entry = kzalloc(sizeof(*bus_entry), GFP_KERNEL); -+ sd = kzalloc(sizeof(*sd), GFP_KERNEL); - if (!bus_entry || !sd) { - err = -ENOMEM; - goto err_out; -diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c -index 8c7f27d..e7e574d 100644 ---- a/drivers/phy/phy-core.c -+++ b/drivers/phy/phy-core.c -@@ -275,20 +275,21 @@ EXPORT_SYMBOL_GPL(phy_exit); - - int phy_power_on(struct phy *phy) - { -- int ret; -+ int ret = 0; - - if (!phy) -- return 0; -+ goto out; - - if (phy->pwr) { - ret = regulator_enable(phy->pwr); - if (ret) -- return ret; -+ goto out; - } - - ret = phy_pm_runtime_get_sync(phy); - if (ret < 0 && ret != -ENOTSUPP) -- return ret; -+ goto err_pm_sync; -+ - ret = 0; /* Override possible ret == -ENOTSUPP */ - - mutex_lock(&phy->mutex); -@@ -296,19 +297,20 @@ int phy_power_on(struct phy *phy) - ret = phy->ops->power_on(phy); - if (ret < 0) { - dev_err(&phy->dev, "phy poweron failed --> %d\n", ret); -- goto out; -+ goto err_pwr_on; - } - } - ++phy->power_count; - mutex_unlock(&phy->mutex); - return 0; - --out: -+err_pwr_on: - mutex_unlock(&phy->mutex); - phy_pm_runtime_put_sync(phy); -+err_pm_sync: - if (phy->pwr) - regulator_disable(phy->pwr); -- -+out: - return ret; - } - EXPORT_SYMBOL_GPL(phy_power_on); -diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c -index a313dfc..d78ee15 100644 ---- a/drivers/platform/x86/ideapad-laptop.c -+++ b/drivers/platform/x86/ideapad-laptop.c -@@ -865,6 +865,13 @@ static const struct dmi_system_id no_hw_rfkill_list[] = { - }, - }, - { -+ .ident = "Lenovo ideapad Y700-17ISK", -+ .matches = { -+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), -+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad Y700-17ISK"), -+ }, -+ }, -+ { - .ident = "Lenovo Yoga 2 11 / 13 / 
Pro", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), -@@ -893,6 +900,13 @@ static const struct dmi_system_id no_hw_rfkill_list[] = { - }, - }, - { -+ .ident = "Lenovo Yoga 700", -+ .matches = { -+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), -+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 700"), -+ }, -+ }, -+ { - .ident = "Lenovo Yoga 900", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), -diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c -index c013029..b0f6214 100644 ---- a/drivers/platform/x86/toshiba_acpi.c -+++ b/drivers/platform/x86/toshiba_acpi.c -@@ -2484,6 +2484,14 @@ static int toshiba_acpi_setup_backlight(struct toshiba_acpi_dev *dev) - brightness = __get_lcd_brightness(dev); - if (brightness < 0) - return 0; -+ /* -+ * If transflective backlight is supported and the brightness is zero -+ * (lowest brightness level), the set_lcd_brightness function will -+ * activate the transflective backlight, making the LCD appear to be -+ * turned off, simply increment the brightness level to avoid that. -+ */ -+ if (dev->tr_backlight_supported && brightness == 0) -+ brightness++; - ret = set_lcd_brightness(dev, brightness); - if (ret) { - pr_debug("Backlight method is read-only, disabling backlight support\n"); -diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig -index 8df0b0e..0067620 100644 ---- a/drivers/regulator/Kconfig -+++ b/drivers/regulator/Kconfig -@@ -446,6 +446,7 @@ config REGULATOR_MC13892 - config REGULATOR_MT6311 - tristate "MediaTek MT6311 PMIC" - depends on I2C -+ select REGMAP_I2C - help - Say y here to select this option to enable the power regulator of - MediaTek MT6311 PMIC. -diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c -index 35de22f..f2e1a39 100644 ---- a/drivers/regulator/axp20x-regulator.c -+++ b/drivers/regulator/axp20x-regulator.c -@@ -27,8 +27,8 @@ - #define AXP20X_IO_ENABLED 0x03 - #define AXP20X_IO_DISABLED 0x07 - --#define AXP22X_IO_ENABLED 0x04 --#define AXP22X_IO_DISABLED 0x03 -+#define AXP22X_IO_ENABLED 0x03 -+#define AXP22X_IO_DISABLED 0x04 - - #define AXP20X_WORKMODE_DCDC2_MASK BIT(2) - #define AXP20X_WORKMODE_DCDC3_MASK BIT(1) -diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c -index a263c10..4abfbdb 100644 ---- a/drivers/s390/block/dasd.c -+++ b/drivers/s390/block/dasd.c -@@ -3031,6 +3031,7 @@ static void dasd_setup_queue(struct dasd_block *block) - max = block->base->discipline->max_blocks << block->s2b_shift; - } - queue_flag_set_unlocked(QUEUE_FLAG_NONROT, block->request_queue); -+ block->request_queue->limits.max_dev_sectors = max; - blk_queue_logical_block_size(block->request_queue, - block->bp_block); - blk_queue_max_hw_sectors(block->request_queue, max); -diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c -index 184b1db..286782c 100644 ---- a/drivers/s390/block/dasd_alias.c -+++ b/drivers/s390/block/dasd_alias.c -@@ -264,8 +264,10 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device) - spin_unlock_irqrestore(&lcu->lock, flags); - cancel_work_sync(&lcu->suc_data.worker); - spin_lock_irqsave(&lcu->lock, flags); -- if (device == lcu->suc_data.device) -+ if (device == lcu->suc_data.device) { -+ dasd_put_device(device); - lcu->suc_data.device = NULL; -+ } - } - was_pending = 0; - if (device == lcu->ruac_data.device) { -@@ -273,8 +275,10 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device) - was_pending = 1; - cancel_delayed_work_sync(&lcu->ruac_data.dwork); - 
spin_lock_irqsave(&lcu->lock, flags); -- if (device == lcu->ruac_data.device) -+ if (device == lcu->ruac_data.device) { -+ dasd_put_device(device); - lcu->ruac_data.device = NULL; -+ } - } - private->lcu = NULL; - spin_unlock_irqrestore(&lcu->lock, flags); -@@ -549,8 +553,10 @@ static void lcu_update_work(struct work_struct *work) - if ((rc && (rc != -EOPNOTSUPP)) || (lcu->flags & NEED_UAC_UPDATE)) { - DBF_DEV_EVENT(DBF_WARNING, device, "could not update" - " alias data in lcu (rc = %d), retry later", rc); -- schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ); -+ if (!schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ)) -+ dasd_put_device(device); - } else { -+ dasd_put_device(device); - lcu->ruac_data.device = NULL; - lcu->flags &= ~UPDATE_PENDING; - } -@@ -593,8 +599,10 @@ static int _schedule_lcu_update(struct alias_lcu *lcu, - */ - if (!usedev) - return -EINVAL; -+ dasd_get_device(usedev); - lcu->ruac_data.device = usedev; -- schedule_delayed_work(&lcu->ruac_data.dwork, 0); -+ if (!schedule_delayed_work(&lcu->ruac_data.dwork, 0)) -+ dasd_put_device(usedev); - return 0; - } - -@@ -723,7 +731,7 @@ static int reset_summary_unit_check(struct alias_lcu *lcu, - ASCEBC((char *) &cqr->magic, 4); - ccw = cqr->cpaddr; - ccw->cmd_code = DASD_ECKD_CCW_RSCK; -- ccw->flags = 0 ; -+ ccw->flags = CCW_FLAG_SLI; - ccw->count = 16; - ccw->cda = (__u32)(addr_t) cqr->data; - ((char *)cqr->data)[0] = reason; -@@ -930,6 +938,7 @@ static void summary_unit_check_handling_work(struct work_struct *work) - /* 3. read new alias configuration */ - _schedule_lcu_update(lcu, device); - lcu->suc_data.device = NULL; -+ dasd_put_device(device); - spin_unlock_irqrestore(&lcu->lock, flags); - } - -@@ -989,6 +998,8 @@ void dasd_alias_handle_summary_unit_check(struct dasd_device *device, - } - lcu->suc_data.reason = reason; - lcu->suc_data.device = device; -+ dasd_get_device(device); - spin_unlock(&lcu->lock); -- schedule_work(&lcu->suc_data.worker); -+ if (!schedule_work(&lcu->suc_data.worker)) -+ dasd_put_device(device); - }; -diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c -index 16a1935c..e197c6f 100644 ---- a/drivers/scsi/qla2xxx/qla_init.c -+++ b/drivers/scsi/qla2xxx/qla_init.c -@@ -2192,7 +2192,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha) - /* Clear outstanding commands array. */ - for (que = 0; que < ha->max_req_queues; que++) { - req = ha->req_q_map[que]; -- if (!req) -+ if (!req || !test_bit(que, ha->req_qid_map)) - continue; - req->out_ptr = (void *)(req->ring + req->length); - *req->out_ptr = 0; -@@ -2209,7 +2209,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha) - - for (que = 0; que < ha->max_rsp_queues; que++) { - rsp = ha->rsp_q_map[que]; -- if (!rsp) -+ if (!rsp || !test_bit(que, ha->rsp_qid_map)) - continue; - rsp->in_ptr = (void *)(rsp->ring + rsp->length); - *rsp->in_ptr = 0; -@@ -4961,7 +4961,7 @@ qla25xx_init_queues(struct qla_hw_data *ha) - - for (i = 1; i < ha->max_rsp_queues; i++) { - rsp = ha->rsp_q_map[i]; -- if (rsp) { -+ if (rsp && test_bit(i, ha->rsp_qid_map)) { - rsp->options &= ~BIT_0; - ret = qla25xx_init_rsp_que(base_vha, rsp); - if (ret != QLA_SUCCESS) -@@ -4976,8 +4976,8 @@ qla25xx_init_queues(struct qla_hw_data *ha) - } - for (i = 1; i < ha->max_req_queues; i++) { - req = ha->req_q_map[i]; -- if (req) { -- /* Clear outstanding commands array. */ -+ if (req && test_bit(i, ha->req_qid_map)) { -+ /* Clear outstanding commands array. 
*/ - req->options &= ~BIT_0; - ret = qla25xx_init_req_que(base_vha, req); - if (ret != QLA_SUCCESS) -diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c -index ccf6a7f..0e59731 100644 ---- a/drivers/scsi/qla2xxx/qla_isr.c -+++ b/drivers/scsi/qla2xxx/qla_isr.c -@@ -3018,9 +3018,9 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) - "MSI-X: Failed to enable support " - "-- %d/%d\n Retry with %d vectors.\n", - ha->msix_count, ret, ret); -+ ha->msix_count = ret; -+ ha->max_rsp_queues = ha->msix_count - 1; - } -- ha->msix_count = ret; -- ha->max_rsp_queues = ha->msix_count - 1; - ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) * - ha->msix_count, GFP_KERNEL); - if (!ha->msix_entries) { -diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c -index c5dd594..cf7ba52 100644 ---- a/drivers/scsi/qla2xxx/qla_mid.c -+++ b/drivers/scsi/qla2xxx/qla_mid.c -@@ -600,7 +600,7 @@ qla25xx_delete_queues(struct scsi_qla_host *vha) - /* Delete request queues */ - for (cnt = 1; cnt < ha->max_req_queues; cnt++) { - req = ha->req_q_map[cnt]; -- if (req) { -+ if (req && test_bit(cnt, ha->req_qid_map)) { - ret = qla25xx_delete_req_que(vha, req); - if (ret != QLA_SUCCESS) { - ql_log(ql_log_warn, vha, 0x00ea, -@@ -614,7 +614,7 @@ qla25xx_delete_queues(struct scsi_qla_host *vha) - /* Delete response queues */ - for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) { - rsp = ha->rsp_q_map[cnt]; -- if (rsp) { -+ if (rsp && test_bit(cnt, ha->rsp_qid_map)) { - ret = qla25xx_delete_rsp_que(vha, rsp); - if (ret != QLA_SUCCESS) { - ql_log(ql_log_warn, vha, 0x00eb, -diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c -index bfa9a64..fc6674d 100644 ---- a/drivers/scsi/qla2xxx/qla_os.c -+++ b/drivers/scsi/qla2xxx/qla_os.c -@@ -397,6 +397,9 @@ static void qla2x00_free_queues(struct qla_hw_data *ha) - int cnt; - - for (cnt = 0; cnt < ha->max_req_queues; cnt++) { -+ if (!test_bit(cnt, ha->req_qid_map)) -+ continue; -+ - req = ha->req_q_map[cnt]; - qla2x00_free_req_que(ha, req); - } -@@ -404,6 +407,9 @@ static void qla2x00_free_queues(struct qla_hw_data *ha) - ha->req_q_map = NULL; - - for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) { -+ if (!test_bit(cnt, ha->rsp_qid_map)) -+ continue; -+ - rsp = ha->rsp_q_map[cnt]; - qla2x00_free_rsp_que(ha, rsp); - } -diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c -index ddbe2e7..c3e6225 100644 ---- a/drivers/scsi/qla2xxx/qla_tmpl.c -+++ b/drivers/scsi/qla2xxx/qla_tmpl.c -@@ -395,6 +395,10 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha, - if (ent->t263.queue_type == T263_QUEUE_TYPE_REQ) { - for (i = 0; i < vha->hw->max_req_queues; i++) { - struct req_que *req = vha->hw->req_q_map[i]; -+ -+ if (!test_bit(i, vha->hw->req_qid_map)) -+ continue; -+ - if (req || !buf) { - length = req ? - req->length : REQUEST_ENTRY_CNT_24XX; -@@ -408,6 +412,10 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha, - } else if (ent->t263.queue_type == T263_QUEUE_TYPE_RSP) { - for (i = 0; i < vha->hw->max_rsp_queues; i++) { - struct rsp_que *rsp = vha->hw->rsp_q_map[i]; -+ -+ if (!test_bit(i, vha->hw->rsp_qid_map)) -+ continue; -+ - if (rsp || !buf) { - length = rsp ? 
- rsp->length : RESPONSE_ENTRY_CNT_MQ; -@@ -634,6 +642,10 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha, - if (ent->t274.queue_type == T274_QUEUE_TYPE_REQ_SHAD) { - for (i = 0; i < vha->hw->max_req_queues; i++) { - struct req_que *req = vha->hw->req_q_map[i]; -+ -+ if (!test_bit(i, vha->hw->req_qid_map)) -+ continue; -+ - if (req || !buf) { - qla27xx_insert16(i, buf, len); - qla27xx_insert16(1, buf, len); -@@ -645,6 +657,10 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha, - } else if (ent->t274.queue_type == T274_QUEUE_TYPE_RSP_SHAD) { - for (i = 0; i < vha->hw->max_rsp_queues; i++) { - struct rsp_que *rsp = vha->hw->rsp_q_map[i]; -+ -+ if (!test_bit(i, vha->hw->rsp_qid_map)) -+ continue; -+ - if (rsp || !buf) { - qla27xx_insert16(i, buf, len); - qla27xx_insert16(1, buf, len); -diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c -index 84fa4c4..bb669d3 100644 ---- a/drivers/scsi/sd.c -+++ b/drivers/scsi/sd.c -@@ -2893,7 +2893,7 @@ static int sd_revalidate_disk(struct gendisk *disk) - sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS && - sdkp->opt_xfer_blocks * sdp->sector_size >= PAGE_CACHE_SIZE) - rw_max = q->limits.io_opt = -- logical_to_sectors(sdp, sdkp->opt_xfer_blocks); -+ sdkp->opt_xfer_blocks * sdp->sector_size; - else - rw_max = BLK_DEF_MAX_SECTORS; - -diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c -index aebad36..8feac59 100644 ---- a/drivers/spi/spi-atmel.c -+++ b/drivers/spi/spi-atmel.c -@@ -1571,6 +1571,7 @@ static int atmel_spi_probe(struct platform_device *pdev) - - as->use_cs_gpios = true; - if (atmel_spi_is_v2(as) && -+ pdev->dev.of_node && - !of_get_property(pdev->dev.of_node, "cs-gpios", NULL)) { - as->use_cs_gpios = false; - master->num_chipselect = 4; -diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c -index 1f8903d..ed8283e 100644 ---- a/drivers/spi/spi-omap2-mcspi.c -+++ b/drivers/spi/spi-omap2-mcspi.c -@@ -1024,6 +1024,16 @@ static int omap2_mcspi_setup(struct spi_device *spi) - spi->controller_state = cs; - /* Link this to context save list */ - list_add_tail(&cs->node, &ctx->cs); -+ -+ if (gpio_is_valid(spi->cs_gpio)) { -+ ret = gpio_request(spi->cs_gpio, dev_name(&spi->dev)); -+ if (ret) { -+ dev_err(&spi->dev, "failed to request gpio\n"); -+ return ret; -+ } -+ gpio_direction_output(spi->cs_gpio, -+ !(spi->mode & SPI_CS_HIGH)); -+ } - } - - if (!mcspi_dma->dma_rx || !mcspi_dma->dma_tx) { -@@ -1032,15 +1042,6 @@ static int omap2_mcspi_setup(struct spi_device *spi) - return ret; - } - -- if (gpio_is_valid(spi->cs_gpio)) { -- ret = gpio_request(spi->cs_gpio, dev_name(&spi->dev)); -- if (ret) { -- dev_err(&spi->dev, "failed to request gpio\n"); -- return ret; -- } -- gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH)); -- } -- - ret = pm_runtime_get_sync(mcspi->dev); - if (ret < 0) - return ret; -diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c -index 79ac192..70b8f4f 100644 ---- a/drivers/staging/panel/panel.c -+++ b/drivers/staging/panel/panel.c -@@ -825,8 +825,7 @@ static void lcd_write_cmd_s(int cmd) - lcd_send_serial(0x1F); /* R/W=W, RS=0 */ - lcd_send_serial(cmd & 0x0F); - lcd_send_serial((cmd >> 4) & 0x0F); -- /* the shortest command takes at least 40 us */ -- usleep_range(40, 100); -+ udelay(40); /* the shortest command takes at least 40 us */ - spin_unlock_irq(&pprt_lock); - } - -@@ -837,8 +836,7 @@ static void lcd_write_data_s(int data) - lcd_send_serial(0x5F); /* R/W=W, RS=1 */ - lcd_send_serial(data & 0x0F); - lcd_send_serial((data >> 4) & 0x0F); -- /* the shortest data 
takes at least 40 us */ -- usleep_range(40, 100); -+ udelay(40); /* the shortest data takes at least 40 us */ - spin_unlock_irq(&pprt_lock); - } - -@@ -848,20 +846,19 @@ static void lcd_write_cmd_p8(int cmd) - spin_lock_irq(&pprt_lock); - /* present the data to the data port */ - w_dtr(pprt, cmd); -- /* maintain the data during 20 us before the strobe */ -- usleep_range(20, 100); -+ udelay(20); /* maintain the data during 20 us before the strobe */ - - bits.e = BIT_SET; - bits.rs = BIT_CLR; - bits.rw = BIT_CLR; - set_ctrl_bits(); - -- usleep_range(40, 100); /* maintain the strobe during 40 us */ -+ udelay(40); /* maintain the strobe during 40 us */ - - bits.e = BIT_CLR; - set_ctrl_bits(); - -- usleep_range(120, 500); /* the shortest command takes at least 120 us */ -+ udelay(120); /* the shortest command takes at least 120 us */ - spin_unlock_irq(&pprt_lock); - } - -@@ -871,20 +868,19 @@ static void lcd_write_data_p8(int data) - spin_lock_irq(&pprt_lock); - /* present the data to the data port */ - w_dtr(pprt, data); -- /* maintain the data during 20 us before the strobe */ -- usleep_range(20, 100); -+ udelay(20); /* maintain the data during 20 us before the strobe */ - - bits.e = BIT_SET; - bits.rs = BIT_SET; - bits.rw = BIT_CLR; - set_ctrl_bits(); - -- usleep_range(40, 100); /* maintain the strobe during 40 us */ -+ udelay(40); /* maintain the strobe during 40 us */ - - bits.e = BIT_CLR; - set_ctrl_bits(); - -- usleep_range(45, 100); /* the shortest data takes at least 45 us */ -+ udelay(45); /* the shortest data takes at least 45 us */ - spin_unlock_irq(&pprt_lock); - } - -@@ -894,7 +890,7 @@ static void lcd_write_cmd_tilcd(int cmd) - spin_lock_irq(&pprt_lock); - /* present the data to the control port */ - w_ctr(pprt, cmd); -- usleep_range(60, 120); -+ udelay(60); - spin_unlock_irq(&pprt_lock); - } - -@@ -904,7 +900,7 @@ static void lcd_write_data_tilcd(int data) - spin_lock_irq(&pprt_lock); - /* present the data to the data port */ - w_dtr(pprt, data); -- usleep_range(60, 120); -+ udelay(60); - spin_unlock_irq(&pprt_lock); - } - -@@ -947,7 +943,7 @@ static void lcd_clear_fast_s(void) - lcd_send_serial(0x5F); /* R/W=W, RS=1 */ - lcd_send_serial(' ' & 0x0F); - lcd_send_serial((' ' >> 4) & 0x0F); -- usleep_range(40, 100); /* the shortest data takes at least 40 us */ -+ udelay(40); /* the shortest data takes at least 40 us */ - } - spin_unlock_irq(&pprt_lock); - -@@ -971,7 +967,7 @@ static void lcd_clear_fast_p8(void) - w_dtr(pprt, ' '); - - /* maintain the data during 20 us before the strobe */ -- usleep_range(20, 100); -+ udelay(20); - - bits.e = BIT_SET; - bits.rs = BIT_SET; -@@ -979,13 +975,13 @@ static void lcd_clear_fast_p8(void) - set_ctrl_bits(); - - /* maintain the strobe during 40 us */ -- usleep_range(40, 100); -+ udelay(40); - - bits.e = BIT_CLR; - set_ctrl_bits(); - - /* the shortest data takes at least 45 us */ -- usleep_range(45, 100); -+ udelay(45); - } - spin_unlock_irq(&pprt_lock); - -@@ -1007,7 +1003,7 @@ static void lcd_clear_fast_tilcd(void) - for (pos = 0; pos < lcd.height * lcd.hwidth; pos++) { - /* present the data to the data port */ - w_dtr(pprt, ' '); -- usleep_range(60, 120); -+ udelay(60); - } - - spin_unlock_irq(&pprt_lock); -diff --git a/drivers/staging/speakup/serialio.c b/drivers/staging/speakup/serialio.c -index 3b5835b..a5bbb33 100644 ---- a/drivers/staging/speakup/serialio.c -+++ b/drivers/staging/speakup/serialio.c -@@ -6,6 +6,11 @@ - #include "spk_priv.h" - #include "serialio.h" - -+#include <linux/serial_core.h> -+/* WARNING: Do not change this to 
<linux/serial.h> without testing that -+ * SERIAL_PORT_DFNS does get defined to the appropriate value. */ -+#include <asm/serial.h> -+ - #ifndef SERIAL_PORT_DFNS - #define SERIAL_PORT_DFNS - #endif -@@ -23,9 +28,15 @@ const struct old_serial_port *spk_serial_init(int index) - int baud = 9600, quot = 0; - unsigned int cval = 0; - int cflag = CREAD | HUPCL | CLOCAL | B9600 | CS8; -- const struct old_serial_port *ser = rs_table + index; -+ const struct old_serial_port *ser; - int err; - -+ if (index >= ARRAY_SIZE(rs_table)) { -+ pr_info("no port info for ttyS%d\n", index); -+ return NULL; -+ } -+ ser = rs_table + index; -+ - /* Divisor, bytesize and parity */ - quot = ser->baud_base / baud; - cval = cflag & (CSIZE | CSTOPB); -diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c -index 28fb301..88029cc 100644 ---- a/drivers/target/target_core_tmr.c -+++ b/drivers/target/target_core_tmr.c -@@ -68,23 +68,25 @@ void core_tmr_release_req(struct se_tmr_req *tmr) - - if (dev) { - spin_lock_irqsave(&dev->se_tmr_lock, flags); -- list_del(&tmr->tmr_list); -+ list_del_init(&tmr->tmr_list); - spin_unlock_irqrestore(&dev->se_tmr_lock, flags); - } - - kfree(tmr); - } - --static void core_tmr_handle_tas_abort( -- struct se_node_acl *tmr_nacl, -- struct se_cmd *cmd, -- int tas) -+static void core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas) - { -- bool remove = true; -+ unsigned long flags; -+ bool remove = true, send_tas; - /* - * TASK ABORTED status (TAS) bit support - */ -- if ((tmr_nacl && (tmr_nacl != cmd->se_sess->se_node_acl)) && tas) { -+ spin_lock_irqsave(&cmd->t_state_lock, flags); -+ send_tas = (cmd->transport_state & CMD_T_TAS); -+ spin_unlock_irqrestore(&cmd->t_state_lock, flags); -+ -+ if (send_tas) { - remove = false; - transport_send_task_abort(cmd); - } -@@ -107,6 +109,46 @@ static int target_check_cdb_and_preempt(struct list_head *list, - return 1; - } - -+static bool __target_check_io_state(struct se_cmd *se_cmd, -+ struct se_session *tmr_sess, int tas) -+{ -+ struct se_session *sess = se_cmd->se_sess; -+ -+ assert_spin_locked(&sess->sess_cmd_lock); -+ WARN_ON_ONCE(!irqs_disabled()); -+ /* -+ * If command already reached CMD_T_COMPLETE state within -+ * target_complete_cmd() or CMD_T_FABRIC_STOP due to shutdown, -+ * this se_cmd has been passed to fabric driver and will -+ * not be aborted. -+ * -+ * Otherwise, obtain a local se_cmd->cmd_kref now for TMR -+ * ABORT_TASK + LUN_RESET for CMD_T_ABORTED processing as -+ * long as se_cmd->cmd_kref is still active unless zero. 
-+ */ -+ spin_lock(&se_cmd->t_state_lock); -+ if (se_cmd->transport_state & (CMD_T_COMPLETE | CMD_T_FABRIC_STOP)) { -+ pr_debug("Attempted to abort io tag: %llu already complete or" -+ " fabric stop, skipping\n", se_cmd->tag); -+ spin_unlock(&se_cmd->t_state_lock); -+ return false; -+ } -+ if (sess->sess_tearing_down || se_cmd->cmd_wait_set) { -+ pr_debug("Attempted to abort io tag: %llu already shutdown," -+ " skipping\n", se_cmd->tag); -+ spin_unlock(&se_cmd->t_state_lock); -+ return false; -+ } -+ se_cmd->transport_state |= CMD_T_ABORTED; -+ -+ if ((tmr_sess != se_cmd->se_sess) && tas) -+ se_cmd->transport_state |= CMD_T_TAS; -+ -+ spin_unlock(&se_cmd->t_state_lock); -+ -+ return kref_get_unless_zero(&se_cmd->cmd_kref); -+} -+ - void core_tmr_abort_task( - struct se_device *dev, - struct se_tmr_req *tmr, -@@ -130,34 +172,22 @@ void core_tmr_abort_task( - if (tmr->ref_task_tag != ref_tag) - continue; - -- if (!kref_get_unless_zero(&se_cmd->cmd_kref)) -- continue; -- - printk("ABORT_TASK: Found referenced %s task_tag: %llu\n", - se_cmd->se_tfo->get_fabric_name(), ref_tag); - -- spin_lock(&se_cmd->t_state_lock); -- if (se_cmd->transport_state & CMD_T_COMPLETE) { -- printk("ABORT_TASK: ref_tag: %llu already complete," -- " skipping\n", ref_tag); -- spin_unlock(&se_cmd->t_state_lock); -+ if (!__target_check_io_state(se_cmd, se_sess, 0)) { - spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); -- - target_put_sess_cmd(se_cmd); -- - goto out; - } -- se_cmd->transport_state |= CMD_T_ABORTED; -- spin_unlock(&se_cmd->t_state_lock); -- - list_del_init(&se_cmd->se_cmd_list); - spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); - - cancel_work_sync(&se_cmd->work); - transport_wait_for_tasks(se_cmd); - -- target_put_sess_cmd(se_cmd); - transport_cmd_finish_abort(se_cmd, true); -+ target_put_sess_cmd(se_cmd); - - printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for" - " ref_tag: %llu\n", ref_tag); -@@ -178,9 +208,11 @@ static void core_tmr_drain_tmr_list( - struct list_head *preempt_and_abort_list) - { - LIST_HEAD(drain_tmr_list); -+ struct se_session *sess; - struct se_tmr_req *tmr_p, *tmr_pp; - struct se_cmd *cmd; - unsigned long flags; -+ bool rc; - /* - * Release all pending and outgoing TMRs aside from the received - * LUN_RESET tmr.. 
-@@ -206,17 +238,39 @@ static void core_tmr_drain_tmr_list( - if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd)) - continue; - -+ sess = cmd->se_sess; -+ if (WARN_ON_ONCE(!sess)) -+ continue; -+ -+ spin_lock(&sess->sess_cmd_lock); - spin_lock(&cmd->t_state_lock); -- if (!(cmd->transport_state & CMD_T_ACTIVE)) { -+ if (!(cmd->transport_state & CMD_T_ACTIVE) || -+ (cmd->transport_state & CMD_T_FABRIC_STOP)) { - spin_unlock(&cmd->t_state_lock); -+ spin_unlock(&sess->sess_cmd_lock); - continue; - } - if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) { - spin_unlock(&cmd->t_state_lock); -+ spin_unlock(&sess->sess_cmd_lock); - continue; - } -+ if (sess->sess_tearing_down || cmd->cmd_wait_set) { -+ spin_unlock(&cmd->t_state_lock); -+ spin_unlock(&sess->sess_cmd_lock); -+ continue; -+ } -+ cmd->transport_state |= CMD_T_ABORTED; - spin_unlock(&cmd->t_state_lock); - -+ rc = kref_get_unless_zero(&cmd->cmd_kref); -+ if (!rc) { -+ printk("LUN_RESET TMR: non-zero kref_get_unless_zero\n"); -+ spin_unlock(&sess->sess_cmd_lock); -+ continue; -+ } -+ spin_unlock(&sess->sess_cmd_lock); -+ - list_move_tail(&tmr_p->tmr_list, &drain_tmr_list); - } - spin_unlock_irqrestore(&dev->se_tmr_lock, flags); -@@ -230,20 +284,26 @@ static void core_tmr_drain_tmr_list( - (preempt_and_abort_list) ? "Preempt" : "", tmr_p, - tmr_p->function, tmr_p->response, cmd->t_state); - -+ cancel_work_sync(&cmd->work); -+ transport_wait_for_tasks(cmd); -+ - transport_cmd_finish_abort(cmd, 1); -+ target_put_sess_cmd(cmd); - } - } - - static void core_tmr_drain_state_list( - struct se_device *dev, - struct se_cmd *prout_cmd, -- struct se_node_acl *tmr_nacl, -+ struct se_session *tmr_sess, - int tas, - struct list_head *preempt_and_abort_list) - { - LIST_HEAD(drain_task_list); -+ struct se_session *sess; - struct se_cmd *cmd, *next; - unsigned long flags; -+ int rc; - - /* - * Complete outstanding commands with TASK_ABORTED SAM status. -@@ -282,6 +342,16 @@ static void core_tmr_drain_state_list( - if (prout_cmd == cmd) - continue; - -+ sess = cmd->se_sess; -+ if (WARN_ON_ONCE(!sess)) -+ continue; -+ -+ spin_lock(&sess->sess_cmd_lock); -+ rc = __target_check_io_state(cmd, tmr_sess, tas); -+ spin_unlock(&sess->sess_cmd_lock); -+ if (!rc) -+ continue; -+ - list_move_tail(&cmd->state_list, &drain_task_list); - cmd->state_active = false; - } -@@ -289,7 +359,7 @@ static void core_tmr_drain_state_list( - - while (!list_empty(&drain_task_list)) { - cmd = list_entry(drain_task_list.next, struct se_cmd, state_list); -- list_del(&cmd->state_list); -+ list_del_init(&cmd->state_list); - - pr_debug("LUN_RESET: %s cmd: %p" - " ITT/CmdSN: 0x%08llx/0x%08x, i_state: %d, t_state: %d" -@@ -313,16 +383,11 @@ static void core_tmr_drain_state_list( - * loop above, but we do it down here given that - * cancel_work_sync may block. 
- */ -- if (cmd->t_state == TRANSPORT_COMPLETE) -- cancel_work_sync(&cmd->work); -- -- spin_lock_irqsave(&cmd->t_state_lock, flags); -- target_stop_cmd(cmd, &flags); -- -- cmd->transport_state |= CMD_T_ABORTED; -- spin_unlock_irqrestore(&cmd->t_state_lock, flags); -+ cancel_work_sync(&cmd->work); -+ transport_wait_for_tasks(cmd); - -- core_tmr_handle_tas_abort(tmr_nacl, cmd, tas); -+ core_tmr_handle_tas_abort(cmd, tas); -+ target_put_sess_cmd(cmd); - } - } - -@@ -334,6 +399,7 @@ int core_tmr_lun_reset( - { - struct se_node_acl *tmr_nacl = NULL; - struct se_portal_group *tmr_tpg = NULL; -+ struct se_session *tmr_sess = NULL; - int tas; - /* - * TASK_ABORTED status bit, this is configurable via ConfigFS -@@ -352,8 +418,9 @@ int core_tmr_lun_reset( - * or struct se_device passthrough.. - */ - if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) { -- tmr_nacl = tmr->task_cmd->se_sess->se_node_acl; -- tmr_tpg = tmr->task_cmd->se_sess->se_tpg; -+ tmr_sess = tmr->task_cmd->se_sess; -+ tmr_nacl = tmr_sess->se_node_acl; -+ tmr_tpg = tmr_sess->se_tpg; - if (tmr_nacl && tmr_tpg) { - pr_debug("LUN_RESET: TMR caller fabric: %s" - " initiator port %s\n", -@@ -366,7 +433,7 @@ int core_tmr_lun_reset( - dev->transport->name, tas); - - core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list); -- core_tmr_drain_state_list(dev, prout_cmd, tmr_nacl, tas, -+ core_tmr_drain_state_list(dev, prout_cmd, tmr_sess, tas, - preempt_and_abort_list); - - /* -diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c -index 4fdcee2..94f4ffa 100644 ---- a/drivers/target/target_core_transport.c -+++ b/drivers/target/target_core_transport.c -@@ -528,9 +528,6 @@ void transport_deregister_session(struct se_session *se_sess) - } - EXPORT_SYMBOL(transport_deregister_session); - --/* -- * Called with cmd->t_state_lock held. -- */ - static void target_remove_from_state_list(struct se_cmd *cmd) - { - struct se_device *dev = cmd->se_dev; -@@ -555,10 +552,6 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists, - { - unsigned long flags; - -- spin_lock_irqsave(&cmd->t_state_lock, flags); -- if (write_pending) -- cmd->t_state = TRANSPORT_WRITE_PENDING; -- - if (remove_from_lists) { - target_remove_from_state_list(cmd); - -@@ -568,6 +561,10 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists, - cmd->se_lun = NULL; - } - -+ spin_lock_irqsave(&cmd->t_state_lock, flags); -+ if (write_pending) -+ cmd->t_state = TRANSPORT_WRITE_PENDING; -+ - /* - * Determine if frontend context caller is requesting the stopping of - * this command for frontend exceptions. -@@ -621,6 +618,8 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd) - - void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) - { -+ bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF); -+ - if (cmd->se_cmd_flags & SCF_SE_LUN_CMD) - transport_lun_remove_cmd(cmd); - /* -@@ -632,7 +631,7 @@ void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) - - if (transport_cmd_check_stop_to_fabric(cmd)) - return; -- if (remove) -+ if (remove && ack_kref) - transport_put_cmd(cmd); - } - -@@ -700,7 +699,7 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) - * Check for case where an explicit ABORT_TASK has been received - * and transport_wait_for_tasks() will be waiting for completion.. 
- */ -- if (cmd->transport_state & CMD_T_ABORTED && -+ if (cmd->transport_state & CMD_T_ABORTED || - cmd->transport_state & CMD_T_STOP) { - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - complete_all(&cmd->t_transport_stop_comp); -@@ -1850,19 +1849,21 @@ static bool target_handle_task_attr(struct se_cmd *cmd) - return true; - } - -+static int __transport_check_aborted_status(struct se_cmd *, int); -+ - void target_execute_cmd(struct se_cmd *cmd) - { - /* -- * If the received CDB has aleady been aborted stop processing it here. -- */ -- if (transport_check_aborted_status(cmd, 1)) -- return; -- -- /* - * Determine if frontend context caller is requesting the stopping of - * this command for frontend exceptions. -+ * -+ * If the received CDB has aleady been aborted stop processing it here. - */ - spin_lock_irq(&cmd->t_state_lock); -+ if (__transport_check_aborted_status(cmd, 1)) { -+ spin_unlock_irq(&cmd->t_state_lock); -+ return; -+ } - if (cmd->transport_state & CMD_T_STOP) { - pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n", - __func__, __LINE__, cmd->tag); -@@ -2213,20 +2214,14 @@ static inline void transport_free_pages(struct se_cmd *cmd) - } - - /** -- * transport_release_cmd - free a command -- * @cmd: command to free -+ * transport_put_cmd - release a reference to a command -+ * @cmd: command to release - * -- * This routine unconditionally frees a command, and reference counting -- * or list removal must be done in the caller. -+ * This routine releases our reference to the command and frees it if possible. - */ --static int transport_release_cmd(struct se_cmd *cmd) -+static int transport_put_cmd(struct se_cmd *cmd) - { - BUG_ON(!cmd->se_tfo); -- -- if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) -- core_tmr_release_req(cmd->se_tmr_req); -- if (cmd->t_task_cdb != cmd->__t_task_cdb) -- kfree(cmd->t_task_cdb); - /* - * If this cmd has been setup with target_get_sess_cmd(), drop - * the kref and call ->release_cmd() in kref callback. -@@ -2234,18 +2229,6 @@ static int transport_release_cmd(struct se_cmd *cmd) - return target_put_sess_cmd(cmd); - } - --/** -- * transport_put_cmd - release a reference to a command -- * @cmd: command to release -- * -- * This routine releases our reference to the command and frees it if possible. 
-- */ --static int transport_put_cmd(struct se_cmd *cmd) --{ -- transport_free_pages(cmd); -- return transport_release_cmd(cmd); --} -- - void *transport_kmap_data_sg(struct se_cmd *cmd) - { - struct scatterlist *sg = cmd->t_data_sg; -@@ -2441,34 +2424,58 @@ static void transport_write_pending_qf(struct se_cmd *cmd) - } - } - --int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) -+static bool -+__transport_wait_for_tasks(struct se_cmd *, bool, bool *, bool *, -+ unsigned long *flags); -+ -+static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas) - { - unsigned long flags; -+ -+ spin_lock_irqsave(&cmd->t_state_lock, flags); -+ __transport_wait_for_tasks(cmd, true, aborted, tas, &flags); -+ spin_unlock_irqrestore(&cmd->t_state_lock, flags); -+} -+ -+int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) -+{ - int ret = 0; -+ bool aborted = false, tas = false; - - if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) { - if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) -- transport_wait_for_tasks(cmd); -+ target_wait_free_cmd(cmd, &aborted, &tas); - -- ret = transport_release_cmd(cmd); -+ if (!aborted || tas) -+ ret = transport_put_cmd(cmd); - } else { - if (wait_for_tasks) -- transport_wait_for_tasks(cmd); -+ target_wait_free_cmd(cmd, &aborted, &tas); - /* - * Handle WRITE failure case where transport_generic_new_cmd() - * has already added se_cmd to state_list, but fabric has - * failed command before I/O submission. - */ -- if (cmd->state_active) { -- spin_lock_irqsave(&cmd->t_state_lock, flags); -+ if (cmd->state_active) - target_remove_from_state_list(cmd); -- spin_unlock_irqrestore(&cmd->t_state_lock, flags); -- } - - if (cmd->se_lun) - transport_lun_remove_cmd(cmd); - -- ret = transport_put_cmd(cmd); -+ if (!aborted || tas) -+ ret = transport_put_cmd(cmd); -+ } -+ /* -+ * If the task has been internally aborted due to TMR ABORT_TASK -+ * or LUN_RESET, target_core_tmr.c is responsible for performing -+ * the remaining calls to target_put_sess_cmd(), and not the -+ * callers of this function. 
-+ */ -+ if (aborted) { -+ pr_debug("Detected CMD_T_ABORTED for ITT: %llu\n", cmd->tag); -+ wait_for_completion(&cmd->cmd_wait_comp); -+ cmd->se_tfo->release_cmd(cmd); -+ ret = 1; - } - return ret; - } -@@ -2508,26 +2515,46 @@ out: - } - EXPORT_SYMBOL(target_get_sess_cmd); - -+static void target_free_cmd_mem(struct se_cmd *cmd) -+{ -+ transport_free_pages(cmd); -+ -+ if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) -+ core_tmr_release_req(cmd->se_tmr_req); -+ if (cmd->t_task_cdb != cmd->__t_task_cdb) -+ kfree(cmd->t_task_cdb); -+} -+ - static void target_release_cmd_kref(struct kref *kref) - { - struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref); - struct se_session *se_sess = se_cmd->se_sess; - unsigned long flags; -+ bool fabric_stop; - - spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); - if (list_empty(&se_cmd->se_cmd_list)) { - spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); -+ target_free_cmd_mem(se_cmd); - se_cmd->se_tfo->release_cmd(se_cmd); - return; - } -- if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) { -+ -+ spin_lock(&se_cmd->t_state_lock); -+ fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP); -+ spin_unlock(&se_cmd->t_state_lock); -+ -+ if (se_cmd->cmd_wait_set || fabric_stop) { -+ list_del_init(&se_cmd->se_cmd_list); - spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); -+ target_free_cmd_mem(se_cmd); - complete(&se_cmd->cmd_wait_comp); - return; - } -- list_del(&se_cmd->se_cmd_list); -+ list_del_init(&se_cmd->se_cmd_list); - spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); - -+ target_free_cmd_mem(se_cmd); - se_cmd->se_tfo->release_cmd(se_cmd); - } - -@@ -2539,6 +2566,7 @@ int target_put_sess_cmd(struct se_cmd *se_cmd) - struct se_session *se_sess = se_cmd->se_sess; - - if (!se_sess) { -+ target_free_cmd_mem(se_cmd); - se_cmd->se_tfo->release_cmd(se_cmd); - return 1; - } -@@ -2555,6 +2583,7 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess) - { - struct se_cmd *se_cmd; - unsigned long flags; -+ int rc; - - spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); - if (se_sess->sess_tearing_down) { -@@ -2564,8 +2593,15 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess) - se_sess->sess_tearing_down = 1; - list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list); - -- list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list) -- se_cmd->cmd_wait_set = 1; -+ list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list) { -+ rc = kref_get_unless_zero(&se_cmd->cmd_kref); -+ if (rc) { -+ se_cmd->cmd_wait_set = 1; -+ spin_lock(&se_cmd->t_state_lock); -+ se_cmd->transport_state |= CMD_T_FABRIC_STOP; -+ spin_unlock(&se_cmd->t_state_lock); -+ } -+ } - - spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); - } -@@ -2578,15 +2614,25 @@ void target_wait_for_sess_cmds(struct se_session *se_sess) - { - struct se_cmd *se_cmd, *tmp_cmd; - unsigned long flags; -+ bool tas; - - list_for_each_entry_safe(se_cmd, tmp_cmd, - &se_sess->sess_wait_list, se_cmd_list) { -- list_del(&se_cmd->se_cmd_list); -+ list_del_init(&se_cmd->se_cmd_list); - - pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:" - " %d\n", se_cmd, se_cmd->t_state, - se_cmd->se_tfo->get_cmd_state(se_cmd)); - -+ spin_lock_irqsave(&se_cmd->t_state_lock, flags); -+ tas = (se_cmd->transport_state & CMD_T_TAS); -+ spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); -+ -+ if (!target_put_sess_cmd(se_cmd)) { -+ if (tas) -+ target_put_sess_cmd(se_cmd); -+ } -+ - wait_for_completion(&se_cmd->cmd_wait_comp); - pr_debug("After 
cmd_wait_comp: se_cmd: %p t_state: %d" - " fabric state: %d\n", se_cmd, se_cmd->t_state, -@@ -2608,53 +2654,75 @@ void transport_clear_lun_ref(struct se_lun *lun) - wait_for_completion(&lun->lun_ref_comp); - } - --/** -- * transport_wait_for_tasks - wait for completion to occur -- * @cmd: command to wait -- * -- * Called from frontend fabric context to wait for storage engine -- * to pause and/or release frontend generated struct se_cmd. -- */ --bool transport_wait_for_tasks(struct se_cmd *cmd) -+static bool -+__transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop, -+ bool *aborted, bool *tas, unsigned long *flags) -+ __releases(&cmd->t_state_lock) -+ __acquires(&cmd->t_state_lock) - { -- unsigned long flags; - -- spin_lock_irqsave(&cmd->t_state_lock, flags); -+ assert_spin_locked(&cmd->t_state_lock); -+ WARN_ON_ONCE(!irqs_disabled()); -+ -+ if (fabric_stop) -+ cmd->transport_state |= CMD_T_FABRIC_STOP; -+ -+ if (cmd->transport_state & CMD_T_ABORTED) -+ *aborted = true; -+ -+ if (cmd->transport_state & CMD_T_TAS) -+ *tas = true; -+ - if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && -- !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) { -- spin_unlock_irqrestore(&cmd->t_state_lock, flags); -+ !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) - return false; -- } - - if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) && -- !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) { -- spin_unlock_irqrestore(&cmd->t_state_lock, flags); -+ !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) - return false; -- } - -- if (!(cmd->transport_state & CMD_T_ACTIVE)) { -- spin_unlock_irqrestore(&cmd->t_state_lock, flags); -+ if (!(cmd->transport_state & CMD_T_ACTIVE)) -+ return false; -+ -+ if (fabric_stop && *aborted) - return false; -- } - - cmd->transport_state |= CMD_T_STOP; - -- pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08llx i_state: %d, t_state: %d, CMD_T_STOP\n", -- cmd, cmd->tag, cmd->se_tfo->get_cmd_state(cmd), cmd->t_state); -+ pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08llx i_state: %d," -+ " t_state: %d, CMD_T_STOP\n", cmd, cmd->tag, -+ cmd->se_tfo->get_cmd_state(cmd), cmd->t_state); - -- spin_unlock_irqrestore(&cmd->t_state_lock, flags); -+ spin_unlock_irqrestore(&cmd->t_state_lock, *flags); - - wait_for_completion(&cmd->t_transport_stop_comp); - -- spin_lock_irqsave(&cmd->t_state_lock, flags); -+ spin_lock_irqsave(&cmd->t_state_lock, *flags); - cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP); - -- pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->t_transport_stop_comp) for ITT: 0x%08llx\n", -- cmd->tag); -+ pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->" -+ "t_transport_stop_comp) for ITT: 0x%08llx\n", cmd->tag); -+ -+ return true; -+} - -+/** -+ * transport_wait_for_tasks - wait for completion to occur -+ * @cmd: command to wait -+ * -+ * Called from frontend fabric context to wait for storage engine -+ * to pause and/or release frontend generated struct se_cmd. 
-+ */ -+bool transport_wait_for_tasks(struct se_cmd *cmd) -+{ -+ unsigned long flags; -+ bool ret, aborted = false, tas = false; -+ -+ spin_lock_irqsave(&cmd->t_state_lock, flags); -+ ret = __transport_wait_for_tasks(cmd, false, &aborted, &tas, &flags); - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - -- return true; -+ return ret; - } - EXPORT_SYMBOL(transport_wait_for_tasks); - -@@ -2836,28 +2904,49 @@ transport_send_check_condition_and_sense(struct se_cmd *cmd, - } - EXPORT_SYMBOL(transport_send_check_condition_and_sense); - --int transport_check_aborted_status(struct se_cmd *cmd, int send_status) -+static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status) -+ __releases(&cmd->t_state_lock) -+ __acquires(&cmd->t_state_lock) - { -+ assert_spin_locked(&cmd->t_state_lock); -+ WARN_ON_ONCE(!irqs_disabled()); -+ - if (!(cmd->transport_state & CMD_T_ABORTED)) - return 0; -- - /* - * If cmd has been aborted but either no status is to be sent or it has - * already been sent, just return - */ -- if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS)) -+ if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS)) { -+ if (send_status) -+ cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS; - return 1; -+ } - -- pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB: 0x%02x ITT: 0x%08llx\n", -- cmd->t_task_cdb[0], cmd->tag); -+ pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB:" -+ " 0x%02x ITT: 0x%08llx\n", cmd->t_task_cdb[0], cmd->tag); - - cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS; - cmd->scsi_status = SAM_STAT_TASK_ABORTED; - trace_target_cmd_complete(cmd); -+ -+ spin_unlock_irq(&cmd->t_state_lock); - cmd->se_tfo->queue_status(cmd); -+ spin_lock_irq(&cmd->t_state_lock); - - return 1; - } -+ -+int transport_check_aborted_status(struct se_cmd *cmd, int send_status) -+{ -+ int ret; -+ -+ spin_lock_irq(&cmd->t_state_lock); -+ ret = __transport_check_aborted_status(cmd, send_status); -+ spin_unlock_irq(&cmd->t_state_lock); -+ -+ return ret; -+} - EXPORT_SYMBOL(transport_check_aborted_status); - - void transport_send_task_abort(struct se_cmd *cmd) -@@ -2879,11 +2968,17 @@ void transport_send_task_abort(struct se_cmd *cmd) - */ - if (cmd->data_direction == DMA_TO_DEVICE) { - if (cmd->se_tfo->write_pending_status(cmd) != 0) { -- cmd->transport_state |= CMD_T_ABORTED; -+ spin_lock_irqsave(&cmd->t_state_lock, flags); -+ if (cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS) { -+ spin_unlock_irqrestore(&cmd->t_state_lock, flags); -+ goto send_abort; -+ } - cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS; -+ spin_unlock_irqrestore(&cmd->t_state_lock, flags); - return; - } - } -+send_abort: - cmd->scsi_status = SAM_STAT_TASK_ABORTED; - - transport_lun_remove_cmd(cmd); -@@ -2900,8 +2995,17 @@ static void target_tmr_work(struct work_struct *work) - struct se_cmd *cmd = container_of(work, struct se_cmd, work); - struct se_device *dev = cmd->se_dev; - struct se_tmr_req *tmr = cmd->se_tmr_req; -+ unsigned long flags; - int ret; - -+ spin_lock_irqsave(&cmd->t_state_lock, flags); -+ if (cmd->transport_state & CMD_T_ABORTED) { -+ tmr->response = TMR_FUNCTION_REJECTED; -+ spin_unlock_irqrestore(&cmd->t_state_lock, flags); -+ goto check_stop; -+ } -+ spin_unlock_irqrestore(&cmd->t_state_lock, flags); -+ - switch (tmr->function) { - case TMR_ABORT_TASK: - core_tmr_abort_task(dev, tmr, cmd->se_sess); -@@ -2934,9 +3038,17 @@ static void target_tmr_work(struct work_struct *work) - break; - } - -+ spin_lock_irqsave(&cmd->t_state_lock, flags); -+ if (cmd->transport_state & 
CMD_T_ABORTED) { -+ spin_unlock_irqrestore(&cmd->t_state_lock, flags); -+ goto check_stop; -+ } - cmd->t_state = TRANSPORT_ISTATE_PROCESSING; -+ spin_unlock_irqrestore(&cmd->t_state_lock, flags); -+ - cmd->se_tfo->queue_tm_rsp(cmd); - -+check_stop: - transport_cmd_check_stop_to_fabric(cmd); - } - -diff --git a/drivers/thermal/step_wise.c b/drivers/thermal/step_wise.c -index 2f9f708..ea9366a 100644 ---- a/drivers/thermal/step_wise.c -+++ b/drivers/thermal/step_wise.c -@@ -63,6 +63,19 @@ static unsigned long get_target_state(struct thermal_instance *instance, - next_target = instance->target; - dev_dbg(&cdev->device, "cur_state=%ld\n", cur_state); - -+ if (!instance->initialized) { -+ if (throttle) { -+ next_target = (cur_state + 1) >= instance->upper ? -+ instance->upper : -+ ((cur_state + 1) < instance->lower ? -+ instance->lower : (cur_state + 1)); -+ } else { -+ next_target = THERMAL_NO_TARGET; -+ } -+ -+ return next_target; -+ } -+ - switch (trend) { - case THERMAL_TREND_RAISING: - if (throttle) { -@@ -149,7 +162,7 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip) - dev_dbg(&instance->cdev->device, "old_target=%d, target=%d\n", - old_target, (int)instance->target); - -- if (old_target == instance->target) -+ if (instance->initialized && old_target == instance->target) - continue; - - /* Activate a passive thermal instance */ -@@ -161,7 +174,7 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip) - instance->target == THERMAL_NO_TARGET) - update_passive_instance(tz, trip_type, -1); - -- -+ instance->initialized = true; - instance->cdev->updated = false; /* cdev needs update */ - } - -diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c -index d9e525c..ba08b55 100644 ---- a/drivers/thermal/thermal_core.c -+++ b/drivers/thermal/thermal_core.c -@@ -37,6 +37,7 @@ - #include <linux/of.h> - #include <net/netlink.h> - #include <net/genetlink.h> -+#include <linux/suspend.h> - - #define CREATE_TRACE_POINTS - #include <trace/events/thermal.h> -@@ -59,6 +60,8 @@ static LIST_HEAD(thermal_governor_list); - static DEFINE_MUTEX(thermal_list_lock); - static DEFINE_MUTEX(thermal_governor_lock); - -+static atomic_t in_suspend; -+ - static struct thermal_governor *def_governor; - - static struct thermal_governor *__find_governor(const char *name) -@@ -532,14 +535,31 @@ static void update_temperature(struct thermal_zone_device *tz) - mutex_unlock(&tz->lock); - - trace_thermal_temperature(tz); -- dev_dbg(&tz->device, "last_temperature=%d, current_temperature=%d\n", -- tz->last_temperature, tz->temperature); -+ if (tz->last_temperature == THERMAL_TEMP_INVALID) -+ dev_dbg(&tz->device, "last_temperature N/A, current_temperature=%d\n", -+ tz->temperature); -+ else -+ dev_dbg(&tz->device, "last_temperature=%d, current_temperature=%d\n", -+ tz->last_temperature, tz->temperature); -+} -+ -+static void thermal_zone_device_reset(struct thermal_zone_device *tz) -+{ -+ struct thermal_instance *pos; -+ -+ tz->temperature = THERMAL_TEMP_INVALID; -+ tz->passive = 0; -+ list_for_each_entry(pos, &tz->thermal_instances, tz_node) -+ pos->initialized = false; - } - - void thermal_zone_device_update(struct thermal_zone_device *tz) - { - int count; - -+ if (atomic_read(&in_suspend)) -+ return; -+ - if (!tz->ops->get_temp) - return; - -@@ -1321,6 +1341,7 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz, - if (!result) { - list_add_tail(&dev->tz_node, &tz->thermal_instances); - list_add_tail(&dev->cdev_node, 
&cdev->thermal_instances); -+ atomic_set(&tz->need_update, 1); - } - mutex_unlock(&cdev->lock); - mutex_unlock(&tz->lock); -@@ -1430,6 +1451,7 @@ __thermal_cooling_device_register(struct device_node *np, - const struct thermal_cooling_device_ops *ops) - { - struct thermal_cooling_device *cdev; -+ struct thermal_zone_device *pos = NULL; - int result; - - if (type && strlen(type) >= THERMAL_NAME_LENGTH) -@@ -1474,6 +1496,12 @@ __thermal_cooling_device_register(struct device_node *np, - /* Update binding information for 'this' new cdev */ - bind_cdev(cdev); - -+ mutex_lock(&thermal_list_lock); -+ list_for_each_entry(pos, &thermal_tz_list, node) -+ if (atomic_cmpxchg(&pos->need_update, 1, 0)) -+ thermal_zone_device_update(pos); -+ mutex_unlock(&thermal_list_lock); -+ - return cdev; - } - -@@ -1806,6 +1834,8 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type, - tz->trips = trips; - tz->passive_delay = passive_delay; - tz->polling_delay = polling_delay; -+ /* A new thermal zone needs to be updated anyway. */ -+ atomic_set(&tz->need_update, 1); - - dev_set_name(&tz->device, "thermal_zone%d", tz->id); - result = device_register(&tz->device); -@@ -1900,7 +1930,10 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type, - - INIT_DELAYED_WORK(&(tz->poll_queue), thermal_zone_device_check); - -- thermal_zone_device_update(tz); -+ thermal_zone_device_reset(tz); -+ /* Update the new thermal zone and mark it as already updated. */ -+ if (atomic_cmpxchg(&tz->need_update, 1, 0)) -+ thermal_zone_device_update(tz); - - return tz; - -@@ -2140,6 +2173,36 @@ static void thermal_unregister_governors(void) - thermal_gov_power_allocator_unregister(); - } - -+static int thermal_pm_notify(struct notifier_block *nb, -+ unsigned long mode, void *_unused) -+{ -+ struct thermal_zone_device *tz; -+ -+ switch (mode) { -+ case PM_HIBERNATION_PREPARE: -+ case PM_RESTORE_PREPARE: -+ case PM_SUSPEND_PREPARE: -+ atomic_set(&in_suspend, 1); -+ break; -+ case PM_POST_HIBERNATION: -+ case PM_POST_RESTORE: -+ case PM_POST_SUSPEND: -+ atomic_set(&in_suspend, 0); -+ list_for_each_entry(tz, &thermal_tz_list, node) { -+ thermal_zone_device_reset(tz); -+ thermal_zone_device_update(tz); -+ } -+ break; -+ default: -+ break; -+ } -+ return 0; -+} -+ -+static struct notifier_block thermal_pm_nb = { -+ .notifier_call = thermal_pm_notify, -+}; -+ - static int __init thermal_init(void) - { - int result; -@@ -2160,6 +2223,11 @@ static int __init thermal_init(void) - if (result) - goto exit_netlink; - -+ result = register_pm_notifier(&thermal_pm_nb); -+ if (result) -+ pr_warn("Thermal: Can not register suspend notifier, return %d\n", -+ result); -+ - return 0; - - exit_netlink: -@@ -2179,6 +2247,7 @@ error: - - static void __exit thermal_exit(void) - { -+ unregister_pm_notifier(&thermal_pm_nb); - of_thermal_destroy_zones(); - genetlink_exit(); - class_unregister(&thermal_class); -diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h -index d7ac1fc..749d41a 100644 ---- a/drivers/thermal/thermal_core.h -+++ b/drivers/thermal/thermal_core.h -@@ -41,6 +41,7 @@ struct thermal_instance { - struct thermal_zone_device *tz; - struct thermal_cooling_device *cdev; - int trip; -+ bool initialized; - unsigned long upper; /* Highest cooling state for this trip point */ - unsigned long lower; /* Lowest cooling state for this trip point */ - unsigned long target; /* expected cooling state */ -diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c -index e4c70dc..fa4e239 100644 
---- a/drivers/usb/class/cdc-acm.c -+++ b/drivers/usb/class/cdc-acm.c -@@ -1841,6 +1841,11 @@ static const struct usb_device_id acm_ids[] = { - }, - #endif - -+ /*Samsung phone in firmware update mode */ -+ { USB_DEVICE(0x04e8, 0x685d), -+ .driver_info = IGNORE_DEVICE, -+ }, -+ - /* Exclude Infineon Flash Loader utility */ - { USB_DEVICE(0x058b, 0x0041), - .driver_info = IGNORE_DEVICE, -diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h -index 36f1cb7..78be201 100644 ---- a/drivers/usb/dwc3/core.h -+++ b/drivers/usb/dwc3/core.h -@@ -853,7 +853,6 @@ struct dwc3 { - unsigned pullups_connected:1; - unsigned resize_fifos:1; - unsigned setup_packet_pending:1; -- unsigned start_config_issued:1; - unsigned three_stage_setup:1; - unsigned usb3_lpm_capable:1; - -diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c -index 5320e93..b13912d 100644 ---- a/drivers/usb/dwc3/ep0.c -+++ b/drivers/usb/dwc3/ep0.c -@@ -555,7 +555,6 @@ static int dwc3_ep0_set_config(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl) - int ret; - u32 reg; - -- dwc->start_config_issued = false; - cfg = le16_to_cpu(ctrl->wValue); - - switch (state) { -@@ -737,10 +736,6 @@ static int dwc3_ep0_std_request(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl) - dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_ISOCH_DELAY"); - ret = dwc3_ep0_set_isoch_delay(dwc, ctrl); - break; -- case USB_REQ_SET_INTERFACE: -- dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_INTERFACE"); -- dwc->start_config_issued = false; -- /* Fall through */ - default: - dwc3_trace(trace_dwc3_ep0, "Forwarding to gadget driver"); - ret = dwc3_ep0_delegate_req(dwc, ctrl); -diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c -index a58376f..69ffe6e 100644 ---- a/drivers/usb/dwc3/gadget.c -+++ b/drivers/usb/dwc3/gadget.c -@@ -388,24 +388,66 @@ static void dwc3_free_trb_pool(struct dwc3_ep *dep) - dep->trb_pool_dma = 0; - } - -+static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep); -+ -+/** -+ * dwc3_gadget_start_config - Configure EP resources -+ * @dwc: pointer to our controller context structure -+ * @dep: endpoint that is being enabled -+ * -+ * The assignment of transfer resources cannot perfectly follow the -+ * data book due to the fact that the controller driver does not have -+ * all knowledge of the configuration in advance. It is given this -+ * information piecemeal by the composite gadget framework after every -+ * SET_CONFIGURATION and SET_INTERFACE. Trying to follow the databook -+ * programming model in this scenario can cause errors. For two -+ * reasons: -+ * -+ * 1) The databook says to do DEPSTARTCFG for every SET_CONFIGURATION -+ * and SET_INTERFACE (8.1.5). This is incorrect in the scenario of -+ * multiple interfaces. -+ * -+ * 2) The databook does not mention doing more DEPXFERCFG for new -+ * endpoint on alt setting (8.1.6). -+ * -+ * The following simplified method is used instead: -+ * -+ * All hardware endpoints can be assigned a transfer resource and this -+ * setting will stay persistent until either a core reset or -+ * hibernation. So whenever we do a DEPSTARTCFG(0) we can go ahead and -+ * do DEPXFERCFG for every hardware endpoint as well. We are -+ * guaranteed that there are as many transfer resources as endpoints. -+ * -+ * This function is called for each endpoint when it is being enabled -+ * but is triggered only when called for EP0-out, which always happens -+ * first, and which should only happen in one of the above conditions. 
-+ */
-+static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
- {
- struct dwc3_gadget_ep_cmd_params params;
- u32 cmd;
-+ int i;
-+ int ret;
-+
-+ if (dep->number)
-+ return 0;
-
- memset(&params, 0x00, sizeof(params));
-+ cmd = DWC3_DEPCMD_DEPSTARTCFG;
-
-- if (dep->number != 1) {
-- cmd = DWC3_DEPCMD_DEPSTARTCFG;
-- /* XferRscIdx == 0 for ep0 and 2 for the remaining */
-- if (dep->number > 1) {
-- if (dwc->start_config_issued)
-- return 0;
-- dwc->start_config_issued = true;
-- cmd |= DWC3_DEPCMD_PARAM(2);
-- }
-+ ret = dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
-+ if (ret)
-+ return ret;
-
-- return dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
-+ for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
-+ struct dwc3_ep *dep = dwc->eps[i];
-+
-+ if (!dep)
-+ continue;
-+
-+ ret = dwc3_gadget_set_xfer_resource(dwc, dep);
-+ if (ret)
-+ return ret;
- }
-
- return 0;
-@@ -519,10 +561,6 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
- struct dwc3_trb *trb_st_hw;
- struct dwc3_trb *trb_link;
-
-- ret = dwc3_gadget_set_xfer_resource(dwc, dep);
-- if (ret)
-- return ret;
-
- dep->endpoint.desc = desc;
- dep->comp_desc = comp_desc;
- dep->type = usb_endpoint_type(desc);
-@@ -1604,8 +1642,6 @@ static int dwc3_gadget_start(struct usb_gadget *g,
- }
- dwc3_writel(dwc->regs, DWC3_DCFG, reg);
-
-- dwc->start_config_issued = false;
-
- /* Start with SuperSpeed Default */
- dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
-
-@@ -2202,7 +2238,6 @@ static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
- dwc3_writel(dwc->regs, DWC3_DCTL, reg);
-
- dwc3_disconnect_gadget(dwc);
-- dwc->start_config_issued = false;
-
- dwc->gadget.speed = USB_SPEED_UNKNOWN;
- dwc->setup_packet_pending = false;
-@@ -2253,7 +2288,6 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
-
- dwc3_stop_active_transfers(dwc);
- dwc3_clear_stall_all_ep(dwc);
-- dwc->start_config_issued = false;
-
- /* Reset device address to zero */
- reg = dwc3_readl(dwc->regs, DWC3_DCFG);
-diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
-index 1dd9919..a7caf53 100644
---- a/drivers/usb/serial/cp210x.c
-+++ b/drivers/usb/serial/cp210x.c
-@@ -162,6 +162,8 @@ static const struct usb_device_id id_table[] = {
- { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
- { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
- { USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
-+ { USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */
-+ { USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */
- { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
- { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
- { USB_DEVICE(0x1BA4, 0x0002) }, /* Silicon Labs 358x factory default */
-diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
-index db86e51..8849439a 100644
---- a/drivers/usb/serial/option.c
-+++ b/drivers/usb/serial/option.c
-@@ -315,6 +315,7 @@ static void option_instat_callback(struct urb *urb);
- #define TOSHIBA_PRODUCT_G450 0x0d45
-
- #define ALINK_VENDOR_ID 0x1e0e
-+#define SIMCOM_PRODUCT_SIM7100E 0x9001 /* Yes, ALINK_VENDOR_ID */
- #define ALINK_PRODUCT_PH300 0x9100
- #define ALINK_PRODUCT_3GU 0x9200
-
-@@ -607,6 +608,10 @@ static const struct option_blacklist_info zte_1255_blacklist = {
- .reserved = BIT(3) | BIT(4),
- };
-
-+static const struct option_blacklist_info simcom_sim7100e_blacklist = {
-+ .reserved = BIT(5) | BIT(6),
-+};
-+
-+static const struct option_blacklist_info 
telit_le910_blacklist = { - .sendsetup = BIT(0), - .reserved = BIT(1) | BIT(2), -@@ -1122,6 +1127,8 @@ static const struct usb_device_id option_ids[] = { - { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) }, - { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) }, - { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */ -+ { USB_DEVICE_AND_INTERFACE_INFO(QUALCOMM_VENDOR_ID, 0x6001, 0xff, 0xff, 0xff), /* 4G LTE usb-modem U901 */ -+ .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, - { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ - { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */ - { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */ -@@ -1645,6 +1652,8 @@ static const struct usb_device_id option_ids[] = { - { USB_DEVICE(ALINK_VENDOR_ID, 0x9000) }, - { USB_DEVICE(ALINK_VENDOR_ID, ALINK_PRODUCT_PH300) }, - { USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) }, -+ { USB_DEVICE(ALINK_VENDOR_ID, SIMCOM_PRODUCT_SIM7100E), -+ .driver_info = (kernel_ulong_t)&simcom_sim7100e_blacklist }, - { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200), - .driver_info = (kernel_ulong_t)&alcatel_x200_blacklist - }, -diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c -index 7efc329..7d3e5d0 100644 ---- a/drivers/virtio/virtio_balloon.c -+++ b/drivers/virtio/virtio_balloon.c -@@ -209,8 +209,8 @@ static unsigned leak_balloon(struct virtio_balloon *vb, size_t num) - */ - if (vb->num_pfns != 0) - tell_host(vb, vb->deflate_vq); -- mutex_unlock(&vb->balloon_lock); - release_pages_balloon(vb); -+ mutex_unlock(&vb->balloon_lock); - return num_freed_pages; - } - -diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c -index 78f804a..2046a68 100644 ---- a/drivers/virtio/virtio_pci_common.c -+++ b/drivers/virtio/virtio_pci_common.c -@@ -545,6 +545,7 @@ err_enable_device: - static void virtio_pci_remove(struct pci_dev *pci_dev) - { - struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev); -+ struct device *dev = get_device(&vp_dev->vdev.dev); - - unregister_virtio_device(&vp_dev->vdev); - -@@ -554,6 +555,7 @@ static void virtio_pci_remove(struct pci_dev *pci_dev) - virtio_pci_modern_remove(vp_dev); - - pci_disable_device(pci_dev); -+ put_device(dev); - } - - static struct pci_driver virtio_pci_driver = { -diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c -index 73dafdc..fb02214 100644 ---- a/drivers/xen/xen-pciback/pciback_ops.c -+++ b/drivers/xen/xen-pciback/pciback_ops.c -@@ -227,8 +227,9 @@ int xen_pcibk_enable_msix(struct xen_pcibk_device *pdev, - /* - * PCI_COMMAND_MEMORY must be enabled, otherwise we may not be able - * to access the BARs where the MSI-X entries reside. -+ * But VF devices are unique in which the PF needs to be checked. 
- */ -- pci_read_config_word(dev, PCI_COMMAND, &cmd); -+ pci_read_config_word(pci_physfn(dev), PCI_COMMAND, &cmd); - if (dev->msi_enabled || !(cmd & PCI_COMMAND_MEMORY)) - return -ENXIO; - -@@ -332,6 +333,9 @@ void xen_pcibk_do_op(struct work_struct *data) - struct xen_pcibk_dev_data *dev_data = NULL; - struct xen_pci_op *op = &pdev->op; - int test_intx = 0; -+#ifdef CONFIG_PCI_MSI -+ unsigned int nr = 0; -+#endif - - *op = pdev->sh_info->op; - barrier(); -@@ -360,6 +364,7 @@ void xen_pcibk_do_op(struct work_struct *data) - op->err = xen_pcibk_disable_msi(pdev, dev, op); - break; - case XEN_PCI_OP_enable_msix: -+ nr = op->value; - op->err = xen_pcibk_enable_msix(pdev, dev, op); - break; - case XEN_PCI_OP_disable_msix: -@@ -382,7 +387,7 @@ void xen_pcibk_do_op(struct work_struct *data) - if (op->cmd == XEN_PCI_OP_enable_msix && op->err == 0) { - unsigned int i; - -- for (i = 0; i < op->value; i++) -+ for (i = 0; i < nr; i++) - pdev->sh_info->op.msix_entries[i].vector = - op->msix_entries[i].vector; - } -diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c -index ad4eb10..51387d7 100644 ---- a/drivers/xen/xen-scsiback.c -+++ b/drivers/xen/xen-scsiback.c -@@ -939,12 +939,12 @@ out: - spin_unlock_irqrestore(&info->v2p_lock, flags); - - out_free: -- mutex_lock(&tpg->tv_tpg_mutex); -- tpg->tv_tpg_fe_count--; -- mutex_unlock(&tpg->tv_tpg_mutex); -- -- if (err) -+ if (err) { -+ mutex_lock(&tpg->tv_tpg_mutex); -+ tpg->tv_tpg_fe_count--; -+ mutex_unlock(&tpg->tv_tpg_mutex); - kfree(new); -+ } - - return err; - } -diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c -index 0ddca67..4958360 100644 ---- a/fs/btrfs/disk-io.c -+++ b/fs/btrfs/disk-io.c -@@ -1582,8 +1582,23 @@ int btrfs_init_fs_root(struct btrfs_root *root) - ret = get_anon_bdev(&root->anon_dev); - if (ret) - goto free_writers; -+ -+ mutex_lock(&root->objectid_mutex); -+ ret = btrfs_find_highest_objectid(root, -+ &root->highest_objectid); -+ if (ret) { -+ mutex_unlock(&root->objectid_mutex); -+ goto free_root_dev; -+ } -+ -+ ASSERT(root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID); -+ -+ mutex_unlock(&root->objectid_mutex); -+ - return 0; - -+free_root_dev: -+ free_anon_bdev(root->anon_dev); - free_writers: - btrfs_free_subvolume_writers(root->subv_writers); - fail: -@@ -2667,6 +2682,7 @@ int open_ctree(struct super_block *sb, - if (btrfs_check_super_csum(bh->b_data)) { - printk(KERN_ERR "BTRFS: superblock checksum mismatch\n"); - err = -EINVAL; -+ brelse(bh); - goto fail_alloc; - } - -@@ -2899,6 +2915,18 @@ retry_root_backup: - tree_root->commit_root = btrfs_root_node(tree_root); - btrfs_set_root_refs(&tree_root->root_item, 1); - -+ mutex_lock(&tree_root->objectid_mutex); -+ ret = btrfs_find_highest_objectid(tree_root, -+ &tree_root->highest_objectid); -+ if (ret) { -+ mutex_unlock(&tree_root->objectid_mutex); -+ goto recovery_tree_root; -+ } -+ -+ ASSERT(tree_root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID); -+ -+ mutex_unlock(&tree_root->objectid_mutex); -+ - ret = btrfs_read_roots(fs_info, tree_root); - if (ret) - goto recovery_tree_root; -diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c -index 767a605..07573dc 100644 ---- a/fs/btrfs/inode-map.c -+++ b/fs/btrfs/inode-map.c -@@ -515,7 +515,7 @@ out: - return ret; - } - --static int btrfs_find_highest_objectid(struct btrfs_root *root, u64 *objectid) -+int btrfs_find_highest_objectid(struct btrfs_root *root, u64 *objectid) - { - struct btrfs_path *path; - int ret; -@@ -555,13 +555,6 @@ int btrfs_find_free_objectid(struct btrfs_root *root, u64 *objectid) - int 
ret; - mutex_lock(&root->objectid_mutex); - -- if (unlikely(root->highest_objectid < BTRFS_FIRST_FREE_OBJECTID)) { -- ret = btrfs_find_highest_objectid(root, -- &root->highest_objectid); -- if (ret) -- goto out; -- } -- - if (unlikely(root->highest_objectid >= BTRFS_LAST_FREE_OBJECTID)) { - ret = -ENOSPC; - goto out; -diff --git a/fs/btrfs/inode-map.h b/fs/btrfs/inode-map.h -index ddb347b..c8e864b 100644 ---- a/fs/btrfs/inode-map.h -+++ b/fs/btrfs/inode-map.h -@@ -9,5 +9,6 @@ int btrfs_save_ino_cache(struct btrfs_root *root, - struct btrfs_trans_handle *trans); - - int btrfs_find_free_objectid(struct btrfs_root *root, u64 *objectid); -+int btrfs_find_highest_objectid(struct btrfs_root *root, u64 *objectid); - - #endif -diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c -index 54b5f0d..52fc1b5 100644 ---- a/fs/btrfs/inode.c -+++ b/fs/btrfs/inode.c -@@ -6493,7 +6493,7 @@ out_unlock_inode: - static int btrfs_link(struct dentry *old_dentry, struct inode *dir, - struct dentry *dentry) - { -- struct btrfs_trans_handle *trans; -+ struct btrfs_trans_handle *trans = NULL; - struct btrfs_root *root = BTRFS_I(dir)->root; - struct inode *inode = d_inode(old_dentry); - u64 index; -@@ -6519,6 +6519,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir, - trans = btrfs_start_transaction(root, 5); - if (IS_ERR(trans)) { - err = PTR_ERR(trans); -+ trans = NULL; - goto fail; - } - -@@ -6552,9 +6553,10 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir, - btrfs_log_new_name(trans, inode, NULL, parent); - } - -- btrfs_end_transaction(trans, root); - btrfs_balance_delayed_items(root); - fail: -+ if (trans) -+ btrfs_end_transaction(trans, root); - if (drop_inode) { - inode_dec_link_count(inode); - iput(inode); -@@ -8548,15 +8550,28 @@ int btrfs_readpage(struct file *file, struct page *page) - static int btrfs_writepage(struct page *page, struct writeback_control *wbc) - { - struct extent_io_tree *tree; -- -+ struct inode *inode = page->mapping->host; -+ int ret; - - if (current->flags & PF_MEMALLOC) { - redirty_page_for_writepage(wbc, page); - unlock_page(page); - return 0; - } -+ -+ /* -+ * If we are under memory pressure we will call this directly from the -+ * VM, we need to make sure we have the inode referenced for the ordered -+ * extent. If not just return like we didn't do anything. 
-+ */ -+ if (!igrab(inode)) { -+ redirty_page_for_writepage(wbc, page); -+ return AOP_WRITEPAGE_ACTIVATE; -+ } - tree = &BTRFS_I(page->mapping->host)->io_tree; -- return extent_write_full_page(tree, page, btrfs_get_extent, wbc); -+ ret = extent_write_full_page(tree, page, btrfs_get_extent, wbc); -+ btrfs_add_delayed_iput(inode); -+ return ret; - } - - static int btrfs_writepages(struct address_space *mapping, -@@ -9650,9 +9665,11 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry, - /* - * 2 items for inode item and ref - * 2 items for dir items -+ * 1 item for updating parent inode item -+ * 1 item for the inline extent item - * 1 item for xattr if selinux is on - */ -- trans = btrfs_start_transaction(root, 5); -+ trans = btrfs_start_transaction(root, 7); - if (IS_ERR(trans)) - return PTR_ERR(trans); - -diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c -index 08fd3f0..f07d01b 100644 ---- a/fs/btrfs/ioctl.c -+++ b/fs/btrfs/ioctl.c -@@ -568,6 +568,10 @@ static noinline int create_subvol(struct inode *dir, - goto fail; - } - -+ mutex_lock(&new_root->objectid_mutex); -+ new_root->highest_objectid = new_dirid; -+ mutex_unlock(&new_root->objectid_mutex); -+ - /* - * insert the directory item - */ -diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c -index 355a458..63a6152 100644 ---- a/fs/btrfs/send.c -+++ b/fs/btrfs/send.c -@@ -1469,7 +1469,21 @@ static int read_symlink(struct btrfs_root *root, - ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); - if (ret < 0) - goto out; -- BUG_ON(ret); -+ if (ret) { -+ /* -+ * An empty symlink inode. Can happen in rare error paths when -+ * creating a symlink (transaction committed before the inode -+ * eviction handler removed the symlink inode items and a crash -+ * happened in between or the subvol was snapshoted in between). -+ * Print an informative message to dmesg/syslog so that the user -+ * can delete the symlink. -+ */ -+ btrfs_err(root->fs_info, -+ "Found empty symlink inode %llu at root %llu", -+ ino, root->root_key.objectid); -+ ret = -EIO; -+ goto out; -+ } - - ei = btrfs_item_ptr(path->nodes[0], path->slots[0], - struct btrfs_file_extent_item); -diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c -index 24154e4..fe609b8 100644 ---- a/fs/btrfs/super.c -+++ b/fs/btrfs/super.c -@@ -1956,6 +1956,8 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes) - * there are other factors that may change the result (like a new metadata - * chunk). - * -+ * If metadata is exhausted, f_bavail will be 0. -+ * - * FIXME: not accurate for mixed block groups, total and free/used are ok, - * available appears slightly larger. 
- */ -@@ -1967,11 +1969,13 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf) - struct btrfs_space_info *found; - u64 total_used = 0; - u64 total_free_data = 0; -+ u64 total_free_meta = 0; - int bits = dentry->d_sb->s_blocksize_bits; - __be32 *fsid = (__be32 *)fs_info->fsid; - unsigned factor = 1; - struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv; - int ret; -+ u64 thresh = 0; - - /* - * holding chunk_muext to avoid allocating new chunks, holding -@@ -1997,6 +2001,8 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf) - } - } - } -+ if (found->flags & BTRFS_BLOCK_GROUP_METADATA) -+ total_free_meta += found->disk_total - found->disk_used; - - total_used += found->disk_used; - } -@@ -2019,6 +2025,24 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf) - buf->f_bavail += div_u64(total_free_data, factor); - buf->f_bavail = buf->f_bavail >> bits; - -+ /* -+ * We calculate the remaining metadata space minus global reserve. If -+ * this is (supposedly) smaller than zero, there's no space. But this -+ * does not hold in practice, the exhausted state happens where's still -+ * some positive delta. So we apply some guesswork and compare the -+ * delta to a 4M threshold. (Practically observed delta was ~2M.) -+ * -+ * We probably cannot calculate the exact threshold value because this -+ * depends on the internal reservations requested by various -+ * operations, so some operations that consume a few metadata will -+ * succeed even if the Avail is zero. But this is better than the other -+ * way around. -+ */ -+ thresh = 4 * 1024 * 1024; -+ -+ if (total_free_meta - thresh < block_rsv->size) -+ buf->f_bavail = 0; -+ - buf->f_type = BTRFS_SUPER_MAGIC; - buf->f_bsize = dentry->d_sb->s_blocksize; - buf->f_namelen = BTRFS_NAME_LEN; -diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c -index 9e08447..9c62a6f 100644 ---- a/fs/btrfs/volumes.c -+++ b/fs/btrfs/volumes.c -@@ -232,6 +232,7 @@ static struct btrfs_device *__alloc_device(void) - spin_lock_init(&dev->reada_lock); - atomic_set(&dev->reada_in_flight, 0); - atomic_set(&dev->dev_stats_ccnt, 0); -+ btrfs_device_data_ordered_init(dev); - INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM); - INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM); - -diff --git a/fs/direct-io.c b/fs/direct-io.c -index 602e844..01171d8 100644 ---- a/fs/direct-io.c -+++ b/fs/direct-io.c -@@ -472,8 +472,8 @@ static int dio_bio_complete(struct dio *dio, struct bio *bio) - dio->io_error = -EIO; - - if (dio->is_async && dio->rw == READ && dio->should_dirty) { -- bio_check_pages_dirty(bio); /* transfers ownership */ - err = bio->bi_error; -+ bio_check_pages_dirty(bio); /* transfers ownership */ - } else { - bio_for_each_segment_all(bvec, bio, i) { - struct page *page = bvec->bv_page; -diff --git a/fs/efivarfs/file.c b/fs/efivarfs/file.c -index 90001da..66842e5 100644 ---- a/fs/efivarfs/file.c -+++ b/fs/efivarfs/file.c -@@ -10,6 +10,7 @@ - #include <linux/efi.h> - #include <linux/fs.h> - #include <linux/slab.h> -+#include <linux/mount.h> - - #include "internal.h" - -@@ -103,9 +104,78 @@ out_free: - return size; - } - -+static int -+efivarfs_ioc_getxflags(struct file *file, void __user *arg) -+{ -+ struct inode *inode = file->f_mapping->host; -+ unsigned int i_flags; -+ unsigned int flags = 0; -+ -+ i_flags = inode->i_flags; -+ if (i_flags & S_IMMUTABLE) -+ flags |= FS_IMMUTABLE_FL; -+ -+ if (copy_to_user(arg, &flags, sizeof(flags))) -+ return -EFAULT; -+ return 0; -+} -+ 
-+static int -+efivarfs_ioc_setxflags(struct file *file, void __user *arg) -+{ -+ struct inode *inode = file->f_mapping->host; -+ unsigned int flags; -+ unsigned int i_flags = 0; -+ int error; -+ -+ if (!inode_owner_or_capable(inode)) -+ return -EACCES; -+ -+ if (copy_from_user(&flags, arg, sizeof(flags))) -+ return -EFAULT; -+ -+ if (flags & ~FS_IMMUTABLE_FL) -+ return -EOPNOTSUPP; -+ -+ if (!capable(CAP_LINUX_IMMUTABLE)) -+ return -EPERM; -+ -+ if (flags & FS_IMMUTABLE_FL) -+ i_flags |= S_IMMUTABLE; -+ -+ -+ error = mnt_want_write_file(file); -+ if (error) -+ return error; -+ -+ mutex_lock(&inode->i_mutex); -+ inode_set_flags(inode, i_flags, S_IMMUTABLE); -+ mutex_unlock(&inode->i_mutex); -+ -+ mnt_drop_write_file(file); -+ -+ return 0; -+} -+ -+long -+efivarfs_file_ioctl(struct file *file, unsigned int cmd, unsigned long p) -+{ -+ void __user *arg = (void __user *)p; -+ -+ switch (cmd) { -+ case FS_IOC_GETFLAGS: -+ return efivarfs_ioc_getxflags(file, arg); -+ case FS_IOC_SETFLAGS: -+ return efivarfs_ioc_setxflags(file, arg); -+ } -+ -+ return -ENOTTY; -+} -+ - const struct file_operations efivarfs_file_operations = { - .open = simple_open, - .read = efivarfs_file_read, - .write = efivarfs_file_write, - .llseek = no_llseek, -+ .unlocked_ioctl = efivarfs_file_ioctl, - }; -diff --git a/fs/efivarfs/inode.c b/fs/efivarfs/inode.c -index 3381b9d..e2ab6d0 100644 ---- a/fs/efivarfs/inode.c -+++ b/fs/efivarfs/inode.c -@@ -15,7 +15,8 @@ - #include "internal.h" - - struct inode *efivarfs_get_inode(struct super_block *sb, -- const struct inode *dir, int mode, dev_t dev) -+ const struct inode *dir, int mode, -+ dev_t dev, bool is_removable) - { - struct inode *inode = new_inode(sb); - -@@ -23,6 +24,7 @@ struct inode *efivarfs_get_inode(struct super_block *sb, - inode->i_ino = get_next_ino(); - inode->i_mode = mode; - inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; -+ inode->i_flags = is_removable ? 
0 : S_IMMUTABLE; - switch (mode & S_IFMT) { - case S_IFREG: - inode->i_fop = &efivarfs_file_operations; -@@ -102,22 +104,17 @@ static void efivarfs_hex_to_guid(const char *str, efi_guid_t *guid) - static int efivarfs_create(struct inode *dir, struct dentry *dentry, - umode_t mode, bool excl) - { -- struct inode *inode; -+ struct inode *inode = NULL; - struct efivar_entry *var; - int namelen, i = 0, err = 0; -+ bool is_removable = false; - - if (!efivarfs_valid_name(dentry->d_name.name, dentry->d_name.len)) - return -EINVAL; - -- inode = efivarfs_get_inode(dir->i_sb, dir, mode, 0); -- if (!inode) -- return -ENOMEM; -- - var = kzalloc(sizeof(struct efivar_entry), GFP_KERNEL); -- if (!var) { -- err = -ENOMEM; -- goto out; -- } -+ if (!var) -+ return -ENOMEM; - - /* length of the variable name itself: remove GUID and separator */ - namelen = dentry->d_name.len - EFI_VARIABLE_GUID_LEN - 1; -@@ -125,6 +122,16 @@ static int efivarfs_create(struct inode *dir, struct dentry *dentry, - efivarfs_hex_to_guid(dentry->d_name.name + namelen + 1, - &var->var.VendorGuid); - -+ if (efivar_variable_is_removable(var->var.VendorGuid, -+ dentry->d_name.name, namelen)) -+ is_removable = true; -+ -+ inode = efivarfs_get_inode(dir->i_sb, dir, mode, 0, is_removable); -+ if (!inode) { -+ err = -ENOMEM; -+ goto out; -+ } -+ - for (i = 0; i < namelen; i++) - var->var.VariableName[i] = dentry->d_name.name[i]; - -@@ -138,7 +145,8 @@ static int efivarfs_create(struct inode *dir, struct dentry *dentry, - out: - if (err) { - kfree(var); -- iput(inode); -+ if (inode) -+ iput(inode); - } - return err; - } -diff --git a/fs/efivarfs/internal.h b/fs/efivarfs/internal.h -index b5ff16a..b450518 100644 ---- a/fs/efivarfs/internal.h -+++ b/fs/efivarfs/internal.h -@@ -15,7 +15,8 @@ extern const struct file_operations efivarfs_file_operations; - extern const struct inode_operations efivarfs_dir_inode_operations; - extern bool efivarfs_valid_name(const char *str, int len); - extern struct inode *efivarfs_get_inode(struct super_block *sb, -- const struct inode *dir, int mode, dev_t dev); -+ const struct inode *dir, int mode, dev_t dev, -+ bool is_removable); - - extern struct list_head efivarfs_list; - -diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c -index 86a2121..abb244b 100644 ---- a/fs/efivarfs/super.c -+++ b/fs/efivarfs/super.c -@@ -118,8 +118,9 @@ static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor, - struct dentry *dentry, *root = sb->s_root; - unsigned long size = 0; - char *name; -- int len, i; -+ int len; - int err = -ENOMEM; -+ bool is_removable = false; - - entry = kzalloc(sizeof(*entry), GFP_KERNEL); - if (!entry) -@@ -128,15 +129,17 @@ static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor, - memcpy(entry->var.VariableName, name16, name_size); - memcpy(&(entry->var.VendorGuid), &vendor, sizeof(efi_guid_t)); - -- len = ucs2_strlen(entry->var.VariableName); -+ len = ucs2_utf8size(entry->var.VariableName); - - /* name, plus '-', plus GUID, plus NUL*/ - name = kmalloc(len + 1 + EFI_VARIABLE_GUID_LEN + 1, GFP_KERNEL); - if (!name) - goto fail; - -- for (i = 0; i < len; i++) -- name[i] = entry->var.VariableName[i] & 0xFF; -+ ucs2_as_utf8(name, entry->var.VariableName, len); -+ -+ if (efivar_variable_is_removable(entry->var.VendorGuid, name, len)) -+ is_removable = true; - - name[len] = '-'; - -@@ -144,7 +147,8 @@ static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor, - - name[len + EFI_VARIABLE_GUID_LEN+1] = '\0'; - -- inode = efivarfs_get_inode(sb, d_inode(root), S_IFREG | 
0644, 0); -+ inode = efivarfs_get_inode(sb, d_inode(root), S_IFREG | 0644, 0, -+ is_removable); - if (!inode) - goto fail_name; - -@@ -200,7 +204,7 @@ static int efivarfs_fill_super(struct super_block *sb, void *data, int silent) - sb->s_d_op = &efivarfs_d_ops; - sb->s_time_gran = 1; - -- inode = efivarfs_get_inode(sb, NULL, S_IFDIR | 0755, 0); -+ inode = efivarfs_get_inode(sb, NULL, S_IFDIR | 0755, 0, true); - if (!inode) - return -ENOMEM; - inode->i_op = &efivarfs_dir_inode_operations; -diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c -index ea433a7..06bda03 100644 ---- a/fs/ext4/inode.c -+++ b/fs/ext4/inode.c -@@ -657,6 +657,34 @@ has_zeroout: - return retval; - } - -+/* -+ * Update EXT4_MAP_FLAGS in bh->b_state. For buffer heads attached to pages -+ * we have to be careful as someone else may be manipulating b_state as well. -+ */ -+static void ext4_update_bh_state(struct buffer_head *bh, unsigned long flags) -+{ -+ unsigned long old_state; -+ unsigned long new_state; -+ -+ flags &= EXT4_MAP_FLAGS; -+ -+ /* Dummy buffer_head? Set non-atomically. */ -+ if (!bh->b_page) { -+ bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | flags; -+ return; -+ } -+ /* -+ * Someone else may be modifying b_state. Be careful! This is ugly but -+ * once we get rid of using bh as a container for mapping information -+ * to pass to / from get_block functions, this can go away. -+ */ -+ do { -+ old_state = READ_ONCE(bh->b_state); -+ new_state = (old_state & ~EXT4_MAP_FLAGS) | flags; -+ } while (unlikely( -+ cmpxchg(&bh->b_state, old_state, new_state) != old_state)); -+} -+ - /* Maximum number of blocks we map for direct IO at once. */ - #define DIO_MAX_BLOCKS 4096 - -@@ -693,7 +721,7 @@ static int _ext4_get_block(struct inode *inode, sector_t iblock, - ext4_io_end_t *io_end = ext4_inode_aio(inode); - - map_bh(bh, inode->i_sb, map.m_pblk); -- bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags; -+ ext4_update_bh_state(bh, map.m_flags); - if (IS_DAX(inode) && buffer_unwritten(bh)) { - /* - * dgc: I suspect unwritten conversion on ext4+DAX is -@@ -1669,7 +1697,7 @@ int ext4_da_get_block_prep(struct inode *inode, sector_t iblock, - return ret; - - map_bh(bh, inode->i_sb, map.m_pblk); -- bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags; -+ ext4_update_bh_state(bh, map.m_flags); - - if (buffer_unwritten(bh)) { - /* A delayed write to unwritten bh should be marked -diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c -index 023f6a1..e5232bb 100644 ---- a/fs/fs-writeback.c -+++ b/fs/fs-writeback.c -@@ -317,6 +317,7 @@ static void inode_switch_wbs_work_fn(struct work_struct *work) - struct inode_switch_wbs_context *isw = - container_of(work, struct inode_switch_wbs_context, work); - struct inode *inode = isw->inode; -+ struct super_block *sb = inode->i_sb; - struct address_space *mapping = inode->i_mapping; - struct bdi_writeback *old_wb = inode->i_wb; - struct bdi_writeback *new_wb = isw->new_wb; -@@ -423,6 +424,7 @@ skip_switch: - wb_put(new_wb); - - iput(inode); -+ deactivate_super(sb); - kfree(isw); - } - -@@ -469,11 +471,14 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id) - - /* while holding I_WB_SWITCH, no one else can update the association */ - spin_lock(&inode->i_lock); -+ - if (inode->i_state & (I_WB_SWITCH | I_FREEING) || -- inode_to_wb(inode) == isw->new_wb) { -- spin_unlock(&inode->i_lock); -- goto out_free; -- } -+ inode_to_wb(inode) == isw->new_wb) -+ goto out_unlock; -+ -+ if (!atomic_inc_not_zero(&inode->i_sb->s_active)) -+ goto out_unlock; -+ - inode->i_state |= 
I_WB_SWITCH;
- spin_unlock(&inode->i_lock);
-
-@@ -489,6 +494,8 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
- call_rcu(&isw->rcu_head, inode_switch_wbs_rcu_fn);
- return;
-
-+out_unlock:
-+ spin_unlock(&inode->i_lock);
- out_free:
- if (isw->new_wb)
- wb_put(isw->new_wb);
-diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
-index 2ac99db..5a7b322 100644
---- a/fs/hostfs/hostfs_kern.c
-+++ b/fs/hostfs/hostfs_kern.c
-@@ -730,15 +730,13 @@ static int hostfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
-
- init_special_inode(inode, mode, dev);
- err = do_mknod(name, mode, MAJOR(dev), MINOR(dev));
-- if (!err)
-+ if (err)
- goto out_free;
-
- err = read_name(inode, name);
- __putname(name);
- if (err)
- goto out_put;
-- if (err)
-- goto out_put;
-
- d_instantiate(dentry, inode);
- return 0;
-diff --git a/fs/hpfs/namei.c b/fs/hpfs/namei.c
-index ae4d5a1..bffb908 100644
---- a/fs/hpfs/namei.c
-+++ b/fs/hpfs/namei.c
-@@ -375,12 +375,11 @@ static int hpfs_unlink(struct inode *dir, struct dentry *dentry)
- struct inode *inode = d_inode(dentry);
- dnode_secno dno;
- int r;
-- int rep = 0;
- int err;
-
- hpfs_lock(dir->i_sb);
- hpfs_adjust_length(name, &len);
--again:
-+
- err = -ENOENT;
- de = map_dirent(dir, hpfs_i(dir)->i_dno, name, len, &dno, &qbh);
- if (!de)
-@@ -400,33 +399,9 @@ again:
- hpfs_error(dir->i_sb, "there was error when removing dirent");
- err = -EFSERROR;
- break;
-- case 2: /* no space for deleting, try to truncate file */
--
-+ case 2: /* no space for deleting */
- err = -ENOSPC;
-- if (rep++)
-- break;
--
-- dentry_unhash(dentry);
-- if (!d_unhashed(dentry)) {
-- hpfs_unlock(dir->i_sb);
-- return -ENOSPC;
-- }
-- if (generic_permission(inode, MAY_WRITE) ||
-- !S_ISREG(inode->i_mode) ||
-- get_write_access(inode)) {
-- d_rehash(dentry);
-- } else {
-- struct iattr newattrs;
-- /*pr_info("truncating file before delete.\n");*/
-- newattrs.ia_size = 0;
-- newattrs.ia_valid = ATTR_SIZE | ATTR_CTIME;
-- err = notify_change(dentry, &newattrs, NULL);
-- put_write_access(inode);
-- if (!err)
-- goto again;
-- }
-- hpfs_unlock(dir->i_sb);
-- return -ENOSPC;
-+ break;
- default:
- drop_nlink(inode);
- err = 0;
-diff --git a/fs/locks.c b/fs/locks.c
-index 0d2b326..6333263 100644
---- a/fs/locks.c
-+++ b/fs/locks.c
-@@ -2182,7 +2182,6 @@ int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
- goto out;
- }
-
--again:
- error = flock_to_posix_lock(filp, file_lock, &flock);
- if (error)
- goto out;
-@@ -2224,19 +2223,22 @@ again:
- * Attempt to detect a close/fcntl race and recover by
- * releasing the lock that was just acquired.
- */
-- /*
-- * we need that spin_lock here - it prevents reordering between
-- * update of i_flctx->flc_posix and check for it done in close().
-- * rcu_read_lock() wouldn't do.
-- */
-- spin_lock(&current->files->file_lock);
-- f = fcheck(fd);
-- spin_unlock(&current->files->file_lock);
-- if (!error && f != filp && flock.l_type != F_UNLCK) {
-- flock.l_type = F_UNLCK;
-- goto again;
-+ if (!error && file_lock->fl_type != F_UNLCK) {
-+ /*
-+ * We need that spin_lock here - it prevents reordering between
-+ * update of i_flctx->flc_posix and check for it done in
-+ * close(). rcu_read_lock() wouldn't do. 
-+ */
-+ spin_lock(&current->files->file_lock);
-+ f = fcheck(fd);
-+ spin_unlock(&current->files->file_lock);
-+ if (f != filp) {
-+ file_lock->fl_type = F_UNLCK;
-+ error = do_lock_file_wait(filp, cmd, file_lock);
-+ WARN_ON_ONCE(error);
-+ error = -EBADF;
-+ }
- }
--
- out:
- locks_free_lock(file_lock);
- return error;
-@@ -2322,7 +2324,6 @@ int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
- goto out;
- }
-
--again:
- error = flock64_to_posix_lock(filp, file_lock, &flock);
- if (error)
- goto out;
-@@ -2364,14 +2365,22 @@ again:
- * Attempt to detect a close/fcntl race and recover by
- * releasing the lock that was just acquired.
- */
-- spin_lock(&current->files->file_lock);
-- f = fcheck(fd);
-- spin_unlock(&current->files->file_lock);
-- if (!error && f != filp && flock.l_type != F_UNLCK) {
-- flock.l_type = F_UNLCK;
-- goto again;
-+ if (!error && file_lock->fl_type != F_UNLCK) {
-+ /*
-+ * We need that spin_lock here - it prevents reordering between
-+ * update of i_flctx->flc_posix and check for it done in
-+ * close(). rcu_read_lock() wouldn't do.
-+ */
-+ spin_lock(&current->files->file_lock);
-+ f = fcheck(fd);
-+ spin_unlock(&current->files->file_lock);
-+ if (f != filp) {
-+ file_lock->fl_type = F_UNLCK;
-+ error = do_lock_file_wait(filp, cmd, file_lock);
-+ WARN_ON_ONCE(error);
-+ error = -EBADF;
-+ }
- }
--
- out:
- locks_free_lock(file_lock);
- return error;
-diff --git a/fs/namei.c b/fs/namei.c
-index 0c3974c..d8ee4da 100644
---- a/fs/namei.c
-+++ b/fs/namei.c
-@@ -1711,6 +1711,11 @@ static inline int should_follow_link(struct nameidata *nd, struct path *link,
- return 0;
- if (!follow)
- return 0;
-+ /* make sure that d_is_symlink above matches inode */
-+ if (nd->flags & LOOKUP_RCU) {
-+ if (read_seqcount_retry(&link->dentry->d_seq, seq))
-+ return -ECHILD;
-+ }
- return pick_link(nd, link, inode, seq);
- }
-
-@@ -1742,11 +1747,11 @@ static int walk_component(struct nameidata *nd, int flags)
- if (err < 0)
- return err;
-
-- inode = d_backing_inode(path.dentry);
- seq = 0; /* we are already out of RCU mode */
- err = -ENOENT;
- if (d_is_negative(path.dentry))
- goto out_path_put;
-+ inode = d_backing_inode(path.dentry);
- }
-
- if (flags & WALK_PUT)
-@@ -3130,12 +3135,12 @@ retry_lookup:
- return error;
-
- BUG_ON(nd->flags & LOOKUP_RCU);
-- inode = d_backing_inode(path.dentry);
- seq = 0; /* out of RCU mode, so the value doesn't matter */
- if (unlikely(d_is_negative(path.dentry))) {
- path_to_nameidata(&path, nd);
- return -ENOENT;
- }
-+ inode = d_backing_inode(path.dentry);
- finish_lookup:
- if (nd->depth)
- put_link(nd);
-@@ -3144,11 +3149,6 @@ finish_lookup:
- if (unlikely(error))
- return error;
-
-- if (unlikely(d_is_symlink(path.dentry)) && !(open_flag & O_PATH)) {
-- path_to_nameidata(&path, nd);
-- return -ELOOP;
-- }
-
- if ((nd->flags & LOOKUP_RCU) || nd->path.mnt != path.mnt) {
- path_to_nameidata(&path, nd);
- } else {
-@@ -3167,6 +3167,10 @@ finish_open:
- return error;
- }
- audit_inode(nd->name, nd->path.dentry, 0);
-+ if (unlikely(d_is_symlink(nd->path.dentry)) && !(open_flag & O_PATH)) {
-+ error = -ELOOP;
-+ goto out;
-+ }
- error = -EISDIR;
- if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry))
- goto out;
-@@ -3210,6 +3214,10 @@ opened:
- goto exit_fput;
- }
- out:
-+ if (unlikely(error > 0)) {
-+ WARN_ON(1);
-+ error = -EINVAL;
-+ }
- if (got_write)
- mnt_drop_write(nd->path.mnt);
- path_put(&save_parent);
-diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
-index f496ed7..98a4415 100644
---- a/fs/nfs/nfs4proc.c
-+++ b/fs/nfs/nfs4proc.c
-@@ -2461,9 
+2461,9 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata, - dentry = d_add_unique(dentry, igrab(state->inode)); - if (dentry == NULL) { - dentry = opendata->dentry; -- } else if (dentry != ctx->dentry) { -+ } else { - dput(ctx->dentry); -- ctx->dentry = dget(dentry); -+ ctx->dentry = dentry; - } - nfs_set_verifier(dentry, - nfs_save_change_attribute(d_inode(opendata->dir))); -diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c -index 7f60472..e6795c7 100644 ---- a/fs/ocfs2/aops.c -+++ b/fs/ocfs2/aops.c -@@ -956,6 +956,7 @@ clean_orphan: - tmp_ret = ocfs2_del_inode_from_orphan(osb, inode, di_bh, - update_isize, end); - if (tmp_ret < 0) { -+ ocfs2_inode_unlock(inode, 1); - ret = tmp_ret; - mlog_errno(ret); - brelse(di_bh); -diff --git a/include/asm-generic/cputime_nsecs.h b/include/asm-generic/cputime_nsecs.h -index 0419485..0f1c6f3 100644 ---- a/include/asm-generic/cputime_nsecs.h -+++ b/include/asm-generic/cputime_nsecs.h -@@ -75,7 +75,7 @@ typedef u64 __nocast cputime64_t; - */ - static inline cputime_t timespec_to_cputime(const struct timespec *val) - { -- u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_nsec; -+ u64 ret = (u64)val->tv_sec * NSEC_PER_SEC + val->tv_nsec; - return (__force cputime_t) ret; - } - static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val) -@@ -91,7 +91,8 @@ static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val) - */ - static inline cputime_t timeval_to_cputime(const struct timeval *val) - { -- u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_usec * NSEC_PER_USEC; -+ u64 ret = (u64)val->tv_sec * NSEC_PER_SEC + -+ val->tv_usec * NSEC_PER_USEC; - return (__force cputime_t) ret; - } - static inline void cputime_to_timeval(const cputime_t ct, struct timeval *val) -diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h -index 7bfb063..461a055 100644 ---- a/include/drm/drm_cache.h -+++ b/include/drm/drm_cache.h -@@ -35,4 +35,13 @@ - - void drm_clflush_pages(struct page *pages[], unsigned long num_pages); - -+static inline bool drm_arch_can_wc_memory(void) -+{ -+#if defined(CONFIG_PPC) && !defined(CONFIG_NOT_COHERENT_CACHE) -+ return false; -+#else -+ return true; -+#endif -+} -+ - #endif -diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h -index 5340099..f356f97 100644 ---- a/include/drm/drm_dp_mst_helper.h -+++ b/include/drm/drm_dp_mst_helper.h -@@ -44,8 +44,6 @@ struct drm_dp_vcpi { - /** - * struct drm_dp_mst_port - MST port - * @kref: reference count for this port. -- * @guid_valid: for DP 1.2 devices if we have validated the GUID. -- * @guid: guid for DP 1.2 device on this port. - * @port_num: port number - * @input: if this port is an input port. - * @mcs: message capability status - DP 1.2 spec. -@@ -70,10 +68,6 @@ struct drm_dp_vcpi { - struct drm_dp_mst_port { - struct kref kref; - -- /* if dpcd 1.2 device is on this port - its GUID info */ -- bool guid_valid; -- u8 guid[16]; -- - u8 port_num; - bool input; - bool mcs; -@@ -109,10 +103,12 @@ struct drm_dp_mst_port { - * @tx_slots: transmission slots for this device. - * @last_seqno: last sequence number used to talk to this. - * @link_address_sent: if a link address message has been sent to this device yet. -+ * @guid: guid for DP 1.2 branch device. port under this branch can be -+ * identified by port #. 
- * - * This structure represents an MST branch device, there is one -- * primary branch device at the root, along with any others connected -- * to downstream ports -+ * primary branch device at the root, along with any other branches connected -+ * to downstream port of parent branches. - */ - struct drm_dp_mst_branch { - struct kref kref; -@@ -131,6 +127,9 @@ struct drm_dp_mst_branch { - struct drm_dp_sideband_msg_tx *tx_slots[2]; - int last_seqno; - bool link_address_sent; -+ -+ /* global unique identifier to identify branch devices */ -+ u8 guid[16]; - }; - - -@@ -405,11 +404,9 @@ struct drm_dp_payload { - * @conn_base_id: DRM connector ID this mgr is connected to. - * @down_rep_recv: msg receiver state for down replies. - * @up_req_recv: msg receiver state for up requests. -- * @lock: protects mst state, primary, guid, dpcd. -+ * @lock: protects mst state, primary, dpcd. - * @mst_state: if this manager is enabled for an MST capable port. - * @mst_primary: pointer to the primary branch device. -- * @guid_valid: GUID valid for the primary branch device. -- * @guid: GUID for primary port. - * @dpcd: cache of DPCD for primary port. - * @pbn_div: PBN to slots divisor. - * -@@ -431,13 +428,11 @@ struct drm_dp_mst_topology_mgr { - struct drm_dp_sideband_msg_rx up_req_recv; - - /* pointer to info about the initial MST device */ -- struct mutex lock; /* protects mst_state + primary + guid + dpcd */ -+ struct mutex lock; /* protects mst_state + primary + dpcd */ - - bool mst_state; - struct drm_dp_mst_branch *mst_primary; -- /* primary MST device GUID */ -- bool guid_valid; -- u8 guid[16]; -+ - u8 dpcd[DP_RECEIVER_CAP_SIZE]; - u8 sink_count; - int pbn_div; -@@ -450,9 +445,7 @@ struct drm_dp_mst_topology_mgr { - the mstb tx_slots and txmsg->state once they are queued */ - struct mutex qlock; - struct list_head tx_msg_downq; -- struct list_head tx_msg_upq; - bool tx_down_in_progress; -- bool tx_up_in_progress; - - /* payload info + lock for it */ - struct mutex payload_lock; -diff --git a/include/drm/drm_fixed.h b/include/drm/drm_fixed.h -index d639049..553210c 100644 ---- a/include/drm/drm_fixed.h -+++ b/include/drm/drm_fixed.h -@@ -73,18 +73,28 @@ static inline u32 dfixed_div(fixed20_12 A, fixed20_12 B) - #define DRM_FIXED_ONE (1ULL << DRM_FIXED_POINT) - #define DRM_FIXED_DECIMAL_MASK (DRM_FIXED_ONE - 1) - #define DRM_FIXED_DIGITS_MASK (~DRM_FIXED_DECIMAL_MASK) -+#define DRM_FIXED_EPSILON 1LL -+#define DRM_FIXED_ALMOST_ONE (DRM_FIXED_ONE - DRM_FIXED_EPSILON) - - static inline s64 drm_int2fixp(int a) - { - return ((s64)a) << DRM_FIXED_POINT; - } - --static inline int drm_fixp2int(int64_t a) -+static inline int drm_fixp2int(s64 a) - { - return ((s64)a) >> DRM_FIXED_POINT; - } - --static inline unsigned drm_fixp_msbset(int64_t a) -+static inline int drm_fixp2int_ceil(s64 a) -+{ -+ if (a > 0) -+ return drm_fixp2int(a + DRM_FIXED_ALMOST_ONE); -+ else -+ return drm_fixp2int(a - DRM_FIXED_ALMOST_ONE); -+} -+ -+static inline unsigned drm_fixp_msbset(s64 a) - { - unsigned shift, sign = (a >> 63) & 1; - -@@ -136,6 +146,45 @@ static inline s64 drm_fixp_div(s64 a, s64 b) - return result; - } - -+static inline s64 drm_fixp_from_fraction(s64 a, s64 b) -+{ -+ s64 res; -+ bool a_neg = a < 0; -+ bool b_neg = b < 0; -+ u64 a_abs = a_neg ? -a : a; -+ u64 b_abs = b_neg ? 
-b : b; -+ u64 rem; -+ -+ /* determine integer part */ -+ u64 res_abs = div64_u64_rem(a_abs, b_abs, &rem); -+ -+ /* determine fractional part */ -+ { -+ u32 i = DRM_FIXED_POINT; -+ -+ do { -+ rem <<= 1; -+ res_abs <<= 1; -+ if (rem >= b_abs) { -+ res_abs |= 1; -+ rem -= b_abs; -+ } -+ } while (--i != 0); -+ } -+ -+ /* round up LSB */ -+ { -+ u64 summand = (rem << 1) >= b_abs; -+ -+ res_abs += summand; -+ } -+ -+ res = (s64) res_abs; -+ if (a_neg ^ b_neg) -+ res = -res; -+ return res; -+} -+ - static inline s64 drm_fixp_exp(s64 x) - { - s64 tolerance = div64_s64(DRM_FIXED_ONE, 1000000); -diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h -index 71b1d6c..8dbd787 100644 ---- a/include/linux/ceph/messenger.h -+++ b/include/linux/ceph/messenger.h -@@ -220,6 +220,7 @@ struct ceph_connection { - struct ceph_entity_addr actual_peer_addr; - - /* message out temps */ -+ struct ceph_msg_header out_hdr; - struct ceph_msg *out_msg; /* sending message (== tail of - out_sent) */ - bool out_msg_done; -@@ -229,7 +230,6 @@ struct ceph_connection { - int out_kvec_left; /* kvec's left in out_kvec */ - int out_skip; /* skip this many bytes */ - int out_kvec_bytes; /* total bytes left */ -- bool out_kvec_is_msg; /* kvec refers to out_msg */ - int out_more; /* there is more data after the kvecs */ - __le64 out_temp_ack; /* for writing an ack */ - struct ceph_timespec out_temp_keepalive2; /* for writing keepalive2 -diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h -index 06b77f9d..8e30fae 100644 ---- a/include/linux/cgroup-defs.h -+++ b/include/linux/cgroup-defs.h -@@ -133,6 +133,12 @@ struct cgroup_subsys_state { - */ - u64 serial_nr; - -+ /* -+ * Incremented by online self and children. Used to guarantee that -+ * parents are not offlined before their children. 
-+ */ -+ atomic_t online_cnt; -+ - /* percpu_ref killing and RCU release */ - struct rcu_head rcu_head; - struct work_struct destroy_work; -diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h -index 85a868c..fea160e 100644 ---- a/include/linux/cpuset.h -+++ b/include/linux/cpuset.h -@@ -137,6 +137,8 @@ static inline void set_mems_allowed(nodemask_t nodemask) - task_unlock(current); - } - -+extern void cpuset_post_attach_flush(void); -+ - #else /* !CONFIG_CPUSETS */ - - static inline bool cpusets_enabled(void) { return false; } -@@ -243,6 +245,10 @@ static inline bool read_mems_allowed_retry(unsigned int seq) - return false; - } - -+static inline void cpuset_post_attach_flush(void) -+{ -+} -+ - #endif /* !CONFIG_CPUSETS */ - - #endif /* _LINUX_CPUSET_H */ -diff --git a/include/linux/efi.h b/include/linux/efi.h -index 569b5a8..47be3ad 100644 ---- a/include/linux/efi.h -+++ b/include/linux/efi.h -@@ -1199,7 +1199,10 @@ int efivar_entry_iter(int (*func)(struct efivar_entry *, void *), - struct efivar_entry *efivar_entry_find(efi_char16_t *name, efi_guid_t guid, - struct list_head *head, bool remove); - --bool efivar_validate(efi_char16_t *var_name, u8 *data, unsigned long len); -+bool efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data, -+ unsigned long data_size); -+bool efivar_variable_is_removable(efi_guid_t vendor, const char *name, -+ size_t len); - - extern struct work_struct efivar_work; - void efivar_run_worker(void); -diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h -index 8fdc17b..ae6a711 100644 ---- a/include/linux/hyperv.h -+++ b/include/linux/hyperv.h -@@ -630,6 +630,11 @@ struct hv_input_signal_event_buffer { - struct hv_input_signal_event event; - }; - -+enum hv_signal_policy { -+ HV_SIGNAL_POLICY_DEFAULT = 0, -+ HV_SIGNAL_POLICY_EXPLICIT, -+}; -+ - struct vmbus_channel { - /* Unique channel id */ - int id; -@@ -757,8 +762,21 @@ struct vmbus_channel { - * link up channels based on their CPU affinity. - */ - struct list_head percpu_list; -+ /* -+ * Host signaling policy: The default policy will be -+ * based on the ring buffer state. We will also support -+ * a policy where the client driver can have explicit -+ * signaling control. 
-+ */ -+ enum hv_signal_policy signal_policy; - }; - -+static inline void set_channel_signal_state(struct vmbus_channel *c, -+ enum hv_signal_policy policy) -+{ -+ c->signal_policy = policy; -+} -+ - static inline void set_channel_read_state(struct vmbus_channel *c, bool state) - { - c->batched_reading = state; -diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h -index c0e9614..5455b66 100644 ---- a/include/linux/nfs_fs.h -+++ b/include/linux/nfs_fs.h -@@ -544,9 +544,7 @@ extern int nfs_readpage_async(struct nfs_open_context *, struct inode *, - - static inline loff_t nfs_size_to_loff_t(__u64 size) - { -- if (size > (__u64) OFFSET_MAX - 1) -- return OFFSET_MAX - 1; -- return (loff_t) size; -+ return min_t(u64, size, OFFSET_MAX); - } - - static inline ino_t -diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h -index 50777b5..92d112a 100644 ---- a/include/linux/shmem_fs.h -+++ b/include/linux/shmem_fs.h -@@ -15,10 +15,7 @@ struct shmem_inode_info { - unsigned int seals; /* shmem seals */ - unsigned long flags; - unsigned long alloced; /* data pages alloced to file */ -- union { -- unsigned long swapped; /* subtotal assigned to swap */ -- char *symlink; /* unswappable short symlink */ -- }; -+ unsigned long swapped; /* subtotal assigned to swap */ - struct shared_policy policy; /* NUMA memory alloc policy */ - struct list_head swaplist; /* chain of maybes on swap */ - struct simple_xattrs xattrs; /* list of xattrs */ -diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h -index 9147f9f..75f136a 100644 ---- a/include/linux/skbuff.h -+++ b/include/linux/skbuff.h -@@ -219,6 +219,7 @@ struct sk_buff; - #else - #define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1) - #endif -+extern int sysctl_max_skb_frags; - - typedef struct skb_frag_struct skb_frag_t; - -diff --git a/include/linux/thermal.h b/include/linux/thermal.h -index 613c29b..e13a1ac 100644 ---- a/include/linux/thermal.h -+++ b/include/linux/thermal.h -@@ -43,6 +43,9 @@ - /* Default weight of a bound cooling device */ - #define THERMAL_WEIGHT_DEFAULT 0 - -+/* use value, which < 0K, to indicate an invalid/uninitialized temperature */ -+#define THERMAL_TEMP_INVALID -274000 -+ - /* Unit conversion macros */ - #define DECI_KELVIN_TO_CELSIUS(t) ({ \ - long _t = (t); \ -@@ -167,6 +170,7 @@ struct thermal_attr { - * @forced_passive: If > 0, temperature at which to switch on all ACPI - * processor cooling devices. Currently only used by the - * step-wise governor. -+ * @need_update: if equals 1, thermal_zone_device_update needs to be invoked. 
- * @ops: operations this &thermal_zone_device supports - * @tzp: thermal zone parameters - * @governor: pointer to the governor for this thermal zone -@@ -194,6 +198,7 @@ struct thermal_zone_device { - int emul_temperature; - int passive; - unsigned int forced_passive; -+ atomic_t need_update; - struct thermal_zone_device_ops *ops; - struct thermal_zone_params *tzp; - struct thermal_governor *governor; -diff --git a/include/linux/ucs2_string.h b/include/linux/ucs2_string.h -index cbb20af..bb679b4 100644 ---- a/include/linux/ucs2_string.h -+++ b/include/linux/ucs2_string.h -@@ -11,4 +11,8 @@ unsigned long ucs2_strlen(const ucs2_char_t *s); - unsigned long ucs2_strsize(const ucs2_char_t *data, unsigned long maxlength); - int ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len); - -+unsigned long ucs2_utf8size(const ucs2_char_t *src); -+unsigned long ucs2_as_utf8(u8 *dest, const ucs2_char_t *src, -+ unsigned long maxlength); -+ - #endif /* _LINUX_UCS2_STRING_H_ */ -diff --git a/include/net/af_unix.h b/include/net/af_unix.h -index 2a91a05..9b4c418 100644 ---- a/include/net/af_unix.h -+++ b/include/net/af_unix.h -@@ -6,8 +6,8 @@ - #include <linux/mutex.h> - #include <net/sock.h> - --void unix_inflight(struct file *fp); --void unix_notinflight(struct file *fp); -+void unix_inflight(struct user_struct *user, struct file *fp); -+void unix_notinflight(struct user_struct *user, struct file *fp); - void unix_gc(void); - void wait_for_unix_gc(void); - struct sock *unix_get_socket(struct file *filp); -diff --git a/include/net/dst_metadata.h b/include/net/dst_metadata.h -index 6816f0f..30a56ab 100644 ---- a/include/net/dst_metadata.h -+++ b/include/net/dst_metadata.h -@@ -44,6 +44,24 @@ static inline bool skb_valid_dst(const struct sk_buff *skb) - return dst && !(dst->flags & DST_METADATA); - } - -+static inline int skb_metadata_dst_cmp(const struct sk_buff *skb_a, -+ const struct sk_buff *skb_b) -+{ -+ const struct metadata_dst *a, *b; -+ -+ if (!(skb_a->_skb_refdst | skb_b->_skb_refdst)) -+ return 0; -+ -+ a = (const struct metadata_dst *) skb_dst(skb_a); -+ b = (const struct metadata_dst *) skb_dst(skb_b); -+ -+ if (!a != !b || a->u.tun_info.options_len != b->u.tun_info.options_len) -+ return 1; -+ -+ return memcmp(&a->u.tun_info, &b->u.tun_info, -+ sizeof(a->u.tun_info) + a->u.tun_info.options_len); -+} -+ - struct metadata_dst *metadata_dst_alloc(u8 optslen, gfp_t flags); - struct metadata_dst __percpu *metadata_dst_alloc_percpu(u8 optslen, gfp_t flags); - -diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h -index 481fe1c..49dcad4 100644 ---- a/include/net/inet_connection_sock.h -+++ b/include/net/inet_connection_sock.h -@@ -270,8 +270,9 @@ struct dst_entry *inet_csk_route_child_sock(const struct sock *sk, - struct sock *newsk, - const struct request_sock *req); - --void inet_csk_reqsk_queue_add(struct sock *sk, struct request_sock *req, -- struct sock *child); -+struct sock *inet_csk_reqsk_queue_add(struct sock *sk, -+ struct request_sock *req, -+ struct sock *child); - void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req, - unsigned long timeout); - struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child, -diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h -index 877f682..295d291 100644 ---- a/include/net/ip6_route.h -+++ b/include/net/ip6_route.h -@@ -64,8 +64,16 @@ static inline bool rt6_need_strict(const struct in6_addr *daddr) - - void ip6_route_input(struct sk_buff *skb); - --struct 
dst_entry *ip6_route_output(struct net *net, const struct sock *sk, -- struct flowi6 *fl6); -+struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk, -+ struct flowi6 *fl6, int flags); -+ -+static inline struct dst_entry *ip6_route_output(struct net *net, -+ const struct sock *sk, -+ struct flowi6 *fl6) -+{ -+ return ip6_route_output_flags(net, sk, fl6, 0); -+} -+ - struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6, - int flags); - -diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h -index 9f4df68..3f98233 100644 ---- a/include/net/ip_fib.h -+++ b/include/net/ip_fib.h -@@ -61,6 +61,7 @@ struct fib_nh_exception { - struct rtable __rcu *fnhe_rth_input; - struct rtable __rcu *fnhe_rth_output; - unsigned long fnhe_stamp; -+ struct rcu_head rcu; - }; - - struct fnhe_hash_bucket { -diff --git a/include/net/scm.h b/include/net/scm.h -index 262532d..59fa93c 100644 ---- a/include/net/scm.h -+++ b/include/net/scm.h -@@ -21,6 +21,7 @@ struct scm_creds { - struct scm_fp_list { - short count; - short max; -+ struct user_struct *user; - struct file *fp[SCM_MAX_FD]; - }; - -diff --git a/include/net/tcp.h b/include/net/tcp.h -index f80e74c..414d822 100644 ---- a/include/net/tcp.h -+++ b/include/net/tcp.h -@@ -449,7 +449,7 @@ const u8 *tcp_parse_md5sig_option(const struct tcphdr *th); - - void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb); - void tcp_v4_mtu_reduced(struct sock *sk); --void tcp_req_err(struct sock *sk, u32 seq); -+void tcp_req_err(struct sock *sk, u32 seq, bool abort); - int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb); - struct sock *tcp_create_openreq_child(const struct sock *sk, - struct request_sock *req, -diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h -index aabf0ac..689f4d2 100644 ---- a/include/target/target_core_base.h -+++ b/include/target/target_core_base.h -@@ -138,6 +138,7 @@ enum se_cmd_flags_table { - SCF_COMPARE_AND_WRITE = 0x00080000, - SCF_COMPARE_AND_WRITE_POST = 0x00100000, - SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC = 0x00200000, -+ SCF_ACK_KREF = 0x00400000, - }; - - /* struct se_dev_entry->lun_flags and struct se_lun->lun_access */ -@@ -490,6 +491,8 @@ struct se_cmd { - #define CMD_T_DEV_ACTIVE (1 << 7) - #define CMD_T_REQUEST_STOP (1 << 8) - #define CMD_T_BUSY (1 << 9) -+#define CMD_T_TAS (1 << 10) -+#define CMD_T_FABRIC_STOP (1 << 11) - spinlock_t t_state_lock; - struct kref cmd_kref; - struct completion t_transport_stop_comp; -diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild -index c2e5d6c..ebd10e6 100644 ---- a/include/uapi/linux/Kbuild -+++ b/include/uapi/linux/Kbuild -@@ -307,7 +307,7 @@ header-y += nfs_mount.h - header-y += nl80211.h - header-y += n_r3964.h - header-y += nubus.h --header-y += nvme.h -+header-y += nvme_ioctl.h - header-y += nvram.h - header-y += omap3isp.h - header-y += omapfb.h -diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c -index d1d3e8f..2e7f7ab 100644 ---- a/kernel/bpf/verifier.c -+++ b/kernel/bpf/verifier.c -@@ -2082,7 +2082,7 @@ static void adjust_branches(struct bpf_prog *prog, int pos, int delta) - /* adjust offset of jmps if necessary */ - if (i < pos && i + insn->off + 1 > pos) - insn->off += delta; -- else if (i > pos && i + insn->off + 1 < pos) -+ else if (i > pos + delta && i + insn->off + 1 <= pos + delta) - insn->off -= delta; - } - } -diff --git a/kernel/cgroup.c b/kernel/cgroup.c -index 470f653..fb1ecfd 100644 ---- a/kernel/cgroup.c -+++ b/kernel/cgroup.c -@@ -57,7 +57,7 @@ - #include 
<linux/vmalloc.h> /* TODO: replace with more sophisticated array */ - #include <linux/kthread.h> - #include <linux/delay.h> -- -+#include <linux/cpuset.h> - #include <linux/atomic.h> - - /* -@@ -2764,6 +2764,7 @@ out_unlock_rcu: - out_unlock_threadgroup: - percpu_up_write(&cgroup_threadgroup_rwsem); - cgroup_kn_unlock(of->kn); -+ cpuset_post_attach_flush(); - return ret ?: nbytes; - } - -@@ -4783,6 +4784,7 @@ static void init_and_link_css(struct cgroup_subsys_state *css, - INIT_LIST_HEAD(&css->sibling); - INIT_LIST_HEAD(&css->children); - css->serial_nr = css_serial_nr_next++; -+ atomic_set(&css->online_cnt, 0); - - if (cgroup_parent(cgrp)) { - css->parent = cgroup_css(cgroup_parent(cgrp), ss); -@@ -4805,6 +4807,10 @@ static int online_css(struct cgroup_subsys_state *css) - if (!ret) { - css->flags |= CSS_ONLINE; - rcu_assign_pointer(css->cgroup->subsys[ss->id], css); -+ -+ atomic_inc(&css->online_cnt); -+ if (css->parent) -+ atomic_inc(&css->parent->online_cnt); - } - return ret; - } -@@ -5036,10 +5042,15 @@ static void css_killed_work_fn(struct work_struct *work) - container_of(work, struct cgroup_subsys_state, destroy_work); - - mutex_lock(&cgroup_mutex); -- offline_css(css); -- mutex_unlock(&cgroup_mutex); - -- css_put(css); -+ do { -+ offline_css(css); -+ css_put(css); -+ /* @css can't go away while we're holding cgroup_mutex */ -+ css = css->parent; -+ } while (css && atomic_dec_and_test(&css->online_cnt)); -+ -+ mutex_unlock(&cgroup_mutex); - } - - /* css kill confirmation processing requires process context, bounce */ -@@ -5048,8 +5059,10 @@ static void css_killed_ref_fn(struct percpu_ref *ref) - struct cgroup_subsys_state *css = - container_of(ref, struct cgroup_subsys_state, refcnt); - -- INIT_WORK(&css->destroy_work, css_killed_work_fn); -- queue_work(cgroup_destroy_wq, &css->destroy_work); -+ if (atomic_dec_and_test(&css->online_cnt)) { -+ INIT_WORK(&css->destroy_work, css_killed_work_fn); -+ queue_work(cgroup_destroy_wq, &css->destroy_work); -+ } - } - - /** -diff --git a/kernel/cpuset.c b/kernel/cpuset.c -index 02a8ea5..2ade632 100644 ---- a/kernel/cpuset.c -+++ b/kernel/cpuset.c -@@ -286,6 +286,8 @@ static struct cpuset top_cpuset = { - static DEFINE_MUTEX(cpuset_mutex); - static DEFINE_SPINLOCK(callback_lock); - -+static struct workqueue_struct *cpuset_migrate_mm_wq; -+ - /* - * CPU / memory hotplug is handled asynchronously. - */ -@@ -971,31 +973,51 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs, - } - - /* -- * cpuset_migrate_mm -- * -- * Migrate memory region from one set of nodes to another. -- * -- * Temporarilly set tasks mems_allowed to target nodes of migration, -- * so that the migration code can allocate pages on these nodes. -- * -- * While the mm_struct we are migrating is typically from some -- * other task, the task_struct mems_allowed that we are hacking -- * is for our current task, which must allocate new pages for that -- * migrating memory region. -+ * Migrate memory region from one set of nodes to another. This is -+ * performed asynchronously as it can be called from process migration path -+ * holding locks involved in process management. All mm migrations are -+ * performed in the queued order and can be waited for by flushing -+ * cpuset_migrate_mm_wq. 
- */ - -+struct cpuset_migrate_mm_work { -+ struct work_struct work; -+ struct mm_struct *mm; -+ nodemask_t from; -+ nodemask_t to; -+}; -+ -+static void cpuset_migrate_mm_workfn(struct work_struct *work) -+{ -+ struct cpuset_migrate_mm_work *mwork = -+ container_of(work, struct cpuset_migrate_mm_work, work); -+ -+ /* on a wq worker, no need to worry about %current's mems_allowed */ -+ do_migrate_pages(mwork->mm, &mwork->from, &mwork->to, MPOL_MF_MOVE_ALL); -+ mmput(mwork->mm); -+ kfree(mwork); -+} -+ - static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from, - const nodemask_t *to) - { -- struct task_struct *tsk = current; -- -- tsk->mems_allowed = *to; -+ struct cpuset_migrate_mm_work *mwork; - -- do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL); -+ mwork = kzalloc(sizeof(*mwork), GFP_KERNEL); -+ if (mwork) { -+ mwork->mm = mm; -+ mwork->from = *from; -+ mwork->to = *to; -+ INIT_WORK(&mwork->work, cpuset_migrate_mm_workfn); -+ queue_work(cpuset_migrate_mm_wq, &mwork->work); -+ } else { -+ mmput(mm); -+ } -+} - -- rcu_read_lock(); -- guarantee_online_mems(task_cs(tsk), &tsk->mems_allowed); -- rcu_read_unlock(); -+void cpuset_post_attach_flush(void) -+{ -+ flush_workqueue(cpuset_migrate_mm_wq); - } - - /* -@@ -1096,7 +1118,8 @@ static void update_tasks_nodemask(struct cpuset *cs) - mpol_rebind_mm(mm, &cs->mems_allowed); - if (migrate) - cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems); -- mmput(mm); -+ else -+ mmput(mm); - } - css_task_iter_end(&it); - -@@ -1541,11 +1564,11 @@ static void cpuset_attach(struct cgroup_taskset *tset) - * @old_mems_allowed is the right nodesets that we - * migrate mm from. - */ -- if (is_memory_migrate(cs)) { -+ if (is_memory_migrate(cs)) - cpuset_migrate_mm(mm, &oldcs->old_mems_allowed, - &cpuset_attach_nodemask_to); -- } -- mmput(mm); -+ else -+ mmput(mm); - } - } - -@@ -1710,6 +1733,7 @@ out_unlock: - mutex_unlock(&cpuset_mutex); - kernfs_unbreak_active_protection(of->kn); - css_put(&cs->css); -+ flush_workqueue(cpuset_migrate_mm_wq); - return retval ?: nbytes; - } - -@@ -2355,6 +2379,9 @@ void __init cpuset_init_smp(void) - top_cpuset.effective_mems = node_states[N_MEMORY]; - - register_hotmemory_notifier(&cpuset_track_online_nodes_nb); -+ -+ cpuset_migrate_mm_wq = alloc_ordered_workqueue("cpuset_migrate_mm", 0); -+ BUG_ON(!cpuset_migrate_mm_wq); - } - - /** -diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c -index a302cf9..57bff78 100644 ---- a/kernel/irq/handle.c -+++ b/kernel/irq/handle.c -@@ -138,7 +138,8 @@ irqreturn_t handle_irq_event_percpu(struct irq_desc *desc) - unsigned int flags = 0, irq = desc->irq_data.irq; - struct irqaction *action = desc->action; - -- do { -+ /* action might have become NULL since we dropped the lock */ -+ while (action) { - irqreturn_t res; - - trace_irq_handler_entry(irq, action); -@@ -173,7 +174,7 @@ irqreturn_t handle_irq_event_percpu(struct irq_desc *desc) - - retval |= res; - action = action->next; -- } while (action); -+ } - - add_interrupt_randomness(irq, flags); - -diff --git a/kernel/memremap.c b/kernel/memremap.c -index 7a4e473..25ced16 100644 ---- a/kernel/memremap.c -+++ b/kernel/memremap.c -@@ -133,8 +133,10 @@ void *devm_memremap(struct device *dev, resource_size_t offset, - if (addr) { - *ptr = addr; - devres_add(dev, ptr); -- } else -+ } else { - devres_free(ptr); -+ return ERR_PTR(-ENXIO); -+ } - - return addr; - } -diff --git a/kernel/resource.c b/kernel/resource.c -index f150dbb..249b1eb 100644 ---- a/kernel/resource.c -+++ b/kernel/resource.c -@@ -1083,9 +1083,10 @@ struct 
resource * __request_region(struct resource *parent, - if (!conflict) - break; - if (conflict != parent) { -- parent = conflict; -- if (!(conflict->flags & IORESOURCE_BUSY)) -+ if (!(conflict->flags & IORESOURCE_BUSY)) { -+ parent = conflict; - continue; -+ } - } - if (conflict->flags & flags & IORESOURCE_MUXED) { - add_wait_queue(&muxed_resource_wait, &wait); -diff --git a/kernel/seccomp.c b/kernel/seccomp.c -index 580ac2d..15a1795 100644 ---- a/kernel/seccomp.c -+++ b/kernel/seccomp.c -@@ -316,24 +316,24 @@ static inline void seccomp_sync_threads(void) - put_seccomp_filter(thread); - smp_store_release(&thread->seccomp.filter, - caller->seccomp.filter); -+ -+ /* -+ * Don't let an unprivileged task work around -+ * the no_new_privs restriction by creating -+ * a thread that sets it up, enters seccomp, -+ * then dies. -+ */ -+ if (task_no_new_privs(caller)) -+ task_set_no_new_privs(thread); -+ - /* - * Opt the other thread into seccomp if needed. - * As threads are considered to be trust-realm - * equivalent (see ptrace_may_access), it is safe to - * allow one thread to transition the other. - */ -- if (thread->seccomp.mode == SECCOMP_MODE_DISABLED) { -- /* -- * Don't let an unprivileged task work around -- * the no_new_privs restriction by creating -- * a thread that sets it up, enters seccomp, -- * then dies. -- */ -- if (task_no_new_privs(caller)) -- task_set_no_new_privs(thread); -- -+ if (thread->seccomp.mode == SECCOMP_MODE_DISABLED) - seccomp_assign_mode(thread, SECCOMP_MODE_FILTER); -- } - } - } - -diff --git a/kernel/time/posix-clock.c b/kernel/time/posix-clock.c -index ce033c7..9cff0ab 100644 ---- a/kernel/time/posix-clock.c -+++ b/kernel/time/posix-clock.c -@@ -69,10 +69,10 @@ static ssize_t posix_clock_read(struct file *fp, char __user *buf, - static unsigned int posix_clock_poll(struct file *fp, poll_table *wait) - { - struct posix_clock *clk = get_posix_clock(fp); -- int result = 0; -+ unsigned int result = 0; - - if (!clk) -- return -ENODEV; -+ return POLLERR; - - if (clk->ops.poll) - result = clk->ops.poll(clk, fp, wait); -diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c -index 7c7ec45..22c57e1 100644 ---- a/kernel/time/tick-sched.c -+++ b/kernel/time/tick-sched.c -@@ -977,9 +977,9 @@ static void tick_nohz_switch_to_nohz(void) - /* Get the next period */ - next = tick_init_jiffy_update(); - -- hrtimer_forward_now(&ts->sched_timer, tick_period); - hrtimer_set_expires(&ts->sched_timer, next); -- tick_program_event(next, 1); -+ hrtimer_forward_now(&ts->sched_timer, tick_period); -+ tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1); - tick_nohz_activate(ts, NOHZ_MODE_LOWRES); - } - -diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c -index d563c19..99188ee 100644 ---- a/kernel/time/timekeeping.c -+++ b/kernel/time/timekeeping.c -@@ -305,8 +305,7 @@ static inline s64 timekeeping_get_ns(struct tk_read_base *tkr) - - delta = timekeeping_get_delta(tkr); - -- nsec = delta * tkr->mult + tkr->xtime_nsec; -- nsec >>= tkr->shift; -+ nsec = (delta * tkr->mult + tkr->xtime_nsec) >> tkr->shift; - - /* If arch requires, add in get_arch_timeoffset() */ - return nsec + arch_gettimeoffset(); -diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c -index 4f6ef69..debf6e8 100644 ---- a/kernel/trace/trace_events.c -+++ b/kernel/trace/trace_events.c -@@ -869,7 +869,8 @@ t_next(struct seq_file *m, void *v, loff_t *pos) - * The ftrace subsystem is for showing formats only. - * They can not be enabled or disabled via the event files. 
- */ -- if (call->class && call->class->reg) -+ if (call->class && call->class->reg && -+ !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)) - return file; - } - -diff --git a/kernel/workqueue.c b/kernel/workqueue.c -index c579dba..450c21f 100644 ---- a/kernel/workqueue.c -+++ b/kernel/workqueue.c -@@ -568,6 +568,16 @@ static struct pool_workqueue *unbound_pwq_by_node(struct workqueue_struct *wq, - int node) - { - assert_rcu_or_wq_mutex_or_pool_mutex(wq); -+ -+ /* -+ * XXX: @node can be NUMA_NO_NODE if CPU goes offline while a -+ * delayed item is pending. The plan is to keep CPU -> NODE -+ * mapping valid and stable across CPU on/offlines. Once that -+ * happens, this workaround can be removed. -+ */ -+ if (unlikely(node == NUMA_NO_NODE)) -+ return wq->dfl_pwq; -+ - return rcu_dereference_raw(wq->numa_pwq_tbl[node]); - } - -@@ -1458,13 +1468,13 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq, - timer_stats_timer_set_start_info(&dwork->timer); - - dwork->wq = wq; -- /* timer isn't guaranteed to run in this cpu, record earlier */ -- if (cpu == WORK_CPU_UNBOUND) -- cpu = raw_smp_processor_id(); - dwork->cpu = cpu; - timer->expires = jiffies + delay; - -- add_timer_on(timer, cpu); -+ if (unlikely(cpu != WORK_CPU_UNBOUND)) -+ add_timer_on(timer, cpu); -+ else -+ add_timer(timer); - } - - /** -diff --git a/lib/Kconfig b/lib/Kconfig -index f0df318..1a48744 100644 ---- a/lib/Kconfig -+++ b/lib/Kconfig -@@ -210,9 +210,11 @@ config RANDOM32_SELFTEST - # compression support is select'ed if needed - # - config 842_COMPRESS -+ select CRC32 - tristate - - config 842_DECOMPRESS -+ select CRC32 - tristate - - config ZLIB_INFLATE -diff --git a/lib/ucs2_string.c b/lib/ucs2_string.c -index 6f500ef..f0b323a 100644 ---- a/lib/ucs2_string.c -+++ b/lib/ucs2_string.c -@@ -49,3 +49,65 @@ ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len) - } - } - EXPORT_SYMBOL(ucs2_strncmp); -+ -+unsigned long -+ucs2_utf8size(const ucs2_char_t *src) -+{ -+ unsigned long i; -+ unsigned long j = 0; -+ -+ for (i = 0; i < ucs2_strlen(src); i++) { -+ u16 c = src[i]; -+ -+ if (c >= 0x800) -+ j += 3; -+ else if (c >= 0x80) -+ j += 2; -+ else -+ j += 1; -+ } -+ -+ return j; -+} -+EXPORT_SYMBOL(ucs2_utf8size); -+ -+/* -+ * copy at most maxlength bytes of whole utf8 characters to dest from the -+ * ucs2 string src. -+ * -+ * The return value is the number of characters copied, not including the -+ * final NUL character. 
-+ */ -+unsigned long -+ucs2_as_utf8(u8 *dest, const ucs2_char_t *src, unsigned long maxlength) -+{ -+ unsigned int i; -+ unsigned long j = 0; -+ unsigned long limit = ucs2_strnlen(src, maxlength); -+ -+ for (i = 0; maxlength && i < limit; i++) { -+ u16 c = src[i]; -+ -+ if (c >= 0x800) { -+ if (maxlength < 3) -+ break; -+ maxlength -= 3; -+ dest[j++] = 0xe0 | (c & 0xf000) >> 12; -+ dest[j++] = 0x80 | (c & 0x0fc0) >> 6; -+ dest[j++] = 0x80 | (c & 0x003f); -+ } else if (c >= 0x80) { -+ if (maxlength < 2) -+ break; -+ maxlength -= 2; -+ dest[j++] = 0xc0 | (c & 0x7c0) >> 6; -+ dest[j++] = 0x80 | (c & 0x03f); -+ } else { -+ maxlength -= 1; -+ dest[j++] = c & 0x7f; -+ } -+ } -+ if (maxlength) -+ dest[j] = '\0'; -+ return j; -+} -+EXPORT_SYMBOL(ucs2_as_utf8); -diff --git a/mm/balloon_compaction.c b/mm/balloon_compaction.c -index d3116be..300117f 100644 ---- a/mm/balloon_compaction.c -+++ b/mm/balloon_compaction.c -@@ -61,6 +61,7 @@ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info) - bool dequeued_page; - - dequeued_page = false; -+ spin_lock_irqsave(&b_dev_info->pages_lock, flags); - list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) { - /* - * Block others from accessing the 'page' while we get around -@@ -75,15 +76,14 @@ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info) - continue; - } - #endif -- spin_lock_irqsave(&b_dev_info->pages_lock, flags); - balloon_page_delete(page); - __count_vm_event(BALLOON_DEFLATE); -- spin_unlock_irqrestore(&b_dev_info->pages_lock, flags); - unlock_page(page); - dequeued_page = true; - break; - } - } -+ spin_unlock_irqrestore(&b_dev_info->pages_lock, flags); - - if (!dequeued_page) { - /* -diff --git a/mm/memory.c b/mm/memory.c -index c387430..b80bf47 100644 ---- a/mm/memory.c -+++ b/mm/memory.c -@@ -3399,8 +3399,18 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, - if (unlikely(pmd_none(*pmd)) && - unlikely(__pte_alloc(mm, vma, pmd, address))) - return VM_FAULT_OOM; -- /* if an huge pmd materialized from under us just retry later */ -- if (unlikely(pmd_trans_huge(*pmd))) -+ /* -+ * If a huge pmd materialized under us just retry later. Use -+ * pmd_trans_unstable() instead of pmd_trans_huge() to ensure the pmd -+ * didn't become pmd_trans_huge under us and then back to pmd_none, as -+ * a result of MADV_DONTNEED running immediately after a huge pmd fault -+ * in a different thread of this mm, in turn leading to a misleading -+ * pmd_trans_huge() retval. All we have to ensure is that it is a -+ * regular pmd that we can walk with pte_offset_map() and we can do that -+ * through an atomic read in C, which is what pmd_trans_unstable() -+ * provides. 
-+ */ -+ if (unlikely(pmd_trans_unstable(pmd))) - return 0; - /* - * A regular pmd is established and it can't morph into a huge pmd -diff --git a/mm/migrate.c b/mm/migrate.c -index 7890d0b..6d17e0a 100644 ---- a/mm/migrate.c -+++ b/mm/migrate.c -@@ -1578,7 +1578,7 @@ static struct page *alloc_misplaced_dst_page(struct page *page, - (GFP_HIGHUSER_MOVABLE | - __GFP_THISNODE | __GFP_NOMEMALLOC | - __GFP_NORETRY | __GFP_NOWARN) & -- ~(__GFP_IO | __GFP_FS), 0); -+ ~__GFP_RECLAIM, 0); - - return newpage; - } -diff --git a/mm/shmem.c b/mm/shmem.c -index 2afcdbb..ea5a70c 100644 ---- a/mm/shmem.c -+++ b/mm/shmem.c -@@ -620,8 +620,7 @@ static void shmem_evict_inode(struct inode *inode) - list_del_init(&info->swaplist); - mutex_unlock(&shmem_swaplist_mutex); - } -- } else -- kfree(info->symlink); -+ } - - simple_xattrs_free(&info->xattrs); - WARN_ON(inode->i_blocks); -@@ -2462,13 +2461,12 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s - info = SHMEM_I(inode); - inode->i_size = len-1; - if (len <= SHORT_SYMLINK_LEN) { -- info->symlink = kmemdup(symname, len, GFP_KERNEL); -- if (!info->symlink) { -+ inode->i_link = kmemdup(symname, len, GFP_KERNEL); -+ if (!inode->i_link) { - iput(inode); - return -ENOMEM; - } - inode->i_op = &shmem_short_symlink_operations; -- inode->i_link = info->symlink; - } else { - error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL); - if (error) { -@@ -3083,6 +3081,7 @@ static struct inode *shmem_alloc_inode(struct super_block *sb) - static void shmem_destroy_callback(struct rcu_head *head) - { - struct inode *inode = container_of(head, struct inode, i_rcu); -+ kfree(inode->i_link); - kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode)); - } - -diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c -index 9e9cca3..795ddd8 100644 ---- a/net/bluetooth/6lowpan.c -+++ b/net/bluetooth/6lowpan.c -@@ -307,6 +307,9 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev, - - /* check that it's our buffer */ - if (lowpan_is_ipv6(*skb_network_header(skb))) { -+ /* Pull off the 1-byte of 6lowpan header. */ -+ skb_pull(skb, 1); -+ - /* Copy the packet so that the IPv6 header is - * properly aligned. - */ -@@ -317,6 +320,7 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev, - - local_skb->protocol = htons(ETH_P_IPV6); - local_skb->pkt_type = PACKET_HOST; -+ local_skb->dev = dev; - - skb_set_transport_header(local_skb, sizeof(struct ipv6hdr)); - -@@ -335,6 +339,8 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev, - if (!local_skb) - goto drop; - -+ local_skb->dev = dev; -+ - ret = iphc_decompress(local_skb, dev, chan); - if (ret < 0) { - kfree_skb(local_skb); -@@ -343,7 +349,6 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev, - - local_skb->protocol = htons(ETH_P_IPV6); - local_skb->pkt_type = PACKET_HOST; -- local_skb->dev = dev; - - if (give_skb_to_upper(local_skb, dev) - != NET_RX_SUCCESS) { -diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c -index 85b82f7..24e9410 100644 ---- a/net/bluetooth/hci_conn.c -+++ b/net/bluetooth/hci_conn.c -@@ -722,8 +722,12 @@ static void hci_req_add_le_create_conn(struct hci_request *req, - if (hci_update_random_address(req, false, &own_addr_type)) - return; - -+ /* Set window to be the same value as the interval to enable -+ * continuous scanning. 
-+ */ - cp.scan_interval = cpu_to_le16(hdev->le_scan_interval); -- cp.scan_window = cpu_to_le16(hdev->le_scan_window); -+ cp.scan_window = cp.scan_interval; -+ - bacpy(&cp.peer_addr, &conn->dst); - cp.peer_addr_type = conn->dst_type; - cp.own_address_type = own_addr_type; -diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c -index 981f8a2..02778c5 100644 ---- a/net/bluetooth/hci_request.c -+++ b/net/bluetooth/hci_request.c -@@ -175,21 +175,29 @@ static u8 update_white_list(struct hci_request *req) - * command to remove it from the controller. - */ - list_for_each_entry(b, &hdev->le_white_list, list) { -- struct hci_cp_le_del_from_white_list cp; -+ /* If the device is neither in pend_le_conns nor -+ * pend_le_reports then remove it from the whitelist. -+ */ -+ if (!hci_pend_le_action_lookup(&hdev->pend_le_conns, -+ &b->bdaddr, b->bdaddr_type) && -+ !hci_pend_le_action_lookup(&hdev->pend_le_reports, -+ &b->bdaddr, b->bdaddr_type)) { -+ struct hci_cp_le_del_from_white_list cp; -+ -+ cp.bdaddr_type = b->bdaddr_type; -+ bacpy(&cp.bdaddr, &b->bdaddr); - -- if (hci_pend_le_action_lookup(&hdev->pend_le_conns, -- &b->bdaddr, b->bdaddr_type) || -- hci_pend_le_action_lookup(&hdev->pend_le_reports, -- &b->bdaddr, b->bdaddr_type)) { -- white_list_entries++; -+ hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST, -+ sizeof(cp), &cp); - continue; - } - -- cp.bdaddr_type = b->bdaddr_type; -- bacpy(&cp.bdaddr, &b->bdaddr); -+ if (hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) { -+ /* White list can not be used with RPAs */ -+ return 0x00; -+ } - -- hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST, -- sizeof(cp), &cp); -+ white_list_entries++; - } - - /* Since all no longer valid white list entries have been -diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c -index ffed8a1..4b175df 100644 ---- a/net/bluetooth/smp.c -+++ b/net/bluetooth/smp.c -@@ -1072,22 +1072,6 @@ static void smp_notify_keys(struct l2cap_conn *conn) - hcon->dst_type = smp->remote_irk->addr_type; - queue_work(hdev->workqueue, &conn->id_addr_update_work); - } -- -- /* When receiving an indentity resolving key for -- * a remote device that does not use a resolvable -- * private address, just remove the key so that -- * it is possible to use the controller white -- * list for scanning. -- * -- * Userspace will have been told to not store -- * this key at this point. So it is safe to -- * just remove it. 
-- */ -- if (!bacmp(&smp->remote_irk->rpa, BDADDR_ANY)) { -- list_del_rcu(&smp->remote_irk->list); -- kfree_rcu(smp->remote_irk, rcu); -- smp->remote_irk = NULL; -- } - } - - if (smp->csrk) { -diff --git a/net/bridge/br.c b/net/bridge/br.c -index a1abe49..3addc05 100644 ---- a/net/bridge/br.c -+++ b/net/bridge/br.c -@@ -121,6 +121,7 @@ static struct notifier_block br_device_notifier = { - .notifier_call = br_device_event - }; - -+/* called with RTNL */ - static int br_switchdev_event(struct notifier_block *unused, - unsigned long event, void *ptr) - { -@@ -130,7 +131,6 @@ static int br_switchdev_event(struct notifier_block *unused, - struct switchdev_notifier_fdb_info *fdb_info; - int err = NOTIFY_DONE; - -- rtnl_lock(); - p = br_port_get_rtnl(dev); - if (!p) - goto out; -@@ -155,7 +155,6 @@ static int br_switchdev_event(struct notifier_block *unused, - } - - out: -- rtnl_unlock(); - return err; - } - -diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c -index 9981039..63ae5dd 100644 ---- a/net/ceph/messenger.c -+++ b/net/ceph/messenger.c -@@ -672,6 +672,8 @@ static void reset_connection(struct ceph_connection *con) - } - con->in_seq = 0; - con->in_seq_acked = 0; -+ -+ con->out_skip = 0; - } - - /* -@@ -771,6 +773,8 @@ static u32 get_global_seq(struct ceph_messenger *msgr, u32 gt) - - static void con_out_kvec_reset(struct ceph_connection *con) - { -+ BUG_ON(con->out_skip); -+ - con->out_kvec_left = 0; - con->out_kvec_bytes = 0; - con->out_kvec_cur = &con->out_kvec[0]; -@@ -779,9 +783,9 @@ static void con_out_kvec_reset(struct ceph_connection *con) - static void con_out_kvec_add(struct ceph_connection *con, - size_t size, void *data) - { -- int index; -+ int index = con->out_kvec_left; - -- index = con->out_kvec_left; -+ BUG_ON(con->out_skip); - BUG_ON(index >= ARRAY_SIZE(con->out_kvec)); - - con->out_kvec[index].iov_len = size; -@@ -790,6 +794,27 @@ static void con_out_kvec_add(struct ceph_connection *con, - con->out_kvec_bytes += size; - } - -+/* -+ * Chop off a kvec from the end. Return residual number of bytes for -+ * that kvec, i.e. how many bytes would have been written if the kvec -+ * hadn't been nuked. -+ */ -+static int con_out_kvec_skip(struct ceph_connection *con) -+{ -+ int off = con->out_kvec_cur - con->out_kvec; -+ int skip = 0; -+ -+ if (con->out_kvec_bytes > 0) { -+ skip = con->out_kvec[off + con->out_kvec_left - 1].iov_len; -+ BUG_ON(con->out_kvec_bytes < skip); -+ BUG_ON(!con->out_kvec_left); -+ con->out_kvec_bytes -= skip; -+ con->out_kvec_left--; -+ } -+ -+ return skip; -+} -+ - #ifdef CONFIG_BLOCK - - /* -@@ -1175,6 +1200,13 @@ static bool ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor, - return new_piece; - } - -+static size_t sizeof_footer(struct ceph_connection *con) -+{ -+ return (con->peer_features & CEPH_FEATURE_MSG_AUTH) ? 
-+ sizeof(struct ceph_msg_footer) : -+ sizeof(struct ceph_msg_footer_old); -+} -+ - static void prepare_message_data(struct ceph_msg *msg, u32 data_len) - { - BUG_ON(!msg); -@@ -1197,7 +1229,6 @@ static void prepare_write_message_footer(struct ceph_connection *con) - m->footer.flags |= CEPH_MSG_FOOTER_COMPLETE; - - dout("prepare_write_message_footer %p\n", con); -- con->out_kvec_is_msg = true; - con->out_kvec[v].iov_base = &m->footer; - if (con->peer_features & CEPH_FEATURE_MSG_AUTH) { - if (con->ops->sign_message) -@@ -1225,7 +1256,6 @@ static void prepare_write_message(struct ceph_connection *con) - u32 crc; - - con_out_kvec_reset(con); -- con->out_kvec_is_msg = true; - con->out_msg_done = false; - - /* Sneak an ack in there first? If we can get it into the same -@@ -1265,18 +1295,19 @@ static void prepare_write_message(struct ceph_connection *con) - - /* tag + hdr + front + middle */ - con_out_kvec_add(con, sizeof (tag_msg), &tag_msg); -- con_out_kvec_add(con, sizeof (m->hdr), &m->hdr); -+ con_out_kvec_add(con, sizeof(con->out_hdr), &con->out_hdr); - con_out_kvec_add(con, m->front.iov_len, m->front.iov_base); - - if (m->middle) - con_out_kvec_add(con, m->middle->vec.iov_len, - m->middle->vec.iov_base); - -- /* fill in crc (except data pages), footer */ -+ /* fill in hdr crc and finalize hdr */ - crc = crc32c(0, &m->hdr, offsetof(struct ceph_msg_header, crc)); - con->out_msg->hdr.crc = cpu_to_le32(crc); -- con->out_msg->footer.flags = 0; -+ memcpy(&con->out_hdr, &con->out_msg->hdr, sizeof(con->out_hdr)); - -+ /* fill in front and middle crc, footer */ - crc = crc32c(0, m->front.iov_base, m->front.iov_len); - con->out_msg->footer.front_crc = cpu_to_le32(crc); - if (m->middle) { -@@ -1288,6 +1319,7 @@ static void prepare_write_message(struct ceph_connection *con) - dout("%s front_crc %u middle_crc %u\n", __func__, - le32_to_cpu(con->out_msg->footer.front_crc), - le32_to_cpu(con->out_msg->footer.middle_crc)); -+ con->out_msg->footer.flags = 0; - - /* is there a data payload? */ - con->out_msg->footer.data_crc = 0; -@@ -1492,7 +1524,6 @@ static int write_partial_kvec(struct ceph_connection *con) - } - } - con->out_kvec_left = 0; -- con->out_kvec_is_msg = false; - ret = 1; - out: - dout("write_partial_kvec %p %d left in %d kvecs ret = %d\n", con, -@@ -1584,6 +1615,7 @@ static int write_partial_skip(struct ceph_connection *con) - { - int ret; - -+ dout("%s %p %d left\n", __func__, con, con->out_skip); - while (con->out_skip > 0) { - size_t size = min(con->out_skip, (int) PAGE_CACHE_SIZE); - -@@ -2313,9 +2345,9 @@ static int read_partial_message(struct ceph_connection *con) - ceph_pr_addr(&con->peer_addr.in_addr), - seq, con->in_seq + 1); - con->in_base_pos = -front_len - middle_len - data_len - -- sizeof(m->footer); -+ sizeof_footer(con); - con->in_tag = CEPH_MSGR_TAG_READY; -- return 0; -+ return 1; - } else if ((s64)seq - (s64)con->in_seq > 1) { - pr_err("read_partial_message bad seq %lld expected %lld\n", - seq, con->in_seq + 1); -@@ -2338,10 +2370,10 @@ static int read_partial_message(struct ceph_connection *con) - /* skip this message */ - dout("alloc_msg said skip message\n"); - con->in_base_pos = -front_len - middle_len - data_len - -- sizeof(m->footer); -+ sizeof_footer(con); - con->in_tag = CEPH_MSGR_TAG_READY; - con->in_seq++; -- return 0; -+ return 1; - } - - BUG_ON(!con->in_msg); -@@ -2506,13 +2538,13 @@ more: - - more_kvec: - /* kvec data queued? 
*/ -- if (con->out_skip) { -- ret = write_partial_skip(con); -+ if (con->out_kvec_left) { -+ ret = write_partial_kvec(con); - if (ret <= 0) - goto out; - } -- if (con->out_kvec_left) { -- ret = write_partial_kvec(con); -+ if (con->out_skip) { -+ ret = write_partial_skip(con); - if (ret <= 0) - goto out; - } -@@ -3050,16 +3082,31 @@ void ceph_msg_revoke(struct ceph_msg *msg) - ceph_msg_put(msg); - } - if (con->out_msg == msg) { -- dout("%s %p msg %p - was sending\n", __func__, con, msg); -- con->out_msg = NULL; -- if (con->out_kvec_is_msg) { -- con->out_skip = con->out_kvec_bytes; -- con->out_kvec_is_msg = false; -+ BUG_ON(con->out_skip); -+ /* footer */ -+ if (con->out_msg_done) { -+ con->out_skip += con_out_kvec_skip(con); -+ } else { -+ BUG_ON(!msg->data_length); -+ if (con->peer_features & CEPH_FEATURE_MSG_AUTH) -+ con->out_skip += sizeof(msg->footer); -+ else -+ con->out_skip += sizeof(msg->old_footer); - } -+ /* data, middle, front */ -+ if (msg->data_length) -+ con->out_skip += msg->cursor.total_resid; -+ if (msg->middle) -+ con->out_skip += con_out_kvec_skip(con); -+ con->out_skip += con_out_kvec_skip(con); -+ -+ dout("%s %p msg %p - was sending, will write %d skip %d\n", -+ __func__, con, msg, con->out_kvec_bytes, con->out_skip); - msg->hdr.seq = 0; -- -+ con->out_msg = NULL; - ceph_msg_put(msg); - } -+ - mutex_unlock(&con->mutex); - } - -diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c -index f8f2359..a28e47f 100644 ---- a/net/ceph/osd_client.c -+++ b/net/ceph/osd_client.c -@@ -2843,8 +2843,8 @@ static struct ceph_msg *get_reply(struct ceph_connection *con, - mutex_lock(&osdc->request_mutex); - req = __lookup_request(osdc, tid); - if (!req) { -- pr_warn("%s osd%d tid %llu unknown, skipping\n", -- __func__, osd->o_osd, tid); -+ dout("%s osd%d tid %llu unknown, skipping\n", __func__, -+ osd->o_osd, tid); - m = NULL; - *skip = 1; - goto out; -diff --git a/net/core/dev.c b/net/core/dev.c -index 7f00f24..9efbdb3 100644 ---- a/net/core/dev.c -+++ b/net/core/dev.c -@@ -4145,6 +4145,7 @@ static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb) - - diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev; - diffs |= p->vlan_tci ^ skb->vlan_tci; -+ diffs |= skb_metadata_dst_cmp(p, skb); - if (maclen == ETH_HLEN) - diffs |= compare_ether_header(skb_mac_header(p), - skb_mac_header(skb)); -@@ -4342,10 +4343,12 @@ static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb) - break; - - case GRO_MERGED_FREE: -- if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) -+ if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) { -+ skb_dst_drop(skb); - kmem_cache_free(skbuff_head_cache, skb); -- else -+ } else { - __kfree_skb(skb); -+ } - break; - - case GRO_HELD: -@@ -7125,8 +7128,10 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, - dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM; - setup(dev); - -- if (!dev->tx_queue_len) -+ if (!dev->tx_queue_len) { - dev->priv_flags |= IFF_NO_QUEUE; -+ dev->tx_queue_len = 1; -+ } - - dev->num_tx_queues = txqs; - dev->real_num_tx_queues = txqs; -diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c -index d79699c..12e7003 100644 ---- a/net/core/flow_dissector.c -+++ b/net/core/flow_dissector.c -@@ -208,7 +208,6 @@ ip: - case htons(ETH_P_IPV6): { - const struct ipv6hdr *iph; - struct ipv6hdr _iph; -- __be32 flow_label; - - ipv6: - iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph); -@@ -230,8 +229,12 @@ ipv6: - key_control->addr_type = 
FLOW_DISSECTOR_KEY_IPV6_ADDRS; - } - -- flow_label = ip6_flowlabel(iph); -- if (flow_label) { -+ if ((dissector_uses_key(flow_dissector, -+ FLOW_DISSECTOR_KEY_FLOW_LABEL) || -+ (flags & FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL)) && -+ ip6_flowlabel(iph)) { -+ __be32 flow_label = ip6_flowlabel(iph); -+ - if (dissector_uses_key(flow_dissector, - FLOW_DISSECTOR_KEY_FLOW_LABEL)) { - key_tags = skb_flow_dissector_target(flow_dissector, -@@ -396,6 +399,13 @@ ip_proto_again: - goto out_bad; - proto = eth->h_proto; - nhoff += sizeof(*eth); -+ -+ /* Cap headers that we access via pointers at the -+ * end of the Ethernet header as our maximum alignment -+ * at that point is only 2 bytes. -+ */ -+ if (NET_IP_ALIGN) -+ hlen = nhoff; - } - - key_control->flags |= FLOW_DIS_ENCAPSULATION; -diff --git a/net/core/scm.c b/net/core/scm.c -index 8a1741b..dce0acb 100644 ---- a/net/core/scm.c -+++ b/net/core/scm.c -@@ -87,6 +87,7 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp) - *fplp = fpl; - fpl->count = 0; - fpl->max = SCM_MAX_FD; -+ fpl->user = NULL; - } - fpp = &fpl->fp[fpl->count]; - -@@ -107,6 +108,10 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp) - *fpp++ = file; - fpl->count++; - } -+ -+ if (!fpl->user) -+ fpl->user = get_uid(current_user()); -+ - return num; - } - -@@ -119,6 +124,7 @@ void __scm_destroy(struct scm_cookie *scm) - scm->fp = NULL; - for (i=fpl->count-1; i>=0; i--) - fput(fpl->fp[i]); -+ free_uid(fpl->user); - kfree(fpl); - } - } -@@ -336,6 +342,7 @@ struct scm_fp_list *scm_fp_dup(struct scm_fp_list *fpl) - for (i = 0; i < fpl->count; i++) - get_file(fpl->fp[i]); - new_fpl->max = new_fpl->count; -+ new_fpl->user = get_uid(fpl->user); - } - return new_fpl; - } -diff --git a/net/core/skbuff.c b/net/core/skbuff.c -index b2df375..5bf88f5 100644 ---- a/net/core/skbuff.c -+++ b/net/core/skbuff.c -@@ -79,6 +79,8 @@ - - struct kmem_cache *skbuff_head_cache __read_mostly; - static struct kmem_cache *skbuff_fclone_cache __read_mostly; -+int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS; -+EXPORT_SYMBOL(sysctl_max_skb_frags); - - /** - * skb_panic - private function for out-of-line support -diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c -index 95b6139..a6beb7b 100644 ---- a/net/core/sysctl_net_core.c -+++ b/net/core/sysctl_net_core.c -@@ -26,6 +26,7 @@ static int zero = 0; - static int one = 1; - static int min_sndbuf = SOCK_MIN_SNDBUF; - static int min_rcvbuf = SOCK_MIN_RCVBUF; -+static int max_skb_frags = MAX_SKB_FRAGS; - - static int net_msg_warn; /* Unused, but still a sysctl */ - -@@ -392,6 +393,15 @@ static struct ctl_table net_core_table[] = { - .mode = 0644, - .proc_handler = proc_dointvec - }, -+ { -+ .procname = "max_skb_frags", -+ .data = &sysctl_max_skb_frags, -+ .maxlen = sizeof(int), -+ .mode = 0644, -+ .proc_handler = proc_dointvec_minmax, -+ .extra1 = &one, -+ .extra2 = &max_skb_frags, -+ }, - { } - }; - -diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c -index 5684e14..902d606 100644 ---- a/net/dccp/ipv4.c -+++ b/net/dccp/ipv4.c -@@ -824,26 +824,26 @@ lookup: - - if (sk->sk_state == DCCP_NEW_SYN_RECV) { - struct request_sock *req = inet_reqsk(sk); -- struct sock *nsk = NULL; -+ struct sock *nsk; - - sk = req->rsk_listener; -- if (likely(sk->sk_state == DCCP_LISTEN)) { -- nsk = dccp_check_req(sk, skb, req); -- } else { -+ if (unlikely(sk->sk_state != DCCP_LISTEN)) { - inet_csk_reqsk_queue_drop_and_put(sk, req); - goto lookup; - } -+ sock_hold(sk); -+ nsk = dccp_check_req(sk, skb, req); - if (!nsk) { - 
reqsk_put(req); -- goto discard_it; -+ goto discard_and_relse; - } - if (nsk == sk) { -- sock_hold(sk); - reqsk_put(req); - } else if (dccp_child_process(sk, nsk, skb)) { - dccp_v4_ctl_send_reset(sk, skb); -- goto discard_it; -+ goto discard_and_relse; - } else { -+ sock_put(sk); - return 0; - } - } -diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c -index 9c6d050..b8608b7 100644 ---- a/net/dccp/ipv6.c -+++ b/net/dccp/ipv6.c -@@ -691,26 +691,26 @@ lookup: - - if (sk->sk_state == DCCP_NEW_SYN_RECV) { - struct request_sock *req = inet_reqsk(sk); -- struct sock *nsk = NULL; -+ struct sock *nsk; - - sk = req->rsk_listener; -- if (likely(sk->sk_state == DCCP_LISTEN)) { -- nsk = dccp_check_req(sk, skb, req); -- } else { -+ if (unlikely(sk->sk_state != DCCP_LISTEN)) { - inet_csk_reqsk_queue_drop_and_put(sk, req); - goto lookup; - } -+ sock_hold(sk); -+ nsk = dccp_check_req(sk, skb, req); - if (!nsk) { - reqsk_put(req); -- goto discard_it; -+ goto discard_and_relse; - } - if (nsk == sk) { -- sock_hold(sk); - reqsk_put(req); - } else if (dccp_child_process(sk, nsk, skb)) { - dccp_v6_ctl_send_reset(sk, skb); -- goto discard_it; -+ goto discard_and_relse; - } else { -+ sock_put(sk); - return 0; - } - } -diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c -index cebd9d3..f6303b1 100644 ---- a/net/ipv4/devinet.c -+++ b/net/ipv4/devinet.c -@@ -1847,7 +1847,7 @@ static int inet_netconf_get_devconf(struct sk_buff *in_skb, - if (err < 0) - goto errout; - -- err = EINVAL; -+ err = -EINVAL; - if (!tb[NETCONFA_IFINDEX]) - goto errout; - -diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c -index 46b9c88..6414891 100644 ---- a/net/ipv4/inet_connection_sock.c -+++ b/net/ipv4/inet_connection_sock.c -@@ -789,14 +789,16 @@ static void inet_child_forget(struct sock *sk, struct request_sock *req, - reqsk_put(req); - } - --void inet_csk_reqsk_queue_add(struct sock *sk, struct request_sock *req, -- struct sock *child) -+struct sock *inet_csk_reqsk_queue_add(struct sock *sk, -+ struct request_sock *req, -+ struct sock *child) - { - struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue; - - spin_lock(&queue->rskq_lock); - if (unlikely(sk->sk_state != TCP_LISTEN)) { - inet_child_forget(sk, req, child); -+ child = NULL; - } else { - req->sk = child; - req->dl_next = NULL; -@@ -808,6 +810,7 @@ void inet_csk_reqsk_queue_add(struct sock *sk, struct request_sock *req, - sk_acceptq_added(sk); - } - spin_unlock(&queue->rskq_lock); -+ return child; - } - EXPORT_SYMBOL(inet_csk_reqsk_queue_add); - -@@ -817,11 +820,8 @@ struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child, - if (own_req) { - inet_csk_reqsk_queue_drop(sk, req); - reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req); -- inet_csk_reqsk_queue_add(sk, req, child); -- /* Warning: caller must not call reqsk_put(req); -- * child stole last reference on it. -- */ -- return child; -+ if (inet_csk_reqsk_queue_add(sk, req, child)) -+ return child; - } - /* Too bad, another child took ownership of the request, undo. 
*/ - bh_unlock_sock(child); -diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c -index 1fe55ae..b8a0607d 100644 ---- a/net/ipv4/ip_fragment.c -+++ b/net/ipv4/ip_fragment.c -@@ -661,6 +661,7 @@ int ip_defrag(struct net *net, struct sk_buff *skb, u32 user) - struct ipq *qp; - - IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS); -+ skb_orphan(skb); - - /* Lookup (or create) queue header */ - qp = ip_find(net, ip_hdr(skb), user, vif); -diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c -index 5f73a7c..a501242 100644 ---- a/net/ipv4/ip_sockglue.c -+++ b/net/ipv4/ip_sockglue.c -@@ -249,6 +249,8 @@ int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc, - switch (cmsg->cmsg_type) { - case IP_RETOPTS: - err = cmsg->cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr)); -+ -+ /* Our caller is responsible for freeing ipc->opt */ - err = ip_options_get(net, &ipc->opt, CMSG_DATA(cmsg), - err < 40 ? err : 40); - if (err) -diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c -index 6fb869f6..a04dee5 100644 ---- a/net/ipv4/netfilter/nf_defrag_ipv4.c -+++ b/net/ipv4/netfilter/nf_defrag_ipv4.c -@@ -27,8 +27,6 @@ static int nf_ct_ipv4_gather_frags(struct net *net, struct sk_buff *skb, - { - int err; - -- skb_orphan(skb); -- - local_bh_disable(); - err = ip_defrag(net, skb, user); - local_bh_enable(); -diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c -index e89094a..aa67e0e 100644 ---- a/net/ipv4/ping.c -+++ b/net/ipv4/ping.c -@@ -746,8 +746,10 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) - - if (msg->msg_controllen) { - err = ip_cmsg_send(sock_net(sk), msg, &ipc, false); -- if (err) -+ if (unlikely(err)) { -+ kfree(ipc.opt); - return err; -+ } - if (ipc.opt) - free = 1; - } -diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c -index bc35f18..7113bae 100644 ---- a/net/ipv4/raw.c -+++ b/net/ipv4/raw.c -@@ -547,8 +547,10 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) - - if (msg->msg_controllen) { - err = ip_cmsg_send(net, msg, &ipc, false); -- if (err) -+ if (unlikely(err)) { -+ kfree(ipc.opt); - goto out; -+ } - if (ipc.opt) - free = 1; - } -diff --git a/net/ipv4/route.c b/net/ipv4/route.c -index 85f184e..02c6229 100644 ---- a/net/ipv4/route.c -+++ b/net/ipv4/route.c -@@ -129,6 +129,7 @@ static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ; - static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20; - static int ip_rt_min_advmss __read_mostly = 256; - -+static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT; - /* - * Interface to generic destination cache. 
- */ -@@ -755,7 +756,7 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow - struct fib_nh *nh = &FIB_RES_NH(res); - - update_or_create_fnhe(nh, fl4->daddr, new_gw, -- 0, 0); -+ 0, jiffies + ip_rt_gc_timeout); - } - if (kill_route) - rt->dst.obsolete = DST_OBSOLETE_KILL; -@@ -1556,6 +1557,36 @@ static void ip_handle_martian_source(struct net_device *dev, - #endif - } - -+static void ip_del_fnhe(struct fib_nh *nh, __be32 daddr) -+{ -+ struct fnhe_hash_bucket *hash; -+ struct fib_nh_exception *fnhe, __rcu **fnhe_p; -+ u32 hval = fnhe_hashfun(daddr); -+ -+ spin_lock_bh(&fnhe_lock); -+ -+ hash = rcu_dereference_protected(nh->nh_exceptions, -+ lockdep_is_held(&fnhe_lock)); -+ hash += hval; -+ -+ fnhe_p = &hash->chain; -+ fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock)); -+ while (fnhe) { -+ if (fnhe->fnhe_daddr == daddr) { -+ rcu_assign_pointer(*fnhe_p, rcu_dereference_protected( -+ fnhe->fnhe_next, lockdep_is_held(&fnhe_lock))); -+ fnhe_flush_routes(fnhe); -+ kfree_rcu(fnhe, rcu); -+ break; -+ } -+ fnhe_p = &fnhe->fnhe_next; -+ fnhe = rcu_dereference_protected(fnhe->fnhe_next, -+ lockdep_is_held(&fnhe_lock)); -+ } -+ -+ spin_unlock_bh(&fnhe_lock); -+} -+ - /* called in rcu_read_lock() section */ - static int __mkroute_input(struct sk_buff *skb, - const struct fib_result *res, -@@ -1609,11 +1640,20 @@ static int __mkroute_input(struct sk_buff *skb, - - fnhe = find_exception(&FIB_RES_NH(*res), daddr); - if (do_cache) { -- if (fnhe) -+ if (fnhe) { - rth = rcu_dereference(fnhe->fnhe_rth_input); -- else -- rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input); -+ if (rth && rth->dst.expires && -+ time_after(jiffies, rth->dst.expires)) { -+ ip_del_fnhe(&FIB_RES_NH(*res), daddr); -+ fnhe = NULL; -+ } else { -+ goto rt_cache; -+ } -+ } -+ -+ rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input); - -+rt_cache: - if (rt_cache_valid(rth)) { - skb_dst_set_noref(skb, &rth->dst); - goto out; -@@ -2014,19 +2054,29 @@ static struct rtable *__mkroute_output(const struct fib_result *res, - struct fib_nh *nh = &FIB_RES_NH(*res); - - fnhe = find_exception(nh, fl4->daddr); -- if (fnhe) -+ if (fnhe) { - prth = &fnhe->fnhe_rth_output; -- else { -- if (unlikely(fl4->flowi4_flags & -- FLOWI_FLAG_KNOWN_NH && -- !(nh->nh_gw && -- nh->nh_scope == RT_SCOPE_LINK))) { -- do_cache = false; -- goto add; -+ rth = rcu_dereference(*prth); -+ if (rth && rth->dst.expires && -+ time_after(jiffies, rth->dst.expires)) { -+ ip_del_fnhe(nh, fl4->daddr); -+ fnhe = NULL; -+ } else { -+ goto rt_cache; - } -- prth = raw_cpu_ptr(nh->nh_pcpu_rth_output); - } -+ -+ if (unlikely(fl4->flowi4_flags & -+ FLOWI_FLAG_KNOWN_NH && -+ !(nh->nh_gw && -+ nh->nh_scope == RT_SCOPE_LINK))) { -+ do_cache = false; -+ goto add; -+ } -+ prth = raw_cpu_ptr(nh->nh_pcpu_rth_output); - rth = rcu_dereference(*prth); -+ -+rt_cache: - if (rt_cache_valid(rth)) { - dst_hold(&rth->dst); - return rth; -@@ -2569,7 +2619,6 @@ void ip_rt_multicast_event(struct in_device *in_dev) - } - - #ifdef CONFIG_SYSCTL --static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT; - static int ip_rt_gc_interval __read_mostly = 60 * HZ; - static int ip_rt_gc_min_interval __read_mostly = HZ / 2; - static int ip_rt_gc_elasticity __read_mostly = 8; -diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c -index c82cca1..036a76b 100644 ---- a/net/ipv4/tcp.c -+++ b/net/ipv4/tcp.c -@@ -279,6 +279,7 @@ - - #include <asm/uaccess.h> - #include <asm/ioctls.h> -+#include <asm/unaligned.h> - #include <net/busy_poll.h> - - int sysctl_tcp_fin_timeout 
__read_mostly = TCP_FIN_TIMEOUT; -@@ -938,7 +939,7 @@ new_segment: - - i = skb_shinfo(skb)->nr_frags; - can_coalesce = skb_can_coalesce(skb, i, page, offset); -- if (!can_coalesce && i >= MAX_SKB_FRAGS) { -+ if (!can_coalesce && i >= sysctl_max_skb_frags) { - tcp_mark_push(tp, skb); - goto new_segment; - } -@@ -1211,7 +1212,7 @@ new_segment: - - if (!skb_can_coalesce(skb, i, pfrag->page, - pfrag->offset)) { -- if (i == MAX_SKB_FRAGS || !sg) { -+ if (i == sysctl_max_skb_frags || !sg) { - tcp_mark_push(tp, skb); - goto new_segment; - } -@@ -2637,6 +2638,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info) - const struct inet_connection_sock *icsk = inet_csk(sk); - u32 now = tcp_time_stamp; - unsigned int start; -+ u64 rate64; - u32 rate; - - memset(info, 0, sizeof(*info)); -@@ -2702,15 +2704,17 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info) - info->tcpi_total_retrans = tp->total_retrans; - - rate = READ_ONCE(sk->sk_pacing_rate); -- info->tcpi_pacing_rate = rate != ~0U ? rate : ~0ULL; -+ rate64 = rate != ~0U ? rate : ~0ULL; -+ put_unaligned(rate64, &info->tcpi_pacing_rate); - - rate = READ_ONCE(sk->sk_max_pacing_rate); -- info->tcpi_max_pacing_rate = rate != ~0U ? rate : ~0ULL; -+ rate64 = rate != ~0U ? rate : ~0ULL; -+ put_unaligned(rate64, &info->tcpi_max_pacing_rate); - - do { - start = u64_stats_fetch_begin_irq(&tp->syncp); -- info->tcpi_bytes_acked = tp->bytes_acked; -- info->tcpi_bytes_received = tp->bytes_received; -+ put_unaligned(tp->bytes_acked, &info->tcpi_bytes_acked); -+ put_unaligned(tp->bytes_received, &info->tcpi_bytes_received); - } while (u64_stats_fetch_retry_irq(&tp->syncp, start)); - info->tcpi_segs_out = tp->segs_out; - info->tcpi_segs_in = tp->segs_in; -diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c -index d8841a2..8c7e631 100644 ---- a/net/ipv4/tcp_ipv4.c -+++ b/net/ipv4/tcp_ipv4.c -@@ -312,7 +312,7 @@ static void do_redirect(struct sk_buff *skb, struct sock *sk) - - - /* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */ --void tcp_req_err(struct sock *sk, u32 seq) -+void tcp_req_err(struct sock *sk, u32 seq, bool abort) - { - struct request_sock *req = inet_reqsk(sk); - struct net *net = sock_net(sk); -@@ -324,7 +324,7 @@ void tcp_req_err(struct sock *sk, u32 seq) - - if (seq != tcp_rsk(req)->snt_isn) { - NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS); -- } else { -+ } else if (abort) { - /* - * Still in SYN_RECV, just remove it silently. - * There is no good way to pass the error to the newly -@@ -384,7 +384,12 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info) - } - seq = ntohl(th->seq); - if (sk->sk_state == TCP_NEW_SYN_RECV) -- return tcp_req_err(sk, seq); -+ return tcp_req_err(sk, seq, -+ type == ICMP_PARAMETERPROB || -+ type == ICMP_TIME_EXCEEDED || -+ (type == ICMP_DEST_UNREACH && -+ (code == ICMP_NET_UNREACH || -+ code == ICMP_HOST_UNREACH))); - - bh_lock_sock(sk); - /* If too many ICMPs get dropped on busy -@@ -705,7 +710,8 @@ release_sk1: - outside socket context is ugly, certainly. What can I do? 
- */ - --static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack, -+static void tcp_v4_send_ack(struct net *net, -+ struct sk_buff *skb, u32 seq, u32 ack, - u32 win, u32 tsval, u32 tsecr, int oif, - struct tcp_md5sig_key *key, - int reply_flags, u8 tos) -@@ -720,7 +726,6 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack, - ]; - } rep; - struct ip_reply_arg arg; -- struct net *net = dev_net(skb_dst(skb)->dev); - - memset(&rep.th, 0, sizeof(struct tcphdr)); - memset(&arg, 0, sizeof(arg)); -@@ -782,7 +787,8 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb) - struct inet_timewait_sock *tw = inet_twsk(sk); - struct tcp_timewait_sock *tcptw = tcp_twsk(sk); - -- tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt, -+ tcp_v4_send_ack(sock_net(sk), skb, -+ tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt, - tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale, - tcp_time_stamp + tcptw->tw_ts_offset, - tcptw->tw_ts_recent, -@@ -801,8 +807,10 @@ static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb, - /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV - * sk->sk_state == TCP_SYN_RECV -> for Fast Open. - */ -- tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ? -- tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt, -+ u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 : -+ tcp_sk(sk)->snd_nxt; -+ -+ tcp_v4_send_ack(sock_net(sk), skb, seq, - tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd, - tcp_time_stamp, - req->ts_recent, -@@ -1586,28 +1594,30 @@ process: - - if (sk->sk_state == TCP_NEW_SYN_RECV) { - struct request_sock *req = inet_reqsk(sk); -- struct sock *nsk = NULL; -+ struct sock *nsk; - - sk = req->rsk_listener; -- if (tcp_v4_inbound_md5_hash(sk, skb)) -- goto discard_and_relse; -- if (likely(sk->sk_state == TCP_LISTEN)) { -- nsk = tcp_check_req(sk, skb, req, false); -- } else { -+ if (unlikely(tcp_v4_inbound_md5_hash(sk, skb))) { -+ reqsk_put(req); -+ goto discard_it; -+ } -+ if (unlikely(sk->sk_state != TCP_LISTEN)) { - inet_csk_reqsk_queue_drop_and_put(sk, req); - goto lookup; - } -+ sock_hold(sk); -+ nsk = tcp_check_req(sk, skb, req, false); - if (!nsk) { - reqsk_put(req); -- goto discard_it; -+ goto discard_and_relse; - } - if (nsk == sk) { -- sock_hold(sk); - reqsk_put(req); - } else if (tcp_child_process(sk, nsk, skb)) { - tcp_v4_send_reset(nsk, skb); -- goto discard_it; -+ goto discard_and_relse; - } else { -+ sock_put(sk); - return 0; - } - } -diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c -index c438908..7f8ab46 100644 ---- a/net/ipv4/udp.c -+++ b/net/ipv4/udp.c -@@ -966,8 +966,10 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) - if (msg->msg_controllen) { - err = ip_cmsg_send(sock_net(sk), msg, &ipc, - sk->sk_family == AF_INET6); -- if (err) -+ if (unlikely(err)) { -+ kfree(ipc.opt); - return err; -+ } - if (ipc.opt) - free = 1; - connected = 0; -diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c -index 1f21087..e8d3da0 100644 ---- a/net/ipv6/addrconf.c -+++ b/net/ipv6/addrconf.c -@@ -583,7 +583,7 @@ static int inet6_netconf_get_devconf(struct sk_buff *in_skb, - if (err < 0) - goto errout; - -- err = EINVAL; -+ err = -EINVAL; - if (!tb[NETCONFA_IFINDEX]) - goto errout; - -@@ -3506,6 +3506,7 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp) - { - struct inet6_dev *idev = ifp->idev; - struct net_device *dev = idev->dev; -+ bool notify = false; - - addrconf_join_solict(dev, &ifp->addr); - -@@ -3551,7 +3552,7 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp) - /* Because 
optimistic nodes can use this address, - * notify listeners. If DAD fails, RTM_DELADDR is sent. - */ -- ipv6_ifa_notify(RTM_NEWADDR, ifp); -+ notify = true; - } - } - -@@ -3559,6 +3560,8 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp) - out: - spin_unlock(&ifp->lock); - read_unlock_bh(&idev->lock); -+ if (notify) -+ ipv6_ifa_notify(RTM_NEWADDR, ifp); - } - - static void addrconf_dad_start(struct inet6_ifaddr *ifp) -diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c -index 517c55b..4281621 100644 ---- a/net/ipv6/datagram.c -+++ b/net/ipv6/datagram.c -@@ -162,6 +162,9 @@ ipv4_connected: - fl6.fl6_dport = inet->inet_dport; - fl6.fl6_sport = inet->inet_sport; - -+ if (!fl6.flowi6_oif) -+ fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex; -+ - if (!fl6.flowi6_oif && (addr_type&IPV6_ADDR_MULTICAST)) - fl6.flowi6_oif = np->mcast_oif; - -diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c -index 1f9ebe3..dc2db4f 100644 ---- a/net/ipv6/ip6_flowlabel.c -+++ b/net/ipv6/ip6_flowlabel.c -@@ -540,12 +540,13 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen) - } - spin_lock_bh(&ip6_sk_fl_lock); - for (sflp = &np->ipv6_fl_list; -- (sfl = rcu_dereference(*sflp)) != NULL; -+ (sfl = rcu_dereference_protected(*sflp, -+ lockdep_is_held(&ip6_sk_fl_lock))) != NULL; - sflp = &sfl->next) { - if (sfl->fl->label == freq.flr_label) { - if (freq.flr_label == (np->flow_label&IPV6_FLOWLABEL_MASK)) - np->flow_label &= ~IPV6_FLOWLABEL_MASK; -- *sflp = rcu_dereference(sfl->next); -+ *sflp = sfl->next; - spin_unlock_bh(&ip6_sk_fl_lock); - fl_release(sfl->fl); - kfree_rcu(sfl, rcu); -diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c -index 6473889..31144c4 100644 ---- a/net/ipv6/ip6_output.c -+++ b/net/ipv6/ip6_output.c -@@ -909,6 +909,7 @@ static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk, - struct rt6_info *rt; - #endif - int err; -+ int flags = 0; - - /* The correct way to handle this would be to do - * ip6_route_get_saddr, and then ip6_route_output; however, -@@ -940,10 +941,13 @@ static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk, - dst_release(*dst); - *dst = NULL; - } -+ -+ if (fl6->flowi6_oif) -+ flags |= RT6_LOOKUP_F_IFACE; - } - - if (!*dst) -- *dst = ip6_route_output(net, sk, fl6); -+ *dst = ip6_route_output_flags(net, sk, fl6, flags); - - err = (*dst)->error; - if (err) -diff --git a/net/ipv6/route.c b/net/ipv6/route.c -index 826e6aa..3f164d3 100644 ---- a/net/ipv6/route.c -+++ b/net/ipv6/route.c -@@ -1174,11 +1174,10 @@ static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table - return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, flags); - } - --struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk, -- struct flowi6 *fl6) -+struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk, -+ struct flowi6 *fl6, int flags) - { - struct dst_entry *dst; -- int flags = 0; - bool any_src; - - dst = l3mdev_rt6_dst_by_oif(net, fl6); -@@ -1199,7 +1198,7 @@ struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk, - - return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_output); - } --EXPORT_SYMBOL(ip6_route_output); -+EXPORT_SYMBOL_GPL(ip6_route_output_flags); - - struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig) - { -diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c -index bd100b4..b8d4056 100644 ---- a/net/ipv6/tcp_ipv6.c -+++ b/net/ipv6/tcp_ipv6.c -@@ -328,6 +328,7 @@ static void tcp_v6_err(struct 
sk_buff *skb, struct inet6_skb_parm *opt, - struct tcp_sock *tp; - __u32 seq, snd_una; - struct sock *sk; -+ bool fatal; - int err; - - sk = __inet6_lookup_established(net, &tcp_hashinfo, -@@ -346,8 +347,9 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, - return; - } - seq = ntohl(th->seq); -+ fatal = icmpv6_err_convert(type, code, &err); - if (sk->sk_state == TCP_NEW_SYN_RECV) -- return tcp_req_err(sk, seq); -+ return tcp_req_err(sk, seq, fatal); - - bh_lock_sock(sk); - if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG) -@@ -401,7 +403,6 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, - goto out; - } - -- icmpv6_err_convert(type, code, &err); - - /* Might be for an request_sock */ - switch (sk->sk_state) { -@@ -1387,7 +1388,7 @@ process: - - if (sk->sk_state == TCP_NEW_SYN_RECV) { - struct request_sock *req = inet_reqsk(sk); -- struct sock *nsk = NULL; -+ struct sock *nsk; - - sk = req->rsk_listener; - tcp_v6_fill_cb(skb, hdr, th); -@@ -1395,24 +1396,24 @@ process: - reqsk_put(req); - goto discard_it; - } -- if (likely(sk->sk_state == TCP_LISTEN)) { -- nsk = tcp_check_req(sk, skb, req, false); -- } else { -+ if (unlikely(sk->sk_state != TCP_LISTEN)) { - inet_csk_reqsk_queue_drop_and_put(sk, req); - goto lookup; - } -+ sock_hold(sk); -+ nsk = tcp_check_req(sk, skb, req, false); - if (!nsk) { - reqsk_put(req); -- goto discard_it; -+ goto discard_and_relse; - } - if (nsk == sk) { -- sock_hold(sk); - reqsk_put(req); - tcp_v6_restore_cb(skb); - } else if (tcp_child_process(sk, nsk, skb)) { - tcp_v6_send_reset(nsk, skb); -- goto discard_it; -+ goto discard_and_relse; - } else { -+ sock_put(sk); - return 0; - } - } -diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c -index 435608c..20ab7b2 100644 ---- a/net/iucv/af_iucv.c -+++ b/net/iucv/af_iucv.c -@@ -708,6 +708,9 @@ static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr, - if (!addr || addr->sa_family != AF_IUCV) - return -EINVAL; - -+ if (addr_len < sizeof(struct sockaddr_iucv)) -+ return -EINVAL; -+ - lock_sock(sk); - if (sk->sk_state != IUCV_OPEN) { - err = -EBADFD; -diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c -index f93c5be..2caaa84 100644 ---- a/net/l2tp/l2tp_netlink.c -+++ b/net/l2tp/l2tp_netlink.c -@@ -124,8 +124,13 @@ static int l2tp_tunnel_notify(struct genl_family *family, - ret = l2tp_nl_tunnel_send(msg, info->snd_portid, info->snd_seq, - NLM_F_ACK, tunnel, cmd); - -- if (ret >= 0) -- return genlmsg_multicast_allns(family, msg, 0, 0, GFP_ATOMIC); -+ if (ret >= 0) { -+ ret = genlmsg_multicast_allns(family, msg, 0, 0, GFP_ATOMIC); -+ /* We don't care if no one is listening */ -+ if (ret == -ESRCH) -+ ret = 0; -+ return ret; -+ } - - nlmsg_free(msg); - -@@ -147,8 +152,13 @@ static int l2tp_session_notify(struct genl_family *family, - ret = l2tp_nl_session_send(msg, info->snd_portid, info->snd_seq, - NLM_F_ACK, session, cmd); - -- if (ret >= 0) -- return genlmsg_multicast_allns(family, msg, 0, 0, GFP_ATOMIC); -+ if (ret >= 0) { -+ ret = genlmsg_multicast_allns(family, msg, 0, 0, GFP_ATOMIC); -+ /* We don't care if no one is listening */ -+ if (ret == -ESRCH) -+ ret = 0; -+ return ret; -+ } - - nlmsg_free(msg); - -diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c -index 337bb5d..6a12b0f 100644 ---- a/net/mac80211/ibss.c -+++ b/net/mac80211/ibss.c -@@ -1732,7 +1732,6 @@ void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local) - if (sdata->vif.type != NL80211_IFTYPE_ADHOC) - continue; - sdata->u.ibss.last_scan_completed = 
jiffies; -- ieee80211_queue_work(&local->hw, &sdata->work); - } - mutex_unlock(&local->iflist_mtx); - } -diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c -index fa28500..6f85b6a 100644 ---- a/net/mac80211/mesh.c -+++ b/net/mac80211/mesh.c -@@ -1370,17 +1370,6 @@ out: - sdata_unlock(sdata); - } - --void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local) --{ -- struct ieee80211_sub_if_data *sdata; -- -- rcu_read_lock(); -- list_for_each_entry_rcu(sdata, &local->interfaces, list) -- if (ieee80211_vif_is_mesh(&sdata->vif) && -- ieee80211_sdata_running(sdata)) -- ieee80211_queue_work(&local->hw, &sdata->work); -- rcu_read_unlock(); --} - - void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata) - { -diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h -index a159634..4a8019f 100644 ---- a/net/mac80211/mesh.h -+++ b/net/mac80211/mesh.h -@@ -362,14 +362,10 @@ static inline bool mesh_path_sel_is_hwmp(struct ieee80211_sub_if_data *sdata) - return sdata->u.mesh.mesh_pp_id == IEEE80211_PATH_PROTOCOL_HWMP; - } - --void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local); -- - void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata); - void mesh_sync_adjust_tbtt(struct ieee80211_sub_if_data *sdata); - void ieee80211s_stop(void); - #else --static inline void --ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local) {} - static inline bool mesh_path_sel_is_hwmp(struct ieee80211_sub_if_data *sdata) - { return false; } - static inline void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata) -diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c -index 3aa0434..83097c3 100644 ---- a/net/mac80211/mlme.c -+++ b/net/mac80211/mlme.c -@@ -4003,8 +4003,6 @@ static void ieee80211_restart_sta_timer(struct ieee80211_sub_if_data *sdata) - if (!ieee80211_hw_check(&sdata->local->hw, CONNECTION_MONITOR)) - ieee80211_queue_work(&sdata->local->hw, - &sdata->u.mgd.monitor_work); -- /* and do all the other regular work too */ -- ieee80211_queue_work(&sdata->local->hw, &sdata->work); - } - } - -diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c -index a413e52..acbe182 100644 ---- a/net/mac80211/scan.c -+++ b/net/mac80211/scan.c -@@ -314,6 +314,7 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted) - bool was_scanning = local->scanning; - struct cfg80211_scan_request *scan_req; - struct ieee80211_sub_if_data *scan_sdata; -+ struct ieee80211_sub_if_data *sdata; - - lockdep_assert_held(&local->mtx); - -@@ -373,7 +374,16 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted) - - ieee80211_mlme_notify_scan_completed(local); - ieee80211_ibss_notify_scan_completed(local); -- ieee80211_mesh_notify_scan_completed(local); -+ -+ /* Requeue all the work that might have been ignored while -+ * the scan was in progress; if there was none this will -+ * just be a no-op for the particular interface. 
-+ */ -+ list_for_each_entry_rcu(sdata, &local->interfaces, list) { -+ if (ieee80211_sdata_running(sdata)) -+ ieee80211_queue_work(&sdata->local->hw, &sdata->work); -+ } -+ - if (was_scanning) - ieee80211_start_next_roc(local); - } -diff --git a/net/openvswitch/vport-vxlan.c b/net/openvswitch/vport-vxlan.c -index 1605691..d933cb8 100644 ---- a/net/openvswitch/vport-vxlan.c -+++ b/net/openvswitch/vport-vxlan.c -@@ -90,7 +90,7 @@ static struct vport *vxlan_tnl_create(const struct vport_parms *parms) - int err; - struct vxlan_config conf = { - .no_share = true, -- .flags = VXLAN_F_COLLECT_METADATA, -+ .flags = VXLAN_F_COLLECT_METADATA | VXLAN_F_UDP_ZERO_CSUM6_RX, - }; - - if (!options) { -diff --git a/net/rfkill/core.c b/net/rfkill/core.c -index f53bf3b6..cf5b69a 100644 ---- a/net/rfkill/core.c -+++ b/net/rfkill/core.c -@@ -1095,17 +1095,6 @@ static unsigned int rfkill_fop_poll(struct file *file, poll_table *wait) - return res; - } - --static bool rfkill_readable(struct rfkill_data *data) --{ -- bool r; -- -- mutex_lock(&data->mtx); -- r = !list_empty(&data->events); -- mutex_unlock(&data->mtx); -- -- return r; --} -- - static ssize_t rfkill_fop_read(struct file *file, char __user *buf, - size_t count, loff_t *pos) - { -@@ -1122,8 +1111,11 @@ static ssize_t rfkill_fop_read(struct file *file, char __user *buf, - goto out; - } - mutex_unlock(&data->mtx); -+ /* since we re-check and it just compares pointers, -+ * using !list_empty() without locking isn't a problem -+ */ - ret = wait_event_interruptible(data->read_wait, -- rfkill_readable(data)); -+ !list_empty(&data->events)); - mutex_lock(&data->mtx); - - if (ret) -diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c -index b5c2cf2..af1acf0 100644 ---- a/net/sched/sch_api.c -+++ b/net/sched/sch_api.c -@@ -1852,6 +1852,7 @@ reset: - } - - tp = old_tp; -+ protocol = tc_skb_protocol(skb); - goto reclassify; - #endif - } -diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c -index 3d9ea9a..8b4ff31 100644 ---- a/net/sctp/protocol.c -+++ b/net/sctp/protocol.c -@@ -60,6 +60,8 @@ - #include <net/inet_common.h> - #include <net/inet_ecn.h> - -+#define MAX_SCTP_PORT_HASH_ENTRIES (64 * 1024) -+ - /* Global data structures. */ - struct sctp_globals sctp_globals __read_mostly; - -@@ -1352,6 +1354,8 @@ static __init int sctp_init(void) - unsigned long limit; - int max_share; - int order; -+ int num_entries; -+ int max_entry_order; - - sock_skb_cb_check_size(sizeof(struct sctp_ulpevent)); - -@@ -1404,14 +1408,24 @@ static __init int sctp_init(void) - - /* Size and allocate the association hash table. - * The methodology is similar to that of the tcp hash tables. -+ * Though not identical. Start by getting a goal size - */ - if (totalram_pages >= (128 * 1024)) - goal = totalram_pages >> (22 - PAGE_SHIFT); - else - goal = totalram_pages >> (24 - PAGE_SHIFT); - -- for (order = 0; (1UL << order) < goal; order++) -- ; -+ /* Then compute the page order for said goal */ -+ order = get_order(goal); -+ -+ /* Now compute the required page order for the maximum sized table we -+ * want to create -+ */ -+ max_entry_order = get_order(MAX_SCTP_PORT_HASH_ENTRIES * -+ sizeof(struct sctp_bind_hashbucket)); -+ -+ /* Limit the page order by that maximum hash table size */ -+ order = min(order, max_entry_order); - - do { - sctp_assoc_hashsize = (1UL << order) * PAGE_SIZE / -@@ -1445,20 +1459,35 @@ static __init int sctp_init(void) - INIT_HLIST_HEAD(&sctp_ep_hashtable[i].chain); - } - -- /* Allocate and initialize the SCTP port hash table. 
*/ -+ /* Allocate and initialize the SCTP port hash table. -+ * Note that order is initalized to start at the max sized -+ * table we want to support. If we can't get that many pages -+ * reduce the order and try again -+ */ - do { -- sctp_port_hashsize = (1UL << order) * PAGE_SIZE / -- sizeof(struct sctp_bind_hashbucket); -- if ((sctp_port_hashsize > (64 * 1024)) && order > 0) -- continue; - sctp_port_hashtable = (struct sctp_bind_hashbucket *) - __get_free_pages(GFP_ATOMIC|__GFP_NOWARN, order); - } while (!sctp_port_hashtable && --order > 0); -+ - if (!sctp_port_hashtable) { - pr_err("Failed bind hash alloc\n"); - status = -ENOMEM; - goto err_bhash_alloc; - } -+ -+ /* Now compute the number of entries that will fit in the -+ * port hash space we allocated -+ */ -+ num_entries = (1UL << order) * PAGE_SIZE / -+ sizeof(struct sctp_bind_hashbucket); -+ -+ /* And finish by rounding it down to the nearest power of two -+ * this wastes some memory of course, but its needed because -+ * the hash function operates based on the assumption that -+ * that the number of entries is a power of two -+ */ -+ sctp_port_hashsize = rounddown_pow_of_two(num_entries); -+ - for (i = 0; i < sctp_port_hashsize; i++) { - spin_lock_init(&sctp_port_hashtable[i].lock); - INIT_HLIST_HEAD(&sctp_port_hashtable[i].chain); -diff --git a/net/sctp/socket.c b/net/sctp/socket.c -index ef1d90f..be1489f 100644 ---- a/net/sctp/socket.c -+++ b/net/sctp/socket.c -@@ -5542,6 +5542,7 @@ static int sctp_getsockopt_hmac_ident(struct sock *sk, int len, - struct sctp_hmac_algo_param *hmacs; - __u16 data_len = 0; - u32 num_idents; -+ int i; - - if (!ep->auth_enable) - return -EACCES; -@@ -5559,8 +5560,12 @@ static int sctp_getsockopt_hmac_ident(struct sock *sk, int len, - return -EFAULT; - if (put_user(num_idents, &p->shmac_num_idents)) - return -EFAULT; -- if (copy_to_user(p->shmac_idents, hmacs->hmac_ids, data_len)) -- return -EFAULT; -+ for (i = 0; i < num_idents; i++) { -+ __u16 hmacid = ntohs(hmacs->hmac_ids[i]); -+ -+ if (copy_to_user(&p->shmac_idents[i], &hmacid, sizeof(__u16))) -+ return -EFAULT; -+ } - return 0; - } - -@@ -6640,6 +6645,7 @@ static int sctp_msghdr_parse(const struct msghdr *msg, sctp_cmsgs_t *cmsgs) - - if (cmsgs->srinfo->sinfo_flags & - ~(SCTP_UNORDERED | SCTP_ADDR_OVER | -+ SCTP_SACK_IMMEDIATELY | - SCTP_ABORT | SCTP_EOF)) - return -EINVAL; - break; -@@ -6663,6 +6669,7 @@ static int sctp_msghdr_parse(const struct msghdr *msg, sctp_cmsgs_t *cmsgs) - - if (cmsgs->sinfo->snd_flags & - ~(SCTP_UNORDERED | SCTP_ADDR_OVER | -+ SCTP_SACK_IMMEDIATELY | - SCTP_ABORT | SCTP_EOF)) - return -EINVAL; - break; -diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c -index 5e4f815..21e2035 100644 ---- a/net/sunrpc/cache.c -+++ b/net/sunrpc/cache.c -@@ -1225,7 +1225,7 @@ int qword_get(char **bpp, char *dest, int bufsize) - if (bp[0] == '\\' && bp[1] == 'x') { - /* HEX STRING */ - bp += 2; -- while (len < bufsize) { -+ while (len < bufsize - 1) { - int h, l; - - h = hex_to_bin(bp[0]); -diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c -index f34e535..d5d7132 100644 ---- a/net/switchdev/switchdev.c -+++ b/net/switchdev/switchdev.c -@@ -20,6 +20,7 @@ - #include <linux/list.h> - #include <linux/workqueue.h> - #include <linux/if_vlan.h> -+#include <linux/rtnetlink.h> - #include <net/ip_fib.h> - #include <net/switchdev.h> - -@@ -565,7 +566,6 @@ int switchdev_port_obj_dump(struct net_device *dev, struct switchdev_obj *obj, - } - EXPORT_SYMBOL_GPL(switchdev_port_obj_dump); - --static DEFINE_MUTEX(switchdev_mutex); - 
static RAW_NOTIFIER_HEAD(switchdev_notif_chain); - - /** -@@ -580,9 +580,9 @@ int register_switchdev_notifier(struct notifier_block *nb) - { - int err; - -- mutex_lock(&switchdev_mutex); -+ rtnl_lock(); - err = raw_notifier_chain_register(&switchdev_notif_chain, nb); -- mutex_unlock(&switchdev_mutex); -+ rtnl_unlock(); - return err; - } - EXPORT_SYMBOL_GPL(register_switchdev_notifier); -@@ -598,9 +598,9 @@ int unregister_switchdev_notifier(struct notifier_block *nb) - { - int err; - -- mutex_lock(&switchdev_mutex); -+ rtnl_lock(); - err = raw_notifier_chain_unregister(&switchdev_notif_chain, nb); -- mutex_unlock(&switchdev_mutex); -+ rtnl_unlock(); - return err; - } - EXPORT_SYMBOL_GPL(unregister_switchdev_notifier); -@@ -614,16 +614,17 @@ EXPORT_SYMBOL_GPL(unregister_switchdev_notifier); - * Call all network notifier blocks. This should be called by driver - * when it needs to propagate hardware event. - * Return values are same as for atomic_notifier_call_chain(). -+ * rtnl_lock must be held. - */ - int call_switchdev_notifiers(unsigned long val, struct net_device *dev, - struct switchdev_notifier_info *info) - { - int err; - -+ ASSERT_RTNL(); -+ - info->dev = dev; -- mutex_lock(&switchdev_mutex); - err = raw_notifier_call_chain(&switchdev_notif_chain, val, info); -- mutex_unlock(&switchdev_mutex); - return err; - } - EXPORT_SYMBOL_GPL(call_switchdev_notifiers); -diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c -index 9dc239d..92e367a 100644 ---- a/net/tipc/bcast.c -+++ b/net/tipc/bcast.c -@@ -399,8 +399,10 @@ int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg) - - hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family, - NLM_F_MULTI, TIPC_NL_LINK_GET); -- if (!hdr) -+ if (!hdr) { -+ tipc_bcast_unlock(net); - return -EMSGSIZE; -+ } - - attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK); - if (!attrs) -diff --git a/net/tipc/node.c b/net/tipc/node.c -index 20cddec..3926b56 100644 ---- a/net/tipc/node.c -+++ b/net/tipc/node.c -@@ -168,12 +168,6 @@ struct tipc_node *tipc_node_create(struct net *net, u32 addr, u16 capabilities) - skb_queue_head_init(&n_ptr->bc_entry.inputq1); - __skb_queue_head_init(&n_ptr->bc_entry.arrvq); - skb_queue_head_init(&n_ptr->bc_entry.inputq2); -- hlist_add_head_rcu(&n_ptr->hash, &tn->node_htable[tipc_hashfn(addr)]); -- list_for_each_entry_rcu(temp_node, &tn->node_list, list) { -- if (n_ptr->addr < temp_node->addr) -- break; -- } -- list_add_tail_rcu(&n_ptr->list, &temp_node->list); - n_ptr->state = SELF_DOWN_PEER_LEAVING; - n_ptr->signature = INVALID_NODE_SIG; - n_ptr->active_links[0] = INVALID_BEARER_ID; -@@ -193,6 +187,12 @@ struct tipc_node *tipc_node_create(struct net *net, u32 addr, u16 capabilities) - tipc_node_get(n_ptr); - setup_timer(&n_ptr->timer, tipc_node_timeout, (unsigned long)n_ptr); - n_ptr->keepalive_intv = U32_MAX; -+ hlist_add_head_rcu(&n_ptr->hash, &tn->node_htable[tipc_hashfn(addr)]); -+ list_for_each_entry_rcu(temp_node, &tn->node_list, list) { -+ if (n_ptr->addr < temp_node->addr) -+ break; -+ } -+ list_add_tail_rcu(&n_ptr->list, &temp_node->list); - exit: - spin_unlock_bh(&tn->node_list_lock); - return n_ptr; -diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c -index 350cca3..69ee2ee 100644 ---- a/net/tipc/subscr.c -+++ b/net/tipc/subscr.c -@@ -289,15 +289,14 @@ static void tipc_subscrb_rcv_cb(struct net *net, int conid, - struct sockaddr_tipc *addr, void *usr_data, - void *buf, size_t len) - { -- struct tipc_subscriber *subscriber = usr_data; -+ struct tipc_subscriber *subscrb = usr_data; - struct tipc_subscription 
*sub = NULL; - struct tipc_net *tn = net_generic(net, tipc_net_id); - -- tipc_subscrp_create(net, (struct tipc_subscr *)buf, subscriber, &sub); -- if (sub) -- tipc_nametbl_subscribe(sub); -- else -- tipc_conn_terminate(tn->topsrv, subscriber->conid); -+ if (tipc_subscrp_create(net, (struct tipc_subscr *)buf, subscrb, &sub)) -+ return tipc_conn_terminate(tn->topsrv, subscrb->conid); -+ -+ tipc_nametbl_subscribe(sub); - } - - /* Handle one request to establish a new subscriber */ -diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c -index e3f85bc..898a53a 100644 ---- a/net/unix/af_unix.c -+++ b/net/unix/af_unix.c -@@ -1496,7 +1496,7 @@ static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb) - UNIXCB(skb).fp = NULL; - - for (i = scm->fp->count-1; i >= 0; i--) -- unix_notinflight(scm->fp->fp[i]); -+ unix_notinflight(scm->fp->user, scm->fp->fp[i]); - } - - static void unix_destruct_scm(struct sk_buff *skb) -@@ -1561,7 +1561,7 @@ static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb) - return -ENOMEM; - - for (i = scm->fp->count - 1; i >= 0; i--) -- unix_inflight(scm->fp->fp[i]); -+ unix_inflight(scm->fp->user, scm->fp->fp[i]); - return max_level; - } - -@@ -1781,7 +1781,12 @@ restart_locked: - goto out_unlock; - } - -- if (unlikely(unix_peer(other) != sk && unix_recvq_full(other))) { -+ /* other == sk && unix_peer(other) != sk if -+ * - unix_peer(sk) == NULL, destination address bound to sk -+ * - unix_peer(sk) == sk by time of get but disconnected before lock -+ */ -+ if (other != sk && -+ unlikely(unix_peer(other) != sk && unix_recvq_full(other))) { - if (timeo) { - timeo = unix_wait_for_peer(other, timeo); - -@@ -2270,13 +2275,15 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state) - size_t size = state->size; - unsigned int last_len; - -- err = -EINVAL; -- if (sk->sk_state != TCP_ESTABLISHED) -+ if (unlikely(sk->sk_state != TCP_ESTABLISHED)) { -+ err = -EINVAL; - goto out; -+ } - -- err = -EOPNOTSUPP; -- if (flags & MSG_OOB) -+ if (unlikely(flags & MSG_OOB)) { -+ err = -EOPNOTSUPP; - goto out; -+ } - - target = sock_rcvlowat(sk, flags & MSG_WAITALL, size); - timeo = sock_rcvtimeo(sk, noblock); -@@ -2322,9 +2329,11 @@ again: - goto unlock; - - unix_state_unlock(sk); -- err = -EAGAIN; -- if (!timeo) -+ if (!timeo) { -+ err = -EAGAIN; - break; -+ } -+ - mutex_unlock(&u->readlock); - - timeo = unix_stream_data_wait(sk, timeo, last, -@@ -2332,6 +2341,7 @@ again: - - if (signal_pending(current)) { - err = sock_intr_errno(timeo); -+ scm_destroy(&scm); - goto out; - } - -diff --git a/net/unix/diag.c b/net/unix/diag.c -index c512f64..4d96797 100644 ---- a/net/unix/diag.c -+++ b/net/unix/diag.c -@@ -220,7 +220,7 @@ done: - return skb->len; - } - --static struct sock *unix_lookup_by_ino(int ino) -+static struct sock *unix_lookup_by_ino(unsigned int ino) - { - int i; - struct sock *sk; -diff --git a/net/unix/garbage.c b/net/unix/garbage.c -index 8fcdc22..6a0d485 100644 ---- a/net/unix/garbage.c -+++ b/net/unix/garbage.c -@@ -116,7 +116,7 @@ struct sock *unix_get_socket(struct file *filp) - * descriptor if it is for an AF_UNIX socket. 
- */ - --void unix_inflight(struct file *fp) -+void unix_inflight(struct user_struct *user, struct file *fp) - { - struct sock *s = unix_get_socket(fp); - -@@ -133,11 +133,11 @@ void unix_inflight(struct file *fp) - } - unix_tot_inflight++; - } -- fp->f_cred->user->unix_inflight++; -+ user->unix_inflight++; - spin_unlock(&unix_gc_lock); - } - --void unix_notinflight(struct file *fp) -+void unix_notinflight(struct user_struct *user, struct file *fp) - { - struct sock *s = unix_get_socket(fp); - -@@ -152,7 +152,7 @@ void unix_notinflight(struct file *fp) - list_del_init(&u->link); - unix_tot_inflight--; - } -- fp->f_cred->user->unix_inflight--; -+ user->unix_inflight--; - spin_unlock(&unix_gc_lock); - } - -diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh -index dacf71a..ba6c34e 100755 ---- a/scripts/link-vmlinux.sh -+++ b/scripts/link-vmlinux.sh -@@ -62,7 +62,7 @@ vmlinux_link() - -Wl,--start-group \ - ${KBUILD_VMLINUX_MAIN} \ - -Wl,--end-group \ -- -lutil -lrt ${1} -+ -lutil -lrt -lpthread ${1} - rm -f linux - fi - } -diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c -index ff81026..7c57c7f 100644 ---- a/security/smack/smack_lsm.c -+++ b/security/smack/smack_lsm.c -@@ -398,12 +398,10 @@ static int smk_copy_relabel(struct list_head *nhead, struct list_head *ohead, - */ - static inline unsigned int smk_ptrace_mode(unsigned int mode) - { -- switch (mode) { -- case PTRACE_MODE_READ: -- return MAY_READ; -- case PTRACE_MODE_ATTACH: -+ if (mode & PTRACE_MODE_ATTACH) - return MAY_READWRITE; -- } -+ if (mode & PTRACE_MODE_READ) -+ return MAY_READ; - - return 0; - } -diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c -index d3c19c9..cb6ed10 100644 ---- a/security/yama/yama_lsm.c -+++ b/security/yama/yama_lsm.c -@@ -281,7 +281,7 @@ static int yama_ptrace_access_check(struct task_struct *child, - int rc = 0; - - /* require ptrace target be a child of ptracer on attach */ -- if (mode == PTRACE_MODE_ATTACH) { -+ if (mode & PTRACE_MODE_ATTACH) { - switch (ptrace_scope) { - case YAMA_SCOPE_DISABLED: - /* No additional restrictions. 
*/ -@@ -307,7 +307,7 @@ static int yama_ptrace_access_check(struct task_struct *child, - } - } - -- if (rc) { -+ if (rc && (mode & PTRACE_MODE_NOAUDIT) == 0) { - printk_ratelimited(KERN_NOTICE - "ptrace of pid %d was attempted by: %s (pid %d)\n", - child->pid, current->comm, current->pid); -diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c -index 2c13298..2ff692d 100644 ---- a/sound/pci/hda/hda_intel.c -+++ b/sound/pci/hda/hda_intel.c -@@ -357,7 +357,10 @@ enum { - ((pci)->device == 0x0d0c) || \ - ((pci)->device == 0x160c)) - --#define IS_BROXTON(pci) ((pci)->device == 0x5a98) -+#define IS_SKL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa170) -+#define IS_SKL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d70) -+#define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98) -+#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci)) - - static char *driver_short_names[] = { - [AZX_DRIVER_ICH] = "HDA Intel", -@@ -534,13 +537,13 @@ static void hda_intel_init_chip(struct azx *chip, bool full_reset) - - if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) - snd_hdac_set_codec_wakeup(bus, true); -- if (IS_BROXTON(pci)) { -+ if (IS_SKL_PLUS(pci)) { - pci_read_config_dword(pci, INTEL_HDA_CGCTL, &val); - val = val & ~INTEL_HDA_CGCTL_MISCBDCGE; - pci_write_config_dword(pci, INTEL_HDA_CGCTL, val); - } - azx_init_chip(chip, full_reset); -- if (IS_BROXTON(pci)) { -+ if (IS_SKL_PLUS(pci)) { - pci_read_config_dword(pci, INTEL_HDA_CGCTL, &val); - val = val | INTEL_HDA_CGCTL_MISCBDCGE; - pci_write_config_dword(pci, INTEL_HDA_CGCTL, val); -@@ -549,7 +552,7 @@ static void hda_intel_init_chip(struct azx *chip, bool full_reset) - snd_hdac_set_codec_wakeup(bus, false); - - /* reduce dma latency to avoid noise */ -- if (IS_BROXTON(pci)) -+ if (IS_BXT(pci)) - bxt_reduce_dma_latency(chip); - } - -@@ -971,11 +974,6 @@ static int azx_resume(struct device *dev) - /* put codec down to D3 at hibernation for Intel SKL+; - * otherwise BIOS may still access the codec and screw up the driver - */ --#define IS_SKL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa170) --#define IS_SKL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d70) --#define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98) --#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci)) -- - static int azx_freeze_noirq(struct device *dev) - { - struct pci_dev *pci = to_pci_dev(dev); -diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c -index efd4980..72fa58d 100644 ---- a/sound/pci/hda/patch_realtek.c -+++ b/sound/pci/hda/patch_realtek.c -@@ -4749,6 +4749,7 @@ enum { - ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE, - ALC293_FIXUP_LENOVO_SPK_NOISE, - ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY, -+ ALC255_FIXUP_DELL_SPK_NOISE, - }; - - static const struct hda_fixup alc269_fixups[] = { -@@ -5368,6 +5369,12 @@ static const struct hda_fixup alc269_fixups[] = { - .type = HDA_FIXUP_FUNC, - .v.func = alc233_fixup_lenovo_line2_mic_hotkey, - }, -+ [ALC255_FIXUP_DELL_SPK_NOISE] = { -+ .type = HDA_FIXUP_FUNC, -+ .v.func = alc_fixup_disable_aamix, -+ .chained = true, -+ .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE -+ }, - }; - - static const struct snd_pci_quirk alc269_fixup_tbl[] = { -@@ -5410,6 +5417,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { - SND_PCI_QUIRK(0x1028, 0x06df, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK), - SND_PCI_QUIRK(0x1028, 0x06e0, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK), - SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 
13", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE), -+ SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE), - SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), - SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), - SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2), -diff --git a/tools/hv/hv_vss_daemon.c b/tools/hv/hv_vss_daemon.c -index 96234b6..5d51d6f 100644 ---- a/tools/hv/hv_vss_daemon.c -+++ b/tools/hv/hv_vss_daemon.c -@@ -254,7 +254,7 @@ int main(int argc, char *argv[]) - syslog(LOG_ERR, "Illegal op:%d\n", op); - } - vss_msg->error = error; -- len = write(vss_fd, &error, sizeof(struct hv_vss_msg)); -+ len = write(vss_fd, vss_msg, sizeof(struct hv_vss_msg)); - if (len != sizeof(struct hv_vss_msg)) { - syslog(LOG_ERR, "write failed; error: %d %s", errno, - strerror(errno)); -diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c -index 2d9d830..4a3a72c 100644 ---- a/tools/perf/util/stat.c -+++ b/tools/perf/util/stat.c -@@ -310,7 +310,6 @@ int perf_stat_process_counter(struct perf_stat_config *config, - int i, ret; - - aggr->val = aggr->ena = aggr->run = 0; -- init_stats(ps->res_stats); - - if (counter->per_pkg) - zero_per_pkg(counter); -diff --git a/tools/testing/selftests/efivarfs/efivarfs.sh b/tools/testing/selftests/efivarfs/efivarfs.sh -index 77edcdc..0572784 100755 ---- a/tools/testing/selftests/efivarfs/efivarfs.sh -+++ b/tools/testing/selftests/efivarfs/efivarfs.sh -@@ -88,7 +88,11 @@ test_delete() - exit 1 - fi - -- rm $file -+ rm $file 2>/dev/null -+ if [ $? -ne 0 ]; then -+ chattr -i $file -+ rm $file -+ fi - - if [ -e $file ]; then - echo "$file couldn't be deleted" >&2 -@@ -111,6 +115,7 @@ test_zero_size_delete() - exit 1 - fi - -+ chattr -i $file - printf "$attrs" > $file - - if [ -e $file ]; then -@@ -141,7 +146,11 @@ test_valid_filenames() - echo "$file could not be created" >&2 - ret=1 - else -- rm $file -+ rm $file 2>/dev/null -+ if [ $? -ne 0 ]; then -+ chattr -i $file -+ rm $file -+ fi - fi - done - -@@ -174,7 +183,11 @@ test_invalid_filenames() - - if [ -e $file ]; then - echo "Creating $file should have failed" >&2 -- rm $file -+ rm $file 2>/dev/null -+ if [ $? 
-ne 0 ]; then -+ chattr -i $file -+ rm $file -+ fi - ret=1 - fi - done -diff --git a/tools/testing/selftests/efivarfs/open-unlink.c b/tools/testing/selftests/efivarfs/open-unlink.c -index 8c07644..4af74f7 100644 ---- a/tools/testing/selftests/efivarfs/open-unlink.c -+++ b/tools/testing/selftests/efivarfs/open-unlink.c -@@ -1,10 +1,68 @@ -+#include <errno.h> - #include <stdio.h> - #include <stdint.h> - #include <stdlib.h> - #include <unistd.h> -+#include <sys/ioctl.h> - #include <sys/types.h> - #include <sys/stat.h> - #include <fcntl.h> -+#include <linux/fs.h> -+ -+static int set_immutable(const char *path, int immutable) -+{ -+ unsigned int flags; -+ int fd; -+ int rc; -+ int error; -+ -+ fd = open(path, O_RDONLY); -+ if (fd < 0) -+ return fd; -+ -+ rc = ioctl(fd, FS_IOC_GETFLAGS, &flags); -+ if (rc < 0) { -+ error = errno; -+ close(fd); -+ errno = error; -+ return rc; -+ } -+ -+ if (immutable) -+ flags |= FS_IMMUTABLE_FL; -+ else -+ flags &= ~FS_IMMUTABLE_FL; -+ -+ rc = ioctl(fd, FS_IOC_SETFLAGS, &flags); -+ error = errno; -+ close(fd); -+ errno = error; -+ return rc; -+} -+ -+static int get_immutable(const char *path) -+{ -+ unsigned int flags; -+ int fd; -+ int rc; -+ int error; -+ -+ fd = open(path, O_RDONLY); -+ if (fd < 0) -+ return fd; -+ -+ rc = ioctl(fd, FS_IOC_GETFLAGS, &flags); -+ if (rc < 0) { -+ error = errno; -+ close(fd); -+ errno = error; -+ return rc; -+ } -+ close(fd); -+ if (flags & FS_IMMUTABLE_FL) -+ return 1; -+ return 0; -+} - - int main(int argc, char **argv) - { -@@ -27,7 +85,7 @@ int main(int argc, char **argv) - buf[4] = 0; - - /* create a test variable */ -- fd = open(path, O_WRONLY | O_CREAT); -+ fd = open(path, O_WRONLY | O_CREAT, 0600); - if (fd < 0) { - perror("open(O_WRONLY)"); - return EXIT_FAILURE; -@@ -41,6 +99,18 @@ int main(int argc, char **argv) - - close(fd); - -+ rc = get_immutable(path); -+ if (rc < 0) { -+ perror("ioctl(FS_IOC_GETFLAGS)"); -+ return EXIT_FAILURE; -+ } else if (rc) { -+ rc = set_immutable(path, 0); -+ if (rc < 0) { -+ perror("ioctl(FS_IOC_SETFLAGS)"); -+ return EXIT_FAILURE; -+ } -+ } -+ - fd = open(path, O_RDONLY); - if (fd < 0) { - perror("open"); -diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c -index 7a2f449..5d10f10 100644 ---- a/virt/kvm/arm/vgic.c -+++ b/virt/kvm/arm/vgic.c -@@ -1875,8 +1875,8 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu) - static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs) - { - struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; -- -- int sz = (nr_irqs - VGIC_NR_PRIVATE_IRQS) / 8; -+ int nr_longs = BITS_TO_LONGS(nr_irqs - VGIC_NR_PRIVATE_IRQS); -+ int sz = nr_longs * sizeof(unsigned long); - vgic_cpu->pending_shared = kzalloc(sz, GFP_KERNEL); - vgic_cpu->active_shared = kzalloc(sz, GFP_KERNEL); - vgic_cpu->pend_act_shared = kzalloc(sz, GFP_KERNEL); -diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c -index 77d42be..4f70d12 100644 ---- a/virt/kvm/async_pf.c -+++ b/virt/kvm/async_pf.c -@@ -173,7 +173,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva, - * do alloc nowait since if we are going to sleep anyway we - * may as well sleep faulting in page - */ -- work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT); -+ work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT | __GFP_NOWARN); - if (!work) - return 0; - diff --git a/4.4.4/0000_README b/4.4.5/0000_README index 5fcf793..e92303f 100644 --- a/4.4.4/0000_README +++ b/4.4.5/0000_README @@ -2,11 +2,11 @@ README ----------------------------------------------------------------------------- Individual 
Patch Descriptions: ----------------------------------------------------------------------------- -Patch: 1003_linux-4.4.4.patch +Patch: 1004_linux-4.4.5.patch From: https://www.kernel.org/ -Desc: Linux 4.4.4 +Desc: Linux 4.4.5 -Patch: 4420_grsecurity-3.1-4.4.4-201603032158.patch +Patch: 4420_grsecurity-3.1-4.4.5-201603102309.patch From: http://www.grsecurity.net Desc: hardened-sources base patch from upstream grsecurity diff --git a/4.4.5/1004_linux-4.4.5.patch b/4.4.5/1004_linux-4.4.5.patch new file mode 100644 index 0000000..82135c4 --- /dev/null +++ b/4.4.5/1004_linux-4.4.5.patch @@ -0,0 +1,3396 @@ +diff --git a/Makefile b/Makefile +index 344bc6f..d13322a 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 4 +-SUBLEVEL = 4 ++SUBLEVEL = 5 + EXTRAVERSION = + NAME = Blurry Fish Butt + +diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c +index 96e935b..3705fc2 100644 +--- a/arch/arm/kvm/guest.c ++++ b/arch/arm/kvm/guest.c +@@ -155,7 +155,7 @@ static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) + u64 val; + + val = kvm_arm_timer_get_reg(vcpu, reg->id); +- return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)); ++ return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0; + } + + static unsigned long num_core_regs(void) +diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h +index 63f52b5..fc9f7ef 100644 +--- a/arch/arm64/include/asm/pgtable.h ++++ b/arch/arm64/include/asm/pgtable.h +@@ -34,13 +34,13 @@ + /* + * VMALLOC and SPARSEMEM_VMEMMAP ranges. + * +- * VMEMAP_SIZE: allows the whole VA space to be covered by a struct page array ++ * VMEMAP_SIZE: allows the whole linear region to be covered by a struct page array + * (rounded up to PUD_SIZE). + * VMALLOC_START: beginning of the kernel VA space + * VMALLOC_END: extends to the available space below vmmemmap, PCI I/O space, + * fixed mappings and modules + */ +-#define VMEMMAP_SIZE ALIGN((1UL << (VA_BITS - PAGE_SHIFT)) * sizeof(struct page), PUD_SIZE) ++#define VMEMMAP_SIZE ALIGN((1UL << (VA_BITS - PAGE_SHIFT - 1)) * sizeof(struct page), PUD_SIZE) + + #ifndef CONFIG_KASAN + #define VMALLOC_START (VA_START) +@@ -51,7 +51,8 @@ + + #define VMALLOC_END (PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K) + +-#define vmemmap ((struct page *)(VMALLOC_END + SZ_64K)) ++#define VMEMMAP_START (VMALLOC_END + SZ_64K) ++#define vmemmap ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT)) + + #define FIRST_USER_ADDRESS 0UL + +diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c +index d250160..3039f08 100644 +--- a/arch/arm64/kvm/guest.c ++++ b/arch/arm64/kvm/guest.c +@@ -186,7 +186,7 @@ static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) + u64 val; + + val = kvm_arm_timer_get_reg(vcpu, reg->id); +- return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)); ++ return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? 
-EFAULT : 0; + } + + /** +diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c +index 17bf39a..4cb98aa 100644 +--- a/arch/arm64/mm/init.c ++++ b/arch/arm64/mm/init.c +@@ -319,8 +319,8 @@ void __init mem_init(void) + #endif + MLG(VMALLOC_START, VMALLOC_END), + #ifdef CONFIG_SPARSEMEM_VMEMMAP +- MLG((unsigned long)vmemmap, +- (unsigned long)vmemmap + VMEMMAP_SIZE), ++ MLG(VMEMMAP_START, ++ VMEMMAP_START + VMEMMAP_SIZE), + MLM((unsigned long)virt_to_page(PAGE_OFFSET), + (unsigned long)virt_to_page(high_memory)), + #endif +diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c +index 886cb19..ca9a810 100644 +--- a/arch/mips/kernel/traps.c ++++ b/arch/mips/kernel/traps.c +@@ -690,15 +690,15 @@ static int simulate_sync(struct pt_regs *regs, unsigned int opcode) + asmlinkage void do_ov(struct pt_regs *regs) + { + enum ctx_state prev_state; +- siginfo_t info; ++ siginfo_t info = { ++ .si_signo = SIGFPE, ++ .si_code = FPE_INTOVF, ++ .si_addr = (void __user *)regs->cp0_epc, ++ }; + + prev_state = exception_enter(); + die_if_kernel("Integer overflow", regs); + +- info.si_code = FPE_INTOVF; +- info.si_signo = SIGFPE; +- info.si_errno = 0; +- info.si_addr = (void __user *) regs->cp0_epc; + force_sig_info(SIGFPE, &info, current); + exception_exit(prev_state); + } +@@ -874,7 +874,7 @@ out: + void do_trap_or_bp(struct pt_regs *regs, unsigned int code, + const char *str) + { +- siginfo_t info; ++ siginfo_t info = { 0 }; + char b[40]; + + #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP +@@ -903,7 +903,6 @@ void do_trap_or_bp(struct pt_regs *regs, unsigned int code, + else + info.si_code = FPE_INTOVF; + info.si_signo = SIGFPE; +- info.si_errno = 0; + info.si_addr = (void __user *) regs->cp0_epc; + force_sig_info(SIGFPE, &info, current); + break; +diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c +index b9b803f..2683d04 100644 +--- a/arch/mips/kvm/mips.c ++++ b/arch/mips/kvm/mips.c +@@ -702,7 +702,7 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu, + } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) { + void __user *uaddr = (void __user *)(long)reg->addr; + +- return copy_to_user(uaddr, vs, 16); ++ return copy_to_user(uaddr, vs, 16) ? -EFAULT : 0; + } else { + return -EINVAL; + } +@@ -732,7 +732,7 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu, + } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) { + void __user *uaddr = (void __user *)(long)reg->addr; + +- return copy_from_user(vs, uaddr, 16); ++ return copy_from_user(vs, uaddr, 16) ? 
-EFAULT : 0; + } else { + return -EINVAL; + } +diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c +index 3bd0597..ddb8154 100644 +--- a/arch/mips/mm/sc-mips.c ++++ b/arch/mips/mm/sc-mips.c +@@ -164,11 +164,13 @@ static int __init mips_sc_probe_cm3(void) + + sets = cfg & CM_GCR_L2_CONFIG_SET_SIZE_MSK; + sets >>= CM_GCR_L2_CONFIG_SET_SIZE_SHF; +- c->scache.sets = 64 << sets; ++ if (sets) ++ c->scache.sets = 64 << sets; + + line_sz = cfg & CM_GCR_L2_CONFIG_LINE_SIZE_MSK; + line_sz >>= CM_GCR_L2_CONFIG_LINE_SIZE_SHF; +- c->scache.linesz = 2 << line_sz; ++ if (line_sz) ++ c->scache.linesz = 2 << line_sz; + + assoc = cfg & CM_GCR_L2_CONFIG_ASSOC_MSK; + assoc >>= CM_GCR_L2_CONFIG_ASSOC_SHF; +@@ -176,9 +178,12 @@ static int __init mips_sc_probe_cm3(void) + c->scache.waysize = c->scache.sets * c->scache.linesz; + c->scache.waybit = __ffs(c->scache.waysize); + +- c->scache.flags &= ~MIPS_CACHE_NOT_PRESENT; ++ if (c->scache.linesz) { ++ c->scache.flags &= ~MIPS_CACHE_NOT_PRESENT; ++ return 1; ++ } + +- return 1; ++ return 0; + } + + void __weak platform_early_l2_init(void) +diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c +index 9585c81..ce0b2b4 100644 +--- a/arch/parisc/kernel/ptrace.c ++++ b/arch/parisc/kernel/ptrace.c +@@ -269,14 +269,19 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, + + long do_syscall_trace_enter(struct pt_regs *regs) + { +- long ret = 0; +- + /* Do the secure computing check first. */ + secure_computing_strict(regs->gr[20]); + + if (test_thread_flag(TIF_SYSCALL_TRACE) && +- tracehook_report_syscall_entry(regs)) +- ret = -1L; ++ tracehook_report_syscall_entry(regs)) { ++ /* ++ * Tracing decided this syscall should not happen or the ++ * debugger stored an invalid system call number. Skip ++ * the system call and the system call restart handling. ++ */ ++ regs->gr[20] = -1UL; ++ goto out; ++ } + + #ifdef CONFIG_64BIT + if (!is_compat_task()) +@@ -290,7 +295,8 @@ long do_syscall_trace_enter(struct pt_regs *regs) + regs->gr[24] & 0xffffffff, + regs->gr[23] & 0xffffffff); + +- return ret ? : regs->gr[20]; ++out: ++ return regs->gr[20]; + } + + void do_syscall_trace_exit(struct pt_regs *regs) +diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S +index 3fbd725..fbafa0d 100644 +--- a/arch/parisc/kernel/syscall.S ++++ b/arch/parisc/kernel/syscall.S +@@ -343,7 +343,7 @@ tracesys_next: + #endif + + comiclr,>>= __NR_Linux_syscalls, %r20, %r0 +- b,n .Lsyscall_nosys ++ b,n .Ltracesys_nosys + + LDREGX %r20(%r19), %r19 + +@@ -359,6 +359,9 @@ tracesys_next: + be 0(%sr7,%r19) + ldo R%tracesys_exit(%r2),%r2 + ++.Ltracesys_nosys: ++ ldo -ENOSYS(%r0),%r28 /* set errno */ ++ + /* Do *not* call this function on the gateway page, because it + makes a direct call to syscall_trace. */ + +diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c +index d1daead..adb3eaf 100644 +--- a/arch/x86/kernel/acpi/sleep.c ++++ b/arch/x86/kernel/acpi/sleep.c +@@ -16,6 +16,7 @@ + #include <asm/cacheflush.h> + #include <asm/realmode.h> + ++#include <linux/ftrace.h> + #include "../../realmode/rm/wakeup.h" + #include "sleep.h" + +@@ -107,7 +108,13 @@ int x86_acpi_suspend_lowlevel(void) + saved_magic = 0x123456789abcdef0L; + #endif /* CONFIG_64BIT */ + ++ /* ++ * Pause/unpause graph tracing around do_suspend_lowlevel as it has ++ * inconsistent call/return info after it jumps to the wakeup vector. 
++ */ ++ pause_graph_tracing(); + do_suspend_lowlevel(); ++ unpause_graph_tracing(); + return 0; + } + +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c +index 10e7693..5fd846c 100644 +--- a/arch/x86/kvm/vmx.c ++++ b/arch/x86/kvm/vmx.c +@@ -595,6 +595,8 @@ struct vcpu_vmx { + /* Support for PML */ + #define PML_ENTITY_NUM 512 + struct page *pml_pg; ++ ++ u64 current_tsc_ratio; + }; + + enum segment_cache_field { +@@ -2062,14 +2064,16 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu) + rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp); + vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */ + +- /* Setup TSC multiplier */ +- if (cpu_has_vmx_tsc_scaling()) +- vmcs_write64(TSC_MULTIPLIER, +- vcpu->arch.tsc_scaling_ratio); +- + vmx->loaded_vmcs->cpu = cpu; + } + ++ /* Setup TSC multiplier */ ++ if (kvm_has_tsc_control && ++ vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio) { ++ vmx->current_tsc_ratio = vcpu->arch.tsc_scaling_ratio; ++ vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio); ++ } ++ + vmx_vcpu_pi_load(vcpu, cpu); + } + +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c +index 6ef3856..d294502 100644 +--- a/arch/x86/kvm/x86.c ++++ b/arch/x86/kvm/x86.c +@@ -2736,7 +2736,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) + } + + kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); +- vcpu->arch.switch_db_regs |= KVM_DEBUGREG_RELOAD; + } + + void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) +@@ -6545,12 +6544,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) + * KVM_DEBUGREG_WONT_EXIT again. + */ + if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) { +- int i; +- + WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP); + kvm_x86_ops->sync_dirty_debug_regs(vcpu); +- for (i = 0; i < KVM_NR_DB_REGS; i++) +- vcpu->arch.eff_db[i] = vcpu->arch.db[i]; ++ kvm_update_dr0123(vcpu); ++ kvm_update_dr6(vcpu); ++ kvm_update_dr7(vcpu); ++ vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD; + } + + /* +diff --git a/block/blk-settings.c b/block/blk-settings.c +index dd49735..c7bb666 100644 +--- a/block/blk-settings.c ++++ b/block/blk-settings.c +@@ -91,8 +91,8 @@ void blk_set_default_limits(struct queue_limits *lim) + lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK; + lim->virt_boundary_mask = 0; + lim->max_segment_size = BLK_MAX_SEGMENT_SIZE; +- lim->max_sectors = lim->max_dev_sectors = lim->max_hw_sectors = +- BLK_SAFE_MAX_SECTORS; ++ lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS; ++ lim->max_dev_sectors = 0; + lim->chunk_sectors = 0; + lim->max_write_same_sectors = 0; + lim->max_discard_sectors = 0; +diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c +index 99921aa..60a1583 100644 +--- a/drivers/ata/ahci.c ++++ b/drivers/ata/ahci.c +@@ -367,15 +367,21 @@ static const struct pci_device_id ahci_pci_tbl[] = { + { PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */ + { PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */ + { PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* Lewisburg RAID*/ ++ { PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Lewisburg AHCI*/ + { PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* Lewisburg RAID*/ ++ { PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Lewisburg RAID*/ + { PCI_VDEVICE(INTEL, 0xa182), board_ahci }, /* Lewisburg AHCI*/ + { PCI_VDEVICE(INTEL, 0xa184), board_ahci }, /* Lewisburg RAID*/ + { PCI_VDEVICE(INTEL, 0xa186), board_ahci }, /* Lewisburg RAID*/ + { PCI_VDEVICE(INTEL, 0xa18e), board_ahci }, /* Lewisburg RAID*/ ++ { PCI_VDEVICE(INTEL, 0xa1d2), board_ahci }, /* Lewisburg RAID*/ 
++ { PCI_VDEVICE(INTEL, 0xa1d6), board_ahci }, /* Lewisburg RAID*/ + { PCI_VDEVICE(INTEL, 0xa202), board_ahci }, /* Lewisburg AHCI*/ + { PCI_VDEVICE(INTEL, 0xa204), board_ahci }, /* Lewisburg RAID*/ + { PCI_VDEVICE(INTEL, 0xa206), board_ahci }, /* Lewisburg RAID*/ + { PCI_VDEVICE(INTEL, 0xa20e), board_ahci }, /* Lewisburg RAID*/ ++ { PCI_VDEVICE(INTEL, 0xa252), board_ahci }, /* Lewisburg RAID*/ ++ { PCI_VDEVICE(INTEL, 0xa256), board_ahci }, /* Lewisburg RAID*/ + + /* JMicron 360/1/3/5/6, match class to avoid IDE function */ + { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, +diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c +index 1f225cc..998c6a8 100644 +--- a/drivers/ata/libahci.c ++++ b/drivers/ata/libahci.c +@@ -1142,8 +1142,7 @@ static void ahci_port_init(struct device *dev, struct ata_port *ap, + + /* mark esata ports */ + tmp = readl(port_mmio + PORT_CMD); +- if ((tmp & PORT_CMD_HPCP) || +- ((tmp & PORT_CMD_ESP) && (hpriv->cap & HOST_CAP_SXS))) ++ if ((tmp & PORT_CMD_ESP) && (hpriv->cap & HOST_CAP_SXS)) + ap->pflags |= ATA_PFLAG_EXTERNAL; + } + +diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c +index 7e959f9..e417e1a 100644 +--- a/drivers/ata/libata-scsi.c ++++ b/drivers/ata/libata-scsi.c +@@ -675,19 +675,18 @@ static int ata_ioc32(struct ata_port *ap) + int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *scsidev, + int cmd, void __user *arg) + { +- int val = -EINVAL, rc = -EINVAL; ++ unsigned long val; ++ int rc = -EINVAL; + unsigned long flags; + + switch (cmd) { +- case ATA_IOC_GET_IO32: ++ case HDIO_GET_32BIT: + spin_lock_irqsave(ap->lock, flags); + val = ata_ioc32(ap); + spin_unlock_irqrestore(ap->lock, flags); +- if (copy_to_user(arg, &val, 1)) +- return -EFAULT; +- return 0; ++ return put_user(val, (unsigned long __user *)arg); + +- case ATA_IOC_SET_IO32: ++ case HDIO_SET_32BIT: + val = (unsigned long) arg; + rc = 0; + spin_lock_irqsave(ap->lock, flags); +diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c +index 12fe0f3..c8b6a78 100644 +--- a/drivers/ata/pata_rb532_cf.c ++++ b/drivers/ata/pata_rb532_cf.c +@@ -32,6 +32,8 @@ + #include <linux/libata.h> + #include <scsi/scsi_host.h> + ++#include <asm/mach-rc32434/rb.h> ++ + #define DRV_NAME "pata-rb532-cf" + #define DRV_VERSION "0.1.0" + #define DRV_DESC "PATA driver for RouterBOARD 532 Compact Flash" +@@ -107,6 +109,7 @@ static int rb532_pata_driver_probe(struct platform_device *pdev) + int gpio; + struct resource *res; + struct ata_host *ah; ++ struct cf_device *pdata; + struct rb532_cf_info *info; + int ret; + +@@ -122,7 +125,13 @@ static int rb532_pata_driver_probe(struct platform_device *pdev) + return -ENOENT; + } + +- gpio = irq_to_gpio(irq); ++ pdata = dev_get_platdata(&pdev->dev); ++ if (!pdata) { ++ dev_err(&pdev->dev, "no platform data specified\n"); ++ return -EINVAL; ++ } ++ ++ gpio = pdata->gpio_pin; + if (gpio < 0) { + dev_err(&pdev->dev, "no GPIO found for irq%d\n", irq); + return -ENOENT; +diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c +index fc4156a..a59061e 100644 +--- a/drivers/dma/pxa_dma.c ++++ b/drivers/dma/pxa_dma.c +@@ -583,6 +583,8 @@ static void set_updater_desc(struct pxad_desc_sw *sw_desc, + (PXA_DCMD_LENGTH & sizeof(u32)); + if (flags & DMA_PREP_INTERRUPT) + updater->dcmd |= PXA_DCMD_ENDIRQEN; ++ if (sw_desc->cyclic) ++ sw_desc->hw_desc[sw_desc->nb_desc - 2]->ddadr = sw_desc->first; + } + + static bool is_desc_completed(struct virt_dma_desc *vd) +@@ -673,6 +675,10 @@ static irqreturn_t pxad_chan_handler(int irq, void 
*dev_id) + dev_dbg(&chan->vc.chan.dev->device, + "%s(): checking txd %p[%x]: completed=%d\n", + __func__, vd, vd->tx.cookie, is_desc_completed(vd)); ++ if (to_pxad_sw_desc(vd)->cyclic) { ++ vchan_cyclic_callback(vd); ++ break; ++ } + if (is_desc_completed(vd)) { + list_del(&vd->node); + vchan_cookie_complete(vd); +@@ -1080,7 +1086,7 @@ pxad_prep_dma_cyclic(struct dma_chan *dchan, + return NULL; + + pxad_get_config(chan, dir, &dcmd, &dsadr, &dtadr); +- dcmd |= PXA_DCMD_ENDIRQEN | (PXA_DCMD_LENGTH | period_len); ++ dcmd |= PXA_DCMD_ENDIRQEN | (PXA_DCMD_LENGTH & period_len); + dev_dbg(&chan->vc.chan.dev->device, + "%s(): buf_addr=0x%lx len=%zu period=%zu dir=%d flags=%lx\n", + __func__, (unsigned long)buf_addr, len, period_len, dir, flags); +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +index 89c3dd6..119cdc2 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +@@ -77,7 +77,7 @@ void amdgpu_connector_hotplug(struct drm_connector *connector) + } else if (amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) { + /* Don't try to start link training before we + * have the dpcd */ +- if (!amdgpu_atombios_dp_get_dpcd(amdgpu_connector)) ++ if (amdgpu_atombios_dp_get_dpcd(amdgpu_connector)) + return; + + /* set it to OFF so that drm_helper_connector_dpms() +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +index a2a16ac..b8fbbd7 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +@@ -33,6 +33,7 @@ + #include <linux/slab.h> + #include <drm/drmP.h> + #include <drm/amdgpu_drm.h> ++#include <drm/drm_cache.h> + #include "amdgpu.h" + #include "amdgpu_trace.h" + +@@ -261,6 +262,13 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, + AMDGPU_GEM_DOMAIN_OA); + + bo->flags = flags; ++ ++ /* For architectures that don't support WC memory, ++ * mask out the WC flag from the BO ++ */ ++ if (!drm_arch_can_wc_memory()) ++ bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC; ++ + amdgpu_fill_placement_to_bo(bo, placement); + /* Kernel allocation are uninterruptible */ + r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type, +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +index 03fe251..7ae15fa 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +@@ -596,9 +596,6 @@ force: + /* update display watermarks based on new power state */ + amdgpu_display_bandwidth_update(adev); + +- adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs; +- adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count; +- + /* wait for the rings to drain */ + for (i = 0; i < AMDGPU_MAX_RINGS; i++) { + struct amdgpu_ring *ring = adev->rings[i]; +@@ -617,6 +614,9 @@ force: + /* update displays */ + amdgpu_dpm_display_configuration_changed(adev); + ++ adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs; ++ adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count; ++ + if (adev->pm.funcs->force_performance_level) { + if (adev->pm.dpm.thermal_active) { + enum amdgpu_dpm_forced_level level = adev->pm.dpm.forced_level; +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +index 39adbb6..8c5ec15 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +@@ -1248,7 +1248,7 @@ int 
amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) + { + const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE, + AMDGPU_VM_PTE_COUNT * 8); +- unsigned pd_size, pd_entries, pts_size; ++ unsigned pd_size, pd_entries; + int i, r; + + for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { +@@ -1266,8 +1266,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) + pd_entries = amdgpu_vm_num_pdes(adev); + + /* allocate page table array */ +- pts_size = pd_entries * sizeof(struct amdgpu_vm_pt); +- vm->page_tables = kzalloc(pts_size, GFP_KERNEL); ++ vm->page_tables = drm_calloc_large(pd_entries, sizeof(struct amdgpu_vm_pt)); + if (vm->page_tables == NULL) { + DRM_ERROR("Cannot allocate memory for page table array\n"); + return -ENOMEM; +@@ -1327,7 +1326,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) + + for (i = 0; i < amdgpu_vm_num_pdes(adev); i++) + amdgpu_bo_unref(&vm->page_tables[i].bo); +- kfree(vm->page_tables); ++ drm_free_large(vm->page_tables); + + amdgpu_bo_unref(&vm->page_directory); + fence_put(vm->page_directory_fence); +diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +index 72793f9..aa49154 100644 +--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c ++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +@@ -3628,6 +3628,19 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring, + unsigned vm_id, uint64_t pd_addr) + { + int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX); ++ uint32_t seq = ring->fence_drv.sync_seq; ++ uint64_t addr = ring->fence_drv.gpu_addr; ++ ++ amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); ++ amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */ ++ WAIT_REG_MEM_FUNCTION(3) | /* equal */ ++ WAIT_REG_MEM_ENGINE(usepfp))); /* pfp or me */ ++ amdgpu_ring_write(ring, addr & 0xfffffffc); ++ amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff); ++ amdgpu_ring_write(ring, seq); ++ amdgpu_ring_write(ring, 0xffffffff); ++ amdgpu_ring_write(ring, 4); /* poll interval */ ++ + if (usepfp) { + /* synce CE with ME to prevent CE fetch CEIB before context switch done */ + amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); +diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +index 4cb45f4..d105403 100644 +--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c ++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +@@ -4681,7 +4681,8 @@ static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring, + + amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); + amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */ +- WAIT_REG_MEM_FUNCTION(3))); /* equal */ ++ WAIT_REG_MEM_FUNCTION(3) | /* equal */ ++ WAIT_REG_MEM_ENGINE(usepfp))); /* pfp or me */ + amdgpu_ring_write(ring, addr & 0xfffffffc); + amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff); + amdgpu_ring_write(ring, seq); +diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c +index 7628eb4..3e9cbe3 100644 +--- a/drivers/gpu/drm/amd/amdgpu/vi.c ++++ b/drivers/gpu/drm/amd/amdgpu/vi.c +@@ -1082,10 +1082,10 @@ static const struct amdgpu_ip_block_version topaz_ip_blocks[] = + }, + { + .type = AMD_IP_BLOCK_TYPE_GMC, +- .major = 8, +- .minor = 0, ++ .major = 7, ++ .minor = 4, + .rev = 0, +- .funcs = &gmc_v8_0_ip_funcs, ++ .funcs = &gmc_v7_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_IH, +@@ -1129,10 +1129,10 @@ static const struct amdgpu_ip_block_version tonga_ip_blocks[] = + }, + { + .type = AMD_IP_BLOCK_TYPE_GMC, +- .major = 7, +- .minor = 4, 
++ .major = 8, ++ .minor = 0, + .rev = 0, +- .funcs = &gmc_v7_0_ip_funcs, ++ .funcs = &gmc_v8_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_IH, +diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c +index 541a610..e0b4586 100644 +--- a/drivers/gpu/drm/ast/ast_main.c ++++ b/drivers/gpu/drm/ast/ast_main.c +@@ -227,7 +227,7 @@ static int ast_get_dram_info(struct drm_device *dev) + } while (ast_read32(ast, 0x10000) != 0x01); + data = ast_read32(ast, 0x10004); + +- if (data & 0x400) ++ if (data & 0x40) + ast->dram_bus_width = 16; + else + ast->dram_bus_width = 32; +diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c +index 760e0ce..a6ad938 100644 +--- a/drivers/gpu/drm/i915/i915_drv.c ++++ b/drivers/gpu/drm/i915/i915_drv.c +@@ -531,7 +531,10 @@ void intel_detect_pch(struct drm_device *dev) + dev_priv->pch_type = PCH_SPT; + DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n"); + WARN_ON(!IS_SKYLAKE(dev)); +- } else if (id == INTEL_PCH_P2X_DEVICE_ID_TYPE) { ++ } else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) || ++ ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) && ++ pch->subsystem_vendor == 0x1af4 && ++ pch->subsystem_device == 0x1100)) { + dev_priv->pch_type = intel_virt_detect_pch(dev); + } else + continue; +diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h +index f4af19a..d3ce4da 100644 +--- a/drivers/gpu/drm/i915/i915_drv.h ++++ b/drivers/gpu/drm/i915/i915_drv.h +@@ -2614,6 +2614,7 @@ struct drm_i915_cmd_table { + #define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100 + #define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00 + #define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100 ++#define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */ + + #define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type) + #define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT) +diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c +index 2081a60..1fa8121 100644 +--- a/drivers/gpu/drm/radeon/radeon_pm.c ++++ b/drivers/gpu/drm/radeon/radeon_pm.c +@@ -1076,10 +1076,6 @@ force: + /* update display watermarks based on new power state */ + radeon_bandwidth_update(rdev); + +- rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs; +- rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count; +- rdev->pm.dpm.single_display = single_display; +- + /* wait for the rings to drain */ + for (i = 0; i < RADEON_NUM_RINGS; i++) { + struct radeon_ring *ring = &rdev->ring[i]; +@@ -1098,6 +1094,10 @@ force: + /* update displays */ + radeon_dpm_display_configuration_changed(rdev); + ++ rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs; ++ rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count; ++ rdev->pm.dpm.single_display = single_display; ++ + if (rdev->asic->dpm.force_performance_level) { + if (rdev->pm.dpm.thermal_active) { + enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level; +diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c +index 8e9637e..81115ab 100644 +--- a/drivers/i2c/busses/i2c-brcmstb.c ++++ b/drivers/i2c/busses/i2c-brcmstb.c +@@ -562,8 +562,7 @@ static int brcmstb_i2c_probe(struct platform_device *pdev) + if (!dev) + return -ENOMEM; + +- dev->bsc_regmap = devm_kzalloc(&pdev->dev, sizeof(struct bsc_regs *), +- GFP_KERNEL); ++ dev->bsc_regmap = devm_kzalloc(&pdev->dev, sizeof(*dev->bsc_regmap), GFP_KERNEL); + if (!dev->bsc_regmap) + return -ENOMEM; + +diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c +index 
013bdff..bf4959f 100644 +--- a/drivers/iommu/amd_iommu_init.c ++++ b/drivers/iommu/amd_iommu_init.c +@@ -228,6 +228,10 @@ static int amd_iommu_enable_interrupts(void); + static int __init iommu_go_to_state(enum iommu_init_state state); + static void init_device_table_dma(void); + ++static int iommu_pc_get_set_reg_val(struct amd_iommu *iommu, ++ u8 bank, u8 cntr, u8 fxn, ++ u64 *value, bool is_write); ++ + static inline void update_last_devid(u16 devid) + { + if (devid > amd_iommu_last_bdf) +@@ -1016,6 +1020,34 @@ static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu) + } + + /* ++ * Family15h Model 30h-3fh (IOMMU Mishandles ATS Write Permission) ++ * Workaround: ++ * BIOS should enable ATS write permission check by setting ++ * L2_DEBUG_3[AtsIgnoreIWDis](D0F2xF4_x47[0]) = 1b ++ */ ++static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu) ++{ ++ u32 value; ++ ++ if ((boot_cpu_data.x86 != 0x15) || ++ (boot_cpu_data.x86_model < 0x30) || ++ (boot_cpu_data.x86_model > 0x3f)) ++ return; ++ ++ /* Test L2_DEBUG_3[AtsIgnoreIWDis] == 1 */ ++ value = iommu_read_l2(iommu, 0x47); ++ ++ if (value & BIT(0)) ++ return; ++ ++ /* Set L2_DEBUG_3[AtsIgnoreIWDis] = 1 */ ++ iommu_write_l2(iommu, 0x47, value | BIT(0)); ++ ++ pr_info("AMD-Vi: Applying ATS write check workaround for IOMMU at %s\n", ++ dev_name(&iommu->dev->dev)); ++} ++ ++/* + * This function clues the initialization function for one IOMMU + * together and also allocates the command buffer and programs the + * hardware. It does NOT enable the IOMMU. This is done afterwards. +@@ -1142,8 +1174,8 @@ static void init_iommu_perf_ctr(struct amd_iommu *iommu) + amd_iommu_pc_present = true; + + /* Check if the performance counters can be written to */ +- if ((0 != amd_iommu_pc_get_set_reg_val(0, 0, 0, 0, &val, true)) || +- (0 != amd_iommu_pc_get_set_reg_val(0, 0, 0, 0, &val2, false)) || ++ if ((0 != iommu_pc_get_set_reg_val(iommu, 0, 0, 0, &val, true)) || ++ (0 != iommu_pc_get_set_reg_val(iommu, 0, 0, 0, &val2, false)) || + (val != val2)) { + pr_err("AMD-Vi: Unable to write to IOMMU perf counter.\n"); + amd_iommu_pc_present = false; +@@ -1284,6 +1316,7 @@ static int iommu_init_pci(struct amd_iommu *iommu) + } + + amd_iommu_erratum_746_workaround(iommu); ++ amd_iommu_ats_write_check_workaround(iommu); + + iommu->iommu_dev = iommu_device_create(&iommu->dev->dev, iommu, + amd_iommu_groups, "ivhd%d", +@@ -2283,22 +2316,15 @@ u8 amd_iommu_pc_get_max_counters(u16 devid) + } + EXPORT_SYMBOL(amd_iommu_pc_get_max_counters); + +-int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn, ++static int iommu_pc_get_set_reg_val(struct amd_iommu *iommu, ++ u8 bank, u8 cntr, u8 fxn, + u64 *value, bool is_write) + { +- struct amd_iommu *iommu; + u32 offset; + u32 max_offset_lim; + +- /* Make sure the IOMMU PC resource is available */ +- if (!amd_iommu_pc_present) +- return -ENODEV; +- +- /* Locate the iommu associated with the device ID */ +- iommu = amd_iommu_rlookup_table[devid]; +- + /* Check for valid iommu and pc register indexing */ +- if (WARN_ON((iommu == NULL) || (fxn > 0x28) || (fxn & 7))) ++ if (WARN_ON((fxn > 0x28) || (fxn & 7))) + return -ENODEV; + + offset = (u32)(((0x40|bank) << 12) | (cntr << 8) | fxn); +@@ -2322,3 +2348,16 @@ int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn, + return 0; + } + EXPORT_SYMBOL(amd_iommu_pc_get_set_reg_val); ++ ++int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn, ++ u64 *value, bool is_write) ++{ ++ struct amd_iommu *iommu = 
amd_iommu_rlookup_table[devid]; ++ ++ /* Make sure the IOMMU PC resource is available */ ++ if (!amd_iommu_pc_present || iommu == NULL) ++ return -ENODEV; ++ ++ return iommu_pc_get_set_reg_val(iommu, bank, cntr, fxn, ++ value, is_write); ++} +diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c +index 55a19e4..3821c47 100644 +--- a/drivers/iommu/dmar.c ++++ b/drivers/iommu/dmar.c +@@ -329,7 +329,8 @@ static int dmar_pci_bus_notifier(struct notifier_block *nb, + /* Only care about add/remove events for physical functions */ + if (pdev->is_virtfn) + return NOTIFY_DONE; +- if (action != BUS_NOTIFY_ADD_DEVICE && action != BUS_NOTIFY_DEL_DEVICE) ++ if (action != BUS_NOTIFY_ADD_DEVICE && ++ action != BUS_NOTIFY_REMOVED_DEVICE) + return NOTIFY_DONE; + + info = dmar_alloc_pci_notify_info(pdev, action); +@@ -339,7 +340,7 @@ static int dmar_pci_bus_notifier(struct notifier_block *nb, + down_write(&dmar_global_lock); + if (action == BUS_NOTIFY_ADD_DEVICE) + dmar_pci_bus_add_dev(info); +- else if (action == BUS_NOTIFY_DEL_DEVICE) ++ else if (action == BUS_NOTIFY_REMOVED_DEVICE) + dmar_pci_bus_del_dev(info); + up_write(&dmar_global_lock); + +diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c +index 986a53e..a2e1b7f 100644 +--- a/drivers/iommu/intel-iommu.c ++++ b/drivers/iommu/intel-iommu.c +@@ -4367,7 +4367,7 @@ int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info) + rmrru->devices_cnt); + if(ret < 0) + return ret; +- } else if (info->event == BUS_NOTIFY_DEL_DEVICE) { ++ } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) { + dmar_remove_dev_scope(info, rmrr->segment, + rmrru->devices, rmrru->devices_cnt); + } +@@ -4387,7 +4387,7 @@ int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info) + break; + else if(ret < 0) + return ret; +- } else if (info->event == BUS_NOTIFY_DEL_DEVICE) { ++ } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) { + if (dmar_remove_dev_scope(info, atsr->segment, + atsru->devices, atsru->devices_cnt)) + break; +diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c +index 5631ec0..01adcdc 100644 +--- a/drivers/media/i2c/adv7604.c ++++ b/drivers/media/i2c/adv7604.c +@@ -1960,10 +1960,9 @@ static int adv76xx_isr(struct v4l2_subdev *sd, u32 status, bool *handled) + } + + /* tx 5v detect */ +- tx_5v = io_read(sd, 0x70) & info->cable_det_mask; ++ tx_5v = irq_reg_0x70 & info->cable_det_mask; + if (tx_5v) { + v4l2_dbg(1, debug, sd, "%s: tx_5v: 0x%x\n", __func__, tx_5v); +- io_write(sd, 0x71, tx_5v); + adv76xx_s_detect_tx_5v_ctrl(sd); + if (handled) + *handled = true; +diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c +index 85761d7..be2c8e24 100644 +--- a/drivers/misc/cxl/pci.c ++++ b/drivers/misc/cxl/pci.c +@@ -414,7 +414,7 @@ static int cxl_setup_psl_timebase(struct cxl *adapter, struct pci_dev *dev) + delta = mftb() - psl_tb; + if (delta < 0) + delta = -delta; +- } while (cputime_to_usecs(delta) > 16); ++ } while (tb_to_ns(delta) > 16000); + + return 0; + } +diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c +index 2a1b6e0..0134ba3 100644 +--- a/drivers/mtd/ubi/upd.c ++++ b/drivers/mtd/ubi/upd.c +@@ -193,7 +193,7 @@ int ubi_start_leb_change(struct ubi_device *ubi, struct ubi_volume *vol, + vol->changing_leb = 1; + vol->ch_lnum = req->lnum; + +- vol->upd_buf = vmalloc(req->bytes); ++ vol->upd_buf = vmalloc(ALIGN((int)req->bytes, ubi->min_io_size)); + if (!vol->upd_buf) + return -ENOMEM; + +diff --git a/drivers/pci/host/pci-keystone-dw.c b/drivers/pci/host/pci-keystone-dw.c +index ed34c95..6153853 
100644 +--- a/drivers/pci/host/pci-keystone-dw.c ++++ b/drivers/pci/host/pci-keystone-dw.c +@@ -58,11 +58,6 @@ + + #define to_keystone_pcie(x) container_of(x, struct keystone_pcie, pp) + +-static inline struct pcie_port *sys_to_pcie(struct pci_sys_data *sys) +-{ +- return sys->private_data; +-} +- + static inline void update_reg_offset_bit_pos(u32 offset, u32 *reg_offset, + u32 *bit_pos) + { +@@ -108,7 +103,7 @@ static void ks_dw_pcie_msi_irq_ack(struct irq_data *d) + struct pcie_port *pp; + + msi = irq_data_get_msi_desc(d); +- pp = sys_to_pcie(msi_desc_to_pci_sysdata(msi)); ++ pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi); + ks_pcie = to_keystone_pcie(pp); + offset = d->irq - irq_linear_revmap(pp->irq_domain, 0); + update_reg_offset_bit_pos(offset, ®_offset, &bit_pos); +@@ -146,7 +141,7 @@ static void ks_dw_pcie_msi_irq_mask(struct irq_data *d) + u32 offset; + + msi = irq_data_get_msi_desc(d); +- pp = sys_to_pcie(msi_desc_to_pci_sysdata(msi)); ++ pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi); + ks_pcie = to_keystone_pcie(pp); + offset = d->irq - irq_linear_revmap(pp->irq_domain, 0); + +@@ -167,7 +162,7 @@ static void ks_dw_pcie_msi_irq_unmask(struct irq_data *d) + u32 offset; + + msi = irq_data_get_msi_desc(d); +- pp = sys_to_pcie(msi_desc_to_pci_sysdata(msi)); ++ pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi); + ks_pcie = to_keystone_pcie(pp); + offset = d->irq - irq_linear_revmap(pp->irq_domain, 0); + +diff --git a/drivers/sh/pm_runtime.c b/drivers/sh/pm_runtime.c +index 91a00301..a9bac3b 100644 +--- a/drivers/sh/pm_runtime.c ++++ b/drivers/sh/pm_runtime.c +@@ -34,7 +34,7 @@ static struct pm_clk_notifier_block platform_bus_notifier = { + + static int __init sh_pm_runtime_init(void) + { +- if (IS_ENABLED(CONFIG_ARCH_SHMOBILE)) { ++ if (IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_ARCH_SHMOBILE)) { + if (!of_find_compatible_node(NULL, NULL, + "renesas,cpg-mstp-clocks")) + return 0; +diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c +index 88ea4e4..3436a83 100644 +--- a/drivers/target/target_core_device.c ++++ b/drivers/target/target_core_device.c +@@ -826,6 +826,49 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name) + return dev; + } + ++/* ++ * Check if the underlying struct block_device request_queue supports ++ * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM ++ * in ATA and we need to set TPE=1 ++ */ ++bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib, ++ struct request_queue *q, int block_size) ++{ ++ if (!blk_queue_discard(q)) ++ return false; ++ ++ attrib->max_unmap_lba_count = (q->limits.max_discard_sectors << 9) / ++ block_size; ++ /* ++ * Currently hardcoded to 1 in Linux/SCSI code.. ++ */ ++ attrib->max_unmap_block_desc_count = 1; ++ attrib->unmap_granularity = q->limits.discard_granularity / block_size; ++ attrib->unmap_granularity_alignment = q->limits.discard_alignment / ++ block_size; ++ return true; ++} ++EXPORT_SYMBOL(target_configure_unmap_from_queue); ++ ++/* ++ * Convert from blocksize advertised to the initiator to the 512 byte ++ * units unconditionally used by the Linux block layer. 
++ */ ++sector_t target_to_linux_sector(struct se_device *dev, sector_t lb) ++{ ++ switch (dev->dev_attrib.block_size) { ++ case 4096: ++ return lb << 3; ++ case 2048: ++ return lb << 2; ++ case 1024: ++ return lb << 1; ++ default: ++ return lb; ++ } ++} ++EXPORT_SYMBOL(target_to_linux_sector); ++ + int target_configure_device(struct se_device *dev) + { + struct se_hba *hba = dev->se_hba; +diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c +index e319570..75f0f08 100644 +--- a/drivers/target/target_core_file.c ++++ b/drivers/target/target_core_file.c +@@ -160,25 +160,11 @@ static int fd_configure_device(struct se_device *dev) + " block_device blocks: %llu logical_block_size: %d\n", + dev_size, div_u64(dev_size, fd_dev->fd_block_size), + fd_dev->fd_block_size); +- /* +- * Check if the underlying struct block_device request_queue supports +- * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM +- * in ATA and we need to set TPE=1 +- */ +- if (blk_queue_discard(q)) { +- dev->dev_attrib.max_unmap_lba_count = +- q->limits.max_discard_sectors; +- /* +- * Currently hardcoded to 1 in Linux/SCSI code.. +- */ +- dev->dev_attrib.max_unmap_block_desc_count = 1; +- dev->dev_attrib.unmap_granularity = +- q->limits.discard_granularity >> 9; +- dev->dev_attrib.unmap_granularity_alignment = +- q->limits.discard_alignment; ++ ++ if (target_configure_unmap_from_queue(&dev->dev_attrib, q, ++ fd_dev->fd_block_size)) + pr_debug("IFILE: BLOCK Discard support available," +- " disabled by default\n"); +- } ++ " disabled by default\n"); + /* + * Enable write same emulation for IBLOCK and use 0xFFFF as + * the smaller WRITE_SAME(10) only has a two-byte block count. +@@ -490,9 +476,12 @@ fd_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb) + if (S_ISBLK(inode->i_mode)) { + /* The backend is block device, use discard */ + struct block_device *bdev = inode->i_bdev; ++ struct se_device *dev = cmd->se_dev; + +- ret = blkdev_issue_discard(bdev, lba, +- nolb, GFP_KERNEL, 0); ++ ret = blkdev_issue_discard(bdev, ++ target_to_linux_sector(dev, lba), ++ target_to_linux_sector(dev, nolb), ++ GFP_KERNEL, 0); + if (ret < 0) { + pr_warn("FILEIO: blkdev_issue_discard() failed: %d\n", + ret); +diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c +index f29c691..2c53dce 100644 +--- a/drivers/target/target_core_iblock.c ++++ b/drivers/target/target_core_iblock.c +@@ -121,27 +121,11 @@ static int iblock_configure_device(struct se_device *dev) + dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q); + dev->dev_attrib.hw_queue_depth = q->nr_requests; + +- /* +- * Check if the underlying struct block_device request_queue supports +- * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM +- * in ATA and we need to set TPE=1 +- */ +- if (blk_queue_discard(q)) { +- dev->dev_attrib.max_unmap_lba_count = +- q->limits.max_discard_sectors; +- +- /* +- * Currently hardcoded to 1 in Linux/SCSI code.. 
+- */ +- dev->dev_attrib.max_unmap_block_desc_count = 1; +- dev->dev_attrib.unmap_granularity = +- q->limits.discard_granularity >> 9; +- dev->dev_attrib.unmap_granularity_alignment = +- q->limits.discard_alignment; +- ++ if (target_configure_unmap_from_queue(&dev->dev_attrib, q, ++ dev->dev_attrib.hw_block_size)) + pr_debug("IBLOCK: BLOCK Discard support available," +- " disabled by default\n"); +- } ++ " disabled by default\n"); ++ + /* + * Enable write same emulation for IBLOCK and use 0xFFFF as + * the smaller WRITE_SAME(10) only has a two-byte block count. +@@ -413,9 +397,13 @@ static sense_reason_t + iblock_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb) + { + struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd; ++ struct se_device *dev = cmd->se_dev; + int ret; + +- ret = blkdev_issue_discard(bdev, lba, nolb, GFP_KERNEL, 0); ++ ret = blkdev_issue_discard(bdev, ++ target_to_linux_sector(dev, lba), ++ target_to_linux_sector(dev, nolb), ++ GFP_KERNEL, 0); + if (ret < 0) { + pr_err("blkdev_issue_discard() failed: %d\n", ret); + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; +@@ -431,8 +419,10 @@ iblock_execute_write_same(struct se_cmd *cmd) + struct scatterlist *sg; + struct bio *bio; + struct bio_list list; +- sector_t block_lba = cmd->t_task_lba; +- sector_t sectors = sbc_get_write_same_sectors(cmd); ++ struct se_device *dev = cmd->se_dev; ++ sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba); ++ sector_t sectors = target_to_linux_sector(dev, ++ sbc_get_write_same_sectors(cmd)); + + if (cmd->prot_op) { + pr_err("WRITE_SAME: Protection information with IBLOCK" +@@ -646,12 +636,12 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, + enum dma_data_direction data_direction) + { + struct se_device *dev = cmd->se_dev; ++ sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba); + struct iblock_req *ibr; + struct bio *bio, *bio_start; + struct bio_list list; + struct scatterlist *sg; + u32 sg_num = sgl_nents; +- sector_t block_lba; + unsigned bio_cnt; + int rw = 0; + int i; +@@ -677,24 +667,6 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, + rw = READ; + } + +- /* +- * Convert the blocksize advertised to the initiator to the 512 byte +- * units unconditionally used by the Linux block layer. +- */ +- if (dev->dev_attrib.block_size == 4096) +- block_lba = (cmd->t_task_lba << 3); +- else if (dev->dev_attrib.block_size == 2048) +- block_lba = (cmd->t_task_lba << 2); +- else if (dev->dev_attrib.block_size == 1024) +- block_lba = (cmd->t_task_lba << 1); +- else if (dev->dev_attrib.block_size == 512) +- block_lba = cmd->t_task_lba; +- else { +- pr_err("Unsupported SCSI -> BLOCK LBA conversion:" +- " %u\n", dev->dev_attrib.block_size); +- return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; +- } +- + ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL); + if (!ibr) + goto fail; +diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c +index e3fbc5a..6ceac4f 100644 +--- a/drivers/thermal/cpu_cooling.c ++++ b/drivers/thermal/cpu_cooling.c +@@ -377,26 +377,28 @@ static u32 cpu_power_to_freq(struct cpufreq_cooling_device *cpufreq_device, + * get_load() - get load for a cpu since last updated + * @cpufreq_device: &struct cpufreq_cooling_device for this cpu + * @cpu: cpu number ++ * @cpu_idx: index of the cpu in cpufreq_device->allowed_cpus + * + * Return: The average load of cpu @cpu in percentage since this + * function was last called. 
+ */ +-static u32 get_load(struct cpufreq_cooling_device *cpufreq_device, int cpu) ++static u32 get_load(struct cpufreq_cooling_device *cpufreq_device, int cpu, ++ int cpu_idx) + { + u32 load; + u64 now, now_idle, delta_time, delta_idle; + + now_idle = get_cpu_idle_time(cpu, &now, 0); +- delta_idle = now_idle - cpufreq_device->time_in_idle[cpu]; +- delta_time = now - cpufreq_device->time_in_idle_timestamp[cpu]; ++ delta_idle = now_idle - cpufreq_device->time_in_idle[cpu_idx]; ++ delta_time = now - cpufreq_device->time_in_idle_timestamp[cpu_idx]; + + if (delta_time <= delta_idle) + load = 0; + else + load = div64_u64(100 * (delta_time - delta_idle), delta_time); + +- cpufreq_device->time_in_idle[cpu] = now_idle; +- cpufreq_device->time_in_idle_timestamp[cpu] = now; ++ cpufreq_device->time_in_idle[cpu_idx] = now_idle; ++ cpufreq_device->time_in_idle_timestamp[cpu_idx] = now; + + return load; + } +@@ -598,7 +600,7 @@ static int cpufreq_get_requested_power(struct thermal_cooling_device *cdev, + u32 load; + + if (cpu_online(cpu)) +- load = get_load(cpufreq_device, cpu); ++ load = get_load(cpufreq_device, cpu, i); + else + load = 0; + +diff --git a/drivers/usb/chipidea/otg.c b/drivers/usb/chipidea/otg.c +index 45f86da..03b6743 100644 +--- a/drivers/usb/chipidea/otg.c ++++ b/drivers/usb/chipidea/otg.c +@@ -158,7 +158,7 @@ static void ci_otg_work(struct work_struct *work) + int ci_hdrc_otg_init(struct ci_hdrc *ci) + { + INIT_WORK(&ci->work, ci_otg_work); +- ci->wq = create_singlethread_workqueue("ci_otg"); ++ ci->wq = create_freezable_workqueue("ci_otg"); + if (!ci->wq) { + dev_err(ci->dev, "can't create workqueue\n"); + return -ENODEV; +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c +index a7caf53..7a76fe4 100644 +--- a/drivers/usb/serial/cp210x.c ++++ b/drivers/usb/serial/cp210x.c +@@ -164,6 +164,7 @@ static const struct usb_device_id id_table[] = { + { USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */ + { USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */ + { USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */ ++ { USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */ + { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */ + { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */ + { USB_DEVICE(0x1BA4, 0x0002) }, /* Silicon Labs 358x factory default */ +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c +index 8849439a..348e198 100644 +--- a/drivers/usb/serial/option.c ++++ b/drivers/usb/serial/option.c +@@ -270,6 +270,7 @@ static void option_instat_callback(struct urb *urb); + #define TELIT_PRODUCT_UE910_V2 0x1012 + #define TELIT_PRODUCT_LE922_USBCFG0 0x1042 + #define TELIT_PRODUCT_LE922_USBCFG3 0x1043 ++#define TELIT_PRODUCT_LE922_USBCFG5 0x1045 + #define TELIT_PRODUCT_LE920 0x1200 + #define TELIT_PRODUCT_LE910 0x1201 + +@@ -1132,6 +1133,8 @@ static const struct usb_device_id option_ids[] = { + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */ + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */ ++ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9003), /* Quectel UC20 */ ++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) }, + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003), +@@ -1183,6 +1186,8 @@ static const struct usb_device_id option_ids[] = { + .driver_info = 
(kernel_ulong_t)&telit_le922_blacklist_usbcfg0 }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG3), + .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 }, ++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG5, 0xff), ++ .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910), + .driver_info = (kernel_ulong_t)&telit_le910_blacklist }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920), +diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c +index 9919d2a..1bc6089 100644 +--- a/drivers/usb/serial/qcserial.c ++++ b/drivers/usb/serial/qcserial.c +@@ -157,14 +157,17 @@ static const struct usb_device_id id_table[] = { + {DEVICE_SWI(0x1199, 0x9056)}, /* Sierra Wireless Modem */ + {DEVICE_SWI(0x1199, 0x9060)}, /* Sierra Wireless Modem */ + {DEVICE_SWI(0x1199, 0x9061)}, /* Sierra Wireless Modem */ +- {DEVICE_SWI(0x1199, 0x9070)}, /* Sierra Wireless MC74xx/EM74xx */ +- {DEVICE_SWI(0x1199, 0x9071)}, /* Sierra Wireless MC74xx/EM74xx */ ++ {DEVICE_SWI(0x1199, 0x9070)}, /* Sierra Wireless MC74xx */ ++ {DEVICE_SWI(0x1199, 0x9071)}, /* Sierra Wireless MC74xx */ ++ {DEVICE_SWI(0x1199, 0x9078)}, /* Sierra Wireless EM74xx */ ++ {DEVICE_SWI(0x1199, 0x9079)}, /* Sierra Wireless EM74xx */ + {DEVICE_SWI(0x413c, 0x81a2)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */ + {DEVICE_SWI(0x413c, 0x81a3)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */ + {DEVICE_SWI(0x413c, 0x81a4)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */ + {DEVICE_SWI(0x413c, 0x81a8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */ + {DEVICE_SWI(0x413c, 0x81a9)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */ + {DEVICE_SWI(0x413c, 0x81b1)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */ ++ {DEVICE_SWI(0x413c, 0x81b3)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */ + + /* Huawei devices */ + {DEVICE_HWI(0x03f0, 0x581d)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */ +diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c +index 56bf6db..9982cb1 100644 +--- a/drivers/vfio/pci/vfio_pci.c ++++ b/drivers/vfio/pci/vfio_pci.c +@@ -446,7 +446,8 @@ static long vfio_pci_ioctl(void *device_data, + info.num_regions = VFIO_PCI_NUM_REGIONS; + info.num_irqs = VFIO_PCI_NUM_IRQS; + +- return copy_to_user((void __user *)arg, &info, minsz); ++ return copy_to_user((void __user *)arg, &info, minsz) ? ++ -EFAULT : 0; + + } else if (cmd == VFIO_DEVICE_GET_REGION_INFO) { + struct pci_dev *pdev = vdev->pdev; +@@ -520,7 +521,8 @@ static long vfio_pci_ioctl(void *device_data, + return -EINVAL; + } + +- return copy_to_user((void __user *)arg, &info, minsz); ++ return copy_to_user((void __user *)arg, &info, minsz) ? ++ -EFAULT : 0; + + } else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) { + struct vfio_irq_info info; +@@ -555,7 +557,8 @@ static long vfio_pci_ioctl(void *device_data, + else + info.flags |= VFIO_IRQ_INFO_NORESIZE; + +- return copy_to_user((void __user *)arg, &info, minsz); ++ return copy_to_user((void __user *)arg, &info, minsz) ? 
++ -EFAULT : 0; + + } else if (cmd == VFIO_DEVICE_SET_IRQS) { + struct vfio_irq_set hdr; +diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c +index 418cdd9..e65b142 100644 +--- a/drivers/vfio/platform/vfio_platform_common.c ++++ b/drivers/vfio/platform/vfio_platform_common.c +@@ -219,7 +219,8 @@ static long vfio_platform_ioctl(void *device_data, + info.num_regions = vdev->num_regions; + info.num_irqs = vdev->num_irqs; + +- return copy_to_user((void __user *)arg, &info, minsz); ++ return copy_to_user((void __user *)arg, &info, minsz) ? ++ -EFAULT : 0; + + } else if (cmd == VFIO_DEVICE_GET_REGION_INFO) { + struct vfio_region_info info; +@@ -240,7 +241,8 @@ static long vfio_platform_ioctl(void *device_data, + info.size = vdev->regions[info.index].size; + info.flags = vdev->regions[info.index].flags; + +- return copy_to_user((void __user *)arg, &info, minsz); ++ return copy_to_user((void __user *)arg, &info, minsz) ? ++ -EFAULT : 0; + + } else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) { + struct vfio_irq_info info; +@@ -259,7 +261,8 @@ static long vfio_platform_ioctl(void *device_data, + info.flags = vdev->irqs[info.index].flags; + info.count = vdev->irqs[info.index].count; + +- return copy_to_user((void __user *)arg, &info, minsz); ++ return copy_to_user((void __user *)arg, &info, minsz) ? ++ -EFAULT : 0; + + } else if (cmd == VFIO_DEVICE_SET_IRQS) { + struct vfio_irq_set hdr; +diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c +index 59d47cb..ecb826e 100644 +--- a/drivers/vfio/vfio_iommu_type1.c ++++ b/drivers/vfio/vfio_iommu_type1.c +@@ -999,7 +999,8 @@ static long vfio_iommu_type1_ioctl(void *iommu_data, + + info.iova_pgsizes = vfio_pgsize_bitmap(iommu); + +- return copy_to_user((void __user *)arg, &info, minsz); ++ return copy_to_user((void __user *)arg, &info, minsz) ? ++ -EFAULT : 0; + + } else if (cmd == VFIO_IOMMU_MAP_DMA) { + struct vfio_iommu_type1_dma_map map; +@@ -1032,7 +1033,8 @@ static long vfio_iommu_type1_ioctl(void *iommu_data, + if (ret) + return ret; + +- return copy_to_user((void __user *)arg, &unmap, minsz); ++ return copy_to_user((void __user *)arg, &unmap, minsz) ? 
++ -EFAULT : 0; + } + + return -ENOTTY; +diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c +index 92f3949..6e92917 100644 +--- a/drivers/video/console/fbcon.c ++++ b/drivers/video/console/fbcon.c +@@ -709,6 +709,7 @@ static int con2fb_acquire_newinfo(struct vc_data *vc, struct fb_info *info, + } + + if (!err) { ++ ops->cur_blink_jiffies = HZ / 5; + info->fbcon_par = ops; + + if (vc) +@@ -956,6 +957,7 @@ static const char *fbcon_startup(void) + ops->currcon = -1; + ops->graphics = 1; + ops->cur_rotate = -1; ++ ops->cur_blink_jiffies = HZ / 5; + info->fbcon_par = ops; + p->con_rotate = initial_rotation; + set_blitting_type(vc, info); +diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c +index 3e36e4a..9aba42b 100644 +--- a/fs/btrfs/async-thread.c ++++ b/fs/btrfs/async-thread.c +@@ -328,8 +328,8 @@ static inline void __btrfs_queue_work(struct __btrfs_workqueue *wq, + list_add_tail(&work->ordered_list, &wq->ordered_list); + spin_unlock_irqrestore(&wq->list_lock, flags); + } +- queue_work(wq->normal_wq, &work->normal_work); + trace_btrfs_work_queued(work); ++ queue_work(wq->normal_wq, &work->normal_work); + } + + void btrfs_queue_work(struct btrfs_workqueue *wq, +diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h +index 35489e7..385b449 100644 +--- a/fs/btrfs/ctree.h ++++ b/fs/btrfs/ctree.h +@@ -1572,7 +1572,7 @@ struct btrfs_fs_info { + + spinlock_t delayed_iput_lock; + struct list_head delayed_iputs; +- struct rw_semaphore delayed_iput_sem; ++ struct mutex cleaner_delayed_iput_mutex; + + /* this protects tree_mod_seq_list */ + spinlock_t tree_mod_seq_lock; +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c +index 4958360..41fb431 100644 +--- a/fs/btrfs/disk-io.c ++++ b/fs/btrfs/disk-io.c +@@ -1796,7 +1796,10 @@ static int cleaner_kthread(void *arg) + goto sleep; + } + ++ mutex_lock(&root->fs_info->cleaner_delayed_iput_mutex); + btrfs_run_delayed_iputs(root); ++ mutex_unlock(&root->fs_info->cleaner_delayed_iput_mutex); ++ + again = btrfs_clean_one_deleted_snapshot(root); + mutex_unlock(&root->fs_info->cleaner_mutex); + +@@ -2556,8 +2559,8 @@ int open_ctree(struct super_block *sb, + mutex_init(&fs_info->delete_unused_bgs_mutex); + mutex_init(&fs_info->reloc_mutex); + mutex_init(&fs_info->delalloc_root_mutex); ++ mutex_init(&fs_info->cleaner_delayed_iput_mutex); + seqlock_init(&fs_info->profiles_lock); +- init_rwsem(&fs_info->delayed_iput_sem); + + INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots); + INIT_LIST_HEAD(&fs_info->space_info); +diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c +index c4661db..2368cac 100644 +--- a/fs/btrfs/extent-tree.c ++++ b/fs/btrfs/extent-tree.c +@@ -4086,8 +4086,10 @@ commit_trans: + !atomic_read(&root->fs_info->open_ioctl_trans)) { + need_commit--; + +- if (need_commit > 0) ++ if (need_commit > 0) { ++ btrfs_start_delalloc_roots(fs_info, 0, -1); + btrfs_wait_ordered_roots(fs_info, -1); ++ } + + trans = btrfs_join_transaction(root); + if (IS_ERR(trans)) +@@ -4100,11 +4102,12 @@ commit_trans: + if (ret) + return ret; + /* +- * make sure that all running delayed iput are +- * done ++ * The cleaner kthread might still be doing iput ++ * operations. Wait for it to finish so that ++ * more space is released. 
+ */ +- down_write(&root->fs_info->delayed_iput_sem); +- up_write(&root->fs_info->delayed_iput_sem); ++ mutex_lock(&root->fs_info->cleaner_delayed_iput_mutex); ++ mutex_unlock(&root->fs_info->cleaner_delayed_iput_mutex); + goto again; + } else { + btrfs_end_transaction(trans, root); +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c +index 52fc1b5..4bc9dbf 100644 +--- a/fs/btrfs/inode.c ++++ b/fs/btrfs/inode.c +@@ -3142,8 +3142,6 @@ void btrfs_run_delayed_iputs(struct btrfs_root *root) + if (empty) + return; + +- down_read(&fs_info->delayed_iput_sem); +- + spin_lock(&fs_info->delayed_iput_lock); + list_splice_init(&fs_info->delayed_iputs, &list); + spin_unlock(&fs_info->delayed_iput_lock); +@@ -3154,8 +3152,6 @@ void btrfs_run_delayed_iputs(struct btrfs_root *root) + iput(delayed->inode); + kfree(delayed); + } +- +- up_read(&root->fs_info->delayed_iput_sem); + } + + /* +diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c +index 7cf8509..2c849b0 100644 +--- a/fs/btrfs/root-tree.c ++++ b/fs/btrfs/root-tree.c +@@ -310,8 +310,16 @@ int btrfs_find_orphan_roots(struct btrfs_root *tree_root) + set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state); + + err = btrfs_insert_fs_root(root->fs_info, root); ++ /* ++ * The root might have been inserted already, as before we look ++ * for orphan roots, log replay might have happened, which ++ * triggers a transaction commit and qgroup accounting, which ++ * in turn reads and inserts fs roots while doing backref ++ * walking. ++ */ ++ if (err == -EEXIST) ++ err = 0; + if (err) { +- BUG_ON(err == -EEXIST); + btrfs_free_fs_root(root); + break; + } +diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h +index c3cc160..44b3d42 100644 +--- a/fs/cifs/cifsfs.h ++++ b/fs/cifs/cifsfs.h +@@ -31,19 +31,15 @@ + * so that it will fit. We use hash_64 to convert the value to 31 bits, and + * then add 1, to ensure that we don't end up with a 0 as the value. + */ +-#if BITS_PER_LONG == 64 + static inline ino_t + cifs_uniqueid_to_ino_t(u64 fileid) + { ++ if ((sizeof(ino_t)) < (sizeof(u64))) ++ return (ino_t)hash_64(fileid, (sizeof(ino_t) * 8) - 1) + 1; ++ + return (ino_t)fileid; ++ + } +-#else +-static inline ino_t +-cifs_uniqueid_to_ino_t(u64 fileid) +-{ +- return (ino_t)hash_64(fileid, (sizeof(ino_t) * 8) - 1) + 1; +-} +-#endif + + extern struct file_system_type cifs_fs_type; + extern const struct address_space_operations cifs_addr_ops; +diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c +index 90b4f9f..76fcb50 100644 +--- a/fs/cifs/cifssmb.c ++++ b/fs/cifs/cifssmb.c +@@ -1396,11 +1396,10 @@ openRetry: + * current bigbuf. 
+ */ + static int +-cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid) ++discard_remaining_data(struct TCP_Server_Info *server) + { + unsigned int rfclen = get_rfc1002_length(server->smallbuf); + int remaining = rfclen + 4 - server->total_read; +- struct cifs_readdata *rdata = mid->callback_data; + + while (remaining > 0) { + int length; +@@ -1414,10 +1413,20 @@ cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid) + remaining -= length; + } + +- dequeue_mid(mid, rdata->result); + return 0; + } + ++static int ++cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid) ++{ ++ int length; ++ struct cifs_readdata *rdata = mid->callback_data; ++ ++ length = discard_remaining_data(server); ++ dequeue_mid(mid, rdata->result); ++ return length; ++} ++ + int + cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid) + { +@@ -1446,6 +1455,12 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid) + return length; + server->total_read += length; + ++ if (server->ops->is_status_pending && ++ server->ops->is_status_pending(buf, server, 0)) { ++ discard_remaining_data(server); ++ return -1; ++ } ++ + /* Was the SMB read successful? */ + rdata->result = server->ops->map_error(buf, false); + if (rdata->result != 0) { +diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c +index 7675555..373b5cd 100644 +--- a/fs/cifs/smb2pdu.c ++++ b/fs/cifs/smb2pdu.c +@@ -1109,21 +1109,25 @@ parse_lease_state(struct TCP_Server_Info *server, struct smb2_create_rsp *rsp, + { + char *data_offset; + struct create_context *cc; +- unsigned int next = 0; ++ unsigned int next; ++ unsigned int remaining; + char *name; + + data_offset = (char *)rsp + 4 + le32_to_cpu(rsp->CreateContextsOffset); ++ remaining = le32_to_cpu(rsp->CreateContextsLength); + cc = (struct create_context *)data_offset; +- do { +- cc = (struct create_context *)((char *)cc + next); ++ while (remaining >= sizeof(struct create_context)) { + name = le16_to_cpu(cc->NameOffset) + (char *)cc; +- if (le16_to_cpu(cc->NameLength) != 4 || +- strncmp(name, "RqLs", 4)) { +- next = le32_to_cpu(cc->Next); +- continue; +- } +- return server->ops->parse_lease_buf(cc, epoch); +- } while (next != 0); ++ if (le16_to_cpu(cc->NameLength) == 4 && ++ strncmp(name, "RqLs", 4) == 0) ++ return server->ops->parse_lease_buf(cc, epoch); ++ ++ next = le32_to_cpu(cc->Next); ++ if (!next) ++ break; ++ remaining -= next; ++ cc = (struct create_context *)((char *)cc + next); ++ } + + return 0; + } +diff --git a/fs/dcache.c b/fs/dcache.c +index 5c33aeb..877bcbb 100644 +--- a/fs/dcache.c ++++ b/fs/dcache.c +@@ -269,9 +269,6 @@ static inline int dname_external(const struct dentry *dentry) + return dentry->d_name.name != dentry->d_iname; + } + +-/* +- * Make sure other CPUs see the inode attached before the type is set. +- */ + static inline void __d_set_inode_and_type(struct dentry *dentry, + struct inode *inode, + unsigned type_flags) +@@ -279,28 +276,18 @@ static inline void __d_set_inode_and_type(struct dentry *dentry, + unsigned flags; + + dentry->d_inode = inode; +- smp_wmb(); + flags = READ_ONCE(dentry->d_flags); + flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU); + flags |= type_flags; + WRITE_ONCE(dentry->d_flags, flags); + } + +-/* +- * Ideally, we want to make sure that other CPUs see the flags cleared before +- * the inode is detached, but this is really a violation of RCU principles +- * since the ordering suggests we should always set inode before flags. 
+- * +- * We should instead replace or discard the entire dentry - but that sucks +- * performancewise on mass deletion/rename. +- */ + static inline void __d_clear_type_and_inode(struct dentry *dentry) + { + unsigned flags = READ_ONCE(dentry->d_flags); + + flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU); + WRITE_ONCE(dentry->d_flags, flags); +- smp_wmb(); + dentry->d_inode = NULL; + } + +@@ -370,9 +357,11 @@ static void dentry_unlink_inode(struct dentry * dentry) + __releases(dentry->d_inode->i_lock) + { + struct inode *inode = dentry->d_inode; ++ ++ raw_write_seqcount_begin(&dentry->d_seq); + __d_clear_type_and_inode(dentry); + hlist_del_init(&dentry->d_u.d_alias); +- dentry_rcuwalk_invalidate(dentry); ++ raw_write_seqcount_end(&dentry->d_seq); + spin_unlock(&dentry->d_lock); + spin_unlock(&inode->i_lock); + if (!inode->i_nlink) +@@ -1757,8 +1746,9 @@ static void __d_instantiate(struct dentry *dentry, struct inode *inode) + spin_lock(&dentry->d_lock); + if (inode) + hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry); ++ raw_write_seqcount_begin(&dentry->d_seq); + __d_set_inode_and_type(dentry, inode, add_flags); +- dentry_rcuwalk_invalidate(dentry); ++ raw_write_seqcount_end(&dentry->d_seq); + spin_unlock(&dentry->d_lock); + fsnotify_d_instantiate(dentry, inode); + } +diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c +index e5232bb..7a8ea13 100644 +--- a/fs/fs-writeback.c ++++ b/fs/fs-writeback.c +@@ -223,6 +223,9 @@ static void wb_wait_for_completion(struct backing_dev_info *bdi, + #define WB_FRN_HIST_MAX_SLOTS (WB_FRN_HIST_THR_SLOTS / 2 + 1) + /* one round can affect upto 5 slots */ + ++static atomic_t isw_nr_in_flight = ATOMIC_INIT(0); ++static struct workqueue_struct *isw_wq; ++ + void __inode_attach_wb(struct inode *inode, struct page *page) + { + struct backing_dev_info *bdi = inode_to_bdi(inode); +@@ -317,7 +320,6 @@ static void inode_switch_wbs_work_fn(struct work_struct *work) + struct inode_switch_wbs_context *isw = + container_of(work, struct inode_switch_wbs_context, work); + struct inode *inode = isw->inode; +- struct super_block *sb = inode->i_sb; + struct address_space *mapping = inode->i_mapping; + struct bdi_writeback *old_wb = inode->i_wb; + struct bdi_writeback *new_wb = isw->new_wb; +@@ -424,8 +426,9 @@ skip_switch: + wb_put(new_wb); + + iput(inode); +- deactivate_super(sb); + kfree(isw); ++ ++ atomic_dec(&isw_nr_in_flight); + } + + static void inode_switch_wbs_rcu_fn(struct rcu_head *rcu_head) +@@ -435,7 +438,7 @@ static void inode_switch_wbs_rcu_fn(struct rcu_head *rcu_head) + + /* needs to grab bh-unsafe locks, bounce to work item */ + INIT_WORK(&isw->work, inode_switch_wbs_work_fn); +- schedule_work(&isw->work); ++ queue_work(isw_wq, &isw->work); + } + + /** +@@ -471,20 +474,20 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id) + + /* while holding I_WB_SWITCH, no one else can update the association */ + spin_lock(&inode->i_lock); +- +- if (inode->i_state & (I_WB_SWITCH | I_FREEING) || +- inode_to_wb(inode) == isw->new_wb) +- goto out_unlock; +- +- if (!atomic_inc_not_zero(&inode->i_sb->s_active)) +- goto out_unlock; +- ++ if (!(inode->i_sb->s_flags & MS_ACTIVE) || ++ inode->i_state & (I_WB_SWITCH | I_FREEING) || ++ inode_to_wb(inode) == isw->new_wb) { ++ spin_unlock(&inode->i_lock); ++ goto out_free; ++ } + inode->i_state |= I_WB_SWITCH; + spin_unlock(&inode->i_lock); + + ihold(inode); + isw->inode = inode; + ++ atomic_inc(&isw_nr_in_flight); ++ + /* + * In addition to synchronizing among switchers, I_WB_SWITCH tells + * the RCU protected 
stat update paths to grab the mapping's +@@ -494,8 +497,6 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id) + call_rcu(&isw->rcu_head, inode_switch_wbs_rcu_fn); + return; + +-out_unlock: +- spin_unlock(&inode->i_lock); + out_free: + if (isw->new_wb) + wb_put(isw->new_wb); +@@ -849,6 +850,33 @@ restart: + wb_put(last_wb); + } + ++/** ++ * cgroup_writeback_umount - flush inode wb switches for umount ++ * ++ * This function is called when a super_block is about to be destroyed and ++ * flushes in-flight inode wb switches. An inode wb switch goes through ++ * RCU and then workqueue, so the two need to be flushed in order to ensure ++ * that all previously scheduled switches are finished. As wb switches are ++ * rare occurrences and synchronize_rcu() can take a while, perform ++ * flushing iff wb switches are in flight. ++ */ ++void cgroup_writeback_umount(void) ++{ ++ if (atomic_read(&isw_nr_in_flight)) { ++ synchronize_rcu(); ++ flush_workqueue(isw_wq); ++ } ++} ++ ++static int __init cgroup_writeback_init(void) ++{ ++ isw_wq = alloc_workqueue("inode_switch_wbs", 0, 0); ++ if (!isw_wq) ++ return -ENOMEM; ++ return 0; ++} ++fs_initcall(cgroup_writeback_init); ++ + #else /* CONFIG_CGROUP_WRITEBACK */ + + static struct bdi_writeback * +diff --git a/fs/jffs2/README.Locking b/fs/jffs2/README.Locking +index 3ea3655..8918ac9 100644 +--- a/fs/jffs2/README.Locking ++++ b/fs/jffs2/README.Locking +@@ -2,10 +2,6 @@ + JFFS2 LOCKING DOCUMENTATION + --------------------------- + +-At least theoretically, JFFS2 does not require the Big Kernel Lock +-(BKL), which was always helpfully obtained for it by Linux 2.4 VFS +-code. It has its own locking, as described below. +- + This document attempts to describe the existing locking rules for + JFFS2. It is not expected to remain perfectly up to date, but ought to + be fairly close. +@@ -69,6 +65,7 @@ Ordering constraints: + any f->sem held. + 2. Never attempt to lock two file mutexes in one thread. + No ordering rules have been made for doing so. ++ 3. Never lock a page cache page with f->sem held. + + + erase_completion_lock spinlock +diff --git a/fs/jffs2/build.c b/fs/jffs2/build.c +index a3750f9..c1f0494 100644 +--- a/fs/jffs2/build.c ++++ b/fs/jffs2/build.c +@@ -49,7 +49,8 @@ next_inode(int *i, struct jffs2_inode_cache *ic, struct jffs2_sb_info *c) + + + static void jffs2_build_inode_pass1(struct jffs2_sb_info *c, +- struct jffs2_inode_cache *ic) ++ struct jffs2_inode_cache *ic, ++ int *dir_hardlinks) + { + struct jffs2_full_dirent *fd; + +@@ -68,19 +69,21 @@ static void jffs2_build_inode_pass1(struct jffs2_sb_info *c, + dbg_fsbuild("child \"%s\" (ino #%u) of dir ino #%u doesn't exist!\n", + fd->name, fd->ino, ic->ino); + jffs2_mark_node_obsolete(c, fd->raw); ++ /* Clear the ic/raw union so it doesn't cause problems later. */ ++ fd->ic = NULL; + continue; + } + ++ /* From this point, fd->raw is no longer used so we can set fd->ic */ ++ fd->ic = child_ic; ++ child_ic->pino_nlink++; ++ /* If we appear (at this stage) to have hard-linked directories, ++ * set a flag to trigger a scan later */ + if (fd->type == DT_DIR) { +- if (child_ic->pino_nlink) { +- JFFS2_ERROR("child dir \"%s\" (ino #%u) of dir ino #%u appears to be a hard link\n", +- fd->name, fd->ino, ic->ino); +- /* TODO: What do we do about it? 
*/ +- } else { +- child_ic->pino_nlink = ic->ino; +- } +- } else +- child_ic->pino_nlink++; ++ child_ic->flags |= INO_FLAGS_IS_DIR; ++ if (child_ic->pino_nlink > 1) ++ *dir_hardlinks = 1; ++ } + + dbg_fsbuild("increased nlink for child \"%s\" (ino #%u)\n", fd->name, fd->ino); + /* Can't free scan_dents so far. We might need them in pass 2 */ +@@ -94,8 +97,7 @@ static void jffs2_build_inode_pass1(struct jffs2_sb_info *c, + */ + static int jffs2_build_filesystem(struct jffs2_sb_info *c) + { +- int ret; +- int i; ++ int ret, i, dir_hardlinks = 0; + struct jffs2_inode_cache *ic; + struct jffs2_full_dirent *fd; + struct jffs2_full_dirent *dead_fds = NULL; +@@ -119,7 +121,7 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c) + /* Now scan the directory tree, increasing nlink according to every dirent found. */ + for_each_inode(i, c, ic) { + if (ic->scan_dents) { +- jffs2_build_inode_pass1(c, ic); ++ jffs2_build_inode_pass1(c, ic, &dir_hardlinks); + cond_resched(); + } + } +@@ -155,6 +157,20 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c) + } + + dbg_fsbuild("pass 2a complete\n"); ++ ++ if (dir_hardlinks) { ++ /* If we detected directory hardlinks earlier, *hopefully* ++ * they are gone now because some of the links were from ++ * dead directories which still had some old dirents lying ++ * around and not yet garbage-collected, but which have ++ * been discarded above. So clear the pino_nlink field ++ * in each directory, so that the final scan below can ++ * print appropriate warnings. */ ++ for_each_inode(i, c, ic) { ++ if (ic->flags & INO_FLAGS_IS_DIR) ++ ic->pino_nlink = 0; ++ } ++ } + dbg_fsbuild("freeing temporary data structures\n"); + + /* Finally, we can scan again and free the dirent structs */ +@@ -162,6 +178,33 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c) + while(ic->scan_dents) { + fd = ic->scan_dents; + ic->scan_dents = fd->next; ++ /* We do use the pino_nlink field to count nlink of ++ * directories during fs build, so set it to the ++ * parent ino# now. Now that there's hopefully only ++ * one. */ ++ if (fd->type == DT_DIR) { ++ if (!fd->ic) { ++ /* We'll have complained about it and marked the coresponding ++ raw node obsolete already. Just skip it. */ ++ continue; ++ } ++ ++ /* We *have* to have set this in jffs2_build_inode_pass1() */ ++ BUG_ON(!(fd->ic->flags & INO_FLAGS_IS_DIR)); ++ ++ /* We clear ic->pino_nlink ∀ directories' ic *only* if dir_hardlinks ++ * is set. Otherwise, we know this should never trigger anyway, so ++ * we don't do the check. And ic->pino_nlink still contains the nlink ++ * value (which is 1). */ ++ if (dir_hardlinks && fd->ic->pino_nlink) { ++ JFFS2_ERROR("child dir \"%s\" (ino #%u) of dir ino #%u is also hard linked from dir ino #%u\n", ++ fd->name, fd->ino, ic->ino, fd->ic->pino_nlink); ++ /* Should we unlink it from its previous parent? */ ++ } ++ ++ /* For directories, ic->pino_nlink holds that parent inode # */ ++ fd->ic->pino_nlink = ic->ino; ++ } + jffs2_free_full_dirent(fd); + } + ic->scan_dents = NULL; +@@ -240,11 +283,7 @@ static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *c, + + /* Reduce nlink of the child. If it's now zero, stick it on the + dead_fds list to be cleaned up later. 
Else just free the fd */ +- +- if (fd->type == DT_DIR) +- child_ic->pino_nlink = 0; +- else +- child_ic->pino_nlink--; ++ child_ic->pino_nlink--; + + if (!child_ic->pino_nlink) { + dbg_fsbuild("inode #%u (\"%s\") now has no links; adding to dead_fds list.\n", +diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c +index f509f62..3361979 100644 +--- a/fs/jffs2/file.c ++++ b/fs/jffs2/file.c +@@ -137,39 +137,33 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping, + struct page *pg; + struct inode *inode = mapping->host; + struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); +- struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); +- struct jffs2_raw_inode ri; +- uint32_t alloc_len = 0; + pgoff_t index = pos >> PAGE_CACHE_SHIFT; + uint32_t pageofs = index << PAGE_CACHE_SHIFT; + int ret = 0; + +- jffs2_dbg(1, "%s()\n", __func__); +- +- if (pageofs > inode->i_size) { +- ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len, +- ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); +- if (ret) +- return ret; +- } +- +- mutex_lock(&f->sem); + pg = grab_cache_page_write_begin(mapping, index, flags); +- if (!pg) { +- if (alloc_len) +- jffs2_complete_reservation(c); +- mutex_unlock(&f->sem); ++ if (!pg) + return -ENOMEM; +- } + *pagep = pg; + +- if (alloc_len) { ++ jffs2_dbg(1, "%s()\n", __func__); ++ ++ if (pageofs > inode->i_size) { + /* Make new hole frag from old EOF to new page */ ++ struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); ++ struct jffs2_raw_inode ri; + struct jffs2_full_dnode *fn; ++ uint32_t alloc_len; + + jffs2_dbg(1, "Writing new hole frag 0x%x-0x%x between current EOF and new page\n", + (unsigned int)inode->i_size, pageofs); + ++ ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len, ++ ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); ++ if (ret) ++ goto out_page; ++ ++ mutex_lock(&f->sem); + memset(&ri, 0, sizeof(ri)); + + ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); +@@ -196,6 +190,7 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping, + if (IS_ERR(fn)) { + ret = PTR_ERR(fn); + jffs2_complete_reservation(c); ++ mutex_unlock(&f->sem); + goto out_page; + } + ret = jffs2_add_full_dnode_to_inode(c, f, fn); +@@ -210,10 +205,12 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping, + jffs2_mark_node_obsolete(c, fn->raw); + jffs2_free_full_dnode(fn); + jffs2_complete_reservation(c); ++ mutex_unlock(&f->sem); + goto out_page; + } + jffs2_complete_reservation(c); + inode->i_size = pageofs; ++ mutex_unlock(&f->sem); + } + + /* +@@ -222,18 +219,18 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping, + * case of a short-copy. + */ + if (!PageUptodate(pg)) { ++ mutex_lock(&f->sem); + ret = jffs2_do_readpage_nolock(inode, pg); ++ mutex_unlock(&f->sem); + if (ret) + goto out_page; + } +- mutex_unlock(&f->sem); + jffs2_dbg(1, "end write_begin(). pg->flags %lx\n", pg->flags); + return ret; + + out_page: + unlock_page(pg); + page_cache_release(pg); +- mutex_unlock(&f->sem); + return ret; + } + +diff --git a/fs/jffs2/gc.c b/fs/jffs2/gc.c +index 5a2dec2..95d5880 100644 +--- a/fs/jffs2/gc.c ++++ b/fs/jffs2/gc.c +@@ -1296,14 +1296,17 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era + BUG_ON(start > orig_start); + } + +- /* First, use readpage() to read the appropriate page into the page cache */ +- /* Q: What happens if we actually try to GC the _same_ page for which commit_write() +- * triggered garbage collection in the first place? +- * A: I _think_ it's OK. 
read_cache_page shouldn't deadlock, we'll write out the +- * page OK. We'll actually write it out again in commit_write, which is a little +- * suboptimal, but at least we're correct. +- */ ++ /* The rules state that we must obtain the page lock *before* f->sem, so ++ * drop f->sem temporarily. Since we also hold c->alloc_sem, nothing's ++ * actually going to *change* so we're safe; we only allow reading. ++ * ++ * It is important to note that jffs2_write_begin() will ensure that its ++ * page is marked Uptodate before allocating space. That means that if we ++ * end up here trying to GC the *same* page that jffs2_write_begin() is ++ * trying to write out, read_cache_page() will not deadlock. */ ++ mutex_unlock(&f->sem); + pg_ptr = jffs2_gc_fetch_page(c, f, start, &pg); ++ mutex_lock(&f->sem); + + if (IS_ERR(pg_ptr)) { + pr_warn("read_cache_page() returned error: %ld\n", +diff --git a/fs/jffs2/nodelist.h b/fs/jffs2/nodelist.h +index fa35ff7..0637271 100644 +--- a/fs/jffs2/nodelist.h ++++ b/fs/jffs2/nodelist.h +@@ -194,6 +194,7 @@ struct jffs2_inode_cache { + #define INO_STATE_CLEARING 6 /* In clear_inode() */ + + #define INO_FLAGS_XATTR_CHECKED 0x01 /* has no duplicate xattr_ref */ ++#define INO_FLAGS_IS_DIR 0x02 /* is a directory */ + + #define RAWNODE_CLASS_INODE_CACHE 0 + #define RAWNODE_CLASS_XATTR_DATUM 1 +@@ -249,7 +250,10 @@ struct jffs2_readinode_info + + struct jffs2_full_dirent + { +- struct jffs2_raw_node_ref *raw; ++ union { ++ struct jffs2_raw_node_ref *raw; ++ struct jffs2_inode_cache *ic; /* Just during part of build */ ++ }; + struct jffs2_full_dirent *next; + uint32_t version; + uint32_t ino; /* == zero for unlink */ +diff --git a/fs/super.c b/fs/super.c +index 954aeb8..f5f4b32 100644 +--- a/fs/super.c ++++ b/fs/super.c +@@ -415,6 +415,7 @@ void generic_shutdown_super(struct super_block *sb) + sb->s_flags &= ~MS_ACTIVE; + + fsnotify_unmount_inodes(sb); ++ cgroup_writeback_umount(); + + evict_inodes(sb); + +diff --git a/include/linux/ata.h b/include/linux/ata.h +index d2992bf..c1a2f34 100644 +--- a/include/linux/ata.h ++++ b/include/linux/ata.h +@@ -487,8 +487,8 @@ enum ata_tf_protocols { + }; + + enum ata_ioctls { +- ATA_IOC_GET_IO32 = 0x309, +- ATA_IOC_SET_IO32 = 0x324, ++ ATA_IOC_GET_IO32 = 0x309, /* HDIO_GET_32BIT */ ++ ATA_IOC_SET_IO32 = 0x324, /* HDIO_SET_32BIT */ + }; + + /* core structures */ +diff --git a/include/linux/bio.h b/include/linux/bio.h +index b9b6e04..79cfaee 100644 +--- a/include/linux/bio.h ++++ b/include/linux/bio.h +@@ -310,6 +310,43 @@ static inline void bio_clear_flag(struct bio *bio, unsigned int bit) + bio->bi_flags &= ~(1U << bit); + } + ++static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv) ++{ ++ *bv = bio_iovec(bio); ++} ++ ++static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv) ++{ ++ struct bvec_iter iter = bio->bi_iter; ++ int idx; ++ ++ if (!bio_flagged(bio, BIO_CLONED)) { ++ *bv = bio->bi_io_vec[bio->bi_vcnt - 1]; ++ return; ++ } ++ ++ if (unlikely(!bio_multiple_segments(bio))) { ++ *bv = bio_iovec(bio); ++ return; ++ } ++ ++ bio_advance_iter(bio, &iter, iter.bi_size); ++ ++ if (!iter.bi_bvec_done) ++ idx = iter.bi_idx - 1; ++ else /* in the middle of bvec */ ++ idx = iter.bi_idx; ++ ++ *bv = bio->bi_io_vec[idx]; ++ ++ /* ++ * iter.bi_bvec_done records actual length of the last bvec ++ * if this bio ends in the middle of one io vector ++ */ ++ if (iter.bi_bvec_done) ++ bv->bv_len = iter.bi_bvec_done; ++} ++ + enum bip_flags { + BIP_BLOCK_INTEGRITY = 1 << 0, /* block layer owns integrity data */ + 
BIP_MAPPED_INTEGRITY = 1 << 1, /* ref tag has been remapped */ +diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h +index c70e358..1687557 100644 +--- a/include/linux/blkdev.h ++++ b/include/linux/blkdev.h +@@ -1367,6 +1367,13 @@ static inline void put_dev_sector(Sector p) + page_cache_release(p.v); + } + ++static inline bool __bvec_gap_to_prev(struct request_queue *q, ++ struct bio_vec *bprv, unsigned int offset) ++{ ++ return offset || ++ ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q)); ++} ++ + /* + * Check if adding a bio_vec after bprv with offset would create a gap in + * the SG list. Most drivers don't care about this, but some do. +@@ -1376,18 +1383,22 @@ static inline bool bvec_gap_to_prev(struct request_queue *q, + { + if (!queue_virt_boundary(q)) + return false; +- return offset || +- ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q)); ++ return __bvec_gap_to_prev(q, bprv, offset); + } + + static inline bool bio_will_gap(struct request_queue *q, struct bio *prev, + struct bio *next) + { +- if (!bio_has_data(prev)) +- return false; ++ if (bio_has_data(prev) && queue_virt_boundary(q)) { ++ struct bio_vec pb, nb; ++ ++ bio_get_last_bvec(prev, &pb); ++ bio_get_first_bvec(next, &nb); + +- return bvec_gap_to_prev(q, &prev->bi_io_vec[prev->bi_vcnt - 1], +- next->bi_io_vec[0].bv_offset); ++ return __bvec_gap_to_prev(q, &pb, nb.bv_offset); ++ } ++ ++ return false; + } + + static inline bool req_gap_back_merge(struct request *req, struct bio *bio) +diff --git a/include/linux/dcache.h b/include/linux/dcache.h +index d67ae11..8a2e009 100644 +--- a/include/linux/dcache.h ++++ b/include/linux/dcache.h +@@ -409,9 +409,7 @@ static inline bool d_mountpoint(const struct dentry *dentry) + */ + static inline unsigned __d_entry_type(const struct dentry *dentry) + { +- unsigned type = READ_ONCE(dentry->d_flags); +- smp_rmb(); +- return type & DCACHE_ENTRY_TYPE; ++ return dentry->d_flags & DCACHE_ENTRY_TYPE; + } + + static inline bool d_is_miss(const struct dentry *dentry) +diff --git a/include/linux/libata.h b/include/linux/libata.h +index 600c1e0..b20a275 100644 +--- a/include/linux/libata.h ++++ b/include/linux/libata.h +@@ -718,7 +718,7 @@ struct ata_device { + union { + u16 id[ATA_ID_WORDS]; /* IDENTIFY xxx DEVICE data */ + u32 gscr[SATA_PMP_GSCR_DWORDS]; /* PMP GSCR block */ +- }; ++ } ____cacheline_aligned; + + /* DEVSLP Timing Variables from Identify Device Data Log */ + u8 devslp_timing[ATA_LOG_DEVSLP_SIZE]; +diff --git a/include/linux/module.h b/include/linux/module.h +index 3a19c79..b229a99 100644 +--- a/include/linux/module.h ++++ b/include/linux/module.h +@@ -302,6 +302,12 @@ struct mod_tree_node { + struct latch_tree_node node; + }; + ++struct mod_kallsyms { ++ Elf_Sym *symtab; ++ unsigned int num_symtab; ++ char *strtab; ++}; ++ + struct module { + enum module_state state; + +@@ -411,14 +417,9 @@ struct module { + #endif + + #ifdef CONFIG_KALLSYMS +- /* +- * We keep the symbol and string tables for kallsyms. +- * The core_* fields below are temporary, loader-only (they +- * could really be discarded after module init). 
+- */ +- Elf_Sym *symtab, *core_symtab; +- unsigned int num_symtab, core_num_syms; +- char *strtab, *core_strtab; ++ /* Protected by RCU and/or module_mutex: use rcu_dereference() */ ++ struct mod_kallsyms *kallsyms; ++ struct mod_kallsyms core_kallsyms; + + /* Section attributes */ + struct module_sect_attrs *sect_attrs; +diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h +index 429fdfc..925730b 100644 +--- a/include/linux/trace_events.h ++++ b/include/linux/trace_events.h +@@ -568,6 +568,8 @@ enum { + FILTER_DYN_STRING, + FILTER_PTR_STRING, + FILTER_TRACE_FN, ++ FILTER_COMM, ++ FILTER_CPU, + }; + + extern int trace_event_raw_init(struct trace_event_call *call); +diff --git a/include/linux/writeback.h b/include/linux/writeback.h +index b333c94..d0b5ca5 100644 +--- a/include/linux/writeback.h ++++ b/include/linux/writeback.h +@@ -198,6 +198,7 @@ void wbc_attach_and_unlock_inode(struct writeback_control *wbc, + void wbc_detach_inode(struct writeback_control *wbc); + void wbc_account_io(struct writeback_control *wbc, struct page *page, + size_t bytes); ++void cgroup_writeback_umount(void); + + /** + * inode_attach_wb - associate an inode with its wb +@@ -301,6 +302,10 @@ static inline void wbc_account_io(struct writeback_control *wbc, + { + } + ++static inline void cgroup_writeback_umount(void) ++{ ++} ++ + #endif /* CONFIG_CGROUP_WRITEBACK */ + + /* +diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h +index 56cf8e4..28ee5c2 100644 +--- a/include/target/target_core_backend.h ++++ b/include/target/target_core_backend.h +@@ -94,5 +94,8 @@ sense_reason_t passthrough_parse_cdb(struct se_cmd *cmd, + sense_reason_t (*exec_cmd)(struct se_cmd *cmd)); + + bool target_sense_desc_format(struct se_device *dev); ++sector_t target_to_linux_sector(struct se_device *dev, sector_t lb); ++bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib, ++ struct request_queue *q, int block_size); + + #endif /* TARGET_CORE_BACKEND_H */ +diff --git a/kernel/module.c b/kernel/module.c +index 14833e6..0e5c711 100644 +--- a/kernel/module.c ++++ b/kernel/module.c +@@ -327,6 +327,9 @@ struct load_info { + struct _ddebug *debug; + unsigned int num_debug; + bool sig_ok; ++#ifdef CONFIG_KALLSYMS ++ unsigned long mod_kallsyms_init_off; ++#endif + struct { + unsigned int sym, str, mod, vers, info, pcpu; + } index; +@@ -2492,10 +2495,21 @@ static void layout_symtab(struct module *mod, struct load_info *info) + strsect->sh_flags |= SHF_ALLOC; + strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect, + info->index.str) | INIT_OFFSET_MASK; +- mod->init_size = debug_align(mod->init_size); + pr_debug("\t%s\n", info->secstrings + strsect->sh_name); ++ ++ /* We'll tack temporary mod_kallsyms on the end. */ ++ mod->init_size = ALIGN(mod->init_size, ++ __alignof__(struct mod_kallsyms)); ++ info->mod_kallsyms_init_off = mod->init_size; ++ mod->init_size += sizeof(struct mod_kallsyms); ++ mod->init_size = debug_align(mod->init_size); + } + ++/* ++ * We use the full symtab and strtab which layout_symtab arranged to ++ * be appended to the init section. Later we switch to the cut-down ++ * core-only ones. 
++ */ + static void add_kallsyms(struct module *mod, const struct load_info *info) + { + unsigned int i, ndst; +@@ -2504,28 +2518,33 @@ static void add_kallsyms(struct module *mod, const struct load_info *info) + char *s; + Elf_Shdr *symsec = &info->sechdrs[info->index.sym]; + +- mod->symtab = (void *)symsec->sh_addr; +- mod->num_symtab = symsec->sh_size / sizeof(Elf_Sym); ++ /* Set up to point into init section. */ ++ mod->kallsyms = mod->module_init + info->mod_kallsyms_init_off; ++ ++ mod->kallsyms->symtab = (void *)symsec->sh_addr; ++ mod->kallsyms->num_symtab = symsec->sh_size / sizeof(Elf_Sym); + /* Make sure we get permanent strtab: don't use info->strtab. */ +- mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr; ++ mod->kallsyms->strtab = (void *)info->sechdrs[info->index.str].sh_addr; + + /* Set types up while we still have access to sections. */ +- for (i = 0; i < mod->num_symtab; i++) +- mod->symtab[i].st_info = elf_type(&mod->symtab[i], info); +- +- mod->core_symtab = dst = mod->module_core + info->symoffs; +- mod->core_strtab = s = mod->module_core + info->stroffs; +- src = mod->symtab; +- for (ndst = i = 0; i < mod->num_symtab; i++) { ++ for (i = 0; i < mod->kallsyms->num_symtab; i++) ++ mod->kallsyms->symtab[i].st_info ++ = elf_type(&mod->kallsyms->symtab[i], info); ++ ++ /* Now populate the cut down core kallsyms for after init. */ ++ mod->core_kallsyms.symtab = dst = mod->module_core + info->symoffs; ++ mod->core_kallsyms.strtab = s = mod->module_core + info->stroffs; ++ src = mod->kallsyms->symtab; ++ for (ndst = i = 0; i < mod->kallsyms->num_symtab; i++) { + if (i == 0 || + is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum)) { + dst[ndst] = src[i]; +- dst[ndst++].st_name = s - mod->core_strtab; +- s += strlcpy(s, &mod->strtab[src[i].st_name], ++ dst[ndst++].st_name = s - mod->core_kallsyms.strtab; ++ s += strlcpy(s, &mod->kallsyms->strtab[src[i].st_name], + KSYM_NAME_LEN) + 1; + } + } +- mod->core_num_syms = ndst; ++ mod->core_kallsyms.num_symtab = ndst; + } + #else + static inline void layout_symtab(struct module *mod, struct load_info *info) +@@ -3274,9 +3293,8 @@ static noinline int do_init_module(struct module *mod) + module_put(mod); + trim_init_extable(mod); + #ifdef CONFIG_KALLSYMS +- mod->num_symtab = mod->core_num_syms; +- mod->symtab = mod->core_symtab; +- mod->strtab = mod->core_strtab; ++ /* Switch to core kallsyms now init is done: kallsyms may be walking! */ ++ rcu_assign_pointer(mod->kallsyms, &mod->core_kallsyms); + #endif + mod_tree_remove_init(mod); + unset_module_init_ro_nx(mod); +@@ -3646,9 +3664,9 @@ static inline int is_arm_mapping_symbol(const char *str) + && (str[2] == '\0' || str[2] == '.'); + } + +-static const char *symname(struct module *mod, unsigned int symnum) ++static const char *symname(struct mod_kallsyms *kallsyms, unsigned int symnum) + { +- return mod->strtab + mod->symtab[symnum].st_name; ++ return kallsyms->strtab + kallsyms->symtab[symnum].st_name; + } + + static const char *get_ksymbol(struct module *mod, +@@ -3658,6 +3676,7 @@ static const char *get_ksymbol(struct module *mod, + { + unsigned int i, best = 0; + unsigned long nextval; ++ struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms); + + /* At worse, next value is at end of module */ + if (within_module_init(addr, mod)) +@@ -3667,32 +3686,32 @@ static const char *get_ksymbol(struct module *mod, + + /* Scan for closest preceding symbol, and next symbol. (ELF + starts real symbols at 1). 
*/ +- for (i = 1; i < mod->num_symtab; i++) { +- if (mod->symtab[i].st_shndx == SHN_UNDEF) ++ for (i = 1; i < kallsyms->num_symtab; i++) { ++ if (kallsyms->symtab[i].st_shndx == SHN_UNDEF) + continue; + + /* We ignore unnamed symbols: they're uninformative + * and inserted at a whim. */ +- if (*symname(mod, i) == '\0' +- || is_arm_mapping_symbol(symname(mod, i))) ++ if (*symname(kallsyms, i) == '\0' ++ || is_arm_mapping_symbol(symname(kallsyms, i))) + continue; + +- if (mod->symtab[i].st_value <= addr +- && mod->symtab[i].st_value > mod->symtab[best].st_value) ++ if (kallsyms->symtab[i].st_value <= addr ++ && kallsyms->symtab[i].st_value > kallsyms->symtab[best].st_value) + best = i; +- if (mod->symtab[i].st_value > addr +- && mod->symtab[i].st_value < nextval) +- nextval = mod->symtab[i].st_value; ++ if (kallsyms->symtab[i].st_value > addr ++ && kallsyms->symtab[i].st_value < nextval) ++ nextval = kallsyms->symtab[i].st_value; + } + + if (!best) + return NULL; + + if (size) +- *size = nextval - mod->symtab[best].st_value; ++ *size = nextval - kallsyms->symtab[best].st_value; + if (offset) +- *offset = addr - mod->symtab[best].st_value; +- return symname(mod, best); ++ *offset = addr - kallsyms->symtab[best].st_value; ++ return symname(kallsyms, best); + } + + /* For kallsyms to ask for address resolution. NULL means not found. Careful +@@ -3782,18 +3801,21 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type, + + preempt_disable(); + list_for_each_entry_rcu(mod, &modules, list) { ++ struct mod_kallsyms *kallsyms; ++ + if (mod->state == MODULE_STATE_UNFORMED) + continue; +- if (symnum < mod->num_symtab) { +- *value = mod->symtab[symnum].st_value; +- *type = mod->symtab[symnum].st_info; +- strlcpy(name, symname(mod, symnum), KSYM_NAME_LEN); ++ kallsyms = rcu_dereference_sched(mod->kallsyms); ++ if (symnum < kallsyms->num_symtab) { ++ *value = kallsyms->symtab[symnum].st_value; ++ *type = kallsyms->symtab[symnum].st_info; ++ strlcpy(name, symname(kallsyms, symnum), KSYM_NAME_LEN); + strlcpy(module_name, mod->name, MODULE_NAME_LEN); + *exported = is_exported(name, *value, mod); + preempt_enable(); + return 0; + } +- symnum -= mod->num_symtab; ++ symnum -= kallsyms->num_symtab; + } + preempt_enable(); + return -ERANGE; +@@ -3802,11 +3824,12 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type, + static unsigned long mod_find_symname(struct module *mod, const char *name) + { + unsigned int i; ++ struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms); + +- for (i = 0; i < mod->num_symtab; i++) +- if (strcmp(name, symname(mod, i)) == 0 && +- mod->symtab[i].st_info != 'U') +- return mod->symtab[i].st_value; ++ for (i = 0; i < kallsyms->num_symtab; i++) ++ if (strcmp(name, symname(kallsyms, i)) == 0 && ++ kallsyms->symtab[i].st_info != 'U') ++ return kallsyms->symtab[i].st_value; + return 0; + } + +@@ -3845,11 +3868,14 @@ int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *, + module_assert_mutex(); + + list_for_each_entry(mod, &modules, list) { ++ /* We hold module_mutex: no need for rcu_dereference_sched */ ++ struct mod_kallsyms *kallsyms = mod->kallsyms; ++ + if (mod->state == MODULE_STATE_UNFORMED) + continue; +- for (i = 0; i < mod->num_symtab; i++) { +- ret = fn(data, symname(mod, i), +- mod, mod->symtab[i].st_value); ++ for (i = 0; i < kallsyms->num_symtab; i++) { ++ ret = fn(data, symname(kallsyms, i), ++ mod, kallsyms->symtab[i].st_value); + if (ret != 0) + return ret; + } +diff --git 
a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c +index debf6e8..d202d99 100644 +--- a/kernel/trace/trace_events.c ++++ b/kernel/trace/trace_events.c +@@ -97,16 +97,16 @@ trace_find_event_field(struct trace_event_call *call, char *name) + struct ftrace_event_field *field; + struct list_head *head; + +- field = __find_event_field(&ftrace_generic_fields, name); ++ head = trace_get_fields(call); ++ field = __find_event_field(head, name); + if (field) + return field; + +- field = __find_event_field(&ftrace_common_fields, name); ++ field = __find_event_field(&ftrace_generic_fields, name); + if (field) + return field; + +- head = trace_get_fields(call); +- return __find_event_field(head, name); ++ return __find_event_field(&ftrace_common_fields, name); + } + + static int __trace_define_field(struct list_head *head, const char *type, +@@ -171,8 +171,10 @@ static int trace_define_generic_fields(void) + { + int ret; + +- __generic_field(int, cpu, FILTER_OTHER); +- __generic_field(char *, comm, FILTER_PTR_STRING); ++ __generic_field(int, CPU, FILTER_CPU); ++ __generic_field(int, cpu, FILTER_CPU); ++ __generic_field(char *, COMM, FILTER_COMM); ++ __generic_field(char *, comm, FILTER_COMM); + + return ret; + } +diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c +index f93a219..6816302 100644 +--- a/kernel/trace/trace_events_filter.c ++++ b/kernel/trace/trace_events_filter.c +@@ -1043,13 +1043,14 @@ static int init_pred(struct filter_parse_state *ps, + return -EINVAL; + } + +- if (is_string_field(field)) { ++ if (field->filter_type == FILTER_COMM) { ++ filter_build_regex(pred); ++ fn = filter_pred_comm; ++ pred->regex.field_len = TASK_COMM_LEN; ++ } else if (is_string_field(field)) { + filter_build_regex(pred); + +- if (!strcmp(field->name, "comm")) { +- fn = filter_pred_comm; +- pred->regex.field_len = TASK_COMM_LEN; +- } else if (field->filter_type == FILTER_STATIC_STRING) { ++ if (field->filter_type == FILTER_STATIC_STRING) { + fn = filter_pred_string; + pred->regex.field_len = field->size; + } else if (field->filter_type == FILTER_DYN_STRING) +@@ -1072,7 +1073,7 @@ static int init_pred(struct filter_parse_state *ps, + } + pred->val = val; + +- if (!strcmp(field->name, "cpu")) ++ if (field->filter_type == FILTER_CPU) + fn = filter_pred_cpu; + else + fn = select_comparison_fn(pred->op, field->size, +diff --git a/sound/core/control_compat.c b/sound/core/control_compat.c +index b9c0910..0608f21 100644 +--- a/sound/core/control_compat.c ++++ b/sound/core/control_compat.c +@@ -170,6 +170,19 @@ struct snd_ctl_elem_value32 { + unsigned char reserved[128]; + }; + ++#ifdef CONFIG_X86_X32 ++/* x32 has a different alignment for 64bit values from ia32 */ ++struct snd_ctl_elem_value_x32 { ++ struct snd_ctl_elem_id id; ++ unsigned int indirect; /* bit-field causes misalignment */ ++ union { ++ s32 integer[128]; ++ unsigned char data[512]; ++ s64 integer64[64]; ++ } value; ++ unsigned char reserved[128]; ++}; ++#endif /* CONFIG_X86_X32 */ + + /* get the value type and count of the control */ + static int get_ctl_type(struct snd_card *card, struct snd_ctl_elem_id *id, +@@ -219,9 +232,11 @@ static int get_elem_size(int type, int count) + + static int copy_ctl_value_from_user(struct snd_card *card, + struct snd_ctl_elem_value *data, +- struct snd_ctl_elem_value32 __user *data32, ++ void __user *userdata, ++ void __user *valuep, + int *typep, int *countp) + { ++ struct snd_ctl_elem_value32 __user *data32 = userdata; + int i, type, size; + int uninitialized_var(count); + 
unsigned int indirect; +@@ -239,8 +254,9 @@ static int copy_ctl_value_from_user(struct snd_card *card, + if (type == SNDRV_CTL_ELEM_TYPE_BOOLEAN || + type == SNDRV_CTL_ELEM_TYPE_INTEGER) { + for (i = 0; i < count; i++) { ++ s32 __user *intp = valuep; + int val; +- if (get_user(val, &data32->value.integer[i])) ++ if (get_user(val, &intp[i])) + return -EFAULT; + data->value.integer.value[i] = val; + } +@@ -250,8 +266,7 @@ static int copy_ctl_value_from_user(struct snd_card *card, + dev_err(card->dev, "snd_ioctl32_ctl_elem_value: unknown type %d\n", type); + return -EINVAL; + } +- if (copy_from_user(data->value.bytes.data, +- data32->value.data, size)) ++ if (copy_from_user(data->value.bytes.data, valuep, size)) + return -EFAULT; + } + +@@ -261,7 +276,8 @@ static int copy_ctl_value_from_user(struct snd_card *card, + } + + /* restore the value to 32bit */ +-static int copy_ctl_value_to_user(struct snd_ctl_elem_value32 __user *data32, ++static int copy_ctl_value_to_user(void __user *userdata, ++ void __user *valuep, + struct snd_ctl_elem_value *data, + int type, int count) + { +@@ -270,22 +286,22 @@ static int copy_ctl_value_to_user(struct snd_ctl_elem_value32 __user *data32, + if (type == SNDRV_CTL_ELEM_TYPE_BOOLEAN || + type == SNDRV_CTL_ELEM_TYPE_INTEGER) { + for (i = 0; i < count; i++) { ++ s32 __user *intp = valuep; + int val; + val = data->value.integer.value[i]; +- if (put_user(val, &data32->value.integer[i])) ++ if (put_user(val, &intp[i])) + return -EFAULT; + } + } else { + size = get_elem_size(type, count); +- if (copy_to_user(data32->value.data, +- data->value.bytes.data, size)) ++ if (copy_to_user(valuep, data->value.bytes.data, size)) + return -EFAULT; + } + return 0; + } + +-static int snd_ctl_elem_read_user_compat(struct snd_card *card, +- struct snd_ctl_elem_value32 __user *data32) ++static int ctl_elem_read_user(struct snd_card *card, ++ void __user *userdata, void __user *valuep) + { + struct snd_ctl_elem_value *data; + int err, type, count; +@@ -294,7 +310,9 @@ static int snd_ctl_elem_read_user_compat(struct snd_card *card, + if (data == NULL) + return -ENOMEM; + +- if ((err = copy_ctl_value_from_user(card, data, data32, &type, &count)) < 0) ++ err = copy_ctl_value_from_user(card, data, userdata, valuep, ++ &type, &count); ++ if (err < 0) + goto error; + + snd_power_lock(card); +@@ -303,14 +321,15 @@ static int snd_ctl_elem_read_user_compat(struct snd_card *card, + err = snd_ctl_elem_read(card, data); + snd_power_unlock(card); + if (err >= 0) +- err = copy_ctl_value_to_user(data32, data, type, count); ++ err = copy_ctl_value_to_user(userdata, valuep, data, ++ type, count); + error: + kfree(data); + return err; + } + +-static int snd_ctl_elem_write_user_compat(struct snd_ctl_file *file, +- struct snd_ctl_elem_value32 __user *data32) ++static int ctl_elem_write_user(struct snd_ctl_file *file, ++ void __user *userdata, void __user *valuep) + { + struct snd_ctl_elem_value *data; + struct snd_card *card = file->card; +@@ -320,7 +339,9 @@ static int snd_ctl_elem_write_user_compat(struct snd_ctl_file *file, + if (data == NULL) + return -ENOMEM; + +- if ((err = copy_ctl_value_from_user(card, data, data32, &type, &count)) < 0) ++ err = copy_ctl_value_from_user(card, data, userdata, valuep, ++ &type, &count); ++ if (err < 0) + goto error; + + snd_power_lock(card); +@@ -329,12 +350,39 @@ static int snd_ctl_elem_write_user_compat(struct snd_ctl_file *file, + err = snd_ctl_elem_write(card, file, data); + snd_power_unlock(card); + if (err >= 0) +- err = copy_ctl_value_to_user(data32, data, 
type, count); ++ err = copy_ctl_value_to_user(userdata, valuep, data, ++ type, count); + error: + kfree(data); + return err; + } + ++static int snd_ctl_elem_read_user_compat(struct snd_card *card, ++ struct snd_ctl_elem_value32 __user *data32) ++{ ++ return ctl_elem_read_user(card, data32, &data32->value); ++} ++ ++static int snd_ctl_elem_write_user_compat(struct snd_ctl_file *file, ++ struct snd_ctl_elem_value32 __user *data32) ++{ ++ return ctl_elem_write_user(file, data32, &data32->value); ++} ++ ++#ifdef CONFIG_X86_X32 ++static int snd_ctl_elem_read_user_x32(struct snd_card *card, ++ struct snd_ctl_elem_value_x32 __user *data32) ++{ ++ return ctl_elem_read_user(card, data32, &data32->value); ++} ++ ++static int snd_ctl_elem_write_user_x32(struct snd_ctl_file *file, ++ struct snd_ctl_elem_value_x32 __user *data32) ++{ ++ return ctl_elem_write_user(file, data32, &data32->value); ++} ++#endif /* CONFIG_X86_X32 */ ++ + /* add or replace a user control */ + static int snd_ctl_elem_add_compat(struct snd_ctl_file *file, + struct snd_ctl_elem_info32 __user *data32, +@@ -393,6 +441,10 @@ enum { + SNDRV_CTL_IOCTL_ELEM_WRITE32 = _IOWR('U', 0x13, struct snd_ctl_elem_value32), + SNDRV_CTL_IOCTL_ELEM_ADD32 = _IOWR('U', 0x17, struct snd_ctl_elem_info32), + SNDRV_CTL_IOCTL_ELEM_REPLACE32 = _IOWR('U', 0x18, struct snd_ctl_elem_info32), ++#ifdef CONFIG_X86_X32 ++ SNDRV_CTL_IOCTL_ELEM_READ_X32 = _IOWR('U', 0x12, struct snd_ctl_elem_value_x32), ++ SNDRV_CTL_IOCTL_ELEM_WRITE_X32 = _IOWR('U', 0x13, struct snd_ctl_elem_value_x32), ++#endif /* CONFIG_X86_X32 */ + }; + + static inline long snd_ctl_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg) +@@ -431,6 +483,12 @@ static inline long snd_ctl_ioctl_compat(struct file *file, unsigned int cmd, uns + return snd_ctl_elem_add_compat(ctl, argp, 0); + case SNDRV_CTL_IOCTL_ELEM_REPLACE32: + return snd_ctl_elem_add_compat(ctl, argp, 1); ++#ifdef CONFIG_X86_X32 ++ case SNDRV_CTL_IOCTL_ELEM_READ_X32: ++ return snd_ctl_elem_read_user_x32(ctl->card, argp); ++ case SNDRV_CTL_IOCTL_ELEM_WRITE_X32: ++ return snd_ctl_elem_write_user_x32(ctl, argp); ++#endif /* CONFIG_X86_X32 */ + } + + down_read(&snd_ioctl_rwsem); +diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c +index 9630e9f..1f64ab0 100644 +--- a/sound/core/pcm_compat.c ++++ b/sound/core/pcm_compat.c +@@ -183,6 +183,14 @@ static int snd_pcm_ioctl_channel_info_compat(struct snd_pcm_substream *substream + return err; + } + ++#ifdef CONFIG_X86_X32 ++/* X32 ABI has the same struct as x86-64 for snd_pcm_channel_info */ ++static int snd_pcm_channel_info_user(struct snd_pcm_substream *substream, ++ struct snd_pcm_channel_info __user *src); ++#define snd_pcm_ioctl_channel_info_x32(s, p) \ ++ snd_pcm_channel_info_user(s, p) ++#endif /* CONFIG_X86_X32 */ ++ + struct snd_pcm_status32 { + s32 state; + struct compat_timespec trigger_tstamp; +@@ -243,6 +251,71 @@ static int snd_pcm_status_user_compat(struct snd_pcm_substream *substream, + return err; + } + ++#ifdef CONFIG_X86_X32 ++/* X32 ABI has 64bit timespec and 64bit alignment */ ++struct snd_pcm_status_x32 { ++ s32 state; ++ u32 rsvd; /* alignment */ ++ struct timespec trigger_tstamp; ++ struct timespec tstamp; ++ u32 appl_ptr; ++ u32 hw_ptr; ++ s32 delay; ++ u32 avail; ++ u32 avail_max; ++ u32 overrange; ++ s32 suspended_state; ++ u32 audio_tstamp_data; ++ struct timespec audio_tstamp; ++ struct timespec driver_tstamp; ++ u32 audio_tstamp_accuracy; ++ unsigned char reserved[52-2*sizeof(struct timespec)]; ++} __packed; ++ ++#define 
put_timespec(src, dst) copy_to_user(dst, src, sizeof(*dst)) ++ ++static int snd_pcm_status_user_x32(struct snd_pcm_substream *substream, ++ struct snd_pcm_status_x32 __user *src, ++ bool ext) ++{ ++ struct snd_pcm_status status; ++ int err; ++ ++ memset(&status, 0, sizeof(status)); ++ /* ++ * with extension, parameters are read/write, ++ * get audio_tstamp_data from user, ++ * ignore rest of status structure ++ */ ++ if (ext && get_user(status.audio_tstamp_data, ++ (u32 __user *)(&src->audio_tstamp_data))) ++ return -EFAULT; ++ err = snd_pcm_status(substream, &status); ++ if (err < 0) ++ return err; ++ ++ if (clear_user(src, sizeof(*src))) ++ return -EFAULT; ++ if (put_user(status.state, &src->state) || ++ put_timespec(&status.trigger_tstamp, &src->trigger_tstamp) || ++ put_timespec(&status.tstamp, &src->tstamp) || ++ put_user(status.appl_ptr, &src->appl_ptr) || ++ put_user(status.hw_ptr, &src->hw_ptr) || ++ put_user(status.delay, &src->delay) || ++ put_user(status.avail, &src->avail) || ++ put_user(status.avail_max, &src->avail_max) || ++ put_user(status.overrange, &src->overrange) || ++ put_user(status.suspended_state, &src->suspended_state) || ++ put_user(status.audio_tstamp_data, &src->audio_tstamp_data) || ++ put_timespec(&status.audio_tstamp, &src->audio_tstamp) || ++ put_timespec(&status.driver_tstamp, &src->driver_tstamp) || ++ put_user(status.audio_tstamp_accuracy, &src->audio_tstamp_accuracy)) ++ return -EFAULT; ++ ++ return err; ++} ++#endif /* CONFIG_X86_X32 */ ++ + /* both for HW_PARAMS and HW_REFINE */ + static int snd_pcm_ioctl_hw_params_compat(struct snd_pcm_substream *substream, + int refine, +@@ -469,6 +542,93 @@ static int snd_pcm_ioctl_sync_ptr_compat(struct snd_pcm_substream *substream, + return 0; + } + ++#ifdef CONFIG_X86_X32 ++/* X32 ABI has 64bit timespec and 64bit alignment */ ++struct snd_pcm_mmap_status_x32 { ++ s32 state; ++ s32 pad1; ++ u32 hw_ptr; ++ u32 pad2; /* alignment */ ++ struct timespec tstamp; ++ s32 suspended_state; ++ struct timespec audio_tstamp; ++} __packed; ++ ++struct snd_pcm_mmap_control_x32 { ++ u32 appl_ptr; ++ u32 avail_min; ++}; ++ ++struct snd_pcm_sync_ptr_x32 { ++ u32 flags; ++ u32 rsvd; /* alignment */ ++ union { ++ struct snd_pcm_mmap_status_x32 status; ++ unsigned char reserved[64]; ++ } s; ++ union { ++ struct snd_pcm_mmap_control_x32 control; ++ unsigned char reserved[64]; ++ } c; ++} __packed; ++ ++static int snd_pcm_ioctl_sync_ptr_x32(struct snd_pcm_substream *substream, ++ struct snd_pcm_sync_ptr_x32 __user *src) ++{ ++ struct snd_pcm_runtime *runtime = substream->runtime; ++ volatile struct snd_pcm_mmap_status *status; ++ volatile struct snd_pcm_mmap_control *control; ++ u32 sflags; ++ struct snd_pcm_mmap_control scontrol; ++ struct snd_pcm_mmap_status sstatus; ++ snd_pcm_uframes_t boundary; ++ int err; ++ ++ if (snd_BUG_ON(!runtime)) ++ return -EINVAL; ++ ++ if (get_user(sflags, &src->flags) || ++ get_user(scontrol.appl_ptr, &src->c.control.appl_ptr) || ++ get_user(scontrol.avail_min, &src->c.control.avail_min)) ++ return -EFAULT; ++ if (sflags & SNDRV_PCM_SYNC_PTR_HWSYNC) { ++ err = snd_pcm_hwsync(substream); ++ if (err < 0) ++ return err; ++ } ++ status = runtime->status; ++ control = runtime->control; ++ boundary = recalculate_boundary(runtime); ++ if (!boundary) ++ boundary = 0x7fffffff; ++ snd_pcm_stream_lock_irq(substream); ++ /* FIXME: we should consider the boundary for the sync from app */ ++ if (!(sflags & SNDRV_PCM_SYNC_PTR_APPL)) ++ control->appl_ptr = scontrol.appl_ptr; ++ else ++ scontrol.appl_ptr = 
control->appl_ptr % boundary; ++ if (!(sflags & SNDRV_PCM_SYNC_PTR_AVAIL_MIN)) ++ control->avail_min = scontrol.avail_min; ++ else ++ scontrol.avail_min = control->avail_min; ++ sstatus.state = status->state; ++ sstatus.hw_ptr = status->hw_ptr % boundary; ++ sstatus.tstamp = status->tstamp; ++ sstatus.suspended_state = status->suspended_state; ++ sstatus.audio_tstamp = status->audio_tstamp; ++ snd_pcm_stream_unlock_irq(substream); ++ if (put_user(sstatus.state, &src->s.status.state) || ++ put_user(sstatus.hw_ptr, &src->s.status.hw_ptr) || ++ put_timespec(&sstatus.tstamp, &src->s.status.tstamp) || ++ put_user(sstatus.suspended_state, &src->s.status.suspended_state) || ++ put_timespec(&sstatus.audio_tstamp, &src->s.status.audio_tstamp) || ++ put_user(scontrol.appl_ptr, &src->c.control.appl_ptr) || ++ put_user(scontrol.avail_min, &src->c.control.avail_min)) ++ return -EFAULT; ++ ++ return 0; ++} ++#endif /* CONFIG_X86_X32 */ + + /* + */ +@@ -487,7 +647,12 @@ enum { + SNDRV_PCM_IOCTL_WRITEN_FRAMES32 = _IOW('A', 0x52, struct snd_xfern32), + SNDRV_PCM_IOCTL_READN_FRAMES32 = _IOR('A', 0x53, struct snd_xfern32), + SNDRV_PCM_IOCTL_SYNC_PTR32 = _IOWR('A', 0x23, struct snd_pcm_sync_ptr32), +- ++#ifdef CONFIG_X86_X32 ++ SNDRV_PCM_IOCTL_CHANNEL_INFO_X32 = _IOR('A', 0x32, struct snd_pcm_channel_info), ++ SNDRV_PCM_IOCTL_STATUS_X32 = _IOR('A', 0x20, struct snd_pcm_status_x32), ++ SNDRV_PCM_IOCTL_STATUS_EXT_X32 = _IOWR('A', 0x24, struct snd_pcm_status_x32), ++ SNDRV_PCM_IOCTL_SYNC_PTR_X32 = _IOWR('A', 0x23, struct snd_pcm_sync_ptr_x32), ++#endif /* CONFIG_X86_X32 */ + }; + + static long snd_pcm_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg) +@@ -559,6 +724,16 @@ static long snd_pcm_ioctl_compat(struct file *file, unsigned int cmd, unsigned l + return snd_pcm_ioctl_rewind_compat(substream, argp); + case SNDRV_PCM_IOCTL_FORWARD32: + return snd_pcm_ioctl_forward_compat(substream, argp); ++#ifdef CONFIG_X86_X32 ++ case SNDRV_PCM_IOCTL_STATUS_X32: ++ return snd_pcm_status_user_x32(substream, argp, false); ++ case SNDRV_PCM_IOCTL_STATUS_EXT_X32: ++ return snd_pcm_status_user_x32(substream, argp, true); ++ case SNDRV_PCM_IOCTL_SYNC_PTR_X32: ++ return snd_pcm_ioctl_sync_ptr_x32(substream, argp); ++ case SNDRV_PCM_IOCTL_CHANNEL_INFO_X32: ++ return snd_pcm_ioctl_channel_info_x32(substream, argp); ++#endif /* CONFIG_X86_X32 */ + } + + return -ENOIOCTLCMD; +diff --git a/sound/core/rawmidi_compat.c b/sound/core/rawmidi_compat.c +index 5268c1f..09a8909 100644 +--- a/sound/core/rawmidi_compat.c ++++ b/sound/core/rawmidi_compat.c +@@ -94,9 +94,58 @@ static int snd_rawmidi_ioctl_status_compat(struct snd_rawmidi_file *rfile, + return 0; + } + ++#ifdef CONFIG_X86_X32 ++/* X32 ABI has 64bit timespec and 64bit alignment */ ++struct snd_rawmidi_status_x32 { ++ s32 stream; ++ u32 rsvd; /* alignment */ ++ struct timespec tstamp; ++ u32 avail; ++ u32 xruns; ++ unsigned char reserved[16]; ++} __attribute__((packed)); ++ ++#define put_timespec(src, dst) copy_to_user(dst, src, sizeof(*dst)) ++ ++static int snd_rawmidi_ioctl_status_x32(struct snd_rawmidi_file *rfile, ++ struct snd_rawmidi_status_x32 __user *src) ++{ ++ int err; ++ struct snd_rawmidi_status status; ++ ++ if (rfile->output == NULL) ++ return -EINVAL; ++ if (get_user(status.stream, &src->stream)) ++ return -EFAULT; ++ ++ switch (status.stream) { ++ case SNDRV_RAWMIDI_STREAM_OUTPUT: ++ err = snd_rawmidi_output_status(rfile->output, &status); ++ break; ++ case SNDRV_RAWMIDI_STREAM_INPUT: ++ err = snd_rawmidi_input_status(rfile->input, &status); ++ 
break; ++ default: ++ return -EINVAL; ++ } ++ if (err < 0) ++ return err; ++ ++ if (put_timespec(&status.tstamp, &src->tstamp) || ++ put_user(status.avail, &src->avail) || ++ put_user(status.xruns, &src->xruns)) ++ return -EFAULT; ++ ++ return 0; ++} ++#endif /* CONFIG_X86_X32 */ ++ + enum { + SNDRV_RAWMIDI_IOCTL_PARAMS32 = _IOWR('W', 0x10, struct snd_rawmidi_params32), + SNDRV_RAWMIDI_IOCTL_STATUS32 = _IOWR('W', 0x20, struct snd_rawmidi_status32), ++#ifdef CONFIG_X86_X32 ++ SNDRV_RAWMIDI_IOCTL_STATUS_X32 = _IOWR('W', 0x20, struct snd_rawmidi_status_x32), ++#endif /* CONFIG_X86_X32 */ + }; + + static long snd_rawmidi_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg) +@@ -115,6 +164,10 @@ static long snd_rawmidi_ioctl_compat(struct file *file, unsigned int cmd, unsign + return snd_rawmidi_ioctl_params_compat(rfile, argp); + case SNDRV_RAWMIDI_IOCTL_STATUS32: + return snd_rawmidi_ioctl_status_compat(rfile, argp); ++#ifdef CONFIG_X86_X32 ++ case SNDRV_RAWMIDI_IOCTL_STATUS_X32: ++ return snd_rawmidi_ioctl_status_x32(rfile, argp); ++#endif /* CONFIG_X86_X32 */ + } + return -ENOIOCTLCMD; + } +diff --git a/sound/core/seq/oss/seq_oss.c b/sound/core/seq/oss/seq_oss.c +index 7354b8b..cb23899 100644 +--- a/sound/core/seq/oss/seq_oss.c ++++ b/sound/core/seq/oss/seq_oss.c +@@ -148,8 +148,6 @@ odev_release(struct inode *inode, struct file *file) + if ((dp = file->private_data) == NULL) + return 0; + +- snd_seq_oss_drain_write(dp); +- + mutex_lock(®ister_mutex); + snd_seq_oss_release(dp); + mutex_unlock(®ister_mutex); +diff --git a/sound/core/seq/oss/seq_oss_device.h b/sound/core/seq/oss/seq_oss_device.h +index b439243..d7b4d01 100644 +--- a/sound/core/seq/oss/seq_oss_device.h ++++ b/sound/core/seq/oss/seq_oss_device.h +@@ -127,7 +127,6 @@ int snd_seq_oss_write(struct seq_oss_devinfo *dp, const char __user *buf, int co + unsigned int snd_seq_oss_poll(struct seq_oss_devinfo *dp, struct file *file, poll_table * wait); + + void snd_seq_oss_reset(struct seq_oss_devinfo *dp); +-void snd_seq_oss_drain_write(struct seq_oss_devinfo *dp); + + /* */ + void snd_seq_oss_process_queue(struct seq_oss_devinfo *dp, abstime_t time); +diff --git a/sound/core/seq/oss/seq_oss_init.c b/sound/core/seq/oss/seq_oss_init.c +index 6779e82b..92c96a9 100644 +--- a/sound/core/seq/oss/seq_oss_init.c ++++ b/sound/core/seq/oss/seq_oss_init.c +@@ -436,22 +436,6 @@ snd_seq_oss_release(struct seq_oss_devinfo *dp) + + + /* +- * Wait until the queue is empty (if we don't have nonblock) +- */ +-void +-snd_seq_oss_drain_write(struct seq_oss_devinfo *dp) +-{ +- if (! 
dp->timer->running) +- return; +- if (is_write_mode(dp->file_mode) && !is_nonblock_mode(dp->file_mode) && +- dp->writeq) { +- while (snd_seq_oss_writeq_sync(dp->writeq)) +- ; +- } +-} +- +- +-/* + * reset sequencer devices + */ + void +diff --git a/sound/core/timer_compat.c b/sound/core/timer_compat.c +index e05802a..2e90822 100644 +--- a/sound/core/timer_compat.c ++++ b/sound/core/timer_compat.c +@@ -70,13 +70,14 @@ static int snd_timer_user_status_compat(struct file *file, + struct snd_timer_status32 __user *_status) + { + struct snd_timer_user *tu; +- struct snd_timer_status status; ++ struct snd_timer_status32 status; + + tu = file->private_data; + if (snd_BUG_ON(!tu->timeri)) + return -ENXIO; + memset(&status, 0, sizeof(status)); +- status.tstamp = tu->tstamp; ++ status.tstamp.tv_sec = tu->tstamp.tv_sec; ++ status.tstamp.tv_nsec = tu->tstamp.tv_nsec; + status.resolution = snd_timer_resolution(tu->timeri); + status.lost = tu->timeri->lost; + status.overrun = tu->overrun; +@@ -88,12 +89,21 @@ static int snd_timer_user_status_compat(struct file *file, + return 0; + } + ++#ifdef CONFIG_X86_X32 ++/* X32 ABI has the same struct as x86-64 */ ++#define snd_timer_user_status_x32(file, s) \ ++ snd_timer_user_status(file, s) ++#endif /* CONFIG_X86_X32 */ ++ + /* + */ + + enum { + SNDRV_TIMER_IOCTL_INFO32 = _IOR('T', 0x11, struct snd_timer_info32), + SNDRV_TIMER_IOCTL_STATUS32 = _IOW('T', 0x14, struct snd_timer_status32), ++#ifdef CONFIG_X86_X32 ++ SNDRV_TIMER_IOCTL_STATUS_X32 = _IOW('T', 0x14, struct snd_timer_status), ++#endif /* CONFIG_X86_X32 */ + }; + + static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg) +@@ -122,6 +132,10 @@ static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, uns + return snd_timer_user_info_compat(file, argp); + case SNDRV_TIMER_IOCTL_STATUS32: + return snd_timer_user_status_compat(file, argp); ++#ifdef CONFIG_X86_X32 ++ case SNDRV_TIMER_IOCTL_STATUS_X32: ++ return snd_timer_user_status_x32(file, argp); ++#endif /* CONFIG_X86_X32 */ + } + return -ENOIOCTLCMD; + } +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c +index 72fa58d..c2430b3 100644 +--- a/sound/pci/hda/patch_realtek.c ++++ b/sound/pci/hda/patch_realtek.c +@@ -5386,6 +5386,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x1025, 0x080d, "Acer Aspire V5-122P", ALC269_FIXUP_ASPIRE_HEADSET_MIC), + SND_PCI_QUIRK(0x1025, 0x0740, "Acer AO725", ALC271_FIXUP_HP_GATE_MIC_JACK), + SND_PCI_QUIRK(0x1025, 0x0742, "Acer AO756", ALC271_FIXUP_HP_GATE_MIC_JACK), ++ SND_PCI_QUIRK(0x1025, 0x0762, "Acer Aspire E1-472", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572), + SND_PCI_QUIRK(0x1025, 0x0775, "Acer Aspire E1-572", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572), + SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS), + SND_PCI_QUIRK(0x1025, 0x106d, "Acer Cloudbook 14", ALC283_FIXUP_CHROME_BOOK), +diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c +index 2875b4f..7c8941b 100644 +--- a/sound/pci/rme9652/hdsp.c ++++ b/sound/pci/rme9652/hdsp.c +@@ -2879,7 +2879,7 @@ static int snd_hdsp_get_dds_offset(struct snd_kcontrol *kcontrol, struct snd_ctl + { + struct hdsp *hdsp = snd_kcontrol_chip(kcontrol); + +- ucontrol->value.enumerated.item[0] = hdsp_dds_offset(hdsp); ++ ucontrol->value.integer.value[0] = hdsp_dds_offset(hdsp); + return 0; + } + +@@ -2891,7 +2891,7 @@ static int snd_hdsp_put_dds_offset(struct snd_kcontrol *kcontrol, struct snd_ctl + + if (!snd_hdsp_use_is_exclusive(hdsp)) + 
return -EBUSY; +- val = ucontrol->value.enumerated.item[0]; ++ val = ucontrol->value.integer.value[0]; + spin_lock_irq(&hdsp->lock); + if (val != hdsp_dds_offset(hdsp)) + change = (hdsp_set_dds_offset(hdsp, val) == 0) ? 1 : 0; +diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c +index 8bc8016..a4a999a 100644 +--- a/sound/pci/rme9652/hdspm.c ++++ b/sound/pci/rme9652/hdspm.c +@@ -1601,6 +1601,9 @@ static void hdspm_set_dds_value(struct hdspm *hdspm, int rate) + { + u64 n; + ++ if (snd_BUG_ON(rate <= 0)) ++ return; ++ + if (rate >= 112000) + rate /= 4; + else if (rate >= 56000) +@@ -2215,6 +2218,8 @@ static int hdspm_get_system_sample_rate(struct hdspm *hdspm) + } else { + /* slave mode, return external sample rate */ + rate = hdspm_external_sample_rate(hdspm); ++ if (!rate) ++ rate = hdspm->system_sample_rate; + } + } + +@@ -2260,8 +2265,11 @@ static int snd_hdspm_put_system_sample_rate(struct snd_kcontrol *kcontrol, + ucontrol) + { + struct hdspm *hdspm = snd_kcontrol_chip(kcontrol); ++ int rate = ucontrol->value.integer.value[0]; + +- hdspm_set_dds_value(hdspm, ucontrol->value.enumerated.item[0]); ++ if (rate < 27000 || rate > 207000) ++ return -EINVAL; ++ hdspm_set_dds_value(hdspm, ucontrol->value.integer.value[0]); + return 0; + } + +@@ -4449,7 +4457,7 @@ static int snd_hdspm_get_tco_word_term(struct snd_kcontrol *kcontrol, + { + struct hdspm *hdspm = snd_kcontrol_chip(kcontrol); + +- ucontrol->value.enumerated.item[0] = hdspm->tco->term; ++ ucontrol->value.integer.value[0] = hdspm->tco->term; + + return 0; + } +@@ -4460,8 +4468,8 @@ static int snd_hdspm_put_tco_word_term(struct snd_kcontrol *kcontrol, + { + struct hdspm *hdspm = snd_kcontrol_chip(kcontrol); + +- if (hdspm->tco->term != ucontrol->value.enumerated.item[0]) { +- hdspm->tco->term = ucontrol->value.enumerated.item[0]; ++ if (hdspm->tco->term != ucontrol->value.integer.value[0]) { ++ hdspm->tco->term = ucontrol->value.integer.value[0]; + + hdspm_tco_write(hdspm); + +diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c +index 4f6ce1c..c458d60 100644 +--- a/sound/usb/quirks.c ++++ b/sound/usb/quirks.c +@@ -1124,6 +1124,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip) + case USB_ID(0x045E, 0x076F): /* MS Lifecam HD-6000 */ + case USB_ID(0x045E, 0x0772): /* MS Lifecam Studio */ + case USB_ID(0x045E, 0x0779): /* MS Lifecam HD-3000 */ ++ case USB_ID(0x047F, 0xAA05): /* Plantronics DA45 */ + case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */ + case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */ + case USB_ID(0x21B4, 0x0081): /* AudioQuest DragonFly */ diff --git a/4.4.4/4420_grsecurity-3.1-4.4.4-201603032158.patch b/4.4.5/4420_grsecurity-3.1-4.4.5-201603102309.patch index 88b7093..13412c8 100644 --- a/4.4.4/4420_grsecurity-3.1-4.4.4-201603032158.patch +++ b/4.4.5/4420_grsecurity-3.1-4.4.5-201603102309.patch @@ -449,7 +449,7 @@ index af70d15..ccd3786 100644 A toggle value indicating if modules are allowed to be loaded diff --git a/Makefile b/Makefile -index 344bc6f..4753efd 100644 +index d13322a..6eaab55 100644 --- a/Makefile +++ b/Makefile @@ -298,7 +298,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \ @@ -3743,7 +3743,7 @@ index c8c8b9e..c55cc79 100644 atomic64_set(&mm->context.id, asid); } diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c -index daafcf1..400ddf25 100644 +index daafcf1..8205ed6 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -25,6 +25,7 @@ @@ -3940,7 +3940,7 @@ index daafcf1..400ddf25 100644 +#ifdef CONFIG_THUMB2_KERNEL + 
unsigned short bkpt; + -+ if (!probe_kernel_address(pc, bkpt) && cpu_to_le16(bkpt) == 0xbef1) { ++ if (!probe_kernel_address((const unsigned short *)pc, bkpt) && cpu_to_le16(bkpt) == 0xbef1) { +#else + unsigned int bkpt; + @@ -6695,11 +6695,11 @@ index 2242bdd..b284048 100644 } /* Arrange for an interrupt in a short while */ diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c -index 886cb19..e73a287 100644 +index ca9a810..e00a026 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c -@@ -693,7 +693,18 @@ asmlinkage void do_ov(struct pt_regs *regs) - siginfo_t info; +@@ -697,7 +697,18 @@ asmlinkage void do_ov(struct pt_regs *regs) + }; prev_state = exception_enter(); - die_if_kernel("Integer overflow", regs); @@ -6716,8 +6716,8 @@ index 886cb19..e73a287 100644 + die("Integer overflow", regs); + } - info.si_code = FPE_INTOVF; - info.si_signo = SIGFPE; + force_sig_info(SIGFPE, &info, current); + exception_exit(prev_state); diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c index 4b88fa0..b16bc17 100644 --- a/arch/mips/mm/fault.c @@ -16502,9 +16502,18 @@ index efb2b93..8a9cb8e 100644 _ASM_NOKPROBE(restore) #endif diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile -index 265c0ed..6f03321 100644 +index 265c0ed..a9ca19a 100644 --- a/arch/x86/entry/vdso/Makefile +++ b/arch/x86/entry/vdso/Makefile +@@ -69,7 +69,7 @@ CFL := $(PROFILING) -mcmodel=small -fPIC -O2 -fasynchronous-unwind-tables -m64 \ + -fno-omit-frame-pointer -foptimize-sibling-calls \ + -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO + +-$(vobjs): KBUILD_CFLAGS += $(CFL) ++$(vobjs): KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS)) $(CFL) + + # + # vDSO code runs in userspace and -pg doesn't help with profiling anyway. @@ -162,7 +162,7 @@ quiet_cmd_vdso = VDSO $@ -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \ sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@' @@ -22468,10 +22477,10 @@ index e759076..441137a 100644 * HP laptops which use a DSDT reporting as HP/SB400/10000, * which includes some code which overrides all temperature diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c -index d1daead..acd77e2 100644 +index adb3eaf..0eb666c 100644 --- a/arch/x86/kernel/acpi/sleep.c +++ b/arch/x86/kernel/acpi/sleep.c -@@ -99,8 +99,12 @@ int x86_acpi_suspend_lowlevel(void) +@@ -100,8 +100,12 @@ int x86_acpi_suspend_lowlevel(void) #else /* CONFIG_64BIT */ #ifdef CONFIG_SMP stack_start = (unsigned long)temp_stack + sizeof(temp_stack); @@ -29635,10 +29644,10 @@ index 899c40f..a114588 100644 .disabled_by_bios = is_disabled, .hardware_setup = svm_hardware_setup, diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c -index 10e7693..aa4d471 100644 +index 5fd846c..cdf2fca 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c -@@ -1512,12 +1512,12 @@ static void vmcs_write64(unsigned long field, u64 value) +@@ -1514,12 +1514,12 @@ static void vmcs_write64(unsigned long field, u64 value) #endif } @@ -29653,7 +29662,7 @@ index 10e7693..aa4d471 100644 { vmcs_writel(field, vmcs_readl(field) | mask); } -@@ -1777,7 +1777,11 @@ static void reload_tss(void) +@@ -1779,7 +1779,11 @@ static void reload_tss(void) struct desc_struct *descs; descs = (void *)gdt->address; @@ -29665,7 +29674,7 @@ index 10e7693..aa4d471 100644 load_TR_desc(); } -@@ -2059,6 +2063,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu) +@@ -2061,6 +2065,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu) vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */ 
vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */ @@ -29676,7 +29685,7 @@ index 10e7693..aa4d471 100644 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp); vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */ -@@ -2374,7 +2382,7 @@ static void setup_msrs(struct vcpu_vmx *vmx) +@@ -2378,7 +2386,7 @@ static void setup_msrs(struct vcpu_vmx *vmx) * guest_tsc = (host_tsc * tsc multiplier) >> 48 + tsc_offset * -- Intel TSC Scaling for Virtualization White Paper, sec 1.3 */ @@ -29685,7 +29694,7 @@ index 10e7693..aa4d471 100644 { u64 host_tsc, tsc_offset; -@@ -4605,7 +4613,10 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx) +@@ -4609,7 +4617,10 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx) unsigned long cr4; vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */ @@ -29696,7 +29705,7 @@ index 10e7693..aa4d471 100644 /* Save the most likely value for this task's CR4 in the VMCS. */ cr4 = cr4_read_shadow(); -@@ -4632,7 +4643,7 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx) +@@ -4636,7 +4647,7 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx) vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */ vmx->host_idt_base = dt.address; @@ -29705,7 +29714,7 @@ index 10e7693..aa4d471 100644 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32); vmcs_write32(HOST_IA32_SYSENTER_CS, low32); -@@ -6182,11 +6193,17 @@ static __init int hardware_setup(void) +@@ -6186,11 +6197,17 @@ static __init int hardware_setup(void) * page upon invalidation. No need to do anything if not * using the APIC_ACCESS_ADDR VMCS field. */ @@ -29725,7 +29734,7 @@ index 10e7693..aa4d471 100644 if (enable_ept && !cpu_has_vmx_ept_2m_page()) kvm_disable_largepages(); -@@ -6203,6 +6220,7 @@ static __init int hardware_setup(void) +@@ -6207,6 +6224,7 @@ static __init int hardware_setup(void) kvm_tsc_scaling_ratio_frac_bits = 48; } @@ -29733,7 +29742,7 @@ index 10e7693..aa4d471 100644 if (enable_apicv) kvm_x86_ops->update_cr8_intercept = NULL; else { -@@ -6211,6 +6229,7 @@ static __init int hardware_setup(void) +@@ -6215,6 +6233,7 @@ static __init int hardware_setup(void) kvm_x86_ops->deliver_posted_interrupt = NULL; kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy; } @@ -29741,7 +29750,7 @@ index 10e7693..aa4d471 100644 vmx_disable_intercept_for_msr(MSR_FS_BASE, false); vmx_disable_intercept_for_msr(MSR_GS_BASE, false); -@@ -6265,10 +6284,12 @@ static __init int hardware_setup(void) +@@ -6269,10 +6288,12 @@ static __init int hardware_setup(void) enable_pml = 0; if (!enable_pml) { @@ -29754,7 +29763,7 @@ index 10e7693..aa4d471 100644 } kvm_set_posted_intr_wakeup_handler(wakeup_handler); -@@ -8580,6 +8601,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) +@@ -8584,6 +8605,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) "jmp 2f \n\t" "1: " __ex(ASM_VMX_VMRESUME) "\n\t" "2: " @@ -29767,7 +29776,7 @@ index 10e7693..aa4d471 100644 /* Save guest registers, load host registers, keep flags */ "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t" "pop %0 \n\t" -@@ -8632,6 +8659,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) +@@ -8636,6 +8663,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) #endif [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)), [wordsize]"i"(sizeof(ulong)) @@ -29779,7 +29788,7 @@ index 10e7693..aa4d471 100644 : "cc", "memory" #ifdef CONFIG_X86_64 , "rax", "rbx", "rdi", "rsi" -@@ -8645,7 +8677,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) +@@ -8649,7 +8681,7 @@ static void __noclone 
vmx_vcpu_run(struct kvm_vcpu *vcpu) if (debugctlmsr) update_debugctlmsr(debugctlmsr); @@ -29788,7 +29797,7 @@ index 10e7693..aa4d471 100644 /* * The sysexit path does not restore ds/es, so we must set them to * a reasonable value ourselves. -@@ -8654,8 +8686,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) +@@ -8658,8 +8690,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) * may be executed in interrupt context, which saves and restore segments * around it, nullifying its effect. */ @@ -29809,7 +29818,7 @@ index 10e7693..aa4d471 100644 #endif vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP) -@@ -10737,7 +10779,7 @@ out: +@@ -10741,7 +10783,7 @@ out: return ret; } @@ -29819,7 +29828,7 @@ index 10e7693..aa4d471 100644 .disabled_by_bios = vmx_disabled_by_bios, .hardware_setup = hardware_setup, diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c -index 6ef3856..12e4701 100644 +index d294502..af80bcb 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1937,8 +1937,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data) @@ -29842,7 +29851,7 @@ index 6ef3856..12e4701 100644 if (copy_to_user(user_msr_list->indices, &msrs_to_save, num_msrs_to_save * sizeof(u32))) goto out; -@@ -3029,7 +3031,7 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu, +@@ -3028,7 +3030,7 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu, static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu) { @@ -29851,7 +29860,7 @@ index 6ef3856..12e4701 100644 u64 xstate_bv = xsave->header.xfeatures; u64 valid; -@@ -3065,7 +3067,7 @@ static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu) +@@ -3064,7 +3066,7 @@ static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu) static void load_xsave(struct kvm_vcpu *vcpu, u8 *src) { @@ -29860,7 +29869,7 @@ index 6ef3856..12e4701 100644 u64 xstate_bv = *(u64 *)(src + XSAVE_HDR_OFFSET); u64 valid; -@@ -3109,7 +3111,7 @@ static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu, +@@ -3108,7 +3110,7 @@ static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu, fill_xsave((u8 *) guest_xsave->region, vcpu); } else { memcpy(guest_xsave->region, @@ -29869,7 +29878,7 @@ index 6ef3856..12e4701 100644 sizeof(struct fxregs_state)); *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] = XFEATURE_MASK_FPSSE; -@@ -3134,7 +3136,7 @@ static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu, +@@ -3133,7 +3135,7 @@ static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu, } else { if (xstate_bv & ~XFEATURE_MASK_FPSSE) return -EINVAL; @@ -29878,7 +29887,7 @@ index 6ef3856..12e4701 100644 guest_xsave->region, sizeof(struct fxregs_state)); } return 0; -@@ -6364,6 +6366,7 @@ void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm, +@@ -6363,6 +6365,7 @@ void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm, * exiting to the userspace. Otherwise, the value will be returned to the * userspace. 
*/ @@ -29886,7 +29895,7 @@ index 6ef3856..12e4701 100644 static int vcpu_enter_guest(struct kvm_vcpu *vcpu) { int r; -@@ -6612,6 +6615,7 @@ out: +@@ -6611,6 +6614,7 @@ out: return r; } @@ -29894,7 +29903,7 @@ index 6ef3856..12e4701 100644 static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu) { if (!kvm_arch_vcpu_runnable(vcpu) && -@@ -7159,7 +7163,7 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, +@@ -7158,7 +7162,7 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) { struct fxregs_state *fxsave = @@ -29903,7 +29912,7 @@ index 6ef3856..12e4701 100644 memcpy(fpu->fpr, fxsave->st_space, 128); fpu->fcw = fxsave->cwd; -@@ -7176,7 +7180,7 @@ int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) +@@ -7175,7 +7179,7 @@ int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) { struct fxregs_state *fxsave = @@ -29912,7 +29921,7 @@ index 6ef3856..12e4701 100644 memcpy(fxsave->st_space, fpu->fpr, 128); fxsave->cwd = fpu->fcw; -@@ -7192,9 +7196,9 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) +@@ -7191,9 +7195,9 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) static void fx_init(struct kvm_vcpu *vcpu) { @@ -29924,7 +29933,7 @@ index 6ef3856..12e4701 100644 host_xcr0 | XSTATE_COMPACTION_ENABLED; /* -@@ -7218,7 +7222,7 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu) +@@ -7217,7 +7221,7 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu) kvm_put_guest_xcr0(vcpu); vcpu->guest_fpu_loaded = 1; __kernel_fpu_begin(); @@ -29933,7 +29942,7 @@ index 6ef3856..12e4701 100644 trace_kvm_fpu(1); } -@@ -7521,6 +7525,8 @@ bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) +@@ -7520,6 +7524,8 @@ bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) struct static_key kvm_no_apic_vcpu __read_mostly; @@ -29942,7 +29951,7 @@ index 6ef3856..12e4701 100644 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) { struct page *page; -@@ -7537,11 +7543,14 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) +@@ -7536,11 +7542,14 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) else vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED; @@ -29961,7 +29970,7 @@ index 6ef3856..12e4701 100644 vcpu->arch.pio_data = page_address(page); kvm_set_tsc_khz(vcpu, max_tsc_khz); -@@ -7597,6 +7606,9 @@ fail_mmu_destroy: +@@ -7596,6 +7605,9 @@ fail_mmu_destroy: kvm_mmu_destroy(vcpu); fail_free_pio_data: free_page((unsigned long)vcpu->arch.pio_data); @@ -29971,7 +29980,7 @@ index 6ef3856..12e4701 100644 fail: return r; } -@@ -7614,6 +7626,8 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) +@@ -7613,6 +7625,8 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) free_page((unsigned long)vcpu->arch.pio_data); if (!lapic_in_kernel(vcpu)) static_key_slow_dec(&kvm_no_apic_vcpu); @@ -37709,10 +37718,10 @@ index b79cb10..7daa9f7 100644 } diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c -index 7e959f9..cc71b14 100644 +index e417e1a..baf752f 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c -@@ -4197,7 +4197,7 @@ int ata_sas_port_init(struct ata_port *ap) +@@ -4196,7 +4196,7 @@ int ata_sas_port_init(struct ata_port *ap) if (rc) return rc; @@ -42844,7 +42853,7 @@ index 61fcb3b..bad2d5f 100644 -int i915_max_ioctl = ARRAY_SIZE(i915_ioctls); +const int i915_max_ioctl = ARRAY_SIZE(i915_ioctls); diff --git a/drivers/gpu/drm/i915/i915_drv.c 
b/drivers/gpu/drm/i915/i915_drv.c -index 760e0ce..ab11c3f 100644 +index a6ad938..ad4ff46 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -40,7 +40,7 @@ @@ -42856,7 +42865,7 @@ index 760e0ce..ab11c3f 100644 #define GEN_DEFAULT_PIPEOFFSETS \ .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \ -@@ -1687,7 +1687,7 @@ static const struct file_operations i915_driver_fops = { +@@ -1690,7 +1690,7 @@ static const struct file_operations i915_driver_fops = { .llseek = noop_llseek, }; @@ -42865,7 +42874,7 @@ index 760e0ce..ab11c3f 100644 /* Don't use MTRRs here; the Xserver or userspace app should * deal with them for Intel hardware. */ -@@ -1737,6 +1737,7 @@ static struct pci_driver i915_pci_driver = { +@@ -1740,6 +1740,7 @@ static struct pci_driver i915_pci_driver = { static int __init i915_init(void) { @@ -42873,7 +42882,7 @@ index 760e0ce..ab11c3f 100644 driver.num_ioctls = i915_max_ioctl; /* -@@ -1754,6 +1755,7 @@ static int __init i915_init(void) +@@ -1757,6 +1758,7 @@ static int __init i915_init(void) #endif if (!(driver.driver_features & DRIVER_MODESET)) { @@ -42881,7 +42890,7 @@ index 760e0ce..ab11c3f 100644 /* Silently fail loading to not upset userspace. */ DRM_DEBUG_DRIVER("KMS and UMS disabled.\n"); return 0; -@@ -1761,6 +1763,7 @@ static int __init i915_init(void) +@@ -1764,6 +1766,7 @@ static int __init i915_init(void) if (i915.nuclear_pageflip) driver.driver_features |= DRIVER_ATOMIC; @@ -42890,10 +42899,10 @@ index 760e0ce..ab11c3f 100644 return drm_pci_init(&driver, &i915_pci_driver); } diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h -index f4af19a..99d81cb 100644 +index d3ce4da..73a3dda 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h -@@ -2636,7 +2636,7 @@ struct drm_i915_cmd_table { +@@ -2637,7 +2637,7 @@ struct drm_i915_cmd_table { #include "i915_trace.h" extern const struct drm_ioctl_desc i915_ioctls[]; @@ -57793,10 +57802,10 @@ index 35f7d31..a9928a7 100644 login->tgt_agt = sbp_target_agent_register(login); if (IS_ERR(login->tgt_agt)) { diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c -index e3fbc5a..a72202c 100644 +index 6ceac4f..b2ed52c 100644 --- a/drivers/thermal/cpu_cooling.c +++ b/drivers/thermal/cpu_cooling.c -@@ -836,10 +836,11 @@ __cpufreq_cooling_register(struct device_node *np, +@@ -838,10 +838,11 @@ __cpufreq_cooling_register(struct device_node *np, cpumask_copy(&cpufreq_dev->allowed_cpus, clip_cpus); if (capacitance) { @@ -60379,7 +60388,7 @@ index 84a110a..96312c3 100644 .ident = "Sahara Touch-iT", .matches = { diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c -index 92f3949..7bb0e86 100644 +index 6e92917..2f3dbad 100644 --- a/drivers/video/console/fbcon.c +++ b/drivers/video/console/fbcon.c @@ -106,7 +106,7 @@ static int fbcon_softback_size = 32768; @@ -77464,7 +77473,7 @@ index 5b8e235..33d31fc 100644 WARN_ON(trans->transid != btrfs_header_generation(parent)); diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h -index 35489e7..fac96ff 100644 +index 385b449..289f2c4 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -1576,7 +1576,7 @@ struct btrfs_fs_info { @@ -77561,7 +77570,7 @@ index e06dd75a..22221aa 100644 /* first set the basic ref node struct up */ atomic_set(&ref->refs, 1); diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c -index 4958360..70b753e 100644 +index 41fb431..9b5ad63 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1263,7 +1263,7 @@ static void __setup_root(u32 nodesize, u32 
sectorsize, u32 stripesize, @@ -77573,7 +77582,7 @@ index 4958360..70b753e 100644 atomic_set(&root->orphan_inodes, 0); atomic_set(&root->refs, 1); atomic_set(&root->will_be_snapshoted, 0); -@@ -2579,7 +2579,7 @@ int open_ctree(struct super_block *sb, +@@ -2582,7 +2582,7 @@ int open_ctree(struct super_block *sb, atomic_set(&fs_info->nr_async_bios, 0); atomic_set(&fs_info->defrag_running, 0); atomic_set(&fs_info->qgroup_op_seq, 0); @@ -78694,10 +78703,10 @@ index 53ccdde..3debfe6 100644 } diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c -index 7675555..9efb579 100644 +index 373b5cd..b231fe6 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c -@@ -2386,8 +2386,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon, +@@ -2390,8 +2390,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon, default: cifs_dbg(VFS, "info level %u isn't supported\n", srch_inf->info_level); @@ -78756,7 +78765,7 @@ index 5bb630a..043dc70 100644 return hit; diff --git a/fs/compat.c b/fs/compat.c -index 6fd272d..ae85f4f 100644 +index 6fd272d..39582b6 100644 --- a/fs/compat.c +++ b/fs/compat.c @@ -54,7 +54,7 @@ @@ -78875,6 +78884,15 @@ index 6fd272d..ae85f4f 100644 error = -EFAULT; else error = count - buf.count; +@@ -1328,7 +1345,7 @@ COMPAT_SYSCALL_DEFINE5(select, int, n, compat_ulong_t __user *, inp, + } + + struct compat_sel_arg_struct { +- compat_ulong_t n; ++ compat_long_t n; + compat_uptr_t inp; + compat_uptr_t outp; + compat_uptr_t exp; diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c index 4d24d17..4f8c09e 100644 --- a/fs/compat_binfmt_elf.c @@ -79594,10 +79612,10 @@ index 1777331..d6154a2 100644 return 0; while (nr) { diff --git a/fs/dcache.c b/fs/dcache.c -index 5c33aeb..e8739c7 100644 +index 877bcbb..7ec5109 100644 --- a/fs/dcache.c +++ b/fs/dcache.c -@@ -341,8 +341,9 @@ static inline void dentry_rcuwalk_invalidate(struct dentry *dentry) +@@ -328,8 +328,9 @@ static inline void dentry_rcuwalk_invalidate(struct dentry *dentry) * and is unhashed. */ static void dentry_iput(struct dentry * dentry) @@ -79609,7 +79627,7 @@ index 5c33aeb..e8739c7 100644 { struct inode *inode = dentry->d_inode; if (inode) { -@@ -366,8 +367,9 @@ static void dentry_iput(struct dentry * dentry) +@@ -353,8 +354,9 @@ static void dentry_iput(struct dentry * dentry) * d_iput() operation if defined. dentry remains in-use. */ static void dentry_unlink_inode(struct dentry * dentry) @@ -79620,8 +79638,8 @@ index 5c33aeb..e8739c7 100644 +static void dentry_unlink_inode(struct dentry * dentry) { struct inode *inode = dentry->d_inode; - __d_clear_type_and_inode(dentry); -@@ -545,7 +547,7 @@ static void __dentry_kill(struct dentry *dentry) + +@@ -534,7 +536,7 @@ static void __dentry_kill(struct dentry *dentry) * dentry_iput drops the locks, at which point nobody (except * transient RCU lookups) can reach this dentry. */ @@ -79630,7 +79648,7 @@ index 5c33aeb..e8739c7 100644 this_cpu_dec(nr_dentry); if (dentry->d_op && dentry->d_op->d_release) dentry->d_op->d_release(dentry); -@@ -567,7 +569,8 @@ static void __dentry_kill(struct dentry *dentry) +@@ -556,7 +558,8 @@ static void __dentry_kill(struct dentry *dentry) * Returns dentry requiring refcount drop, or NULL if we're done. 
*/ static struct dentry *dentry_kill(struct dentry *dentry) @@ -79640,7 +79658,7 @@ index 5c33aeb..e8739c7 100644 { struct inode *inode = dentry->d_inode; struct dentry *parent = NULL; -@@ -598,7 +601,7 @@ static inline struct dentry *lock_parent(struct dentry *dentry) +@@ -587,7 +590,7 @@ static inline struct dentry *lock_parent(struct dentry *dentry) struct dentry *parent = dentry->d_parent; if (IS_ROOT(dentry)) return NULL; @@ -79649,7 +79667,7 @@ index 5c33aeb..e8739c7 100644 return NULL; if (likely(spin_trylock(&parent->d_lock))) return parent; -@@ -660,8 +663,8 @@ static inline bool fast_dput(struct dentry *dentry) +@@ -649,8 +652,8 @@ static inline bool fast_dput(struct dentry *dentry) */ if (unlikely(ret < 0)) { spin_lock(&dentry->d_lock); @@ -79660,7 +79678,7 @@ index 5c33aeb..e8739c7 100644 spin_unlock(&dentry->d_lock); return 1; } -@@ -716,7 +719,7 @@ static inline bool fast_dput(struct dentry *dentry) +@@ -705,7 +708,7 @@ static inline bool fast_dput(struct dentry *dentry) * else could have killed it and marked it dead. Either way, we * don't need to do anything else. */ @@ -79669,7 +79687,7 @@ index 5c33aeb..e8739c7 100644 spin_unlock(&dentry->d_lock); return 1; } -@@ -726,7 +729,7 @@ static inline bool fast_dput(struct dentry *dentry) +@@ -715,7 +718,7 @@ static inline bool fast_dput(struct dentry *dentry) * lock, and we just tested that it was zero, so we can just * set it to 1. */ @@ -79678,7 +79696,7 @@ index 5c33aeb..e8739c7 100644 return 0; } -@@ -788,7 +791,7 @@ repeat: +@@ -777,7 +780,7 @@ repeat: dentry->d_flags |= DCACHE_REFERENCED; dentry_lru_add(dentry); @@ -79687,7 +79705,7 @@ index 5c33aeb..e8739c7 100644 spin_unlock(&dentry->d_lock); return; -@@ -803,7 +806,7 @@ EXPORT_SYMBOL(dput); +@@ -792,7 +795,7 @@ EXPORT_SYMBOL(dput); /* This must be called with d_lock held */ static inline void __dget_dlock(struct dentry *dentry) { @@ -79696,7 +79714,7 @@ index 5c33aeb..e8739c7 100644 } static inline void __dget(struct dentry *dentry) -@@ -844,8 +847,8 @@ repeat: +@@ -833,8 +836,8 @@ repeat: goto repeat; } rcu_read_unlock(); @@ -79707,7 +79725,7 @@ index 5c33aeb..e8739c7 100644 spin_unlock(&ret->d_lock); return ret; } -@@ -923,9 +926,9 @@ restart: +@@ -912,9 +915,9 @@ restart: spin_lock(&inode->i_lock); hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) { spin_lock(&dentry->d_lock); @@ -79719,7 +79737,7 @@ index 5c33aeb..e8739c7 100644 __dentry_kill(dentry); dput(parent); goto restart; -@@ -960,7 +963,7 @@ static void shrink_dentry_list(struct list_head *list) +@@ -949,7 +952,7 @@ static void shrink_dentry_list(struct list_head *list) * We found an inuse dentry which was not removed from * the LRU because of laziness during lookup. Do not free it. */ @@ -79728,7 +79746,7 @@ index 5c33aeb..e8739c7 100644 spin_unlock(&dentry->d_lock); if (parent) spin_unlock(&parent->d_lock); -@@ -998,8 +1001,8 @@ static void shrink_dentry_list(struct list_head *list) +@@ -987,8 +990,8 @@ static void shrink_dentry_list(struct list_head *list) dentry = parent; while (dentry && !lockref_put_or_lock(&dentry->d_lockref)) { parent = lock_parent(dentry); @@ -79739,7 +79757,7 @@ index 5c33aeb..e8739c7 100644 spin_unlock(&dentry->d_lock); if (parent) spin_unlock(&parent->d_lock); -@@ -1039,7 +1042,7 @@ static enum lru_status dentry_lru_isolate(struct list_head *item, +@@ -1028,7 +1031,7 @@ static enum lru_status dentry_lru_isolate(struct list_head *item, * counts, just remove them from the LRU. Otherwise give them * another pass through the LRU. 
*/ @@ -79748,7 +79766,7 @@ index 5c33aeb..e8739c7 100644 d_lru_isolate(lru, dentry); spin_unlock(&dentry->d_lock); return LRU_REMOVED; -@@ -1373,7 +1376,7 @@ static enum d_walk_ret select_collect(void *_data, struct dentry *dentry) +@@ -1362,7 +1365,7 @@ static enum d_walk_ret select_collect(void *_data, struct dentry *dentry) } else { if (dentry->d_flags & DCACHE_LRU_LIST) d_lru_del(dentry); @@ -79757,7 +79775,7 @@ index 5c33aeb..e8739c7 100644 d_shrink_add(dentry, &data->dispose); data->found++; } -@@ -1421,7 +1424,7 @@ static enum d_walk_ret umount_check(void *_data, struct dentry *dentry) +@@ -1410,7 +1413,7 @@ static enum d_walk_ret umount_check(void *_data, struct dentry *dentry) return D_WALK_CONTINUE; /* root with refcount 1 is fine */ @@ -79766,7 +79784,7 @@ index 5c33aeb..e8739c7 100644 return D_WALK_CONTINUE; printk(KERN_ERR "BUG: Dentry %p{i=%lx,n=%pd} " -@@ -1430,7 +1433,7 @@ static enum d_walk_ret umount_check(void *_data, struct dentry *dentry) +@@ -1419,7 +1422,7 @@ static enum d_walk_ret umount_check(void *_data, struct dentry *dentry) dentry->d_inode ? dentry->d_inode->i_ino : 0UL, dentry, @@ -79775,7 +79793,7 @@ index 5c33aeb..e8739c7 100644 dentry->d_sb->s_type->name, dentry->d_sb->s_id); WARN_ON(1); -@@ -1571,7 +1574,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name) +@@ -1560,7 +1563,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name) dentry->d_iname[DNAME_INLINE_LEN-1] = 0; if (name->len > DNAME_INLINE_LEN-1) { size_t size = offsetof(struct external_name, name[1]); @@ -79784,7 +79802,7 @@ index 5c33aeb..e8739c7 100644 if (!p) { kmem_cache_free(dentry_cache, dentry); return NULL; -@@ -1594,7 +1597,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name) +@@ -1583,7 +1586,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name) smp_wmb(); dentry->d_name.name = dname; @@ -79793,7 +79811,7 @@ index 5c33aeb..e8739c7 100644 dentry->d_flags = 0; spin_lock_init(&dentry->d_lock); seqcount_init(&dentry->d_seq); -@@ -1603,6 +1606,9 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name) +@@ -1592,6 +1595,9 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name) dentry->d_sb = sb; dentry->d_op = NULL; dentry->d_fsdata = NULL; @@ -79803,7 +79821,7 @@ index 5c33aeb..e8739c7 100644 INIT_HLIST_BL_NODE(&dentry->d_hash); INIT_LIST_HEAD(&dentry->d_lru); INIT_LIST_HEAD(&dentry->d_subdirs); -@@ -2327,7 +2333,7 @@ struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name) +@@ -2317,7 +2323,7 @@ struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name) goto next; } @@ -79812,7 +79830,7 @@ index 5c33aeb..e8739c7 100644 found = dentry; spin_unlock(&dentry->d_lock); break; -@@ -2395,7 +2401,7 @@ again: +@@ -2385,7 +2391,7 @@ again: spin_lock(&dentry->d_lock); inode = dentry->d_inode; isdir = S_ISDIR(inode->i_mode); @@ -79821,7 +79839,7 @@ index 5c33aeb..e8739c7 100644 if (!spin_trylock(&inode->i_lock)) { spin_unlock(&dentry->d_lock); cpu_relax(); -@@ -3343,7 +3349,7 @@ static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry) +@@ -3333,7 +3339,7 @@ static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry) if (!(dentry->d_flags & DCACHE_GENOCIDE)) { dentry->d_flags |= DCACHE_GENOCIDE; @@ -79830,7 +79848,7 @@ index 5c33aeb..e8739c7 100644 } } return D_WALK_CONTINUE; -@@ -3451,7 +3457,8 @@ void __init vfs_caches_init_early(void) +@@ -3441,7 +3447,8 @@ void __init 
vfs_caches_init_early(void) void __init vfs_caches_init(void) { names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0, @@ -81214,10 +81232,10 @@ index 5797d45..7d7d79a 100644 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) { diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c -index e5232bb..d7b20d1 100644 +index 7a8ea13..f7a17db 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c -@@ -852,9 +852,9 @@ restart: +@@ -880,9 +880,9 @@ fs_initcall(cgroup_writeback_init); #else /* CONFIG_CGROUP_WRITEBACK */ static struct bdi_writeback * @@ -81229,7 +81247,7 @@ index e5232bb..d7b20d1 100644 { struct bdi_writeback *wb = inode_to_wb(inode); -@@ -863,8 +863,8 @@ locked_inode_to_wb_and_lock_list(struct inode *inode) +@@ -891,8 +891,8 @@ locked_inode_to_wb_and_lock_list(struct inode *inode) return wb; } @@ -81239,7 +81257,7 @@ index e5232bb..d7b20d1 100644 { struct bdi_writeback *wb = inode_to_wb(inode); -@@ -1108,9 +1108,8 @@ static int write_inode(struct inode *inode, struct writeback_control *wbc) +@@ -1136,9 +1136,8 @@ static int write_inode(struct inode *inode, struct writeback_control *wbc) * Wait for writeback on an inode to complete. Called with i_lock held. * Caller must make sure inode cannot go away when we drop i_lock. */ @@ -81250,7 +81268,7 @@ index e5232bb..d7b20d1 100644 { DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC); wait_queue_head_t *wqh; -@@ -1139,8 +1138,8 @@ void inode_wait_for_writeback(struct inode *inode) +@@ -1167,8 +1166,8 @@ void inode_wait_for_writeback(struct inode *inode) * held and drops it. It is aimed for callers not holding any inode reference * so once i_lock is dropped, inode can go away. */ @@ -87171,7 +87189,7 @@ index 4a62fe8..5dc2f5f 100644 sbi->s_alloc_options.preallocmin = 0; /* Preallocate by 16 blocks (17-1) at once */ diff --git a/fs/select.c b/fs/select.c -index 0155473..29d751f 100644 +index 0155473..da772c4 100644 --- a/fs/select.c +++ b/fs/select.c @@ -20,6 +20,7 @@ @@ -87182,6 +87200,15 @@ index 0155473..29d751f 100644 #include <linux/personality.h> /* for STICKY_TIMEOUTS */ #include <linux/file.h> #include <linux/fdtable.h> +@@ -717,7 +718,7 @@ SYSCALL_DEFINE6(pselect6, int, n, fd_set __user *, inp, fd_set __user *, outp, + + #ifdef __ARCH_WANT_SYS_OLD_SELECT + struct sel_arg_struct { +- unsigned long n; ++ long n; + fd_set __user *inp, *outp, *exp; + struct timeval __user *tvp; + }; @@ -880,6 +881,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds, struct poll_list *walk = head; unsigned long todo = nfds; @@ -87548,7 +87575,7 @@ index d4a61d8..87dbeb3 100644 generic_fillattr(inode, stat); return 0; diff --git a/fs/super.c b/fs/super.c -index 954aeb8..fcf9154 100644 +index f5f4b32..f6bedd2 100644 --- a/fs/super.c +++ b/fs/super.c @@ -348,7 +348,8 @@ EXPORT_SYMBOL(deactivate_super); @@ -100061,10 +100088,34 @@ index c1da539..1dcec55 100644 struct atmphy_ops { int (*start)(struct atm_dev *dev); diff --git a/include/linux/atomic.h b/include/linux/atomic.h -index 301de78..a8efdc4 100644 +index 301de78..f5f8e42 100644 --- a/include/linux/atomic.h +++ b/include/linux/atomic.h -@@ -432,7 +432,7 @@ +@@ -103,6 +103,11 @@ + #define atomic_inc_return(...) \ + __atomic_op_fence(atomic_inc_return, __VA_ARGS__) + #endif ++ ++#ifndef atomic_inc_return_unchecked ++#define atomic_inc_return_unchecked(...) \ ++ __atomic_op_fence(atomic_inc_return_unchecked, __VA_ARGS__) ++#endif + #endif /* atomic_inc_return_relaxed */ + + /* atomic_sub_return_relaxed */ +@@ -255,6 +260,11 @@ + #define atomic64_inc_return(...) 
\ + __atomic_op_fence(atomic64_inc_return, __VA_ARGS__) + #endif ++ ++#ifndef atomic64_inc_return_unchecked ++#define atomic64_inc_return_unchecked(...) \ ++ __atomic_op_fence(atomic64_inc_return_unchecked, __VA_ARGS__) ++#endif + #endif /* atomic64_inc_return_relaxed */ + + +@@ -432,7 +442,7 @@ * Atomically adds @a to @v, so long as @v was not already @u. * Returns non-zero if @v was not @u, and zero otherwise. */ @@ -100273,10 +100324,10 @@ index c02e669..439bd4b 100644 } diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h -index c70e358..7718240 100644 +index 1687557..3a013dd 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h -@@ -1636,7 +1636,7 @@ struct block_device_operations { +@@ -1647,7 +1647,7 @@ struct block_device_operations { void (*swap_slot_free_notify) (struct block_device *, unsigned long); struct module *owner; const struct pr_ops *pr_ops; @@ -100984,7 +101035,7 @@ index 653589e..4ef254a 100644 return c | 0x20; } diff --git a/include/linux/dcache.h b/include/linux/dcache.h -index d67ae11..9ec20d2 100644 +index 8a2e009..c3e6d63 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -123,6 +123,9 @@ struct dentry { @@ -103345,7 +103396,7 @@ index e15828f..531fd0a 100644 if (atomic_sub_and_test((int) count, &kref->refcount)) { release(kref); diff --git a/include/linux/libata.h b/include/linux/libata.h -index 600c1e0..255c7b9 100644 +index b20a275..4aff061 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -992,7 +992,7 @@ struct ata_port_operations { @@ -103928,7 +103979,7 @@ index 64f36e0..fa7f0d8 100644 * struct dmi_device_id appears during expansion of * "MODULE_DEVICE_TABLE(dmi, x)". Compiler doesn't look inside it diff --git a/include/linux/module.h b/include/linux/module.h -index 3a19c79..dea8c47 100644 +index b229a99..37a70e1 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -19,9 +19,11 @@ @@ -103967,7 +104018,7 @@ index 3a19c79..dea8c47 100644 extern ssize_t __modver_version_show(struct module_attribute *, struct module_kobject *, char *); -@@ -313,7 +316,7 @@ struct module { +@@ -319,7 +322,7 @@ struct module { /* Sysfs stuff. */ struct module_kobject mkobj; @@ -103976,7 +104027,7 @@ index 3a19c79..dea8c47 100644 const char *version; const char *srcversion; struct kobject *holders_dir; -@@ -370,20 +373,21 @@ struct module { +@@ -376,20 +379,21 @@ struct module { * If this is non-NULL, vfree() after init() returns. * * Cacheline align here, such that: @@ -104003,7 +104054,7 @@ index 3a19c79..dea8c47 100644 #ifdef CONFIG_MODULES_TREE_LOOKUP /* -@@ -391,13 +395,12 @@ struct module { +@@ -397,13 +401,12 @@ struct module { * above entries such that a regular lookup will only touch one * cacheline. 
*/ @@ -104021,7 +104072,7 @@ index 3a19c79..dea8c47 100644 /* Arch-specific module values */ struct mod_arch_specific arch; -@@ -454,6 +457,10 @@ struct module { +@@ -455,6 +458,10 @@ struct module { unsigned int num_trace_events; struct trace_enum_map **trace_enums; unsigned int num_trace_enums; @@ -104032,7 +104083,7 @@ index 3a19c79..dea8c47 100644 #endif #ifdef CONFIG_FTRACE_MCOUNT_RECORD unsigned int num_ftrace_callsites; -@@ -481,7 +488,8 @@ struct module { +@@ -482,7 +489,8 @@ struct module { ctor_fn_t *ctors; unsigned int num_ctors; #endif @@ -104042,7 +104093,7 @@ index 3a19c79..dea8c47 100644 #ifndef MODULE_ARCH_INIT #define MODULE_ARCH_INIT {} #endif -@@ -502,18 +510,48 @@ bool is_module_address(unsigned long addr); +@@ -503,18 +511,48 @@ bool is_module_address(unsigned long addr); bool is_module_percpu_address(unsigned long addr); bool is_module_text_address(unsigned long addr); @@ -106934,10 +106985,10 @@ index 3e5d907..037e86c 100644 static inline void __inc_zone_page_state(struct page *page, diff --git a/include/linux/writeback.h b/include/linux/writeback.h -index b333c94..6b59f39 100644 +index d0b5ca5..c13529b 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h -@@ -277,8 +277,9 @@ static inline void inode_detach_wb(struct inode *inode) +@@ -278,8 +278,9 @@ static inline void inode_detach_wb(struct inode *inode) } static inline void wbc_attach_and_unlock_inode(struct writeback_control *wbc, @@ -111599,7 +111650,7 @@ index 0551c21..f753f95 100644 debug_mutex_free_waiter(&waiter); mutex_release(&lock->dep_map, 1, ip); diff --git a/kernel/module.c b/kernel/module.c -index 14833e6..659a51a 100644 +index 0e5c711..540d4d4 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -59,6 +59,7 @@ @@ -111784,7 +111835,7 @@ index 14833e6..659a51a 100644 core_param(nomodule, modules_disabled, bint, 0); /* Waiting for a module to finish initializing? 
*/ -@@ -473,7 +510,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr, +@@ -476,7 +513,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr, return true; list_for_each_entry_rcu(mod, &modules, list) { @@ -111793,7 +111844,7 @@ index 14833e6..659a51a 100644 { mod->syms, mod->syms + mod->num_syms, mod->crcs, NOT_GPL_ONLY, false }, { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms, -@@ -498,7 +535,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr, +@@ -501,7 +538,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr, if (mod->state == MODULE_STATE_UNFORMED) continue; @@ -111802,7 +111853,7 @@ index 14833e6..659a51a 100644 return true; } return false; -@@ -644,7 +681,7 @@ static int percpu_modalloc(struct module *mod, struct load_info *info) +@@ -647,7 +684,7 @@ static int percpu_modalloc(struct module *mod, struct load_info *info) if (!pcpusec->sh_size) return 0; @@ -111811,7 +111862,7 @@ index 14833e6..659a51a 100644 pr_warn("%s: per-cpu alignment %li > %li\n", mod->name, align, PAGE_SIZE); align = PAGE_SIZE; -@@ -1214,7 +1251,7 @@ struct module_attribute module_uevent = +@@ -1217,7 +1254,7 @@ struct module_attribute module_uevent = static ssize_t show_coresize(struct module_attribute *mattr, struct module_kobject *mk, char *buffer) { @@ -111820,7 +111871,7 @@ index 14833e6..659a51a 100644 } static struct module_attribute modinfo_coresize = -@@ -1223,7 +1260,7 @@ static struct module_attribute modinfo_coresize = +@@ -1226,7 +1263,7 @@ static struct module_attribute modinfo_coresize = static ssize_t show_initsize(struct module_attribute *mattr, struct module_kobject *mk, char *buffer) { @@ -111829,7 +111880,7 @@ index 14833e6..659a51a 100644 } static struct module_attribute modinfo_initsize = -@@ -1315,12 +1352,29 @@ static int check_version(Elf_Shdr *sechdrs, +@@ -1318,12 +1355,29 @@ static int check_version(Elf_Shdr *sechdrs, goto bad_version; } @@ -111859,7 +111910,7 @@ index 14833e6..659a51a 100644 return 0; } -@@ -1448,7 +1502,7 @@ resolve_symbol_wait(struct module *mod, +@@ -1451,7 +1505,7 @@ resolve_symbol_wait(struct module *mod, */ #ifdef CONFIG_SYSFS @@ -111868,7 +111919,7 @@ index 14833e6..659a51a 100644 static inline bool sect_empty(const Elf_Shdr *sect) { return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0; -@@ -1586,7 +1640,7 @@ static void add_notes_attrs(struct module *mod, const struct load_info *info) +@@ -1589,7 +1643,7 @@ static void add_notes_attrs(struct module *mod, const struct load_info *info) { unsigned int notes, loaded, i; struct module_notes_attrs *notes_attrs; @@ -111877,7 +111928,7 @@ index 14833e6..659a51a 100644 /* failed to create section attributes, so can't create notes */ if (!mod->sect_attrs) -@@ -1698,7 +1752,7 @@ static void del_usage_links(struct module *mod) +@@ -1701,7 +1755,7 @@ static void del_usage_links(struct module *mod) static int module_add_modinfo_attrs(struct module *mod) { struct module_attribute *attr; @@ -111886,7 +111937,7 @@ index 14833e6..659a51a 100644 int error = 0; int i; -@@ -1915,21 +1969,21 @@ static void set_section_ro_nx(void *base, +@@ -1918,21 +1972,21 @@ static void set_section_ro_nx(void *base, static void unset_module_core_ro_nx(struct module *mod) { @@ -111916,7 +111967,7 @@ index 14833e6..659a51a 100644 set_memory_rw); } -@@ -1942,14 +1996,14 @@ void set_all_modules_text_rw(void) +@@ -1945,14 +1999,14 @@ void set_all_modules_text_rw(void) list_for_each_entry_rcu(mod, &modules, list) { if (mod->state == MODULE_STATE_UNFORMED) continue; @@ 
-111937,7 +111988,7 @@ index 14833e6..659a51a 100644 set_memory_rw); } } -@@ -1965,14 +2019,14 @@ void set_all_modules_text_ro(void) +@@ -1968,14 +2022,14 @@ void set_all_modules_text_ro(void) list_for_each_entry_rcu(mod, &modules, list) { if (mod->state == MODULE_STATE_UNFORMED) continue; @@ -111958,7 +112009,7 @@ index 14833e6..659a51a 100644 set_memory_ro); } } -@@ -1981,7 +2035,15 @@ void set_all_modules_text_ro(void) +@@ -1984,7 +2038,15 @@ void set_all_modules_text_ro(void) #else static inline void set_section_ro_nx(void *base, unsigned long text_size, unsigned long ro_size, unsigned long total_size) { } static void unset_module_core_ro_nx(struct module *mod) { } @@ -111975,7 +112026,7 @@ index 14833e6..659a51a 100644 #endif void __weak module_memfree(void *module_region) -@@ -2036,16 +2098,19 @@ static void free_module(struct module *mod) +@@ -2039,16 +2101,19 @@ static void free_module(struct module *mod) /* This may be NULL, but that's OK */ unset_module_init_ro_nx(mod); module_arch_freeing_init(mod); @@ -111998,7 +112049,7 @@ index 14833e6..659a51a 100644 #ifdef CONFIG_MPU update_protections(current->mm); -@@ -2114,9 +2179,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info) +@@ -2117,9 +2182,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info) int ret = 0; const struct kernel_symbol *ksym; @@ -112030,7 +112081,7 @@ index 14833e6..659a51a 100644 switch (sym[i].st_shndx) { case SHN_COMMON: /* Ignore common symbols */ -@@ -2141,7 +2228,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info) +@@ -2144,7 +2231,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info) ksym = resolve_symbol_wait(mod, info, name); /* Ok if resolved. */ if (ksym && !IS_ERR(ksym)) { @@ -112040,7 +112091,7 @@ index 14833e6..659a51a 100644 break; } -@@ -2160,11 +2249,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info) +@@ -2163,11 +2252,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info) secbase = (unsigned long)mod_percpu(mod); else secbase = info->sechdrs[sym[i].st_shndx].sh_addr; @@ -112061,7 +112112,7 @@ index 14833e6..659a51a 100644 return ret; } -@@ -2248,22 +2346,12 @@ static void layout_sections(struct module *mod, struct load_info *info) +@@ -2251,22 +2349,12 @@ static void layout_sections(struct module *mod, struct load_info *info) || s->sh_entsize != ~0UL || strstarts(sname, ".init")) continue; @@ -112088,7 +112139,7 @@ index 14833e6..659a51a 100644 } pr_debug("Init section allocation order:\n"); -@@ -2277,23 +2365,13 @@ static void layout_sections(struct module *mod, struct load_info *info) +@@ -2280,23 +2368,13 @@ static void layout_sections(struct module *mod, struct load_info *info) || s->sh_entsize != ~0UL || !strstarts(sname, ".init")) continue; @@ -112117,7 +112168,7 @@ index 14833e6..659a51a 100644 } } -@@ -2466,7 +2544,7 @@ static void layout_symtab(struct module *mod, struct load_info *info) +@@ -2469,7 +2547,7 @@ static void layout_symtab(struct module *mod, struct load_info *info) /* Put symbol section at end of init part of module. 
*/ symsect->sh_flags |= SHF_ALLOC; @@ -112126,7 +112177,7 @@ index 14833e6..659a51a 100644 info->index.sym) | INIT_OFFSET_MASK; pr_debug("\t%s\n", info->secstrings + symsect->sh_name); -@@ -2483,16 +2561,16 @@ static void layout_symtab(struct module *mod, struct load_info *info) +@@ -2486,23 +2564,23 @@ static void layout_symtab(struct module *mod, struct load_info *info) } /* Append room for core symbols at end of core part. */ @@ -112144,38 +112195,59 @@ index 14833e6..659a51a 100644 - strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect, + strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect, info->index.str) | INIT_OFFSET_MASK; + pr_debug("\t%s\n", info->secstrings + strsect->sh_name); + + /* We'll tack temporary mod_kallsyms on the end. */ +- mod->init_size = ALIGN(mod->init_size, ++ mod->init_size_rx = ALIGN(mod->init_size_rx, + __alignof__(struct mod_kallsyms)); +- info->mod_kallsyms_init_off = mod->init_size; +- mod->init_size += sizeof(struct mod_kallsyms); - mod->init_size = debug_align(mod->init_size); ++ info->mod_kallsyms_init_off = mod->init_size_rx; ++ mod->init_size_rx += sizeof(struct mod_kallsyms); + mod->init_size_rx = debug_align(mod->init_size_rx); - pr_debug("\t%s\n", info->secstrings + strsect->sh_name); } -@@ -2509,12 +2587,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info) - /* Make sure we get permanent strtab: don't use info->strtab. */ - mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr; + /* +@@ -2519,21 +2597,24 @@ static void add_kallsyms(struct module *mod, const struct load_info *info) + Elf_Shdr *symsec = &info->sechdrs[info->index.sym]; + /* Set up to point into init section. */ +- mod->kallsyms = mod->module_init + info->mod_kallsyms_init_off; ++ mod->kallsyms = mod->module_init_rx + info->mod_kallsyms_init_off; ++ + pax_open_kernel(); + + mod->kallsyms->symtab = (void *)symsec->sh_addr; + mod->kallsyms->num_symtab = symsec->sh_size / sizeof(Elf_Sym); + /* Make sure we get permanent strtab: don't use info->strtab. */ + mod->kallsyms->strtab = (void *)info->sechdrs[info->index.str].sh_addr; + + /* Set types up while we still have access to sections. */ - for (i = 0; i < mod->num_symtab; i++) - mod->symtab[i].st_info = elf_type(&mod->symtab[i], info); - -- mod->core_symtab = dst = mod->module_core + info->symoffs; -- mod->core_strtab = s = mod->module_core + info->stroffs; -+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs; -+ mod->core_strtab = s = mod->module_core_rx + info->stroffs; - src = mod->symtab; - for (ndst = i = 0; i < mod->num_symtab; i++) { + for (i = 0; i < mod->kallsyms->num_symtab; i++) + mod->kallsyms->symtab[i].st_info + = elf_type(&mod->kallsyms->symtab[i], info); + + /* Now populate the cut down core kallsyms for after init. 
*/ +- mod->core_kallsyms.symtab = dst = mod->module_core + info->symoffs; +- mod->core_kallsyms.strtab = s = mod->module_core + info->stroffs; ++ mod->core_kallsyms.symtab = dst = mod->module_core_rx + info->symoffs; ++ mod->core_kallsyms.strtab = s = mod->module_core_rx + info->stroffs; + src = mod->kallsyms->symtab; + for (ndst = i = 0; i < mod->kallsyms->num_symtab; i++) { if (i == 0 || -@@ -2526,6 +2606,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info) +@@ -2545,6 +2626,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info) } } - mod->core_num_syms = ndst; + mod->core_kallsyms.num_symtab = ndst; + + pax_close_kernel(); } #else static inline void layout_symtab(struct module *mod, struct load_info *info) -@@ -2825,7 +2907,15 @@ static struct module *setup_load_info(struct load_info *info, int flags) +@@ -2844,7 +2927,15 @@ static struct module *setup_load_info(struct load_info *info, int flags) mod = (void *)info->sechdrs[info->index.mod].sh_addr; if (info->index.sym == 0) { @@ -112191,7 +112263,7 @@ index 14833e6..659a51a 100644 return ERR_PTR(-ENOEXEC); } -@@ -2841,8 +2931,14 @@ static struct module *setup_load_info(struct load_info *info, int flags) +@@ -2860,8 +2951,14 @@ static struct module *setup_load_info(struct load_info *info, int flags) static int check_modinfo(struct module *mod, struct load_info *info, int flags) { const char *modmagic = get_modinfo(info, "vermagic"); @@ -112206,7 +112278,7 @@ index 14833e6..659a51a 100644 if (flags & MODULE_INIT_IGNORE_VERMAGIC) modmagic = NULL; -@@ -2867,7 +2963,7 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags) +@@ -2886,7 +2983,7 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags) } /* Set up license info based on the info section */ @@ -112215,7 +112287,7 @@ index 14833e6..659a51a 100644 return 0; } -@@ -2964,7 +3060,7 @@ static int move_module(struct module *mod, struct load_info *info) +@@ -2983,7 +3080,7 @@ static int move_module(struct module *mod, struct load_info *info) void *ptr; /* Do the allocs. */ @@ -112224,7 +112296,7 @@ index 14833e6..659a51a 100644 /* * The pointer to this block is stored in the module structure * which is inside the block. Just mark it as not being a -@@ -2974,11 +3070,11 @@ static int move_module(struct module *mod, struct load_info *info) +@@ -2993,11 +3090,11 @@ static int move_module(struct module *mod, struct load_info *info) if (!ptr) return -ENOMEM; @@ -112240,7 +112312,7 @@ index 14833e6..659a51a 100644 /* * The pointer to this block is stored in the module structure * which is inside the block. 
This block doesn't need to be -@@ -2987,13 +3083,45 @@ static int move_module(struct module *mod, struct load_info *info) +@@ -3006,13 +3103,45 @@ static int move_module(struct module *mod, struct load_info *info) */ kmemleak_ignore(ptr); if (!ptr) { @@ -112290,7 +112362,7 @@ index 14833e6..659a51a 100644 /* Transfer each section which specifies SHF_ALLOC */ pr_debug("final section addresses:\n"); -@@ -3004,16 +3132,45 @@ static int move_module(struct module *mod, struct load_info *info) +@@ -3023,16 +3152,45 @@ static int move_module(struct module *mod, struct load_info *info) if (!(shdr->sh_flags & SHF_ALLOC)) continue; @@ -112343,7 +112415,7 @@ index 14833e6..659a51a 100644 pr_debug("\t0x%lx %s\n", (long)shdr->sh_addr, info->secstrings + shdr->sh_name); } -@@ -3070,12 +3227,12 @@ static void flush_module_icache(const struct module *mod) +@@ -3089,12 +3247,12 @@ static void flush_module_icache(const struct module *mod) * Do it before processing of module parameters, so the module * can provide parameter accessor functions of its own. */ @@ -112362,7 +112434,7 @@ index 14833e6..659a51a 100644 set_fs(old_fs); } -@@ -3133,8 +3290,10 @@ static void module_deallocate(struct module *mod, struct load_info *info) +@@ -3152,8 +3310,10 @@ static void module_deallocate(struct module *mod, struct load_info *info) { percpu_modfree(mod); module_arch_freeing_init(mod); @@ -112375,7 +112447,7 @@ index 14833e6..659a51a 100644 } int __weak module_finalize(const Elf_Ehdr *hdr, -@@ -3147,7 +3306,9 @@ int __weak module_finalize(const Elf_Ehdr *hdr, +@@ -3166,7 +3326,9 @@ int __weak module_finalize(const Elf_Ehdr *hdr, static int post_relocation(struct module *mod, const struct load_info *info) { /* Sort exception table now relocations are done. */ @@ -112385,7 +112457,7 @@ index 14833e6..659a51a 100644 /* Copy relocated percpu area over. */ percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr, -@@ -3195,13 +3356,15 @@ static void do_mod_ctors(struct module *mod) +@@ -3214,13 +3376,15 @@ static void do_mod_ctors(struct module *mod) /* For freeing module_init on success, in case kallsyms traversing */ struct mod_initfree { struct rcu_head rcu; @@ -112403,7 +112475,7 @@ index 14833e6..659a51a 100644 kfree(m); } -@@ -3221,7 +3384,8 @@ static noinline int do_init_module(struct module *mod) +@@ -3240,7 +3404,8 @@ static noinline int do_init_module(struct module *mod) ret = -ENOMEM; goto fail; } @@ -112413,7 +112485,7 @@ index 14833e6..659a51a 100644 /* * We want to find out whether @mod uses async during init. Clear -@@ -3281,10 +3445,10 @@ static noinline int do_init_module(struct module *mod) +@@ -3299,10 +3464,10 @@ static noinline int do_init_module(struct module *mod) mod_tree_remove_init(mod); unset_module_init_ro_nx(mod); module_arch_freeing_init(mod); @@ -112428,7 +112500,7 @@ index 14833e6..659a51a 100644 /* * We want to free module_init, but be aware that kallsyms may be * walking this with preempt disabled. In all the failure paths, we -@@ -3374,16 +3538,16 @@ static int complete_formation(struct module *mod, struct load_info *info) +@@ -3392,16 +3557,16 @@ static int complete_formation(struct module *mod, struct load_info *info) module_bug_finalize(info->hdr, info->sechdrs, mod); /* Set RO and NX regions for core */ @@ -112453,7 +112525,7 @@ index 14833e6..659a51a 100644 /* Mark state as coming so strong_try_module_get() ignores us, * but kallsyms etc. can see us. 
*/ -@@ -3478,9 +3642,38 @@ static int load_module(struct load_info *info, const char __user *uargs, +@@ -3496,9 +3661,38 @@ static int load_module(struct load_info *info, const char __user *uargs, if (err) goto free_unload; @@ -112492,7 +112564,7 @@ index 14833e6..659a51a 100644 /* Fix up syms, so that st_value is a pointer to location. */ err = simplify_symbols(mod, info); if (err < 0) -@@ -3496,13 +3689,6 @@ static int load_module(struct load_info *info, const char __user *uargs, +@@ -3514,13 +3708,6 @@ static int load_module(struct load_info *info, const char __user *uargs, flush_module_icache(mod); @@ -112506,7 +112578,7 @@ index 14833e6..659a51a 100644 dynamic_debug_setup(info->debug, info->num_debug); /* Ftrace init must be called in the MODULE_STATE_UNFORMED state */ -@@ -3554,11 +3740,10 @@ static int load_module(struct load_info *info, const char __user *uargs, +@@ -3572,11 +3759,10 @@ static int load_module(struct load_info *info, const char __user *uargs, ddebug_cleanup: dynamic_debug_remove(info->debug); synchronize_sched(); @@ -112519,7 +112591,7 @@ index 14833e6..659a51a 100644 free_unload: module_unload_free(mod); unlink_mod: -@@ -3578,7 +3763,8 @@ static int load_module(struct load_info *info, const char __user *uargs, +@@ -3596,7 +3782,8 @@ static int load_module(struct load_info *info, const char __user *uargs, */ ftrace_release_mod(mod); /* Free lock-classes; relies on the preceding sync_rcu() */ @@ -112529,8 +112601,8 @@ index 14833e6..659a51a 100644 module_deallocate(mod, info); free_copy: -@@ -3660,10 +3846,16 @@ static const char *get_ksymbol(struct module *mod, - unsigned long nextval; +@@ -3679,10 +3866,16 @@ static const char *get_ksymbol(struct module *mod, + struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms); /* At worse, next value is at end of module */ - if (within_module_init(addr, mod)) @@ -112549,7 +112621,7 @@ index 14833e6..659a51a 100644 /* Scan for closest preceding symbol, and next symbol. (ELF starts real symbols at 1). */ -@@ -3909,7 +4101,7 @@ static int m_show(struct seq_file *m, void *p) +@@ -3935,7 +4128,7 @@ static int m_show(struct seq_file *m, void *p) return 0; seq_printf(m, "%s %u", @@ -112558,7 +112630,7 @@ index 14833e6..659a51a 100644 print_unload_info(m, mod); /* Informative for users. */ -@@ -3918,7 +4110,7 @@ static int m_show(struct seq_file *m, void *p) +@@ -3944,7 +4137,7 @@ static int m_show(struct seq_file *m, void *p) mod->state == MODULE_STATE_COMING ? "Loading" : "Live"); /* Used by oprofile and other similar tools. 
*/ @@ -112567,7 +112639,7 @@ index 14833e6..659a51a 100644 /* Taints info */ if (mod->taints) -@@ -3954,7 +4146,17 @@ static const struct file_operations proc_modules_operations = { +@@ -3980,7 +4173,17 @@ static const struct file_operations proc_modules_operations = { static int __init proc_modules_init(void) { @@ -112585,7 +112657,7 @@ index 14833e6..659a51a 100644 return 0; } module_init(proc_modules_init); -@@ -4015,7 +4217,8 @@ struct module *__module_address(unsigned long addr) +@@ -4041,7 +4244,8 @@ struct module *__module_address(unsigned long addr) { struct module *mod; @@ -112595,7 +112667,7 @@ index 14833e6..659a51a 100644 return NULL; module_assert_mutex_or_preempt(); -@@ -4058,11 +4261,20 @@ bool is_module_text_address(unsigned long addr) +@@ -4084,11 +4288,20 @@ bool is_module_text_address(unsigned long addr) */ struct module *__module_text_address(unsigned long addr) { @@ -115559,10 +115631,10 @@ index 0f06532..247c8e7 100644 + return atomic64_inc_return_unchecked(&trace_counter); } diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c -index debf6e8..60fa064 100644 +index d202d99..1bf816c 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c -@@ -2367,7 +2367,6 @@ __trace_early_add_new_event(struct trace_event_call *call, +@@ -2369,7 +2369,6 @@ __trace_early_add_new_event(struct trace_event_call *call, return 0; } @@ -132884,7 +132956,7 @@ index 33e72c8..2b72441 100644 } } else if (runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED) { diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c -index 9630e9f..2071ac2 100644 +index 1f64ab0..26a7233 100644 --- a/sound/core/pcm_compat.c +++ b/sound/core/pcm_compat.c @@ -31,7 +31,7 @@ static int snd_pcm_ioctl_delay_compat(struct snd_pcm_substream *substream, @@ -134928,10 +135000,10 @@ index 0000000..b52a700 +} diff --git a/tools/gcc/gcc-common.h b/tools/gcc/gcc-common.h new file mode 100644 -index 0000000..a910e1c +index 0000000..fbed673 --- /dev/null +++ b/tools/gcc/gcc-common.h -@@ -0,0 +1,831 @@ +@@ -0,0 +1,858 @@ +#ifndef GCC_COMMON_H_INCLUDED +#define GCC_COMMON_H_INCLUDED + @@ -135088,21 +135160,6 @@ index 0000000..a910e1c +extern void dump_gimple_stmt(pretty_printer *, gimple, int, int); +#endif + -+#ifdef __cplusplus -+static inline void debug_tree(const_tree t) -+{ -+ debug_tree(CONST_CAST_TREE(t)); -+} -+ -+static inline void debug_gimple_stmt(const_gimple s) -+{ -+ debug_gimple_stmt(CONST_CAST_GIMPLE(s)); -+} -+#else -+#define debug_tree(t) debug_tree(CONST_CAST_TREE(t)) -+#define debug_gimple_stmt(s) debug_gimple_stmt(CONST_CAST_GIMPLE(s)) -+#endif -+ +#define __unused __attribute__((__unused__)) + +#define DECL_NAME_POINTER(node) IDENTIFIER_POINTER(DECL_NAME(node)) @@ -135226,6 +135283,11 @@ index 0000000..a910e1c + return cgraph_postorder(order); +} + ++static inline struct cgraph_node *cgraph_create_node(tree decl) ++{ ++ return cgraph_node(decl); ++} ++ +static inline struct cgraph_node *cgraph_get_create_node(tree decl) +{ + struct cgraph_node *node = cgraph_get_node(decl); @@ -135275,6 +135337,7 @@ index 0000000..a910e1c +#define NODE_SYMBOL(node) (node) +#define NODE_DECL(node) (node)->decl +#define INSN_LOCATION(INSN) RTL_LOCATION(INSN) ++#define vNULL NULL + +static inline int bb_loop_depth(const_basic_block bb) +{ @@ -135287,6 +135350,10 @@ index 0000000..a910e1c + + return lhs && !is_gimple_reg(lhs); +} ++ ++static inline void gimple_init_singleton(gimple g __unused) ++{ ++} +#endif + +#if BUILDING_GCC_VERSION == 4007 || BUILDING_GCC_VERSION == 4008 @@ 
-135462,7 +135529,10 @@ index 0000000..a910e1c + +static inline void set_decl_section_name(tree node, const char *value) +{ -+ DECL_SECTION_NAME(node) = build_string(strlen(value) + 1, value); ++ if (value) ++ DECL_SECTION_NAME(node) = build_string(strlen(value) + 1, value); ++ else ++ DECL_SECTION_NAME(node) = NULL; +} +#endif + @@ -135578,6 +135648,7 @@ index 0000000..a910e1c +#define debug_cgraph_node(node) (node)->debug() +#define cgraph_get_node(decl) cgraph_node::get(decl) +#define cgraph_get_create_node(decl) cgraph_node::get_create(decl) ++#define cgraph_create_node(decl) cgraph_node::create(decl) +#define cgraph_n_nodes symtab->cgraph_count +#define cgraph_max_uid symtab->cgraph_max_uid +#define varpool_get_node(decl) varpool_node::get(decl) @@ -135666,10 +135737,23 @@ index 0000000..a910e1c + symtab->remove_cgraph_duplication_hook(entry); +} + ++static inline void cgraph_call_node_duplication_hooks(cgraph_node_ptr node, cgraph_node_ptr node2) ++{ ++ symtab->call_cgraph_duplication_hooks(node, node2); ++} ++ ++static inline void cgraph_call_edge_duplication_hooks(cgraph_edge *cs1, cgraph_edge *cs2) ++{ ++ symtab->call_edge_duplication_hooks(cs1, cs2); ++} ++ +#if BUILDING_GCC_VERSION >= 6000 +typedef gimple *gimple_ptr; -+typedef const gimple *const_gimple; ++typedef const gimple *const_gimple_ptr; +#define gimple gimple_ptr ++#define const_gimple const_gimple_ptr ++#undef CONST_CAST_GIMPLE ++#define CONST_CAST_GIMPLE(X) CONST_CAST(gimple, (X)) +#endif + +/* gimple related */ @@ -135762,13 +135846,28 @@ index 0000000..a910e1c +#define gen_rtx_set(ARG0, ARG1) gen_rtx_SET((ARG0), (ARG1)) +#endif + ++#ifdef __cplusplus ++static inline void debug_tree(const_tree t) ++{ ++ debug_tree(CONST_CAST_TREE(t)); ++} ++ ++static inline void debug_gimple_stmt(const_gimple s) ++{ ++ debug_gimple_stmt(CONST_CAST_GIMPLE(s)); ++} ++#else ++#define debug_tree(t) debug_tree(CONST_CAST_TREE(t)) ++#define debug_gimple_stmt(s) debug_gimple_stmt(CONST_CAST_GIMPLE(s)) ++#endif ++ +#endif diff --git a/tools/gcc/gcc-generate-gimple-pass.h b/tools/gcc/gcc-generate-gimple-pass.h new file mode 100644 -index 0000000..0ba6a0d +index 0000000..0b081fe --- /dev/null +++ b/tools/gcc/gcc-generate-gimple-pass.h -@@ -0,0 +1,173 @@ +@@ -0,0 +1,175 @@ +/* + * Generator for GIMPLE pass related boilerplate code/data + * @@ -135886,15 +135985,17 @@ index 0000000..0ba6a0d +#if BUILDING_GCC_VERSION >= 5000 + virtual bool gate(function *) { return _GATE(); } +#else -+ bool gate(void) { return _GATE(); } ++ virtual bool gate(void) { return _GATE(); } +#endif +#endif + ++ virtual opt_pass *clone() { return new _PASS_NAME_PASS(); } ++ +#ifndef NO_EXECUTE +#if BUILDING_GCC_VERSION >= 5000 + virtual unsigned int execute(function *) { return _EXECUTE(); } +#else -+ unsigned int execute(void) { return _EXECUTE(); } ++ virtual unsigned int execute(void) { return _EXECUTE(); } +#endif +#endif +}; @@ -135944,10 +136045,10 @@ index 0000000..0ba6a0d +#endif /* PASS_NAME */ diff --git a/tools/gcc/gcc-generate-ipa-pass.h b/tools/gcc/gcc-generate-ipa-pass.h new file mode 100644 -index 0000000..283c2e1 +index 0000000..9bd926e --- /dev/null +++ b/tools/gcc/gcc-generate-ipa-pass.h -@@ -0,0 +1,287 @@ +@@ -0,0 +1,289 @@ +/* + * Generator for IPA pass related boilerplate code/data + * @@ -136146,7 +136247,7 @@ index 0000000..283c2e1 + _WRITE_OPTIMIZATION_SUMMARY, + _READ_OPTIMIZATION_SUMMARY, + _STMT_FIXUP, -+ 0, ++ FUNCTION_TRANSFORM_TODO_FLAGS_START, + _FUNCTION_TRANSFORM, + _VARIABLE_TRANSFORM) {} + @@ -136154,15 +136255,17 @@ index 
0000000..283c2e1 +#if BUILDING_GCC_VERSION >= 5000 + virtual bool gate(function *) { return _GATE(); } +#else -+ bool gate(void) { return _GATE(); } ++ virtual bool gate(void) { return _GATE(); } +#endif +#endif + ++ virtual opt_pass *clone() { return new _PASS_NAME_PASS(); } ++ +#ifndef NO_EXECUTE +#if BUILDING_GCC_VERSION >= 5000 + virtual unsigned int execute(function *) { return _EXECUTE(); } +#else -+ unsigned int execute(void) { return _EXECUTE(); } ++ virtual unsigned int execute(void) { return _EXECUTE(); } +#endif +#endif +}; @@ -136237,10 +136340,10 @@ index 0000000..283c2e1 +#endif /* PASS_NAME */ diff --git a/tools/gcc/gcc-generate-rtl-pass.h b/tools/gcc/gcc-generate-rtl-pass.h new file mode 100644 -index 0000000..f4cc205 +index 0000000..1dc67a5 --- /dev/null +++ b/tools/gcc/gcc-generate-rtl-pass.h -@@ -0,0 +1,173 @@ +@@ -0,0 +1,175 @@ +/* + * Generator for RTL pass related boilerplate code/data + * @@ -136358,15 +136461,17 @@ index 0000000..f4cc205 +#if BUILDING_GCC_VERSION >= 5000 + virtual bool gate(function *) { return _GATE(); } +#else -+ bool gate(void) { return _GATE(); } ++ virtual bool gate(void) { return _GATE(); } +#endif +#endif + ++ virtual opt_pass *clone() { return new _PASS_NAME_PASS(); } ++ +#ifndef NO_EXECUTE +#if BUILDING_GCC_VERSION >= 5000 + virtual unsigned int execute(function *) { return _EXECUTE(); } +#else -+ unsigned int execute(void) { return _EXECUTE(); } ++ virtual unsigned int execute(void) { return _EXECUTE(); } +#endif +#endif +}; @@ -136416,10 +136521,10 @@ index 0000000..f4cc205 +#endif /* PASS_NAME */ diff --git a/tools/gcc/gcc-generate-simple_ipa-pass.h b/tools/gcc/gcc-generate-simple_ipa-pass.h new file mode 100644 -index 0000000..159d5ae +index 0000000..a27e2b3 --- /dev/null +++ b/tools/gcc/gcc-generate-simple_ipa-pass.h -@@ -0,0 +1,173 @@ +@@ -0,0 +1,175 @@ +/* + * Generator for SIMPLE_IPA pass related boilerplate code/data + * @@ -136537,15 +136642,17 @@ index 0000000..159d5ae +#if BUILDING_GCC_VERSION >= 5000 + virtual bool gate(function *) { return _GATE(); } +#else -+ bool gate(void) { return _GATE(); } ++ virtual bool gate(void) { return _GATE(); } +#endif +#endif + ++ virtual opt_pass *clone() { return new _PASS_NAME_PASS(); } ++ +#ifndef NO_EXECUTE +#if BUILDING_GCC_VERSION >= 5000 + virtual unsigned int execute(function *) { return _EXECUTE(); } +#else -+ unsigned int execute(void) { return _EXECUTE(); } ++ virtual unsigned int execute(void) { return _EXECUTE(); } +#endif +#endif +}; @@ -136609,10 +136716,10 @@ index 0000000..7514850 +fi diff --git a/tools/gcc/initify_plugin.c b/tools/gcc/initify_plugin.c new file mode 100644 -index 0000000..a686bf8 +index 0000000..bf3eb6c --- /dev/null +++ b/tools/gcc/initify_plugin.c -@@ -0,0 +1,541 @@ +@@ -0,0 +1,536 @@ +/* + * Copyright 2015-2016 by Emese Revfy <re.emese@gmail.com> + * Licensed under the GPL v2, or (at your option) v3 @@ -136633,7 +136740,7 @@ index 0000000..a686bf8 +int plugin_is_GPL_compatible; + +static struct plugin_info initify_plugin_info = { -+ .version = "20160217", ++ .version = "20160306", + .help = "initify_plugin\n", +}; + @@ -136998,16 +137105,11 @@ index 0000000..a686bf8 + +static tree create_tmp_assign(gcall *stmt, unsigned int num) +{ -+ tree str, type, decl, arg = gimple_call_arg(stmt, num); ++ tree str, decl, arg = gimple_call_arg(stmt, num); + + str = get_string_cst(arg); + decl = build_decl(DECL_SOURCE_LOCATION(current_function_decl), VAR_DECL, create_tmp_var_name("cicus"), TREE_TYPE(str)); + -+ type = TREE_TYPE(TREE_TYPE(decl)); -+ type = 
build_qualified_type(type, TYPE_QUALS(type) | TYPE_QUAL_CONST); -+ TYPE_READONLY(type) = 1; -+ TREE_PUBLIC(type) = 0; -+ + DECL_INITIAL(decl) = str; + DECL_CONTEXT(decl) = current_function_decl; + DECL_ARTIFICIAL(decl) = 1; @@ -151992,7 +152094,7 @@ index 0000000..5aabc22 +#include "gcc-generate-gimple-pass.h" diff --git a/tools/gcc/size_overflow_plugin/intentional_overflow.c b/tools/gcc/size_overflow_plugin/intentional_overflow.c new file mode 100644 -index 0000000..33b5b1d +index 0000000..9f9bcd6 --- /dev/null +++ b/tools/gcc/size_overflow_plugin/intentional_overflow.c @@ -0,0 +1,1118 @@ @@ -152541,7 +152643,7 @@ index 0000000..33b5b1d + new_type = intDI_type_node; + break; + default: -+ debug_tree((tree)type); ++ debug_tree(type); + gcc_unreachable(); + } + @@ -153794,10 +153896,10 @@ index 0000000..fc58e16 +} diff --git a/tools/gcc/size_overflow_plugin/size_overflow_hash.data b/tools/gcc/size_overflow_plugin/size_overflow_hash.data new file mode 100644 -index 0000000..9e45ae9 +index 0000000..3702ccea --- /dev/null +++ b/tools/gcc/size_overflow_plugin/size_overflow_hash.data -@@ -0,0 +1,21511 @@ +@@ -0,0 +1,21512 @@ +enable_so_recv_ctrl_pipe_us_data_0 recv_ctrl_pipe us_data 0 0 NULL +enable_so___earlyonly_bootmem_alloc_fndecl_3 __earlyonly_bootmem_alloc fndecl 2-3-4 3 NULL +enable_so_v9fs_xattr_get_acl_fndecl_4 v9fs_xattr_get_acl fndecl 5 4 NULL @@ -165547,6 +165649,7 @@ index 0000000..9e45ae9 +enable_so_xfs_rtallocate_extent_block_fndecl_35604 xfs_rtallocate_extent_block fndecl 5-10 35604 NULL +enable_so_hback_porch_videomode_35606 hback_porch videomode 0 35606 NULL nohasharray +enable_so_count_nfs_direct_req_35606 count nfs_direct_req 0 35606 &enable_so_hback_porch_videomode_35606 ++enable_so_copy_ctl_value_to_user_fndecl_35608 copy_ctl_value_to_user fndecl 5 35608 NULL +enable_so_f2fs_iget_fndecl_35610 f2fs_iget fndecl 2 35610 NULL nohasharray +enable_so_flexcop_device_kmalloc_fndecl_35610 flexcop_device_kmalloc fndecl 1 35610 &enable_so_f2fs_iget_fndecl_35610 +enable_so_blocksize_brcmf_sdio_35612 blocksize brcmf_sdio 0 35612 NULL @@ -175409,10 +175512,10 @@ index 0000000..17bc0d8 +enable_so_zpios_read_fndecl_64734 zpios_read fndecl 3 64734 NULL diff --git a/tools/gcc/size_overflow_plugin/size_overflow_ipa.c b/tools/gcc/size_overflow_plugin/size_overflow_ipa.c new file mode 100644 -index 0000000..eae4c88 +index 0000000..758edea --- /dev/null +++ b/tools/gcc/size_overflow_plugin/size_overflow_ipa.c -@@ -0,0 +1,1160 @@ +@@ -0,0 +1,1163 @@ +/* + * Copyright 2011-2015 by Emese Revfy <re.emese@gmail.com> + * Licensed under the GPL v2, or (at your option) v3 @@ -175449,7 +175552,7 @@ index 0000000..eae4c88 +{ + gcc_assert(TREE_CODE(fndecl) == FUNCTION_DECL); +#if BUILDING_GCC_VERSION <= 4005 -+ return cgraph_get_node((tree)fndecl); ++ return cgraph_get_node(CONST_CAST_TREE(fndecl)); +#else + return cgraph_get_node(fndecl); +#endif @@ -175501,7 +175604,7 @@ index 0000000..eae4c88 + if (!TREE_STATIC(decl) && !DECL_EXTERNAL(decl)) + return NULL; + default: -+ debug_tree((tree)decl); ++ debug_tree(decl); + gcc_unreachable(); + } +} @@ -175645,6 +175748,9 @@ index 0000000..eae4c88 + if (DECL_BUILT_IN(orig_raw_data.decl) || DECL_BUILT_IN_CLASS(orig_raw_data.decl) == BUILT_IN_NORMAL) + return NULL; + ++ if (made_by_compiler(orig_raw_data.decl)) ++ return NULL; ++ + decl_code = TREE_CODE(orig_raw_data.decl); + if (decl_code == FIELD_DECL || decl_code == VAR_DECL) + orig_raw_data.num = clone_raw_data->num; @@ -175841,7 +175947,7 @@ index 0000000..eae4c88 + case IMAGPART_EXPR: + return; + default: 
-+ debug_tree((tree)node); ++ debug_tree(node); + gcc_unreachable(); + } + @@ -176575,7 +176681,7 @@ index 0000000..eae4c88 +#include "gcc-generate-ipa-pass.h" diff --git a/tools/gcc/size_overflow_plugin/size_overflow_misc.c b/tools/gcc/size_overflow_plugin/size_overflow_misc.c new file mode 100644 -index 0000000..6075e8f +index 0000000..a44ea14 --- /dev/null +++ b/tools/gcc/size_overflow_plugin/size_overflow_misc.c @@ -0,0 +1,505 @@ @@ -176677,8 +176783,8 @@ index 0000000..6075e8f + else if (TREE_CODE(type_name) == TYPE_DECL) + return DECL_NAME_POINTER(type_name); + -+ debug_tree((tree)field_decl); -+ debug_tree((tree)type_name); ++ debug_tree(field_decl); ++ debug_tree(type_name); + gcc_unreachable(); +} + @@ -176695,7 +176801,7 @@ index 0000000..6075e8f + return false; + + gcc_assert(decl_code == FUNCTION_DECL); -+ if (DECL_ABSTRACT_ORIGIN(decl) != NULL_TREE) ++ if (DECL_ABSTRACT_ORIGIN(decl) != NULL_TREE && DECL_ABSTRACT_ORIGIN(decl) != decl) + return true; + if (DECL_ARTIFICIAL(decl)) + return true; @@ -176971,8 +177077,8 @@ index 0000000..6075e8f + return orig_argnum_on_clone(correct_argnum_of_node, node, argnum); + + if (node) -+ debug_tree((tree)NODE_DECL(node)); -+ debug_tree((tree)correct_argnum_of_node_decl); ++ debug_tree(NODE_DECL(node)); ++ debug_tree(correct_argnum_of_node_decl); + gcc_unreachable(); +} + @@ -176999,17 +177105,17 @@ index 0000000..6075e8f + gcc_assert(TREE_CODE(clone_fndecl) == FUNCTION_DECL); + + if (DECL_ABSTRACT_ORIGIN(clone_fndecl)) -+ return (tree)DECL_ORIGIN(clone_fndecl); ++ return CONST_CAST_TREE(DECL_ABSTRACT_ORIGIN(clone_fndecl)); + node = get_cnode(clone_fndecl); + if (!node) -+ return (tree)clone_fndecl; ++ return CONST_CAST_TREE(clone_fndecl); + + while (node->clone_of) + node = node->clone_of; + if (!made_by_compiler(NODE_DECL(node))) + return NODE_DECL(node); + // Return the cloned decl because it is needed for the transform callback -+ return (tree)clone_fndecl; ++ return CONST_CAST_TREE(clone_fndecl); +} + +static tree get_interesting_fndecl_from_stmt(const gcall *stmt) @@ -177086,10 +177192,10 @@ index 0000000..6075e8f + diff --git a/tools/gcc/size_overflow_plugin/size_overflow_plugin.c b/tools/gcc/size_overflow_plugin/size_overflow_plugin.c new file mode 100644 -index 0000000..f8a24c1 +index 0000000..21e7d5c --- /dev/null +++ b/tools/gcc/size_overflow_plugin/size_overflow_plugin.c -@@ -0,0 +1,318 @@ +@@ -0,0 +1,290 @@ +/* + * Copyright 2011-2016 by Emese Revfy <re.emese@gmail.com> + * Licensed under the GPL v2, or (at your option) v3 @@ -177121,7 +177227,7 @@ index 0000000..f8a24c1 +tree size_overflow_type_TI; + +static struct plugin_info size_overflow_plugin_info = { -+ .version = "20160217", ++ .version = "20160306", + .help = "no-size-overflow\tturn off size overflow checking\n", +}; + @@ -177293,47 +177399,19 @@ index 0000000..f8a24c1 + TREE_NOTHROW(report_size_overflow_decl) = 1; +} + -+#if BUILDING_GCC_VERSION >= 4009 -+static bool gate_disable_ubsan_si_overflow(void) ++static bool disable_ubsan_si_overflow_gate(void) +{ ++#if BUILDING_GCC_VERSION >= 4009 + flag_sanitize &= ~SANITIZE_SI_OVERFLOW; ++#endif + return true; +} + -+static const struct pass_data disable_ubsan_si_overflow_pass_data = { -+ .type = GIMPLE_PASS, -+ .name = "disable_ubsan_si_overflow", -+ .optinfo_flags = OPTGROUP_NONE, -+#if BUILDING_GCC_VERSION >= 5000 -+#else -+ .has_gate = true, -+ .has_execute = false, -+#endif -+ .tv_id = TV_NONE, -+ .properties_required = 0, -+ .properties_provided = 0, -+ .properties_destroyed = 0, -+ .todo_flags_start = 0, -+ 
.todo_flags_finish = 0 -+}; ++#define PASS_NAME disable_ubsan_si_overflow + -+namespace { -+class disable_ubsan_si_overflow_pass : public gimple_opt_pass { -+public: -+ disable_ubsan_si_overflow_pass() : gimple_opt_pass(disable_ubsan_si_overflow_pass_data, g) {} -+#if BUILDING_GCC_VERSION >= 5000 -+ virtual bool gate(function *) { return gate_disable_ubsan_si_overflow(); } -+#else -+ bool gate() { return gate_disable_ubsan_si_overflow(); } -+#endif -+}; -+} ++#define NO_EXECUTE + -+opt_pass *make_disable_ubsan_si_overflow_pass(void) -+{ -+ return new disable_ubsan_si_overflow_pass(); -+} -+#endif ++#include "gcc-generate-gimple-pass.h" + +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version) +{ @@ -177410,7 +177488,7 @@ index 0000000..f8a24c1 +} diff --git a/tools/gcc/size_overflow_plugin/size_overflow_plugin_hash.c b/tools/gcc/size_overflow_plugin/size_overflow_plugin_hash.c new file mode 100644 -index 0000000..317cd6c +index 0000000..4ac5098 --- /dev/null +++ b/tools/gcc/size_overflow_plugin/size_overflow_plugin_hash.c @@ -0,0 +1,352 @@ @@ -177479,7 +177557,7 @@ index 0000000..317cd6c + case COMPLEX_TYPE: + return 13; + default: -+ debug_tree((tree)type); ++ debug_tree(type); + gcc_unreachable(); + } +} @@ -177768,7 +177846,7 @@ index 0000000..317cd6c + diff --git a/tools/gcc/size_overflow_plugin/size_overflow_transform.c b/tools/gcc/size_overflow_plugin/size_overflow_transform.c new file mode 100644 -index 0000000..3e8d46c +index 0000000..c958d66 --- /dev/null +++ b/tools/gcc/size_overflow_plugin/size_overflow_transform.c @@ -0,0 +1,745 @@ @@ -177854,13 +177932,13 @@ index 0000000..3e8d46c + + debug_gimple_stmt(assign); + fprintf(stderr, "orig_rhs:\n"); -+ debug_tree((tree)orig_rhs); ++ debug_tree(orig_rhs); + fprintf(stderr, "rhs1:\n"); -+ debug_tree((tree)rhs1); ++ debug_tree(rhs1); + fprintf(stderr, "rhs2:\n"); -+ debug_tree((tree)rhs2); ++ debug_tree(rhs2); + fprintf(stderr, "rhs3:\n"); -+ debug_tree((tree)rhs3); ++ debug_tree(rhs3); + gcc_unreachable(); +} + @@ -178519,7 +178597,7 @@ index 0000000..3e8d46c +} diff --git a/tools/gcc/size_overflow_plugin/size_overflow_transform_core.c b/tools/gcc/size_overflow_plugin/size_overflow_transform_core.c new file mode 100644 -index 0000000..4c231dd +index 0000000..9162fe5 --- /dev/null +++ b/tools/gcc/size_overflow_plugin/size_overflow_transform_core.c @@ -0,0 +1,1015 @@ @@ -178580,7 +178658,7 @@ index 0000000..4c231dd + new_type = size_overflow_type_TI; + break; + default: -+ debug_tree((tree)node); ++ debug_tree(node); + error("%s: unsupported gcc configuration (%qE).", __func__, current_function_decl); + gcc_unreachable(); + } diff --git a/4.4.4/4425_grsec_remove_EI_PAX.patch b/4.4.5/4425_grsec_remove_EI_PAX.patch index 2a1aa6c..2a1aa6c 100644 --- a/4.4.4/4425_grsec_remove_EI_PAX.patch +++ b/4.4.5/4425_grsec_remove_EI_PAX.patch diff --git a/4.4.4/4427_force_XATTR_PAX_tmpfs.patch b/4.4.5/4427_force_XATTR_PAX_tmpfs.patch index f6aea64..f6aea64 100644 --- a/4.4.4/4427_force_XATTR_PAX_tmpfs.patch +++ b/4.4.5/4427_force_XATTR_PAX_tmpfs.patch diff --git a/4.4.4/4430_grsec-remove-localversion-grsec.patch b/4.4.5/4430_grsec-remove-localversion-grsec.patch index 31cf878..31cf878 100644 --- a/4.4.4/4430_grsec-remove-localversion-grsec.patch +++ b/4.4.5/4430_grsec-remove-localversion-grsec.patch diff --git a/4.4.4/4435_grsec-mute-warnings.patch b/4.4.5/4435_grsec-mute-warnings.patch index b7564e4..b7564e4 100644 --- a/4.4.4/4435_grsec-mute-warnings.patch +++ b/4.4.5/4435_grsec-mute-warnings.patch diff --git 
a/4.4.4/4440_grsec-remove-protected-paths.patch b/4.4.5/4440_grsec-remove-protected-paths.patch
index 741546d..741546d 100644
--- a/4.4.4/4440_grsec-remove-protected-paths.patch
+++ b/4.4.5/4440_grsec-remove-protected-paths.patch
diff --git a/4.4.4/4450_grsec-kconfig-default-gids.patch b/4.4.5/4450_grsec-kconfig-default-gids.patch
index 77f9706..77f9706 100644
--- a/4.4.4/4450_grsec-kconfig-default-gids.patch
+++ b/4.4.5/4450_grsec-kconfig-default-gids.patch
diff --git a/4.4.4/4465_selinux-avc_audit-log-curr_ip.patch b/4.4.5/4465_selinux-avc_audit-log-curr_ip.patch
index f1c4923..f1c4923 100644
--- a/4.4.4/4465_selinux-avc_audit-log-curr_ip.patch
+++ b/4.4.5/4465_selinux-avc_audit-log-curr_ip.patch
diff --git a/4.4.4/4470_disable-compat_vdso.patch b/4.4.5/4470_disable-compat_vdso.patch
index 281aad9..281aad9 100644
--- a/4.4.4/4470_disable-compat_vdso.patch
+++ b/4.4.5/4470_disable-compat_vdso.patch
diff --git a/4.4.4/4475_emutramp_default_on.patch b/4.4.5/4475_emutramp_default_on.patch
index afd6019..afd6019 100644
--- a/4.4.4/4475_emutramp_default_on.patch
+++ b/4.4.5/4475_emutramp_default_on.patch