-rw-r--r--  Makefile | 2
-rw-r--r--  arch/arm/crypto/sha256-armv4.pl | 3
-rw-r--r--  arch/arm/crypto/sha256-core.S_shipped | 3
-rw-r--r--  arch/arm/crypto/sha512-armv4.pl | 3
-rw-r--r--  arch/arm/crypto/sha512-core.S_shipped | 3
-rw-r--r--  arch/arm64/crypto/chacha-neon-core.S | 20
-rw-r--r--  arch/mips/bcm63xx/dev-enet.c | 8
-rw-r--r--  arch/mips/kernel/cmpxchg.c | 3
-rw-r--r--  arch/mips/kernel/setup.c | 3
-rw-r--r--  arch/mips/lantiq/xway/vmmc.c | 4
-rw-r--r--  arch/x86/include/asm/hyperv-tlfs.h | 2
-rw-r--r--  arch/x86/include/asm/uaccess.h | 6
-rw-r--r--  arch/x86/mm/extable.c | 58
-rw-r--r--  drivers/crypto/ccree/cc_pm.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 27
-rw-r--r--  drivers/gpu/drm/bochs/bochs_drv.c | 4
-rw-r--r--  drivers/gpu/drm/drm_atomic_helper.c | 9
-rw-r--r--  drivers/iommu/dmar.c | 2
-rw-r--r--  drivers/mmc/core/block.c | 6
-rw-r--r--  drivers/mmc/core/core.c | 2
-rw-r--r--  drivers/mmc/core/queue.c | 9
-rw-r--r--  drivers/mmc/host/cqhci.c | 13
-rw-r--r--  drivers/mmc/host/mmc_spi.c | 1
-rw-r--r--  drivers/mmc/host/renesas_sdhi_sys_dmac.c | 1
-rw-r--r--  drivers/mmc/host/sdhci-esdhc-imx.c | 9
-rw-r--r--  drivers/mmc/host/tmio_mmc.h | 5
-rw-r--r--  drivers/mmc/host/tmio_mmc_core.c | 17
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.c | 12
-rw-r--r--  drivers/net/dsa/mv88e6xxx/port.c | 2
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_board.c | 14
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 26
-rw-r--r--  drivers/scsi/3w-9xxx.c | 14
-rw-r--r--  drivers/scsi/3w-sas.c | 12
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_init.c | 8
-rw-r--r--  drivers/scsi/bfa/bfad.c | 18
-rw-r--r--  drivers/scsi/csiostor/csio_init.c | 7
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_main.c | 8
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | 8
-rw-r--r--  drivers/scsi/hptiop.c | 10
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 19
-rw-r--r--  drivers/scsi/scsi_lib.c | 1
-rw-r--r--  fs/afs/cell.c | 1
-rw-r--r--  fs/hugetlbfs/inode.c | 12
-rw-r--r--  fs/namespace.c | 2
-rw-r--r--  fs/orangefs/file.c | 4
-rw-r--r--  include/linux/atalk.h | 2
-rw-r--r--  include/linux/sched.h | 6
-rw-r--r--  include/net/sch_generic.h | 31
-rw-r--r--  lib/Kconfig.kasan | 22
-rw-r--r--  mm/hugetlb.c | 16
-rw-r--r--  mm/maccess.c | 6
-rw-r--r--  mm/migrate.c | 11
-rw-r--r--  mm/mmap.c | 7
-rw-r--r--  mm/shmem.c | 2
-rw-r--r--  net/appletalk/atalk_proc.c | 58
-rw-r--r--  net/appletalk/ddp.c | 37
-rw-r--r--  net/appletalk/sysctl_net_atalk.c | 5
-rw-r--r--  net/core/gen_stats.c | 2
-rw-r--r--  net/core/net-sysfs.c | 3
-rw-r--r--  net/sched/sch_generic.c | 13
-rw-r--r--  scripts/Makefile.kasan | 2
-rw-r--r--  virt/kvm/kvm_main.c | 2
63 files changed, 375 insertions, 254 deletions
diff --git a/Makefile b/Makefile
index 96c5335e7ee4..ac5ac28a24e9 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 0
SUBLEVEL = 0
-EXTRAVERSION = -rc7
+EXTRAVERSION = -rc8
NAME = Shy Crocodile
# *DOCUMENTATION*
diff --git a/arch/arm/crypto/sha256-armv4.pl b/arch/arm/crypto/sha256-armv4.pl
index b9ec44060ed3..a03cf4dfb781 100644
--- a/arch/arm/crypto/sha256-armv4.pl
+++ b/arch/arm/crypto/sha256-armv4.pl
@@ -212,10 +212,11 @@ K256:
.global sha256_block_data_order
.type sha256_block_data_order,%function
sha256_block_data_order:
+.Lsha256_block_data_order:
#if __ARM_ARCH__<7
sub r3,pc,#8 @ sha256_block_data_order
#else
- adr r3,sha256_block_data_order
+ adr r3,.Lsha256_block_data_order
#endif
#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
ldr r12,.LOPENSSL_armcap
diff --git a/arch/arm/crypto/sha256-core.S_shipped b/arch/arm/crypto/sha256-core.S_shipped
index 3b58300d611c..054aae0edfce 100644
--- a/arch/arm/crypto/sha256-core.S_shipped
+++ b/arch/arm/crypto/sha256-core.S_shipped
@@ -93,10 +93,11 @@ K256:
.global sha256_block_data_order
.type sha256_block_data_order,%function
sha256_block_data_order:
+.Lsha256_block_data_order:
#if __ARM_ARCH__<7
sub r3,pc,#8 @ sha256_block_data_order
#else
- adr r3,sha256_block_data_order
+ adr r3,.Lsha256_block_data_order
#endif
#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
ldr r12,.LOPENSSL_armcap
diff --git a/arch/arm/crypto/sha512-armv4.pl b/arch/arm/crypto/sha512-armv4.pl
index fb5d15048c0b..788c17b56ecc 100644
--- a/arch/arm/crypto/sha512-armv4.pl
+++ b/arch/arm/crypto/sha512-armv4.pl
@@ -274,10 +274,11 @@ WORD64(0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817)
.global sha512_block_data_order
.type sha512_block_data_order,%function
sha512_block_data_order:
+.Lsha512_block_data_order:
#if __ARM_ARCH__<7
sub r3,pc,#8 @ sha512_block_data_order
#else
- adr r3,sha512_block_data_order
+ adr r3,.Lsha512_block_data_order
#endif
#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
ldr r12,.LOPENSSL_armcap
diff --git a/arch/arm/crypto/sha512-core.S_shipped b/arch/arm/crypto/sha512-core.S_shipped
index b1c334a49cda..710ea309769e 100644
--- a/arch/arm/crypto/sha512-core.S_shipped
+++ b/arch/arm/crypto/sha512-core.S_shipped
@@ -141,10 +141,11 @@ WORD64(0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817)
.global sha512_block_data_order
.type sha512_block_data_order,%function
sha512_block_data_order:
+.Lsha512_block_data_order:
#if __ARM_ARCH__<7
sub r3,pc,#8 @ sha512_block_data_order
#else
- adr r3,sha512_block_data_order
+ adr r3,.Lsha512_block_data_order
#endif
#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
ldr r12,.LOPENSSL_armcap
diff --git a/arch/arm64/crypto/chacha-neon-core.S b/arch/arm64/crypto/chacha-neon-core.S
index 021bb9e9784b..706c4e10e9e2 100644
--- a/arch/arm64/crypto/chacha-neon-core.S
+++ b/arch/arm64/crypto/chacha-neon-core.S
@@ -158,8 +158,8 @@ ENTRY(hchacha_block_neon)
mov w3, w2
bl chacha_permute
- st1 {v0.16b}, [x1], #16
- st1 {v3.16b}, [x1]
+ st1 {v0.4s}, [x1], #16
+ st1 {v3.4s}, [x1]
ldp x29, x30, [sp], #16
ret
@@ -532,6 +532,10 @@ ENTRY(chacha_4block_xor_neon)
add v3.4s, v3.4s, v19.4s
add a2, a2, w8
add a3, a3, w9
+CPU_BE( rev a0, a0 )
+CPU_BE( rev a1, a1 )
+CPU_BE( rev a2, a2 )
+CPU_BE( rev a3, a3 )
ld4r {v24.4s-v27.4s}, [x0], #16
ld4r {v28.4s-v31.4s}, [x0]
@@ -552,6 +556,10 @@ ENTRY(chacha_4block_xor_neon)
add v7.4s, v7.4s, v23.4s
add a6, a6, w8
add a7, a7, w9
+CPU_BE( rev a4, a4 )
+CPU_BE( rev a5, a5 )
+CPU_BE( rev a6, a6 )
+CPU_BE( rev a7, a7 )
// x8[0-3] += s2[0]
// x9[0-3] += s2[1]
@@ -569,6 +577,10 @@ ENTRY(chacha_4block_xor_neon)
add v11.4s, v11.4s, v27.4s
add a10, a10, w8
add a11, a11, w9
+CPU_BE( rev a8, a8 )
+CPU_BE( rev a9, a9 )
+CPU_BE( rev a10, a10 )
+CPU_BE( rev a11, a11 )
// x12[0-3] += s3[0]
// x13[0-3] += s3[1]
@@ -586,6 +598,10 @@ ENTRY(chacha_4block_xor_neon)
add v15.4s, v15.4s, v31.4s
add a14, a14, w8
add a15, a15, w9
+CPU_BE( rev a12, a12 )
+CPU_BE( rev a13, a13 )
+CPU_BE( rev a14, a14 )
+CPU_BE( rev a15, a15 )
// interleave 32-bit words in state n, n+1
ldp w6, w7, [x2], #64
diff --git a/arch/mips/bcm63xx/dev-enet.c b/arch/mips/bcm63xx/dev-enet.c
index 07b4c65a88a4..8e73d65f3480 100644
--- a/arch/mips/bcm63xx/dev-enet.c
+++ b/arch/mips/bcm63xx/dev-enet.c
@@ -70,6 +70,8 @@ static struct platform_device bcm63xx_enet_shared_device = {
static int shared_device_registered;
+static u64 enet_dmamask = DMA_BIT_MASK(32);
+
static struct resource enet0_res[] = {
{
.start = -1, /* filled at runtime */
@@ -99,6 +101,8 @@ static struct platform_device bcm63xx_enet0_device = {
.resource = enet0_res,
.dev = {
.platform_data = &enet0_pd,
+ .dma_mask = &enet_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
},
};
@@ -131,6 +135,8 @@ static struct platform_device bcm63xx_enet1_device = {
.resource = enet1_res,
.dev = {
.platform_data = &enet1_pd,
+ .dma_mask = &enet_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
},
};
@@ -157,6 +163,8 @@ static struct platform_device bcm63xx_enetsw_device = {
.resource = enetsw_res,
.dev = {
.platform_data = &enetsw_pd,
+ .dma_mask = &enet_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
},
};
diff --git a/arch/mips/kernel/cmpxchg.c b/arch/mips/kernel/cmpxchg.c
index 0b9535bc2c53..6b2a4a902a98 100644
--- a/arch/mips/kernel/cmpxchg.c
+++ b/arch/mips/kernel/cmpxchg.c
@@ -54,10 +54,9 @@ unsigned long __xchg_small(volatile void *ptr, unsigned long val, unsigned int s
unsigned long __cmpxchg_small(volatile void *ptr, unsigned long old,
unsigned long new, unsigned int size)
{
- u32 mask, old32, new32, load32;
+ u32 mask, old32, new32, load32, load;
volatile u32 *ptr32;
unsigned int shift;
- u8 load;
/* Check that ptr is naturally aligned */
WARN_ON((unsigned long)ptr & (size - 1));
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 8c6c48ed786a..d2e5a5ad0e6f 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -384,7 +384,8 @@ static void __init bootmem_init(void)
init_initrd();
reserved_end = (unsigned long) PFN_UP(__pa_symbol(&_end));
- memblock_reserve(PHYS_OFFSET, reserved_end << PAGE_SHIFT);
+ memblock_reserve(PHYS_OFFSET,
+ (reserved_end << PAGE_SHIFT) - PHYS_OFFSET);
/*
* max_low_pfn is not a number of pages. The number of pages
diff --git a/arch/mips/lantiq/xway/vmmc.c b/arch/mips/lantiq/xway/vmmc.c
index 577ec81b557d..3deab9a77718 100644
--- a/arch/mips/lantiq/xway/vmmc.c
+++ b/arch/mips/lantiq/xway/vmmc.c
@@ -31,8 +31,8 @@ static int vmmc_probe(struct platform_device *pdev)
dma_addr_t dma;
cp1_base =
- (void *) CPHYSADDR(dma_alloc_coherent(NULL, CP1_SIZE,
- &dma, GFP_ATOMIC));
+ (void *) CPHYSADDR(dma_alloc_coherent(&pdev->dev, CP1_SIZE,
+ &dma, GFP_KERNEL));
gpio_count = of_gpio_count(pdev->dev.of_node);
while (gpio_count > 0) {
diff --git a/arch/x86/include/asm/hyperv-tlfs.h b/arch/x86/include/asm/hyperv-tlfs.h
index 705dafc2d11a..2bdbbbcfa393 100644
--- a/arch/x86/include/asm/hyperv-tlfs.h
+++ b/arch/x86/include/asm/hyperv-tlfs.h
@@ -841,7 +841,7 @@ union hv_gpa_page_range {
* count is equal with how many entries of union hv_gpa_page_range can
* be populated into the input parameter page.
*/
-#define HV_MAX_FLUSH_REP_COUNT (PAGE_SIZE - 2 * sizeof(u64) / \
+#define HV_MAX_FLUSH_REP_COUNT ((PAGE_SIZE - 2 * sizeof(u64)) / \
sizeof(union hv_gpa_page_range))
struct hv_guest_mapping_flush_list {
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 780f2b42c8ef..c1334aaaa78d 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -284,7 +284,7 @@ do { \
__put_user_goto(x, ptr, "l", "k", "ir", label); \
break; \
case 8: \
- __put_user_goto_u64((__typeof__(*ptr))(x), ptr, label); \
+ __put_user_goto_u64(x, ptr, label); \
break; \
default: \
__put_user_bad(); \
@@ -431,8 +431,10 @@ do { \
({ \
__label__ __pu_label; \
int __pu_err = -EFAULT; \
+ __typeof__(*(ptr)) __pu_val; \
+ __pu_val = x; \
__uaccess_begin(); \
- __put_user_size((x), (ptr), (size), __pu_label); \
+ __put_user_size(__pu_val, (ptr), (size), __pu_label); \
__pu_err = 0; \
__pu_label: \
__uaccess_end(); \
diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
index 6521134057e8..856fa409c536 100644
--- a/arch/x86/mm/extable.c
+++ b/arch/x86/mm/extable.c
@@ -117,67 +117,11 @@ __visible bool ex_handler_fprestore(const struct exception_table_entry *fixup,
}
EXPORT_SYMBOL_GPL(ex_handler_fprestore);
-/* Helper to check whether a uaccess fault indicates a kernel bug. */
-static bool bogus_uaccess(struct pt_regs *regs, int trapnr,
- unsigned long fault_addr)
-{
- /* This is the normal case: #PF with a fault address in userspace. */
- if (trapnr == X86_TRAP_PF && fault_addr < TASK_SIZE_MAX)
- return false;
-
- /*
- * This code can be reached for machine checks, but only if the #MC
- * handler has already decided that it looks like a candidate for fixup.
- * This e.g. happens when attempting to access userspace memory which
- * the CPU can't access because of uncorrectable bad memory.
- */
- if (trapnr == X86_TRAP_MC)
- return false;
-
- /*
- * There are two remaining exception types we might encounter here:
- * - #PF for faulting accesses to kernel addresses
- * - #GP for faulting accesses to noncanonical addresses
- * Complain about anything else.
- */
- if (trapnr != X86_TRAP_PF && trapnr != X86_TRAP_GP) {
- WARN(1, "unexpected trap %d in uaccess\n", trapnr);
- return false;
- }
-
- /*
- * This is a faulting memory access in kernel space, on a kernel
- * address, in a usercopy function. This can e.g. be caused by improper
- * use of helpers like __put_user and by improper attempts to access
- * userspace addresses in KERNEL_DS regions.
- * The one (semi-)legitimate exception are probe_kernel_{read,write}(),
- * which can be invoked from places like kgdb, /dev/mem (for reading)
- * and privileged BPF code (for reading).
- * The probe_kernel_*() functions set the kernel_uaccess_faults_ok flag
- * to tell us that faulting on kernel addresses, and even noncanonical
- * addresses, in a userspace accessor does not necessarily imply a
- * kernel bug, root might just be doing weird stuff.
- */
- if (current->kernel_uaccess_faults_ok)
- return false;
-
- /* This is bad. Refuse the fixup so that we go into die(). */
- if (trapnr == X86_TRAP_PF) {
- pr_emerg("BUG: pagefault on kernel address 0x%lx in non-whitelisted uaccess\n",
- fault_addr);
- } else {
- pr_emerg("BUG: GPF in non-whitelisted uaccess (non-canonical address?)\n");
- }
- return true;
-}
-
__visible bool ex_handler_uaccess(const struct exception_table_entry *fixup,
struct pt_regs *regs, int trapnr,
unsigned long error_code,
unsigned long fault_addr)
{
- if (bogus_uaccess(regs, trapnr, fault_addr))
- return false;
regs->ip = ex_fixup_addr(fixup);
return true;
}
@@ -188,8 +132,6 @@ __visible bool ex_handler_ext(const struct exception_table_entry *fixup,
unsigned long error_code,
unsigned long fault_addr)
{
- if (bogus_uaccess(regs, trapnr, fault_addr))
- return false;
/* Special hack for uaccess_err */
current->thread.uaccess_err = 1;
regs->ip = ex_fixup_addr(fixup);
diff --git a/drivers/crypto/ccree/cc_pm.h b/drivers/crypto/ccree/cc_pm.h
index f62624357020..907a6db4d6c0 100644
--- a/drivers/crypto/ccree/cc_pm.h
+++ b/drivers/crypto/ccree/cc_pm.h
@@ -30,7 +30,7 @@ static inline int cc_pm_init(struct cc_drvdata *drvdata)
return 0;
}
-static void cc_pm_go(struct cc_drvdata *drvdata) {}
+static inline void cc_pm_go(struct cc_drvdata *drvdata) {}
static inline void cc_pm_fini(struct cc_drvdata *drvdata) {}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index aadd0fa42e43..3aa42c64484a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -405,6 +405,7 @@ struct amdgpu_crtc {
struct amdgpu_flip_work *pflip_works;
enum amdgpu_flip_status pflip_status;
int deferred_flip_completion;
+ u64 last_flip_vblank;
/* pll sharing */
struct amdgpu_atom_ss ss;
bool ss_enabled;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 5296b8f3e0ab..636d14a60952 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -303,12 +303,11 @@ static void dm_pflip_high_irq(void *interrupt_params)
return;
}
+ /* Update to correct count(s) if racing with vblank irq */
+ amdgpu_crtc->last_flip_vblank = drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
/* wake up userspace */
if (amdgpu_crtc->event) {
- /* Update to correct count(s) if racing with vblank irq */
- drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
-
drm_crtc_send_vblank_event(&amdgpu_crtc->base, amdgpu_crtc->event);
/* page flip completed. clean up */
@@ -4828,6 +4827,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
int planes_count = 0;
unsigned long flags;
+ u64 last_flip_vblank;
+ bool vrr_active = acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE;
/* update planes when needed */
for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
@@ -4859,6 +4860,16 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
/* In commit tail framework this cannot happen */
WARN_ON(1);
}
+
+ /* For variable refresh rate mode only:
+ * Get vblank of last completed flip to avoid > 1 vrr flips per
+ * video frame by use of throttling, but allow flip programming
+ * anywhere in the possibly large variable vrr vblank interval
+ * for fine-grained flip timing control and more opportunity to
+ * avoid stutter on late submission of amdgpu_dm_do_flip() calls.
+ */
+ last_flip_vblank = acrtc_attach->last_flip_vblank;
+
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
if (!pflip_needed || plane->type == DRM_PLANE_TYPE_OVERLAY) {
@@ -4882,10 +4893,18 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
if (plane->type == DRM_PLANE_TYPE_PRIMARY)
drm_crtc_vblank_get(crtc);
+ /* Use old throttling in non-vrr fixed refresh rate mode
+ * to keep flip scheduling based on target vblank counts
+ * working in a backwards compatible way, e.g., clients
+ * using GLX_OML_sync_control extension.
+ */
+ if (!vrr_active)
+ last_flip_vblank = drm_crtc_vblank_count(crtc);
+
amdgpu_dm_do_flip(
crtc,
fb,
- (uint32_t)drm_crtc_vblank_count(crtc) + *wait_for_vblank,
+ (uint32_t) last_flip_vblank + *wait_for_vblank,
dc_state);
}
diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c
index f3dd66ae990a..aa35007262cd 100644
--- a/drivers/gpu/drm/bochs/bochs_drv.c
+++ b/drivers/gpu/drm/bochs/bochs_drv.c
@@ -154,6 +154,10 @@ static int bochs_pci_probe(struct pci_dev *pdev,
if (IS_ERR(dev))
return PTR_ERR(dev);
+ ret = pci_enable_device(pdev);
+ if (ret)
+ goto err_free_dev;
+
dev->pdev = pdev;
pci_set_drvdata(pdev, dev);
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 54e2ae614dcc..f4290f6b0c38 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -1602,6 +1602,15 @@ int drm_atomic_helper_async_check(struct drm_device *dev,
old_plane_state->crtc != new_plane_state->crtc)
return -EINVAL;
+ /*
+ * FIXME: Since prepare_fb and cleanup_fb are always called on
+ * the new_plane_state for async updates we need to block framebuffer
+ * changes. This prevents use of a fb that's been cleaned up and
+ * double cleanups from occurring.
+ */
+ if (old_plane_state->fb != new_plane_state->fb)
+ return -EINVAL;
+
funcs = plane->helper_private;
if (!funcs->atomic_async_update)
return -EINVAL;
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index dc9f14811e0f..58dc70bffd5b 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -144,7 +144,7 @@ dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
for (tmp = dev; tmp; tmp = tmp->bus->self)
level++;
- size = sizeof(*info) + level * sizeof(struct acpi_dmar_pci_path);
+ size = sizeof(*info) + level * sizeof(info->path[0]);
if (size <= sizeof(dmar_pci_notify_info_buf)) {
info = (struct dmar_pci_notify_info *)dmar_pci_notify_info_buf;
} else {
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 14f3fdb8c6bb..9ce8eb51a60f 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -2380,12 +2380,6 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
"mmcblk%u%s", card->host->index, subname ? subname : "");
- if (mmc_card_mmc(card))
- blk_queue_logical_block_size(md->queue.queue,
- card->ext_csd.data_sector_size);
- else
- blk_queue_logical_block_size(md->queue.queue, 512);
-
set_capacity(md->disk, size);
if (mmc_host_cmd23(card->host)) {
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 5bd58b95d318..b27a1e620233 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -95,7 +95,7 @@ static void mmc_should_fail_request(struct mmc_host *host,
if (!data)
return;
- if (cmd->error || data->error ||
+ if ((cmd && cmd->error) || data->error ||
!should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
return;
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index 35cc138b096d..15a45ec6518d 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -355,6 +355,7 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
{
struct mmc_host *host = card->host;
u64 limit = BLK_BOUNCE_HIGH;
+ unsigned block_size = 512;
if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
@@ -368,7 +369,13 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
blk_queue_max_hw_sectors(mq->queue,
min(host->max_blk_count, host->max_req_size / 512));
blk_queue_max_segments(mq->queue, host->max_segs);
- blk_queue_max_segment_size(mq->queue, host->max_seg_size);
+
+ if (mmc_card_mmc(card))
+ block_size = card->ext_csd.data_sector_size;
+
+ blk_queue_logical_block_size(mq->queue, block_size);
+ blk_queue_max_segment_size(mq->queue,
+ round_down(host->max_seg_size, block_size));
INIT_WORK(&mq->recovery_work, mmc_mq_recovery_handler);
INIT_WORK(&mq->complete_work, mmc_blk_mq_complete_work);
diff --git a/drivers/mmc/host/cqhci.c b/drivers/mmc/host/cqhci.c
index 159270e947cf..a8af682a9182 100644
--- a/drivers/mmc/host/cqhci.c
+++ b/drivers/mmc/host/cqhci.c
@@ -201,7 +201,7 @@ static int cqhci_host_alloc_tdl(struct cqhci_host *cq_host)
cq_host->desc_size = cq_host->slot_sz * cq_host->num_slots;
cq_host->data_size = cq_host->trans_desc_len * cq_host->mmc->max_segs *
- (cq_host->num_slots - 1);
+ cq_host->mmc->cqe_qdepth;
pr_debug("%s: cqhci: desc_size: %zu data_sz: %zu slot-sz: %d\n",