With recent 4.10 kernel the graphics isn't coming up under Xen. First failure message is:
[ 46.656649] i915 0000:00:02.0: swiotlb buffer is full (sz: 1630208 bytes)
Later I see splats like:
[ 49.393583] general protection fault: 0000 [#1] SMP [ 49.403800] Modules linked in: bridge stp llc tun arc4 iwldvm mac80211 uvcvideo snd_hda_codec_realtek snd_hda_codec_hdmi videobuf2_vmalloc snd_hda_codec_generic videobuf2_memops snd_hda_intel videobuf2_v4l2 snd_hda_codec videobuf2_core iwlwifi videodev snd_hda_core e1000e intel_rapl xhci_pci x86_pkg_temp_thermal xhci_hcd intel_powerclamp coretemp crct10dif_pclmul snd_hwdep joydev crc32_pclmul crc32c_intel ghash_clmulni_intel mei_me ptp iTCO_wdt aesni_intel snd_pcm ppdev mei aes_x86_64 iTCO_vendor_support lrw sdhci_pci gf128mul lpc_ich parport_pc pps_core cfg80211 glue_helper ablk_helper snd_timer parport mfd_core i2c_i801 snd i2c_smbus dell_laptop i2c_hid hid psmouse dell_rbtn dell_smbios tpm_tis i2c_designware_platform rfkill soundcore xenfs tpm_tis_core i2c_designware_core tpm dcdbas xen_privcmd evdev serio_raw battery ac dell_smm_hwmon thermal dell_smo8800 cryptd pcspkr dm_mod ext4 crc16 jbd2 mbcache sr_mod cdrom sd_mod ahci libahci ehci_pci ehci_hcd libata usbcore usb_common sdhci_acpi sdhci i915 video mmc_core i2c_algo_bit drm_kms_helper drm button xen_acpi_processor xen_pciback xen_netback xen_blkback xen_gntalloc xen_gntdev xen_evtchn target_core_mod configfs sg scsi_mod autofs4 [ 49.616634] CPU: 2 PID: 1663 Comm: gnome-shell Tainted: G W 4.8.0-rc2-pv+ #763 [ 49.633989] Hardware name: Dell Inc. 
Latitude E6440/0159N7, BIOS A07 06/26/2014 [ 49.649235] task: ffff8801f44ac140 task.stack: ffff8801f944c000 [ 49.661738] RIP: e030:[<ffffffffa01f5236>] [<ffffffffa01f5236>] gen6_ppgtt_insert_entries+0xd6/0x1f0 [i915] [ 49.682062] RSP: e02b:ffff8801f944faa0 EFLAGS: 00010202 [ 49.693355] RAX: 672f726574697254 RBX: ffff8801f46beda0 RCX: 0000000000000000 [ 49.708282] RDX: 672f726574697257 RSI: 0000000000000001 RDI: 0000000000000640 [ 49.723232] RBP: 0000000003020140 R08: 0000000000000000 R09: ffff8801f46e2000 [ 49.738182] R10: ffff8801f944fc00 R11: 0000000000000007 R12: 0000000000001031 [ 49.753110] R13: ffff8801f46e2000 R14: ffff8801f46be000 R15: 0000000000000369 [ 49.768039] FS: 00007f0e354e5a80(0000) GS:ffff8801ff900000(0000) knlGS:ffff8801ff900000 [ 49.784879] CS: e033 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 49.797033] CR2: 0000000005636ff0 CR3: 00000001e9c8c000 CR4: 0000000000042660 [ 49.811972] Stack: [ 49.816681] 727365642f61632f 0000000100000031 0000000100000000 ffff8801f44ac140 [ 49.832131] ffff8801f45f7a00 0000000000000080 0000000000000001 0000000000000000 [ 49.847584] ffff8801f944fcde ffff8801f45f7a00 ffffffffa01f7333 00000000eb9bbc88 [ 49.863045] Call Trace: [ 49.868652] [<ffffffffa01f7333>] ? aliasing_gtt_bind_vma+0x83/0xd0 [i915] [ 49.883111] [<ffffffffa01f9e69>] ? i915_vma_bind+0x79/0x110 [i915] [ 49.896345] [<ffffffffa01fd9cf>] ? __i915_vma_do_pin+0x2ff/0x4c0 [i915] [ 49.910477] [<ffffffff812d50c9>] ? __radix_tree_insert+0x29/0xc0 [ 49.923368] [<ffffffff811a2e04>] ? kmem_cache_alloc+0x1c4/0x1e0 [ 49.936159] [<ffffffffa01ef701>] ? i915_gem_execbuffer_reserve_vma.isra.43+0x131/0x1a0 [i915] [ 49.954117] [<ffffffffa01efa1a>] ? i915_gem_execbuffer_reserve.isra.44+0x2aa/0x3a0 [i915] [ 49.971352] [<ffffffffa01f1349>] ? i915_gem_do_execbuffer.isra.49+0x6a9/0x1690 [i915] [ 49.987889] [<ffffffff811514a3>] ? __alloc_pages_nodemask+0x133/0xc40 [ 50.001677] [<ffffffffa01f2726>] ? i915_gem_execbuffer2+0xd6/0x240 [i915] [ 50.016131] [<ffffffffa0107781>] ? 
drm_ioctl+0x191/0x3f0 [drm] [ 50.028675] [<ffffffffa01f2650>] ? i915_gem_execbuffer+0x320/0x320 [i915] [ 50.043123] [<ffffffff8117c6cc>] ? handle_mm_fault+0x3ac/0x12e0 [ 50.055839] [<ffffffff811d693a>] ? do_vfs_ioctl+0x8a/0x5b0 [ 50.067677] [<ffffffff811d6ecf>] ? SyS_ioctl+0x6f/0x80 [ 50.078815] [<ffffffff81581b36>] ? entry_SYSCALL_64_fastpath+0x1e/0xa8 [ 50.092725] Code: 00 10 00 00 44 3b 64 24 08 72 ae f6 45 00 02 75 66 48 8b 55 20 48 8d 45 20 f6 c2 01 0f 85 f8 00 00 00 48 85 c0 0f 84 fb 00 00 00 <44> 8b 60 08 8b 48 0c 48 89 c5 48 8b 70 10 44 01 e1 44 89 e7 48 [ 50.131694] RIP [<ffffffffa01f5236>] gen6_ppgtt_insert_entries+0xd6/0x1f0 [i915] [ 50.147396] RSP <ffff8801f944faa0> [ 50.158793] ---[ end trace 6fc5bf1c09423b3f ]---
I have bisected the commits and found the bug to be introduced in commit 871dfbd67d4ecbcc83fc9e80a310ca9bf3c44c40 ("drm/i915: Allow compaction upto SWIOTLB max segment size").
To be able to test this commit with Xen you'll need the attached patch in order to enable the system to boot or you have to disable loading of microcode.
Juergen
On Mon, Dec 19, 2016 at 12:39:16PM +0100, Juergen Gross wrote:
With recent 4.10 kernel the graphics isn't coming up under Xen. First failure message is:
[ 46.656649] i915 0000:00:02.0: swiotlb buffer is full (sz: 1630208 bytes)
Do we get a silent failure? i915_gem_gtt_prepare_pages() is where we call dma_map_sg() and pass the sg to swiotlb (in this case) for remapping, and we do check for an error value of 0. After that error, SWIOTLB_MAP_ERROR is propagated back and converted to 0 for dma_map_sg(). That looks valid, and we should report ENOMEM back to the caller.
Later I see splats like:
[ 49.393583] general protection fault: 0000 [#1] SMP
What was the faulting address? RAX is particularly non-pointer-like so I wonder if we walked onto an uninitialised portion of the sgtable. We may have tripped over a bug in our sg_page iterator.
The attached patch should prevent an early ENOMEM following the swiotlb allocation failure. But I suspect that we will still be tripping up the failure in the sg walker when binding to the GPU. -Chris
On 19/12/16 13:29, Chris Wilson wrote:
On Mon, Dec 19, 2016 at 12:39:16PM +0100, Juergen Gross wrote:
With recent 4.10 kernel the graphics isn't coming up under Xen. First failure message is:
[ 46.656649] i915 0000:00:02.0: swiotlb buffer is full (sz: 1630208 bytes)
Do we get a silent failure? i915_gem_gtt_prepare_pages() is where we call dma_map_sg() and pass the sg to swiotlb (in this case) for remapping, and we do check for an error value of 0. After that error, SWIOTLB_MAP_ERROR is propagated back and converted to 0 for dma_map_sg(). That looks valid, and we should report ENOMEM back to the caller.
Later I see splats like:
[ 49.393583] general protection fault: 0000 [#1] SMP
What was the faulting address? RAX is particularly non-pointer-like so I wonder if we walked onto an uninitialised portion of the sgtable. We may have tripped over a bug in our sg_page iterator.
During the bisect process there have been either GP or NULL pointer dereferences or other page faults. Typical addresses were:
xen_swiotlb_unmap_sg_attrs+0x1f/0x50: access to 0000000000000018 xen_swiotlb_unmap_sg_attrs+0x1f/0x50: access to 0000000003020118
The attached patch should prevent an early ENOMEM following the swiotlb allocation failure. But I suspect that we will still be tripping up the failure in the sg walker when binding to the GPU. -Chris
The patch is working not too bad. :-)
Still several "swiotlb buffer is full" messages (some with sz:, most without), but no faults any more (neither GP nor NULL pointer dereference). Graphical login is working now.
What I do see, however, is (no idea whether this is related):
[ 735.826492] INFO: task systemd-udevd:484 blocked for more than 120 seconds. [ 735.826497] Tainted: G W 4.9.0-pv+ #767 [ 735.826499] "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message. [ 735.826501] systemd-udevd D 0 484 443 0x00000000 [ 735.826507] Call Trace: [ 735.826522] ? __schedule+0x192/0x640 [ 735.826530] ? kmem_cache_free+0x45/0x150 [ 735.826535] ? schedule+0x2d/0x80 [ 735.826539] ? schedule_timeout+0x1f3/0x380 [ 735.826545] ? error_exit+0x9/0x20 [ 735.826555] ? sg_pool_index.part.0+0x2/0x2 [ 735.826561] ? wait_for_completion+0xa4/0x110 [ 735.826569] ? wake_up_q+0x70/0x70 [ 735.826577] ? cpufreq_boost_online+0x10/0x10 [acpi_cpufreq] [ 735.826585] ? cpuhp_issue_call+0x9c/0xe0 [ 735.826590] ? __cpuhp_setup_state+0xd5/0x1d0 [ 735.826599] ? acpi_cpufreq_init+0x1cd/0x1000 [acpi_cpufreq] [ 735.826601] ? 0xffffffffa00b1000 [ 735.826607] ? do_one_initcall+0x38/0x180 [ 735.826611] ? kmem_cache_alloc_trace+0x98/0x1e0 [ 735.826620] ? do_init_module+0x55/0x1e5 [ 735.826629] ? load_module+0x2088/0x26b0 [ 735.826633] ? __symbol_put+0x30/0x30 [ 735.826639] ? SYSC_finit_module+0x80/0xb0 [ 735.826644] ? entry_SYSCALL_64_fastpath+0x1e/0xad
I guess it is _not_ related, OTOH there is sg_pool_index() involved...
Juergen
On Mon, Dec 19, 2016 at 03:16:44PM +0100, Juergen Gross wrote:
On 19/12/16 13:29, Chris Wilson wrote:
On Mon, Dec 19, 2016 at 12:39:16PM +0100, Juergen Gross wrote:
With recent 4.10 kernel the graphics isn't coming up under Xen. First failure message is:
[ 46.656649] i915 0000:00:02.0: swiotlb buffer is full (sz: 1630208 bytes)
Do we get a silent failure? i915_gem_gtt_prepare_pages() is where we call dma_map_sg() and pass the sg to swiotlb (in this case) for remapping, and we do check for an error value of 0. After that error, SWIOTLB_MAP_ERROR is propagated back and converted to 0 for dma_map_sg(). That looks valid, and we should report ENOMEM back to the caller.
Later I see splats like:
[ 49.393583] general protection fault: 0000 [#1] SMP
What was the faulting address? RAX is particularly non-pointer-like so I wonder if we walked onto an uninitialised portion of the sgtable. We may have tripped over a bug in our sg_page iterator.
During the bisect process there have been either GP or NULL pointer dereferences or other page faults. Typical addresses were:
xen_swiotlb_unmap_sg_attrs+0x1f/0x50: access to 0000000000000018 xen_swiotlb_unmap_sg_attrs+0x1f/0x50: access to 0000000003020118
The attached patch should prevent an early ENOMEM following the swiotlb allocation failure. But I suspect that we will still be tripping up the failure in the sg walker when binding to the GPU. -Chris
The patch is working not too bad. :-)
Still several "swiotlb buffer is full" messages (some with sz:, most without), but no faults any more (neither GP nor NULL pointer dereference). Graphical login is working now.
I think I know why. The optimization that was added assumes that bus addresses are the same as physical addresses. Hence it packs all of the virtual addresses in the sg, and hands it off to SWIOTLB, which walks each one and realizes that it has to use the bounce buffer.
I am wondering if it would make sense to pull 'swiotlb_max_size' inside of SWIOTLB and make it a library-ish interface - so Xen-SWIOTLB can register as well and report, say, that it can only provide one page (unless it is running on baremetal).
Or make the usage of 'max_segment' and 'page_to_pfn(page) != last_pfn + 1' in i915_gem_object_get_pages_gtt use something similar to xen_biovec_phys_mergeable?
On Tue, Dec 20, 2016 at 09:42:46AM -0500, Konrad Rzeszutek Wilk wrote:
On Mon, Dec 19, 2016 at 03:16:44PM +0100, Juergen Gross wrote:
On 19/12/16 13:29, Chris Wilson wrote:
On Mon, Dec 19, 2016 at 12:39:16PM +0100, Juergen Gross wrote:
With recent 4.10 kernel the graphics isn't coming up under Xen. First failure message is:
[ 46.656649] i915 0000:00:02.0: swiotlb buffer is full (sz: 1630208 bytes)
Do we get a silent failure? i915_gem_gtt_prepare_pages() is where we call dma_map_sg() and pass the sg to swiotlb (in this case) for remapping, and we do check for an error value of 0. After that error, SWIOTLB_MAP_ERROR is propagated back and converted to 0 for dma_map_sg(). That looks valid, and we should report ENOMEM back to the caller.
Later I see splats like:
[ 49.393583] general protection fault: 0000 [#1] SMP
What was the faulting address? RAX is particularly non-pointer-like so I wonder if we walked onto an uninitialised portion of the sgtable. We may have tripped over a bug in our sg_page iterator.
During the bisect process there have been either GP or NULL pointer dereferences or other page faults. Typical addresses were:
xen_swiotlb_unmap_sg_attrs+0x1f/0x50: access to 0000000000000018 xen_swiotlb_unmap_sg_attrs+0x1f/0x50: access to 0000000003020118
The attached patch should prevent an early ENOMEM following the swiotlb allocation failure. But I suspect that we will still be tripping up the failure in the sg walker when binding to the GPU. -Chris
The patch is working not too bad. :-)
Still several "swiotlb buffer is full" messages (some with sz:, most without), but no faults any more (neither GP nor NULL pointer dereference). Graphical login is working now.
I think I know why. The optimization that was added assumes that bus addresses are the same as physical addresses. Hence it packs all of the virtual addresses in the sg, and hands it off to SWIOTLB, which walks each one and realizes that it has to use the bounce buffer.
I am wondering if it would make sense to pull 'swiotlb_max_size' inside of SWIOTLB and make it a library-ish interface - so Xen-SWIOTLB can register as well and report, say, that it can only provide one page (unless it is running on baremetal).
Or make the usage of 'max_segment' and 'page_to_pfn(page) != last_pfn + 1' in i915_gem_object_get_pages_gtt use something similar to xen_biovec_phys_mergeable?
Chris,
I was thinking of something like this (which Juergen has already tested).
From f196f1294fd25f1402c3dd1231babb8d7f5db2e7 Mon Sep 17 00:00:00 2001
From: Konrad Rzeszutek Wilk konrad.wilk@oracle.com Date: Tue, 20 Dec 2016 10:02:02 -0500 Subject: [PATCH] swiotlb: Export swiotlb_max_segment to users
So they can figure out what is the optimal number of pages that can be contiguously stitched together without fear of bounce buffer.
We also expose a mechanism for sub-users of the SWIOTLB API, such as Xen-SWIOTLB, to set the max segment value. And lastly if swiotlb=force is set (which mandates we bounce buffer everything) we set max_segment so at least we can bounce buffer one 4K page instead of a giant 512KB one for which we may not have space.
Reported-and-Tested-by: Juergen Gross jgross@suse.com Signed-off-by: Konrad Rzeszutek Wilk konrad.wilk@oracle.com --- drivers/gpu/drm/i915/i915_gem.c | 11 +---------- drivers/xen/swiotlb-xen.c | 4 ++++ include/linux/swiotlb.h | 3 +++ lib/swiotlb.c | 26 ++++++++++++++++++++++++++ 4 files changed, 34 insertions(+), 10 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index d0dcaf3..115fa39 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2290,15 +2290,6 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj, mutex_unlock(&obj->mm.lock); }
-static unsigned int swiotlb_max_size(void) -{ -#if IS_ENABLED(CONFIG_SWIOTLB) - return rounddown(swiotlb_nr_tbl() << IO_TLB_SHIFT, PAGE_SIZE); -#else - return 0; -#endif -} - static void i915_sg_trim(struct sg_table *orig_st) { struct sg_table new_st; @@ -2345,7 +2336,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) GEM_BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS); GEM_BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
- max_segment = swiotlb_max_size(); + max_segment = swiotlb_max_segment(); if (!max_segment) max_segment = rounddown(UINT_MAX, PAGE_SIZE);
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c index aba1200..f905d6e 100644 --- a/drivers/xen/swiotlb-xen.c +++ b/drivers/xen/swiotlb-xen.c @@ -275,6 +275,10 @@ int __ref xen_swiotlb_init(int verbose, bool early) rc = 0; } else rc = swiotlb_late_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs); + + if (!rc) + swiotlb_set_max_segment(PAGE_SIZE); + return rc; error: if (repeat--) { diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index d9c84a9..d7eacd8 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h @@ -114,11 +114,14 @@ swiotlb_dma_supported(struct device *hwdev, u64 mask);
#ifdef CONFIG_SWIOTLB extern void __init swiotlb_free(void); +unsigned int swiotlb_max_segment(void); #else static inline void swiotlb_free(void) { } +unsigned int swiotlb_max_segment(void) { return 0; } #endif
extern void swiotlb_print_info(void); extern int is_swiotlb_buffer(phys_addr_t paddr); +extern void swiotlb_set_max_segment(unsigned int);
#endif /* __LINUX_SWIOTLB_H */ diff --git a/lib/swiotlb.c b/lib/swiotlb.c index 9def738..975b8fc 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c @@ -83,6 +83,12 @@ static unsigned int *io_tlb_list; static unsigned int io_tlb_index;
/* + * Max segment that we can provide which (if pages are contiguous) will + * not be bounced (unless SWIOTLB_FORCE is set). + */ +unsigned int max_segment; + +/* * We need to save away the original address corresponding to a mapped entry * for the sync operations. */ @@ -124,6 +130,20 @@ unsigned long swiotlb_nr_tbl(void) } EXPORT_SYMBOL_GPL(swiotlb_nr_tbl);
+unsigned int swiotlb_max_segment(void) +{ + return max_segment; +} +EXPORT_SYMBOL_GPL(swiotlb_max_segment); + +void swiotlb_set_max_segment(unsigned int val) +{ + if (swiotlb_force == SWIOTLB_FORCE) + max_segment = 1; + else + max_segment = rounddown(val, PAGE_SIZE); +} + /* default to 64MB */ #define IO_TLB_DEFAULT_SIZE (64UL<<20) unsigned long swiotlb_size_or_default(void) @@ -205,6 +225,7 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose) if (verbose) swiotlb_print_info();
+ swiotlb_set_max_segment(io_tlb_nslabs << IO_TLB_SHIFT); return 0; }
@@ -283,6 +304,7 @@ swiotlb_late_init_with_default_size(size_t default_size) rc = swiotlb_late_init_with_tbl(vstart, io_tlb_nslabs); if (rc) free_pages((unsigned long)vstart, order); + return rc; }
@@ -337,6 +359,8 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
late_alloc = 1;
+ swiotlb_set_max_segment(io_tlb_nslabs << IO_TLB_SHIFT); + return 0;
cleanup4: @@ -351,6 +375,7 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs) io_tlb_end = 0; io_tlb_start = 0; io_tlb_nslabs = 0; + max_segment = 0; return -ENOMEM; }
@@ -379,6 +404,7 @@ void __init swiotlb_free(void) PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT)); } io_tlb_nslabs = 0; + max_segment = 0; }
int is_swiotlb_buffer(phys_addr_t paddr)
dri-devel@lists.freedesktop.org