From 7453c549f5f6485c0d79cad7844870dcc7d1b34d Mon Sep 17 00:00:00 2001
From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Date: Tue, 20 Dec 2016 10:02:02 -0500
Subject: [PATCH] swiotlb: Export swiotlb_max_segment to users

So they can figure out the optimal number of pages that can be
contiguously stitched together without fear of bounce buffering.

We also expose a mechanism for sub-users of the SWIOTLB API, such as
Xen-SWIOTLB, to set the max segment value. And lastly, if swiotlb=force
is set (which mandates that we bounce buffer everything), we set
max_segment so that at least we can bounce buffer one 4K page instead
of a giant 512KB one for which we may not have space.

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Reported-and-Tested-by: Juergen Gross <jgross@suse.com>
---
 drivers/gpu/drm/i915/i915_gem.c | 11 +----------
 drivers/xen/swiotlb-xen.c       |  4 ++++
 include/linux/swiotlb.h         |  3 +++
 lib/swiotlb.c                   | 26 ++++++++++++++++++++++++++
 4 files changed, 34 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d0dcaf35b4298..115fa399608cb 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2290,15 +2290,6 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
 	mutex_unlock(&obj->mm.lock);
 }
 
-static unsigned int swiotlb_max_size(void)
-{
-#if IS_ENABLED(CONFIG_SWIOTLB)
-	return rounddown(swiotlb_nr_tbl() << IO_TLB_SHIFT, PAGE_SIZE);
-#else
-	return 0;
-#endif
-}
-
 static void i915_sg_trim(struct sg_table *orig_st)
 {
 	struct sg_table new_st;
@@ -2345,7 +2336,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 	GEM_BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
 	GEM_BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
 
-	max_segment = swiotlb_max_size();
+	max_segment = swiotlb_max_segment();
 	if (!max_segment)
 		max_segment = rounddown(UINT_MAX, PAGE_SIZE);
 
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index aba12009422e3..f905d6eeb0482 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -275,6 +275,10 @@ int __ref xen_swiotlb_init(int verbose, bool early)
 		rc = 0;
 	} else
 		rc = swiotlb_late_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs);
+
+	if (!rc)
+		swiotlb_set_max_segment(PAGE_SIZE);
+
 	return rc;
 error:
 	if (repeat--) {
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index d9c84a9cde3dd..4ee479f2f355b 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -114,11 +114,14 @@ swiotlb_dma_supported(struct device *hwdev, u64 mask);
 
 #ifdef CONFIG_SWIOTLB
 extern void __init swiotlb_free(void);
+unsigned int swiotlb_max_segment(void);
 #else
 static inline void swiotlb_free(void) { }
+static inline unsigned int swiotlb_max_segment(void) { return 0; }
 #endif
 
 extern void swiotlb_print_info(void);
 extern int is_swiotlb_buffer(phys_addr_t paddr);
+extern void swiotlb_set_max_segment(unsigned int);
 
 #endif /* __LINUX_SWIOTLB_H */
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 9def738af4f4e..975b8fc4f1e11 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -82,6 +82,12 @@ static phys_addr_t io_tlb_overflow_buffer;
 static unsigned int *io_tlb_list;
 static unsigned int io_tlb_index;
 
+/*
+ * Max segment that we can provide which (if pages are contingous) will
+ * not be bounced (unless SWIOTLB_FORCE is set).
+ */
+unsigned int max_segment;
+
 /*
  * We need to save away the original address corresponding to a mapped entry
  * for the sync operations.
@@ -124,6 +130,20 @@ unsigned long swiotlb_nr_tbl(void)
 }
 EXPORT_SYMBOL_GPL(swiotlb_nr_tbl);
 
+unsigned int swiotlb_max_segment(void)
+{
+	return max_segment;
+}
+EXPORT_SYMBOL_GPL(swiotlb_max_segment);
+
+void swiotlb_set_max_segment(unsigned int val)
+{
+	if (swiotlb_force == SWIOTLB_FORCE)
+		max_segment = 1;
+	else
+		max_segment = rounddown(val, PAGE_SIZE);
+}
+
 /* default to 64MB */
 #define IO_TLB_DEFAULT_SIZE (64UL<<20)
 unsigned long swiotlb_size_or_default(void)
@@ -205,6 +225,7 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
 	if (verbose)
 		swiotlb_print_info();
 
+	swiotlb_set_max_segment(io_tlb_nslabs << IO_TLB_SHIFT);
 	return 0;
 }
 
@@ -283,6 +304,7 @@ swiotlb_late_init_with_default_size(size_t default_size)
 	rc = swiotlb_late_init_with_tbl(vstart, io_tlb_nslabs);
 	if (rc)
 		free_pages((unsigned long)vstart, order);
+
 	return rc;
 }
 
@@ -337,6 +359,8 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
 
 	late_alloc = 1;
 
+	swiotlb_set_max_segment(io_tlb_nslabs << IO_TLB_SHIFT);
+
 	return 0;
 
 cleanup4:
@@ -351,6 +375,7 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
 	io_tlb_end = 0;
 	io_tlb_start = 0;
 	io_tlb_nslabs = 0;
+	max_segment = 0;
 	return -ENOMEM;
 }
 
@@ -379,6 +404,7 @@ void __init swiotlb_free(void)
 				  PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
 	}
 	io_tlb_nslabs = 0;
+	max_segment = 0;
 }
 
 int is_swiotlb_buffer(phys_addr_t paddr)
-- 
GitLab
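
Note (not part of the patch): a minimal sketch of how a consumer of the
new export might use it, modelled on the i915 hunk above. The helper
name driver_max_sg_segment() is hypothetical and only for illustration;
the pattern is to cap scatterlist segment sizes at swiotlb_max_segment()
and fall back to a page-aligned UINT_MAX when it returns 0 (i.e. when
SWIOTLB is not in use).

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swiotlb.h>

/*
 * Hypothetical helper (illustration only): pick the largest scatterlist
 * segment a driver should build so that a contiguous run never exceeds
 * what the SWIOTLB bounce buffer can service.
 */
static unsigned int driver_max_sg_segment(void)
{
	unsigned int max_segment = swiotlb_max_segment();

	/* 0 means SWIOTLB is not in use; any page-aligned size will do. */
	if (!max_segment)
		max_segment = rounddown(UINT_MAX, PAGE_SIZE);

	return max_segment;
}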