drm/i915: Use a radixtree for random access to the object's backing storage
A while ago we switched from a contiguous array of pages to an sglist,
as that was both more convenient for mapping to hardware and avoided
the need for a vmalloc'ed array of pages on every object. However,
certain GEM API calls (such as pwrite and pread, as well as performing
relocations) still want access to individual struct pages. A quick hack
was to cache the last access so that finding the following page was
quick - this works so long as the caller desires sequential access.
Walking backwards, or multiple callers, still hits a slow linear search
for each page. One solution is to store each successful lookup in a
radix tree.
v2: Rewrite building the radixtree for clarity, hopefully.
v3: Rearrange execbuf to avoid calling i915_gem_object_get_sg() from
within an atomic section and so relax the allocation context to a simple
GFP_KERNEL and mutex.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20161028125858.23563-10-chris@chris-wilson.co.uk
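For illustration only (not part of the patch itself), below is a minimal
sketch of how the cached lookup can work with the i915_gem_object_page_iter
declared in the hunks that follow: the radix tree remembers the starting
page index of each scatterlist chunk the walk has already passed, and the
iterator mutex allows plain GFP_KERNEL insertions (per the v3 note). It
assumes the radix root and mutex are initialised at object-init time
(e.g. INIT_RADIX_TREE()/mutex_init()). The in-tree i915_gem.c lookup is
more elaborate (it also caches intra-chunk offsets using exceptional
radix-tree entries); treat the error handling here as a simplification.

/* Simplified sketch of the cached lookup; not the in-tree implementation. */
struct scatterlist *
i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
		       unsigned int n, unsigned int *offset)
{
	struct i915_gem_object_page_iter *iter = &obj->get_page;
	struct scatterlist *sg;

	mutex_lock(&iter->lock);

	/* Fast path: a previous walk already recorded this chunk,
	 * keyed by its starting page index.
	 */
	sg = radix_tree_lookup(&iter->radix, n);
	if (sg) {
		*offset = 0; /* n is the first page of the cached chunk */
		goto unlock;
	}

	/* Walking backwards past the cursor? Restart from the head. */
	if (n < iter->sg_idx) {
		iter->sg_pos = obj->pages->sgl;
		iter->sg_idx = 0;
	}

	/* Slow path: resume the linear walk, caching each chunk we pass.
	 * Insertion failures (-ENOMEM/-EEXIST) are ignored; the radix
	 * tree is only an accelerator for future lookups.
	 */
	sg = iter->sg_pos;
	while (iter->sg_idx + __sg_page_count(sg) <= n) {
		radix_tree_insert(&iter->radix, iter->sg_idx, sg);
		iter->sg_idx += __sg_page_count(sg);
		sg = ____sg_next(sg);
	}

	/* Remember the chunk we stopped on as well. */
	radix_tree_insert(&iter->radix, iter->sg_idx, sg);
	iter->sg_pos = sg;
	*offset = n - iter->sg_idx;

unlock:
	mutex_unlock(&iter->lock);
	return sg;
}
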
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a1b9801..85ae83a 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2286,9 +2286,12 @@ struct drm_i915_gem_object {
struct sg_table *pages;
int pages_pin_count;
- struct get_page {
- struct scatterlist *sg;
- int last;
+ struct i915_gem_object_page_iter {
+ struct scatterlist *sg_pos;
+ unsigned int sg_idx; /* in pages, but 32bit eek! */
+
+ struct radix_tree_root radix;
+ struct mutex lock; /* protects this cache */
} get_page;
void *mapping;
@@ -2491,6 +2494,14 @@ static __always_inline struct sgt_iter {
return s;
}
+static inline struct scatterlist *____sg_next(struct scatterlist *sg)
+{
+ ++sg;
+ if (unlikely(sg_is_chain(sg)))
+ sg = sg_chain_ptr(sg);
+ return sg;
+}
+
/**
* __sg_next - return the next scatterlist entry in a list
* @sg: The current sg entry
@@ -2505,9 +2516,7 @@ static inline struct scatterlist *__sg_next(struct scatterlist *sg)
#ifdef CONFIG_DEBUG_SG
BUG_ON(sg->sg_magic != SG_MAGIC);
#endif
- return sg_is_last(sg) ? NULL :
- likely(!sg_is_chain(++sg)) ? sg :
- sg_chain_ptr(sg);
+ return sg_is_last(sg) ? NULL : ____sg_next(sg);
}
/**
@@ -3185,45 +3194,21 @@ static inline int __sg_page_count(struct scatterlist *sg)
return sg->length >> PAGE_SHIFT;
}
+struct scatterlist *
+i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
+ unsigned int n, unsigned int *offset);
+
struct page *
-i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n);
+i915_gem_object_get_page(struct drm_i915_gem_object *obj,
+ unsigned int n);
-static inline dma_addr_t
-i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj, int n)
-{
- if (n < obj->get_page.last) {
- obj->get_page.sg = obj->pages->sgl;
- obj->get_page.last = 0;
- }
+struct page *
+i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
+ unsigned int n);
- while (obj->get_page.last + __sg_page_count(obj->get_page.sg) <= n) {
- obj->get_page.last += __sg_page_count(obj->get_page.sg++);
- if (unlikely(sg_is_chain(obj->get_page.sg)))
- obj->get_page.sg = sg_chain_ptr(obj->get_page.sg);
- }
-
- return sg_dma_address(obj->get_page.sg) + ((n - obj->get_page.last) << PAGE_SHIFT);
-}
-
-static inline struct page *
-i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
-{
- if (WARN_ON(n >= obj->base.size >> PAGE_SHIFT))
- return NULL;
-
- if (n < obj->get_page.last) {
- obj->get_page.sg = obj->pages->sgl;
- obj->get_page.last = 0;
- }
-
- while (obj->get_page.last + __sg_page_count(obj->get_page.sg) <= n) {
- obj->get_page.last += __sg_page_count(obj->get_page.sg++);
- if (unlikely(sg_is_chain(obj->get_page.sg)))
- obj->get_page.sg = sg_chain_ptr(obj->get_page.sg);
- }
-
- return nth_page(sg_page(obj->get_page.sg), n - obj->get_page.last);
-}
+dma_addr_t
+i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
+ unsigned long n);
static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
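For completeness, a sketch of how the new out-of-line getters declared
above can reduce to thin wrappers around i915_gem_object_get_sg()
(simplified; the in-tree versions may add further sanity checks):

struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
{
	struct scatterlist *sg;
	unsigned int offset;

	/* Find the chunk containing page n, plus n's offset within it. */
	sg = i915_gem_object_get_sg(obj, n, &offset);
	return nth_page(sg_page(sg), offset);
}

dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
				unsigned long n)
{
	struct scatterlist *sg;
	unsigned int offset;

	sg = i915_gem_object_get_sg(obj, n, &offset);
	return sg_dma_address(sg) + (offset << PAGE_SHIFT);
}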