/**************************************************************************
 *
 * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX. USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/
/*
 * Authors:
 * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 */

#ifndef _DRM_MM_H_
#define _DRM_MM_H_

/*
 * Generic range manager structs
 */
#include <linux/bug.h>
#include <linux/rbtree.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/seq_file.h>
#endif

enum drm_mm_search_flags {
        DRM_MM_SEARCH_DEFAULT = 0,
        DRM_MM_SEARCH_BEST =    1 << 0,
        DRM_MM_SEARCH_BELOW =   1 << 1,
};

enum drm_mm_allocator_flags {
        DRM_MM_CREATE_DEFAULT = 0,
        DRM_MM_CREATE_TOP =     1 << 0,
};

#define DRM_MM_BOTTOMUP DRM_MM_SEARCH_DEFAULT, DRM_MM_CREATE_DEFAULT
#define DRM_MM_TOPDOWN DRM_MM_SEARCH_BELOW, DRM_MM_CREATE_TOP

struct drm_mm_node {
        struct list_head node_list;
        struct list_head hole_stack;
        struct rb_node rb;
        unsigned hole_follows : 1;
        unsigned scanned_block : 1;
        unsigned scanned_prev_free : 1;
        unsigned scanned_next_free : 1;
        unsigned scanned_preceeds_hole : 1;
        unsigned allocated : 1;
        unsigned long color;
        u64 start;
        u64 size;
        u64 __subtree_last;
        struct drm_mm *mm;
};

struct drm_mm {
        /* List of all memory nodes that immediately precede a free hole. */
        struct list_head hole_stack;
        /* head_node.node_list is the list of all memory nodes, ordered
         * according to the (increasing) start address of the memory node. */
        struct drm_mm_node head_node;
        /* Keep an interval_tree for fast lookup of drm_mm_nodes by address. */
        struct rb_root interval_tree;

        unsigned int scan_check_range : 1;
        unsigned scan_alignment;
        unsigned long scan_color;
        u64 scan_size;
        u64 scan_hit_start;
        u64 scan_hit_end;
        unsigned scanned_blocks;
        u64 scan_start;
        u64 scan_end;
        struct drm_mm_node *prev_scanned_node;

        void (*color_adjust)(struct drm_mm_node *node, unsigned long color,
                             u64 *start, u64 *end);
};

/**
 * drm_mm_node_allocated - checks whether a node is allocated
 * @node: drm_mm_node to check
 *
 * Drivers should use this helper for proper encapsulation of drm_mm
 * internals.
 *
 * Returns:
 * True if the @node is allocated.
 */
static inline bool drm_mm_node_allocated(struct drm_mm_node *node)
{
        return node->allocated;
}

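/*
 * Example: a hedged sketch (not part of drm_mm.c) of how a driver that
 * embeds a drm_mm_node in its own buffer object might use this helper to
 * make teardown idempotent. struct my_bo and my_bo_unbind() are
 * hypothetical.
 *
 *      static void my_bo_unbind(struct my_bo *bo)
 *      {
 *              if (drm_mm_node_allocated(&bo->vma_node))
 *                      drm_mm_remove_node(&bo->vma_node);
 *      }
 */
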
/**
 * drm_mm_initialized - checks whether an allocator is initialized
 * @mm: drm_mm to check
 *
 * Drivers should use this helper for proper encapsulation of drm_mm
 * internals.
 *
 * Returns:
 * True if the @mm is initialized.
 */
static inline bool drm_mm_initialized(struct drm_mm *mm)
{
        return mm->hole_stack.next;
}

static inline u64 __drm_mm_hole_node_start(struct drm_mm_node *hole_node)
{
        return hole_node->start + hole_node->size;
}

/**
 * drm_mm_hole_node_start - computes the start of the hole following @hole_node
 * @hole_node: drm_mm_node which implicitly tracks the following hole
 *
 * This is useful for driver-specific debug dumpers. Otherwise drivers should
 * not inspect holes themselves. Drivers must check first whether a hole indeed
 * follows by looking at node->hole_follows.
 *
 * Returns:
 * Start of the subsequent hole.
 */
static inline u64 drm_mm_hole_node_start(struct drm_mm_node *hole_node)
{
        BUG_ON(!hole_node->hole_follows);
        return __drm_mm_hole_node_start(hole_node);
}

static inline u64 __drm_mm_hole_node_end(struct drm_mm_node *hole_node)
{
        return list_next_entry(hole_node, node_list)->start;
}

/**
 * drm_mm_hole_node_end - computes the end of the hole following @hole_node
 * @hole_node: drm_mm_node which implicitly tracks the following hole
 *
 * This is useful for driver-specific debug dumpers. Otherwise drivers should
 * not inspect holes themselves. Drivers must check first whether a hole indeed
 * follows by looking at node->hole_follows.
 *
 * Returns:
 * End of the subsequent hole.
 */
static inline u64 drm_mm_hole_node_end(struct drm_mm_node *hole_node)
{
        return __drm_mm_hole_node_end(hole_node);
}

/**
 * drm_mm_for_each_node - iterator to walk over all allocated nodes
 * @entry: drm_mm_node structure to assign to in each iteration step
 * @mm: drm_mm allocator to walk
 *
 * This iterator walks over all nodes in the range allocator. It is implemented
 * with list_for_each_entry, so it is not safe against removal of elements.
 */
#define drm_mm_for_each_node(entry, mm) list_for_each_entry(entry, \
                                                &(mm)->head_node.node_list, \
                                                node_list)

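/*
 * Example: a minimal sketch of dumping all allocated ranges with this
 * iterator; "mm" is assumed to be a driver-owned allocator whose locking is
 * handled by the caller.
 *
 *      struct drm_mm_node *entry;
 *
 *      drm_mm_for_each_node(entry, mm)
 *              pr_info("node [%llx + %llx]\n", entry->start, entry->size);
 */
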
#define __drm_mm_for_each_hole(entry, mm, hole_start, hole_end, backwards) \
        for (entry = list_entry((backwards) ? (mm)->hole_stack.prev : (mm)->hole_stack.next, struct drm_mm_node, hole_stack); \
             &entry->hole_stack != &(mm)->hole_stack ? \
             hole_start = drm_mm_hole_node_start(entry), \
             hole_end = drm_mm_hole_node_end(entry), \
             1 : 0; \
             entry = list_entry((backwards) ? entry->hole_stack.prev : entry->hole_stack.next, struct drm_mm_node, hole_stack))

/**
 * drm_mm_for_each_hole - iterator to walk over all holes
 * @entry: drm_mm_node used internally to track progress
 * @mm: drm_mm allocator to walk
 * @hole_start: u64 variable to assign the hole start to on each iteration
 * @hole_end: u64 variable to assign the hole end to on each iteration
 *
 * This iterator walks over all holes in the range allocator. It is implemented
 * with list_for_each, so it is not safe against removal of elements. @entry is
 * used internally and will not reflect a real drm_mm_node for the very first
 * hole. Hence users of this iterator may not access it.
 *
 * Implementation Note:
 * We need to inline list_for_each_entry in order to be able to set hole_start
 * and hole_end on each iteration while keeping the macro sane.
 *
 * The __drm_mm_for_each_hole version is similar, but with added support for
 * going backwards.
 */
#define drm_mm_for_each_hole(entry, mm, hole_start, hole_end) \
        __drm_mm_for_each_hole(entry, mm, hole_start, hole_end, 0)

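/*
 * Example: an illustrative sketch summing up all free space; hole_start and
 * hole_end must be u64 lvalues, and "entry" is only scratch space for the
 * iterator.
 *
 *      struct drm_mm_node *entry;
 *      u64 hole_start, hole_end, free = 0;
 *
 *      drm_mm_for_each_hole(entry, mm, hole_start, hole_end)
 *              free += hole_end - hole_start;
 */
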
/*
 * Basic range manager support (drm_mm.c)
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node);

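/*
 * Example: a hedged sketch of reserving a range that is already in use, e.g.
 * a firmware-initialized scanout buffer, so the allocator never hands it out
 * again. fb_base and fb_size are hypothetical driver-provided values; the
 * node must be zeroed before filling in start/size.
 *
 *      struct drm_mm_node *node = kzalloc(sizeof(*node), GFP_KERNEL);
 *
 *      if (!node)
 *              return -ENOMEM;
 *      node->start = fb_base;
 *      node->size = fb_size;
 *      ret = drm_mm_reserve_node(mm, node);    // -ENOSPC on conflict
 */
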
int drm_mm_insert_node_generic(struct drm_mm *mm,
                               struct drm_mm_node *node,
                               u64 size,
                               unsigned alignment,
                               unsigned long color,
                               enum drm_mm_search_flags sflags,
                               enum drm_mm_allocator_flags aflags);
/**
 * drm_mm_insert_node - search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @flags: flags to fine-tune the allocation
 *
 * This is a simplified version of drm_mm_insert_node_generic() with @color set
 * to 0.
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
static inline int drm_mm_insert_node(struct drm_mm *mm,
                                     struct drm_mm_node *node,
                                     u64 size,
                                     unsigned alignment,
                                     enum drm_mm_search_flags flags)
{
        return drm_mm_insert_node_generic(mm, node, size, alignment, 0, flags,
                                          DRM_MM_CREATE_DEFAULT);
}

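/*
 * Example: a minimal sketch allocating a page-aligned 4 KiB block; error
 * handling is reduced to freeing the preallocated node on failure.
 *
 *      struct drm_mm_node *node = kzalloc(sizeof(*node), GFP_KERNEL);
 *      int ret;
 *
 *      if (!node)
 *              return -ENOMEM;
 *      ret = drm_mm_insert_node(mm, node, SZ_4K, PAGE_SIZE,
 *                               DRM_MM_SEARCH_DEFAULT);
 *      if (ret)
 *              kfree(node);
 */
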
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
                                        struct drm_mm_node *node,
                                        u64 size,
                                        unsigned alignment,
                                        unsigned long color,
                                        u64 start,
                                        u64 end,
                                        enum drm_mm_search_flags sflags,
                                        enum drm_mm_allocator_flags aflags);
/**
 * drm_mm_insert_node_in_range - ranged search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @start: start of the allowed range for this node
 * @end: end of the allowed range for this node
 * @flags: flags to fine-tune the allocation
 *
 * This is a simplified version of drm_mm_insert_node_in_range_generic() with
 * @color set to 0.
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
static inline int drm_mm_insert_node_in_range(struct drm_mm *mm,
                                              struct drm_mm_node *node,
                                              u64 size,
                                              unsigned alignment,
                                              u64 start,
                                              u64 end,
                                              enum drm_mm_search_flags flags)
{
        return drm_mm_insert_node_in_range_generic(mm, node, size, alignment,
                                                   0, start, end, flags,
                                                   DRM_MM_CREATE_DEFAULT);
}

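/*
 * Example: a sketch confining the same 4 KiB allocation to the first 256 MiB
 * of the managed range, e.g. for a CPU-mappable aperture; the 256 MiB bound
 * is made up for illustration, and node/ret are reused from the previous
 * sketch.
 *
 *      ret = drm_mm_insert_node_in_range(mm, node, SZ_4K, PAGE_SIZE,
 *                                        0, SZ_256M,
 *                                        DRM_MM_SEARCH_DEFAULT);
 */
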
void drm_mm_remove_node(struct drm_mm_node *node);
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
void drm_mm_init(struct drm_mm *mm,
                 u64 start,
                 u64 size);
void drm_mm_takedown(struct drm_mm *mm);
bool drm_mm_clean(struct drm_mm *mm);

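/*
 * Example: a sketch of the usual allocator lifecycle. drm_mm_takedown()
 * expects all nodes to have been removed already; drm_mm_clean() can be used
 * to assert that. vram_size is a hypothetical driver-provided value.
 *
 *      struct drm_mm mm;
 *
 *      drm_mm_init(&mm, 0, vram_size);
 *      // ... insert and remove nodes ...
 *      WARN_ON(!drm_mm_clean(&mm));
 *      drm_mm_takedown(&mm);
 */
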
struct drm_mm_node *
drm_mm_interval_first(struct drm_mm *mm, u64 start, u64 last);

struct drm_mm_node *
drm_mm_interval_next(struct drm_mm_node *node, u64 start, u64 last);

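/*
 * Example: an illustrative walk over every node overlapping [start, last]
 * via the interval tree, e.g. to invalidate mappings in that range;
 * my_invalidate_node() is a hypothetical driver helper.
 *
 *      struct drm_mm_node *node;
 *
 *      for (node = drm_mm_interval_first(mm, start, last);
 *           node;
 *           node = drm_mm_interval_next(node, start, last))
 *              my_invalidate_node(node);
 */
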
void drm_mm_init_scan(struct drm_mm *mm,
                      u64 size,
                      unsigned alignment,
                      unsigned long color);
void drm_mm_init_scan_with_range(struct drm_mm *mm,
                                 u64 size,
                                 unsigned alignment,
                                 unsigned long color,
                                 u64 start,
                                 u64 end);
bool drm_mm_scan_add_block(struct drm_mm_node *node);
bool drm_mm_scan_remove_block(struct drm_mm_node *node);

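/*
 * Example: a condensed sketch of the eviction-scan pattern used by drivers
 * such as i915. Candidate nodes are speculatively added until a large enough
 * hole emerges; every added block must then be removed from the scan again,
 * in reverse order of addition, and only those for which
 * drm_mm_scan_remove_block() returns true need to be evicted. struct my_bo,
 * the lru list and my_evict() are hypothetical.
 *
 *      struct my_bo *bo, *next;
 *      LIST_HEAD(scan_list);
 *
 *      drm_mm_init_scan(mm, size, alignment, 0);
 *      list_for_each_entry(bo, &lru, lru_link) {
 *              list_add(&bo->scan_link, &scan_list);
 *              if (drm_mm_scan_add_block(&bo->vma_node))
 *                      break;  // found a hole
 *      }
 *      // scan_list is LIFO, so this walks in reverse order of addition
 *      list_for_each_entry_safe(bo, next, &scan_list, scan_link) {
 *              if (drm_mm_scan_remove_block(&bo->vma_node))
 *                      my_evict(bo);
 *      }
 */
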
void drm_mm_debug_table(struct drm_mm *mm, const char *prefix);
#ifdef CONFIG_DEBUG_FS
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm);
#endif

#endif