/**************************************************************************
 *
 * Copyright (c) 2006-2009 Vmware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
#ifndef _TTM_BO_DRIVER_H_
#define _TTM_BO_DRIVER_H_

#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_memory.h>
#include <ttm/ttm_module.h>
#include <ttm/ttm_placement.h>
#include <drm/drm_mm.h>
#include <drm/drm_global.h>
#include <linux/workqueue.h>
#include <linux/fs.h>
#include <linux/spinlock.h>
#include <linux/reservation.h>

struct ttm_backend_func {
	/**
	 * struct ttm_backend_func member bind
	 *
	 * @ttm: Pointer to a struct ttm_tt.
	 * @bo_mem: Pointer to a struct ttm_mem_reg describing the
	 * memory type and location for binding.
	 *
	 * Bind the backend pages into the aperture in the location
	 * indicated by @bo_mem. This function should be able to handle
	 * differences between aperture and system page sizes.
	 */
	int (*bind) (struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);

	/**
	 * struct ttm_backend_func member unbind
	 *
	 * @ttm: Pointer to a struct ttm_tt.
	 *
	 * Unbind previously bound backend pages. This function should be
	 * able to handle differences between aperture and system page sizes.
	 */
	int (*unbind) (struct ttm_tt *ttm);

	/**
	 * struct ttm_backend_func member destroy
	 *
	 * @ttm: Pointer to a struct ttm_tt.
	 *
	 * Destroy the backend. This is called back from ttm_tt_destroy, so
	 * don't call ttm_tt_destroy from the callback or you will cause an
	 * infinite loop.
	 */
	void (*destroy) (struct ttm_tt *ttm);
};
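
/*
 * Example (illustrative sketch, not part of the TTM API): a minimal
 * backend for a GART-like aperture. All my_* symbols are hypothetical
 * driver code; only the three callbacks and their signatures come from
 * struct ttm_backend_func above.
 */
#if 0
static int my_backend_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
	/* Program ttm->num_pages GART entries starting at bo_mem->start. */
	return my_gart_bind_pages(ttm->pages, ttm->num_pages, bo_mem->start);
}

static int my_backend_unbind(struct ttm_tt *ttm)
{
	my_gart_unbind_pages(ttm->num_pages);
	return 0;
}

static void my_backend_destroy(struct ttm_tt *ttm)
{
	/* Free the ttm_tt itself; do NOT call ttm_tt_destroy() here. */
	ttm_tt_fini(ttm);
	kfree(ttm);
}

static struct ttm_backend_func my_backend_func = {
	.bind = &my_backend_bind,
	.unbind = &my_backend_unbind,
	.destroy = &my_backend_destroy,
};
#endif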

#define TTM_PAGE_FLAG_WRITE           (1 << 3)
#define TTM_PAGE_FLAG_SWAPPED         (1 << 4)
#define TTM_PAGE_FLAG_PERSISTENT_SWAP (1 << 5)
#define TTM_PAGE_FLAG_ZERO_ALLOC      (1 << 6)
#define TTM_PAGE_FLAG_DMA32           (1 << 7)
#define TTM_PAGE_FLAG_SG              (1 << 8)

enum ttm_caching_state {
	tt_uncached,
	tt_wc,
	tt_cached
};

/**
 * struct ttm_tt
 *
 * @bdev: Pointer to a struct ttm_bo_device.
 * @func: Pointer to a struct ttm_backend_func that describes
 * the backend methods.
 * @dummy_read_page: Page to map where the ttm_tt page array contains a NULL
 * pointer.
 * @pages: Array of pages backing the data.
 * @page_flags: TTM_PAGE_FLAG_XX flags.
 * @num_pages: Number of pages in the page array.
 * @sg: Scatter-gather table for pages imported via dma-buf.
 * @glob: Pointer to the struct ttm_bo_global.
 * @swap_storage: Pointer to shmem struct file for swap storage.
 * @caching_state: The current caching state of the pages.
 * @state: The current binding state of the pages.
 *
 * This is a structure holding the pages, caching- and aperture binding
 * status for a buffer object that isn't backed by fixed (VRAM / AGP)
 * memory.
 */

struct ttm_tt {
	struct ttm_bo_device *bdev;
	struct ttm_backend_func *func;
	struct page *dummy_read_page;
	struct page **pages;
	uint32_t page_flags;
	unsigned long num_pages;
	struct sg_table *sg; /* for SG objects via dma-buf */
	struct ttm_bo_global *glob;
	struct file *swap_storage;
	enum ttm_caching_state caching_state;
	enum {
		tt_bound,
		tt_unbound,
		tt_unpopulated,
	} state;
};

/**
 * struct ttm_dma_tt
 *
 * @ttm: Base ttm_tt struct.
 * @dma_address: The DMA (bus) addresses of the pages
 * @pages_list: used by some page allocation backend
 *
 * This is a structure holding the pages, caching- and aperture binding
 * status for a buffer object that isn't backed by fixed (VRAM / AGP)
 * memory.
 */
struct ttm_dma_tt {
	struct ttm_tt ttm;
	dma_addr_t *dma_address;
	struct list_head pages_list;
};

#define TTM_MEMTYPE_FLAG_FIXED         (1 << 0)	/* Fixed (on-card) PCI memory */
#define TTM_MEMTYPE_FLAG_MAPPABLE      (1 << 1)	/* Memory mappable */
#define TTM_MEMTYPE_FLAG_CMA           (1 << 3)	/* Can't map aperture */

struct ttm_mem_type_manager;

struct ttm_mem_type_manager_func {
	/**
	 * struct ttm_mem_type_manager member init
	 *
	 * @man: Pointer to a memory type manager.
	 * @p_size: Implementation dependent, but typically the size of the
	 * range to be managed in pages.
	 *
	 * Called to initialize a private range manager. The function is
	 * expected to initialize the man::priv member.
	 * Returns 0 on success, negative error code on failure.
	 */
	int  (*init)(struct ttm_mem_type_manager *man, unsigned long p_size);

	/**
	 * struct ttm_mem_type_manager member takedown
	 *
	 * @man: Pointer to a memory type manager.
	 *
	 * Called to undo the setup done in init. All allocated resources
	 * should be freed.
	 */
	int  (*takedown)(struct ttm_mem_type_manager *man);

	/**
	 * struct ttm_mem_type_manager member get_node
	 *
	 * @man: Pointer to a memory type manager.
	 * @bo: Pointer to the buffer object we're allocating space for.
	 * @placement: Placement details.
	 * @mem: Pointer to a struct ttm_mem_reg to be filled in.
	 *
	 * This function should allocate space in the memory type managed
	 * by @man. Placement details, if applicable, are given by
	 * @placement. If successful, @mem::mm_node should be set to a
	 * non-null value, @mem::start should be set to a value identifying
	 * the beginning of the range allocated, and the function should
	 * return zero.
	 * If the memory region cannot accommodate the buffer object,
	 * @mem::mm_node should be set to NULL, and the function should
	 * return 0.
	 * If a system error occurred, preventing the request from being
	 * fulfilled, the function should return a negative error code.
	 *
	 * Note that @mem::mm_node will only be dereferenced by
	 * struct ttm_mem_type_manager functions and optionally by the driver,
	 * which has knowledge of the underlying type.
	 *
	 * This function may not be called from within atomic context, so
	 * an implementation can and must use either a mutex or a spinlock to
	 * protect any data structures managing the space.
	 */
	int  (*get_node)(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 struct ttm_placement *placement,
			 struct ttm_mem_reg *mem);

	/**
	 * struct ttm_mem_type_manager member put_node
	 *
	 * @man: Pointer to a memory type manager.
	 * @mem: Pointer to a struct ttm_mem_reg to be filled in.
	 *
	 * This function frees memory type resources previously allocated
	 * and that are identified by @mem::mm_node and @mem::start. May not
	 * be called from within atomic context.
	 */
	void (*put_node)(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem);

	/**
	 * struct ttm_mem_type_manager member debug
	 *
	 * @man: Pointer to a memory type manager.
	 * @prefix: Prefix to be used in printout to identify the caller.
	 *
	 * This function is called to print out the state of the memory
	 * type manager to aid debugging of out-of-memory conditions.
	 * It may not be called from within atomic context.
	 */
	void (*debug)(struct ttm_mem_type_manager *man, const char *prefix);
};
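
/*
 * Example (illustrative sketch, not part of the TTM API): a driver's
 * init_mem_type() callback typically wires a memory type to the stock
 * range manager exported below as ttm_bo_manager_func. The TTM_PL_TT
 * case and the caching choices shown here are assumptions for
 * illustration.
 */
#if 0
static int my_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			    struct ttm_mem_type_manager *man)
{
	switch (type) {
	case TTM_PL_TT:
		man->func = &ttm_bo_manager_func; /* drm_mm-backed manager */
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
#endif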

/**
 * struct ttm_mem_type_manager
 *
 * @has_type: The memory type has been initialized.
 * @use_type: The memory type is enabled.
 * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory
 * managed by this memory type.
 * @gpu_offset: If used, the GPU offset of the first managed page of
 * fixed memory or the first managed location in an aperture.
 * @size: Size of the managed region.
 * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
 * as defined in ttm_placement_common.h
 * @default_caching: The default caching policy used for a buffer object
 * placed in this memory type if the user doesn't provide one.
 * @func: structure pointer implementing the range manager. See above
 * @priv: Driver private closure for @func.
 * @io_reserve_mutex: Mutex optionally protecting shared io_reserve structures
 * @use_io_reserve_lru: Use an lru list to try to unreserve io_mem_regions
 * reserved by the TTM vm system.
 * @io_reserve_lru: Optional lru list for unreserving io mem regions.
 * @io_reserve_fastpath: Only use bdev::driver::io_mem_reserve to obtain
 * static information. bdev::driver::io_mem_free is never used.
 * @lru: The lru list for this memory type.
 *
 * This structure is used to identify and manage memory types for a device.
 * It's set up by the ttm_bo_driver::init_mem_type method.
 */

struct ttm_mem_type_manager {
	struct ttm_bo_device *bdev;

	/*
	 * No protection. Constant from start.
	 */

	bool has_type;
	bool use_type;
	uint32_t flags;
	unsigned long gpu_offset;
	uint64_t size;
	uint32_t available_caching;
	uint32_t default_caching;
	const struct ttm_mem_type_manager_func *func;
	void *priv;
	struct mutex io_reserve_mutex;
	bool use_io_reserve_lru;
	bool io_reserve_fastpath;

	/*
	 * Protected by @io_reserve_mutex:
	 */

	struct list_head io_reserve_lru;

	/*
	 * Protected by the global->lru_lock.
	 */

	struct list_head lru;
};

/**
 * struct ttm_bo_driver
 *
 * @ttm_tt_create: Callback to create a struct ttm_tt backing an object.
 * @invalidate_caches: Callback to invalidate read caches when a buffer object
 * has been evicted.
 * @init_mem_type: Callback to initialize a struct ttm_mem_type_manager
 * structure.
 * @evict_flags: Callback to obtain placement flags when a buffer is evicted.
 * @move: Callback for a driver to hook in accelerated functions to
 * move a buffer.
 * If set to NULL, a potentially slow memcpy() move is used.
 * @sync_obj_signaled: See ttm_fence_api.h
 * @sync_obj_wait: See ttm_fence_api.h
 * @sync_obj_flush: See ttm_fence_api.h
 * @sync_obj_unref: See ttm_fence_api.h
 * @sync_obj_ref: See ttm_fence_api.h
 */

struct ttm_bo_driver {
	/**
	 * ttm_tt_create
	 *
	 * @bdev: pointer to a struct ttm_bo_device.
	 * @size: Size of the data that needs backing.
	 * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
	 * @dummy_read_page: See struct ttm_bo_device.
	 *
	 * Create a struct ttm_tt to back data with system memory pages.
	 * No pages are actually allocated.
	 * Returns:
	 * NULL: Out of memory.
	 */
	struct ttm_tt *(*ttm_tt_create)(struct ttm_bo_device *bdev,
					unsigned long size,
					uint32_t page_flags,
					struct page *dummy_read_page);

	/**
	 * ttm_tt_populate
	 *
	 * @ttm: The struct ttm_tt to contain the backing pages.
	 *
	 * Allocate all backing pages.
	 * Returns:
	 * -ENOMEM: Out of memory.
	 */
	int (*ttm_tt_populate)(struct ttm_tt *ttm);

	/**
	 * ttm_tt_unpopulate
	 *
	 * @ttm: The struct ttm_tt to contain the backing pages.
	 *
	 * Free all backing pages.
	 */
	void (*ttm_tt_unpopulate)(struct ttm_tt *ttm);

	/**
	 * struct ttm_bo_driver member invalidate_caches
	 *
	 * @bdev: the buffer object device.
	 * @flags: new placement of the rebound buffer object.
	 *
	 * A previously evicted buffer has been rebound in a
	 * potentially new location. Tell the driver that it might
	 * consider invalidating read (texture) caches on the next command
	 * submission as a consequence.
	 */

	int (*invalidate_caches) (struct ttm_bo_device *bdev, uint32_t flags);
	int (*init_mem_type) (struct ttm_bo_device *bdev, uint32_t type,
			      struct ttm_mem_type_manager *man);
	/**
	 * struct ttm_bo_driver member evict_flags:
	 *
	 * @bo: the buffer object to be evicted
	 *
	 * Return the bo flags for a buffer which is not mapped to the hardware.
	 * These will be placed in proposed_flags so that when the move is
	 * finished, they'll end up in bo->mem.flags
	 */

	void (*evict_flags) (struct ttm_buffer_object *bo,
			     struct ttm_placement *placement);
	/**
	 * struct ttm_bo_driver member move:
	 *
	 * @bo: the buffer to move
	 * @evict: whether this motion is evicting the buffer from
	 * the graphics address space
	 * @interruptible: Use interruptible sleeps if possible when waiting.
	 * @no_wait_gpu: whether this should give up and return -EBUSY
	 * rather than wait for the GPU to become idle
	 * @new_mem: the new memory region receiving the buffer
	 *
	 * Move a buffer between two memory regions.
	 */
	int (*move) (struct ttm_buffer_object *bo,
		     bool evict, bool interruptible,
		     bool no_wait_gpu,
		     struct ttm_mem_reg *new_mem);

	/**
	 * struct ttm_bo_driver_member verify_access
	 *
	 * @bo: Pointer to a buffer object.
	 * @filp: Pointer to a struct file trying to access the object.
	 *
	 * Called from the map / write / read methods to verify that the
	 * caller is permitted to access the buffer object.
	 * This member may be set to NULL, which will refuse this kind of
	 * access for all buffer objects.
	 * This function should return 0 if access is granted, -EPERM otherwise.
	 */
	int (*verify_access) (struct ttm_buffer_object *bo,
			      struct file *filp);

	/**
	 * In case a driver writer dislikes the TTM fence objects,
	 * the driver writer can replace those with sync objects of
	 * his / her own. If it turns out that no driver writer is
	 * using these, I suggest we remove these hooks and plug in
	 * fences directly. The bo driver needs the following functionality:
	 * See the corresponding functions in the fence object API
	 * documentation.
	 */

	bool (*sync_obj_signaled) (void *sync_obj);
	int (*sync_obj_wait) (void *sync_obj,
			      bool lazy, bool interruptible);
	int (*sync_obj_flush) (void *sync_obj);
	void (*sync_obj_unref) (void **sync_obj);
	void *(*sync_obj_ref) (void *sync_obj);

	/* hook to notify driver about a driver move so it
	 * can do tiling things */
	void (*move_notify)(struct ttm_buffer_object *bo,
			    struct ttm_mem_reg *new_mem);
	/* notify the driver we are taking a fault on this BO
	 * and have reserved it */
	int (*fault_reserve_notify)(struct ttm_buffer_object *bo);

	/**
	 * notify the driver that we're about to swap out this bo
	 */
	void (*swap_notify) (struct ttm_buffer_object *bo);

	/**
	 * Driver callback on when mapping io memory (for bo_move_memcpy
	 * for instance). TTM will take care to call io_mem_free whenever
	 * the mapping is no longer in use. io_mem_reserve & io_mem_free
	 * are balanced.
	 */
	int (*io_mem_reserve)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
	void (*io_mem_free)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
};
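
/*
 * Example (illustrative sketch, not part of the TTM API): a driver
 * fills in a static ops table and hands it to ttm_bo_device_init().
 * The my_driver_* callbacks are hypothetical; ttm_pool_populate() and
 * ttm_pool_unpopulate() are the stock page-pool helpers from
 * ttm/ttm_page_alloc.h.
 */
#if 0
static struct ttm_bo_driver my_driver_bo_driver = {
	.ttm_tt_create = &my_driver_ttm_tt_create,
	.ttm_tt_populate = &ttm_pool_populate,
	.ttm_tt_unpopulate = &ttm_pool_unpopulate,
	.invalidate_caches = &my_driver_invalidate_caches,
	.init_mem_type = &my_init_mem_type,
	.evict_flags = &my_driver_evict_flags,
	.move = NULL,	/* fall back to ttm_bo_move_memcpy() */
	.verify_access = &my_driver_verify_access,
};
#endif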

/**
 * struct ttm_bo_global_ref - Argument to initialize a struct ttm_bo_global.
 */

struct ttm_bo_global_ref {
	struct drm_global_reference ref;
	struct ttm_mem_global *mem_glob;
};

/**
 * struct ttm_bo_global - Buffer object driver global data.
 *
 * @mem_glob: Pointer to a struct ttm_mem_global object for accounting.
 * @dummy_read_page: Pointer to a dummy page used for mapping requests
 * of unpopulated pages.
 * @shrink: A shrink callback object used for buffer object swap.
 * @device_list_mutex: Mutex protecting the device list.
 * This mutex is held while traversing the device list for pm options.
 * @lru_lock: Spinlock protecting the bo subsystem lru lists.
 * @device_list: List of buffer object devices.
 * @swap_lru: Lru list of buffer objects used for swapping.
 */

struct ttm_bo_global {

	/**
	 * Constant after init.
	 */

	struct kobject kobj;
	struct ttm_mem_global *mem_glob;
	struct page *dummy_read_page;
	struct ttm_mem_shrink shrink;
	struct mutex device_list_mutex;
	spinlock_t lru_lock;

	/**
	 * Protected by device_list_mutex.
	 */
	struct list_head device_list;

	/**
	 * Protected by the lru_lock.
	 */
	struct list_head swap_lru;

	/**
	 * Internal protection.
	 */
	atomic_t bo_count;
};

#define TTM_NUM_MEM_TYPES 8

#define TTM_BO_PRIV_FLAG_MOVING  0	/* Buffer object is moving and needs
					   idling before CPU mapping */
#define TTM_BO_PRIV_FLAG_MAX 1
/**
 * struct ttm_bo_device - Buffer object driver device-specific data.
 *
 * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
 * @man: An array of mem_type_managers.
 * @fence_lock: Protects the synchronizing members on *all* bos belonging
 * to this device.
 * @addr_space_mm: Range manager for the device address space.
 * @lru_lock: Spinlock that protects the buffer+device lru lists and
 * ddestroy lists.
 * @val_seq: Current validation sequence.
 * @dev_mapping: A pointer to the struct address_space representing the
 * device address space.
 * @wq: Work queue structure for the delayed delete workqueue.
 *
 */

struct ttm_bo_device {

	/*
	 * Constant after bo device init / atomic.
	 */
	struct list_head device_list;
	struct ttm_bo_global *glob;
	struct ttm_bo_driver *driver;
	rwlock_t vm_lock;
	struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
	spinlock_t fence_lock;
	/*
	 * Protected by the vm lock.
	 */
	struct rb_root addr_space_rb;
	struct drm_mm addr_space_mm;

	/*
	 * Protected by the global:lru lock.
	 */
	struct list_head ddestroy;
	uint32_t val_seq;

	/*
	 * Protected by load / firstopen / lastclose /unload sync.
	 */

	struct address_space *dev_mapping;

	/*
	 * Internal protection.
	 */

	struct delayed_work wq;

	bool need_dma32;
};

/**
 * ttm_flag_masked
 *
 * @old: Pointer to the result and original value.
 * @new: New value of bits.
 * @mask: Mask of bits to change.
 *
 * Convenience function to change a number of bits identified by a mask.
 */

static inline uint32_t
ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask)
{
	*old ^= (*old ^ new) & mask;
	return *old;
}
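
/*
 * Example (illustrative): replace just the caching bits of a placement
 * word with TTM_PL_FLAG_WC, leaving all other flags untouched:
 *
 *	uint32_t placement = TTM_PL_FLAG_TT | TTM_PL_FLAG_CACHED;
 *
 *	ttm_flag_masked(&placement, TTM_PL_FLAG_WC, TTM_PL_MASK_CACHING);
 *
 * Afterwards placement has TTM_PL_FLAG_WC set, TTM_PL_FLAG_CACHED
 * cleared and TTM_PL_FLAG_TT still set.
 */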

/**
 * ttm_tt_init
 *
 * @ttm: The struct ttm_tt.
 * @bdev: pointer to a struct ttm_bo_device.
 * @size: Size of the data that needs backing.
 * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
 * @dummy_read_page: See struct ttm_bo_device.
 *
 * Create a struct ttm_tt to back data with system memory pages.
 * No pages are actually allocated.
 * Returns:
 * -ENOMEM: Out of memory.
 */
extern int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
			unsigned long size, uint32_t page_flags,
			struct page *dummy_read_page);
extern int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
			   unsigned long size, uint32_t page_flags,
			   struct page *dummy_read_page);

/**
 * ttm_tt_fini
 *
 * @ttm: the ttm_tt structure.
 *
 * Free memory of ttm_tt structure
 */
extern void ttm_tt_fini(struct ttm_tt *ttm);
extern void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma);
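
/*
 * Example (illustrative sketch, not part of the TTM API): a driver's
 * ttm_tt_create() callback usually embeds the struct ttm_tt in a
 * driver-private structure and initializes it with ttm_tt_init().
 * struct my_ttm_tt and my_backend_func are hypothetical.
 */
#if 0
struct my_ttm_tt {
	struct ttm_tt ttm;
	/* driver-private bind state goes here */
};

static struct ttm_tt *my_driver_ttm_tt_create(struct ttm_bo_device *bdev,
					      unsigned long size,
					      uint32_t page_flags,
					      struct page *dummy_read_page)
{
	struct my_ttm_tt *tt = kzalloc(sizeof(*tt), GFP_KERNEL);

	if (!tt)
		return NULL;

	tt->ttm.func = &my_backend_func;	/* bind/unbind/destroy ops */
	if (ttm_tt_init(&tt->ttm, bdev, size, page_flags, dummy_read_page)) {
		kfree(tt);
		return NULL;
	}
	return &tt->ttm;
}
#endif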

/**
 * ttm_tt_bind:
 *
 * @ttm: The struct ttm_tt containing backing pages.
 * @bo_mem: The struct ttm_mem_reg identifying the binding location.
 *
 * Bind the pages of @ttm to an aperture location identified by @bo_mem
 */
extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);

/**
 * ttm_tt_destroy:
 *
 * @ttm: The struct ttm_tt.
 *
 * Unbind, unpopulate and destroy common struct ttm_tt.
 */
extern void ttm_tt_destroy(struct ttm_tt *ttm);

/**
 * ttm_tt_unbind:
 *
 * @ttm: The struct ttm_tt.
 *
 * Unbind a struct ttm_tt.
 */
extern void ttm_tt_unbind(struct ttm_tt *ttm);

/**
 * ttm_tt_swapin:
 *
 * @ttm: The struct ttm_tt.
 *
 * Swap in a previously swapped-out ttm_tt.
 */
extern int ttm_tt_swapin(struct ttm_tt *ttm);

/**
 * ttm_tt_cache_flush:
 *
 * @pages: An array of pointers to struct page to flush.
 * @num_pages: Number of pages to flush.
 *
 * Flush the data of the indicated pages from the cpu caches.
 * This is used when changing caching attributes of the pages from
 * cache-coherent.
 */
extern void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages);

/**
 * ttm_tt_set_placement_caching:
 *
 * @ttm: A struct ttm_tt the backing pages of which will change caching policy.
 * @placement: Flag indicating the desired caching policy.
 *
 * This function will change caching policy of any default kernel mappings of
 * the pages backing @ttm. If changing from cached to uncached or
 * write-combined, all CPU caches will first be flushed to make sure the
 * data of the pages hit RAM. This function may be very costly as it involves
 * global TLB and cache flushes and potential page splitting / combining.
 */
extern int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement);
extern int ttm_tt_swapout(struct ttm_tt *ttm,
			  struct file *persistent_swap_storage);

/*
 * ttm_bo.c
 */

/**
 * ttm_mem_reg_is_pci
 *
 * @bdev: Pointer to a struct ttm_bo_device.
 * @mem: A valid struct ttm_mem_reg.
 *
 * Returns true if the memory described by @mem is PCI memory,
 * false otherwise.
 */
extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
			       struct ttm_mem_reg *mem);

/**
 * ttm_bo_mem_space
 *
 * @bo: Pointer to a struct ttm_buffer_object, the data of which
 * we want to allocate space for.
 * @placement: Proposed new placement for the buffer object.
 * @mem: A struct ttm_mem_reg.
 * @interruptible: Sleep interruptible when waiting.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 *
 * Allocate memory space for the buffer object pointed to by @bo, using
 * the placement flags in @mem, potentially evicting other idle buffer objects.
 * This function may sleep while waiting for space to become available.
 * Returns:
 * -EBUSY: No space available (only if @no_wait_gpu == true).
 * -ENOMEM: Could not allocate memory for the buffer object, either due to
 * fragmentation or concurrent allocators.
 * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
 */
extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
			    struct ttm_placement *placement,
			    struct ttm_mem_reg *mem,
			    bool interruptible,
			    bool no_wait_gpu);

extern void ttm_bo_mem_put(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *mem);
extern void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem);

extern void ttm_bo_global_release(struct drm_global_reference *ref);
extern int ttm_bo_global_init(struct drm_global_reference *ref);

extern int ttm_bo_device_release(struct ttm_bo_device *bdev);

/**
 * ttm_bo_device_init
 *
 * @bdev: A pointer to a struct ttm_bo_device to initialize.
 * @glob: A pointer to an initialized struct ttm_bo_global.
 * @driver: A pointer to a struct ttm_bo_driver set up by the caller.
 * @file_page_offset: Offset into the device address space that is available
 * for buffer data. This ensures compatibility with other users of the
 * address space.
 *
 * Initializes a struct ttm_bo_device:
 * Returns:
 * !0: Failure.
 */
extern int ttm_bo_device_init(struct ttm_bo_device *bdev,
			      struct ttm_bo_global *glob,
			      struct ttm_bo_driver *driver,
			      uint64_t file_page_offset, bool need_dma32);
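
/*
 * Example (illustrative sketch, not part of the TTM API): typical device
 * bring-up order as seen in drivers; error handling is elided, and the
 * my_dev fields, MY_FILE_PAGE_OFFSET and the 256 MiB TT size are
 * assumptions. ttm_bo_init_mm() is declared in ttm/ttm_bo_api.h.
 */
#if 0
	ret = ttm_bo_device_init(&my_dev->bdev,
				 my_dev->bo_global_ref.ref.object,
				 &my_driver_bo_driver,
				 MY_FILE_PAGE_OFFSET, my_dev->need_dma32);
	if (ret)
		return ret;

	/* Give the TT memory type 256 MiB worth of pages to manage. */
	ret = ttm_bo_init_mm(&my_dev->bdev, TTM_PL_TT,
			     (256 * 1024 * 1024) >> PAGE_SHIFT);
#endif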

/**
 * ttm_bo_unmap_virtual
 *
 * @bo: tear down the virtual mappings for this BO
 */
extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);

/**
 * ttm_bo_unmap_virtual_locked
 *
 * @bo: tear down the virtual mappings for this BO
 *
 * The caller must take ttm_mem_io_lock before calling this function.
 */
extern void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo);

extern int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo);
extern void ttm_mem_io_free_vm(struct ttm_buffer_object *bo);
extern int ttm_mem_io_lock(struct ttm_mem_type_manager *man,
			   bool interruptible);
extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);

extern void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo);
extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);

/**
 * ttm_bo_reserve_nolru:
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
 * @use_ticket: If @bo is already reserved, only sleep waiting for
 * it to become unreserved if @ticket->stamp is older.
 *
 * Will not remove reserved buffers from the lru lists.
 * Otherwise identical to ttm_bo_reserve.
 *
 * Returns:
 * -EDEADLK: The reservation may cause a deadlock.
 * Release all buffer reservations, wait for @bo to become unreserved and
 * try again. (only if @use_ticket == true).
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 * -EBUSY: The function needed to sleep, but @no_wait was true
 * -EALREADY: Bo already reserved using @ticket. This error code will only
 * be returned if @use_ticket is set to true.
 */
static inline int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
				       bool interruptible,
				       bool no_wait, bool use_ticket,
				       struct ww_acquire_ctx *ticket)
{
	int ret = 0;

	if (no_wait) {
		bool success;
		if (WARN_ON(ticket))
			return -EBUSY;

		success = ww_mutex_trylock(&bo->resv->lock);
		return success ? 0 : -EBUSY;
	}

	if (interruptible)
		ret = ww_mutex_lock_interruptible(&bo->resv->lock, ticket);
	else
		ret = ww_mutex_lock(&bo->resv->lock, ticket);
	if (ret == -EINTR)
		return -ERESTARTSYS;
	return ret;
}

/**
 * ttm_bo_reserve:
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
 * @use_ticket: If @bo is already reserved, only sleep waiting for
 * it to become unreserved if @ticket->stamp is older.
 *
 * Locks a buffer object for validation. (Or prevents other processes from
 * locking it for validation) and removes it from lru lists, while taking
 * a number of measures to prevent deadlocks.
 *
 * Deadlocks may occur when two processes try to reserve multiple buffers in
 * different order, either by will or as a result of a buffer being evicted
 * to make room for a buffer already reserved. (Buffers are reserved before
 * they are evicted). The following algorithm prevents such deadlocks from
 * occurring:
 * Processes attempting to reserve multiple buffers other than for eviction,
 * (typically execbuf), should first obtain a unique 32-bit
 * validation sequence number,
 * and call this function with @use_ticket == 1 and @ticket->stamp == the unique
 * sequence number. If upon call of this function, the buffer object is already
 * reserved, the validation sequence is checked against the validation
 * sequence of the process currently reserving the buffer,
 * and if the current validation sequence is greater than that of the process
 * holding the reservation, the function returns -EDEADLK. Otherwise it sleeps
 * waiting for the buffer to become unreserved, after which it retries
 * reserving.
 * The caller should, when receiving an -EDEADLK error,
 * release all its buffer reservations, wait for @bo to become unreserved, and
 * then rerun the validation with the same validation sequence. This procedure
 * will always guarantee that the process with the lowest validation sequence
 * will eventually succeed, preventing both deadlocks and starvation.
 *
 * Returns:
 * -EDEADLK: The reservation may cause a deadlock.
 * Release all buffer reservations, wait for @bo to become unreserved and
 * try again. (only if @use_ticket == true).
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 * -EBUSY: The function needed to sleep, but @no_wait was true
 * -EALREADY: Bo already reserved using @ticket. This error code will only
 * be returned if @use_ticket is set to true.
 */
static inline int ttm_bo_reserve(struct ttm_buffer_object *bo,
				 bool interruptible,
				 bool no_wait, bool use_ticket,
				 struct ww_acquire_ctx *ticket)
{
	int ret;

	WARN_ON(!atomic_read(&bo->kref.refcount));

	ret = ttm_bo_reserve_nolru(bo, interruptible, no_wait, use_ticket,
				   ticket);
	if (likely(ret == 0))
		ttm_bo_del_sub_from_lru(bo);

	return ret;
}
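
/*
 * Example (illustrative): single-buffer reserve/unreserve without a
 * ww_acquire_ctx, e.g. around a short CPU access:
 *
 *	int ret = ttm_bo_reserve(bo, true, false, false, NULL);
 *	if (ret)
 *		return ret;
 *	(operate on the reserved bo)
 *	ttm_bo_unreserve(bo);
 */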

/**
 * ttm_bo_reserve_slowpath:
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @ticket: ww_acquire_ctx used for reserving.
 *
 * This is called after ttm_bo_reserve returns -EDEADLK and we backed off
 * from all our other reservations. Because there are no other reservations
 * held by us, this function cannot deadlock any more.
 */
static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
					  bool interruptible,
					  struct ww_acquire_ctx *ticket)
{
	int ret = 0;

	WARN_ON(!atomic_read(&bo->kref.refcount));

	if (interruptible)
		ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
						       ticket);
	else
		ww_mutex_lock_slow(&bo->resv->lock, ticket);

	if (likely(ret == 0))
		ttm_bo_del_sub_from_lru(bo);
	else if (ret == -EINTR)
		ret = -ERESTARTSYS;

	return ret;
}
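
/*
 * Example (illustrative sketch, not part of the TTM API): the -EDEADLK
 * backoff pattern for reserving two buffers under one ww_acquire_ctx.
 * Error paths other than -EDEADLK are elided; a full implementation
 * loops until no reservation returns -EDEADLK. reservation_ww_class
 * comes from linux/reservation.h.
 */
#if 0
	struct ww_acquire_ctx ticket;
	int ret;

	ww_acquire_init(&ticket, &reservation_ww_class);

	ret = ttm_bo_reserve(bo_a, true, false, true, &ticket);
	/* (error handling elided) */

	ret = ttm_bo_reserve(bo_b, true, false, true, &ticket);
	if (ret == -EDEADLK) {
		/*
		 * An older transaction holds bo_b. Back off, wait for bo_b
		 * without deadlock risk, then take bo_a again (we hold only
		 * bo_b at that point).
		 */
		ttm_bo_unreserve_ticket(bo_a, &ticket);
		ret = ttm_bo_reserve_slowpath(bo_b, true, &ticket);
		if (!ret)
			ret = ttm_bo_reserve(bo_a, true, false, true, &ticket);
	}
	ww_acquire_done(&ticket);

	/* ... use both buffers ... */

	ttm_bo_unreserve_ticket(bo_b, &ticket);
	ttm_bo_unreserve_ticket(bo_a, &ticket);
	ww_acquire_fini(&ticket);
#endif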

/**
 * ttm_bo_unreserve_ticket
 * @bo: A pointer to a struct ttm_buffer_object.
 * @ticket: ww_acquire_ctx used for reserving
 *
 * Unreserve a previous reservation of @bo made with @ticket.
 */
static inline void ttm_bo_unreserve_ticket(struct ttm_buffer_object *bo,
					   struct ww_acquire_ctx *ticket)
{
	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
		spin_lock(&bo->glob->lru_lock);
		ttm_bo_add_to_lru(bo);
		spin_unlock(&bo->glob->lru_lock);
	}
	ww_mutex_unlock(&bo->resv->lock);
}

/**
 * ttm_bo_unreserve
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Unreserve a previous reservation of @bo.
 */
static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
	ttm_bo_unreserve_ticket(bo, NULL);
}

/*
 * ttm_bo_util.c
 */

int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
		       struct ttm_mem_reg *mem);
void ttm_mem_io_free(struct ttm_bo_device *bdev,
		     struct ttm_mem_reg *mem);
/**
 * ttm_bo_move_ttm
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @evict: 1: This is an eviction. Don't try to pipeline.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Optimized move function for a buffer object with both old and
 * new placement backed by a TTM. The function will, if successful,
 * free any old aperture space, and set (@new_mem)->mm_node to NULL,
 * and update the (@bo)->mem placement flags. If unsuccessful, the old
 * data remains untouched, and it's up to the caller to free the
 * memory space indicated by @new_mem.
 * Returns:
 * !0: Failure.
 */

extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
			   bool evict, bool no_wait_gpu,
			   struct ttm_mem_reg *new_mem);

/**
 * ttm_bo_move_memcpy
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @evict: 1: This is an eviction. Don't try to pipeline.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Fallback move function for a mappable buffer object in mappable memory.
 * The function will, if successful,
 * free any old aperture space, and set (@new_mem)->mm_node to NULL,
 * and update the (@bo)->mem placement flags. If unsuccessful, the old
 * data remains untouched, and it's up to the caller to free the
 * memory space indicated by @new_mem.
 * Returns:
 * !0: Failure.
 */

extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
			      bool evict, bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem);

/**
 * ttm_bo_free_old_node
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Utility function to free an old placement after a successful move.
 */
extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);

/**
 * ttm_bo_move_accel_cleanup.
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @sync_obj: A sync object that signals when moving is complete.
 * @evict: This is an evict move. Don't return until the buffer is idle.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Accelerated move function to be called when an accelerated move
 * has been scheduled. The function will create a new temporary buffer object
 * representing the old placement, and put the sync object on both buffer
 * objects. After that the newly created buffer object is unref'd to be
 * destroyed when the move is complete. This will help pipeline
 * buffer moves.
 */

extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
				     void *sync_obj,
				     bool evict, bool no_wait_gpu,
				     struct ttm_mem_reg *new_mem);
/**
 * ttm_io_prot
 *
 * @caching_flags: The caching flags of the map, TTM_PL_FLAG_XX.
 * @tmp: Page protection flag for a normal, cached mapping.
 *
 * Utility function that returns the pgprot_t that should be used for
 * setting up a PTE with the caching model indicated by @caching_flags.
 */
extern pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp);

extern const struct ttm_mem_type_manager_func ttm_bo_manager_func;

#if (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
#define TTM_HAS_AGP
#include <linux/agp_backend.h>

/**
 * ttm_agp_tt_create
 *
 * @bdev: Pointer to a struct ttm_bo_device.
 * @bridge: The agp bridge this device is sitting on.
 * @size: Size of the data that needs backing.
 * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
 * @dummy_read_page: See struct ttm_bo_device.
 *
 * Create a TTM backend that uses the indicated AGP bridge as an aperture
 * for TT memory. This function uses the linux agpgart interface to
 * bind and unbind memory backing a ttm_tt.
 */
extern struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
					struct agp_bridge_data *bridge,
					unsigned long size, uint32_t page_flags,
					struct page *dummy_read_page);
int ttm_agp_tt_populate(struct ttm_tt *ttm);
void ttm_agp_tt_unpopulate(struct ttm_tt *ttm);
#endif

#endif