/*
 * Copyright (c) Red Hat Inc.

 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie <airlied@redhat.com>
 *          Jerome Glisse <jglisse@redhat.com>
 */
#ifndef TTM_PAGE_ALLOC
#define TTM_PAGE_ALLOC

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_memory.h>

struct device;

/**
 * Initialize the page pool allocator.
 */
int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages);

/**
 * Free the page pool allocator.
 */
void ttm_page_alloc_fini(void);

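/*
 * Usage sketch (illustrative only, not part of this header): the page pool
 * is brought up once against the global TTM memory accounting object and
 * torn down on exit.  In-tree this is done by the TTM core rather than by
 * individual drivers; "max_pages" below stands for whatever page budget the
 * caller chooses.
 *
 *	int ret = ttm_page_alloc_init(glob, max_pages);
 *	if (ret)
 *		return ret;
 *	...
 *	ttm_page_alloc_fini();
 *
 * The DMA pool declared further down has a matching
 * ttm_dma_page_alloc_init()/ttm_dma_page_alloc_fini() pair that is handled
 * the same way.
 */
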
/**
 * ttm_pool_populate:
 *
 * @ttm: The struct ttm_tt to contain the backing pages.
 *
 * Add backing pages to all of @ttm.
 */
int ttm_pool_populate(struct ttm_tt *ttm);

/**
 * ttm_pool_unpopulate:
 *
 * @ttm: The struct ttm_tt from which to free the backing pages.
 *
 * Free all pages of @ttm.
 */
void ttm_pool_unpopulate(struct ttm_tt *ttm);

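/*
 * Usage sketch (illustrative only): a driver backend that needs no
 * per-device DMA mapping can forward its ttm_tt populate/unpopulate
 * callbacks straight to the pool.  The foo_* names are hypothetical and the
 * exact callback signatures in struct ttm_bo_driver vary between kernel
 * versions.
 *
 *	static int foo_ttm_tt_populate(struct ttm_tt *ttm)
 *	{
 *		return ttm_pool_populate(ttm);
 *	}
 *
 *	static void foo_ttm_tt_unpopulate(struct ttm_tt *ttm)
 *	{
 *		ttm_pool_unpopulate(ttm);
 *	}
 */
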
/**
 * Output the state of the pools to a debugfs file.
 */
int ttm_page_alloc_debugfs(struct seq_file *m, void *data);

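/*
 * Usage sketch (illustrative only): ttm_page_alloc_debugfs() has the shape
 * of a seq_file show callback, so drivers typically expose it through their
 * existing debugfs plumbing, e.g. a drm_info_list entry (loosely modeled on
 * what radeon/amdgpu do; the "foo" prefix is hypothetical):
 *
 *	static struct drm_info_list foo_ttm_debugfs_list[] = {
 *		{ "ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL },
 *	};
 */
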
#if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU)
/**
 * Initialize the DMA page pool allocator.
 */
int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages);

/**
 * Free the DMA page pool allocator.
 */
void ttm_dma_page_alloc_fini(void);

/**
 * Output the state of the DMA pools to a debugfs file.
 */
int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);

int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev);
void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);

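/*
 * Usage sketch (illustrative only): drivers that embed their ttm_tt in a
 * struct ttm_dma_tt can let the coherent DMA pool both allocate and map the
 * backing pages.  The foo_ttm_tt container below is hypothetical; struct
 * ttm_dma_tt embeds the struct ttm_tt, reached here as dma.ttm.
 *
 *	struct foo_ttm_tt {
 *		struct ttm_dma_tt dma;
 *		struct device *dev;
 *	};
 *
 *	static int foo_ttm_tt_populate(struct ttm_tt *ttm)
 *	{
 *		struct foo_ttm_tt *gtt = container_of(ttm, struct foo_ttm_tt, dma.ttm);
 *
 *		return ttm_dma_populate(&gtt->dma, gtt->dev);
 *	}
 *
 *	static void foo_ttm_tt_unpopulate(struct ttm_tt *ttm)
 *	{
 *		struct foo_ttm_tt *gtt = container_of(ttm, struct foo_ttm_tt, dma.ttm);
 *
 *		ttm_dma_unpopulate(&gtt->dma, gtt->dev);
 *	}
 */
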
/**
 * Populates and DMA maps pages to fulfill a ttm_dma_populate() request.
 */
int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt);

/**
 * Unpopulates and DMA unmaps pages as part of a
 * ttm_dma_unpopulate() request.
 */
void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt);

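/*
 * Usage sketch (illustrative only): these helpers cover the common driver
 * pattern of allocating pages from the regular pool and then DMA-mapping
 * each of them, without going through the coherent DMA pool above.  Reusing
 * the hypothetical foo_ttm_tt layout from the previous sketch:
 *
 *	static int foo_ttm_tt_populate(struct ttm_tt *ttm)
 *	{
 *		struct foo_ttm_tt *gtt = container_of(ttm, struct foo_ttm_tt, dma.ttm);
 *
 *		return ttm_populate_and_map_pages(gtt->dev, &gtt->dma);
 *	}
 *
 *	static void foo_ttm_tt_unpopulate(struct ttm_tt *ttm)
 *	{
 *		struct foo_ttm_tt *gtt = container_of(ttm, struct foo_ttm_tt, dma.ttm);
 *
 *		ttm_unmap_and_unpopulate_pages(gtt->dev, &gtt->dma);
 *	}
 */
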
#else
static inline int ttm_dma_page_alloc_init(struct ttm_mem_global *glob,
					  unsigned max_pages)
{
	return -ENODEV;
}

static inline void ttm_dma_page_alloc_fini(void) { return; }

static inline int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
{
	return 0;
}

static inline int ttm_dma_populate(struct ttm_dma_tt *ttm_dma,
				   struct device *dev)
{
	return -ENOMEM;
}

static inline void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma,
				      struct device *dev)
{
}

static inline int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt)
{
	return -ENOMEM;
}

static inline void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt)
{
}

#endif /* CONFIG_SWIOTLB || CONFIG_INTEL_IOMMU */

#endif /* TTM_PAGE_ALLOC */