#ifndef __NOUVEAU_BO_H__
#define __NOUVEAU_BO_H__

struct nouveau_channel;
struct nouveau_fence;
struct nouveau_vma;

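/* nouveau_bo is the driver-side wrapper around a TTM buffer object; the TTM
 * object is embedded as the first member so the two can be converted with
 * the nouveau_bo() helper below. */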
struct nouveau_bo {
	struct ttm_buffer_object bo;
	struct ttm_placement placement;
	u32 valid_domains;
	u32 placements[3];
	u32 busy_placements[3];
	struct ttm_bo_kmap_obj kmap;
	struct list_head head;

	/* protected by ttm_bo_reserve() */
	struct drm_file *reserved_by;
	struct list_head entry;
	int pbbo_index;
	bool validate_mapped;

	struct list_head vma_list;
	unsigned page_shift;

	u32 tile_mode;
	u32 tile_flags;
	struct nouveau_drm_tile *tile;

	/* Only valid if allocated via nouveau_gem_new() and iff you hold a
	 * gem reference to it! For debugging, use gem.filp != NULL to test
	 * whether it is valid. */
	struct drm_gem_object gem;

	/* protected by the ttm reservation lock */
	int pin_refcnt;

	struct ttm_bo_kmap_obj dma_buf_vmap;
};

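/* Convert back from the embedded TTM object to the containing nouveau_bo. */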
static inline struct nouveau_bo *
nouveau_bo(struct ttm_buffer_object *bo)
{
	return container_of(bo, struct nouveau_bo, bo);
}

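/* Reference-swap helper: takes a reference on @ref (which may be NULL) and
 * drops the reference previously held in *pnvbo, so nouveau_bo_ref(NULL,
 * &nvbo) releases the caller's reference. */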
static inline int
nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pnvbo)
{
	struct nouveau_bo *prev;

	if (!pnvbo)
		return -EINVAL;
	prev = *pnvbo;

	*pnvbo = ref ? nouveau_bo(ttm_bo_reference(&ref->bo)) : NULL;
	if (prev) {
		struct ttm_buffer_object *bo = &prev->bo;

		ttm_bo_unref(&bo);
	}

	return 0;
}

extern struct ttm_bo_driver nouveau_bo_driver;

void nouveau_bo_move_init(struct nouveau_drm *);
int nouveau_bo_new(struct drm_device *, int size, int align, u32 flags,
		   u32 tile_mode, u32 tile_flags, struct sg_table *sg,
		   struct nouveau_bo **);
int nouveau_bo_pin(struct nouveau_bo *, u32 flags);
int nouveau_bo_unpin(struct nouveau_bo *);
int nouveau_bo_map(struct nouveau_bo *);
void nouveau_bo_unmap(struct nouveau_bo *);
void nouveau_bo_placement_set(struct nouveau_bo *, u32 type, u32 busy);
u16 nouveau_bo_rd16(struct nouveau_bo *, unsigned index);
void nouveau_bo_wr16(struct nouveau_bo *, unsigned index, u16 val);
u32 nouveau_bo_rd32(struct nouveau_bo *, unsigned index);
void nouveau_bo_wr32(struct nouveau_bo *, unsigned index, u32 val);
void nouveau_bo_fence(struct nouveau_bo *, struct nouveau_fence *);
int nouveau_bo_validate(struct nouveau_bo *, bool interruptible,
			bool no_wait_gpu);
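
/*
 * Typical lifecycle (illustrative sketch only, not taken from this header;
 * error handling omitted and VRAM placement via TTM_PL_FLAG_VRAM assumed):
 *
 *	struct nouveau_bo *nvbo = NULL;
 *
 *	nouveau_bo_new(dev, size, align, TTM_PL_FLAG_VRAM, 0, 0, NULL, &nvbo);
 *	nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM);
 *	nouveau_bo_map(nvbo);
 *	nouveau_bo_wr32(nvbo, 0, 0xcafebabe);
 *	nouveau_bo_unmap(nvbo);
 *	nouveau_bo_unpin(nvbo);
 *	nouveau_bo_ref(NULL, &nvbo);
 */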
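/* Per-VM mapping helpers for the vma_list above: nouveau_bo_vma_add() sets up
 * a mapping of the BO in the given VM, nouveau_bo_vma_find() looks up an
 * existing mapping for that VM (NULL if none), and nouveau_bo_vma_del()
 * removes one again. */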
struct nouveau_vma *
nouveau_bo_vma_find(struct nouveau_bo *, struct nouveau_vm *);

int nouveau_bo_vma_add(struct nouveau_bo *, struct nouveau_vm *,
		       struct nouveau_vma *);
void nouveau_bo_vma_del(struct nouveau_bo *, struct nouveau_vma *);

/* TODO: submit equivalent to TTM generic API upstream? */
static inline void __iomem *
nvbo_kmap_obj_iovirtual(struct nouveau_bo *nvbo)
{
	bool is_iomem;
	void __iomem *ioptr = (void __force __iomem *)ttm_kmap_obj_virtual(
				&nvbo->kmap, &is_iomem);
	WARN_ON_ONCE(ioptr && !is_iomem);
	return ioptr;
}
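
/* Note: nvbo_kmap_obj_iovirtual() consults the kmap filled in by
 * nouveau_bo_map(); if the BO is not currently mapped, the returned
 * pointer is NULL. */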

#endif