/*
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *  Copyright (c) by Takashi Iwai <tiwai@suse.de>
 *
 *  EMU10K1 memory page allocation (PTB area)
 *
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <linux/pci.h>
#include <linux/time.h>
#include <linux/mutex.h>

#include <sound/core.h>
#include <sound/emu10k1.h>

/* The page argument of these two macros is an Emu page (4096 bytes),
 * not the kernel-aligned page used elsewhere in this file.
 */
#define __set_ptb_entry(emu,page,addr) \
	(((u32 *)(emu)->ptb_pages.area)[page] = cpu_to_le32(((addr) << 1) | (page)))
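
/*
 * Each PTB entry is a 32-bit little-endian word that combines the DMA
 * address of a 4 KiB Emu page with the entry's own page index, exactly as
 * computed by __set_ptb_entry() above.  A small worked example (the address
 * is purely illustrative):
 *
 *	addr = 0x1234000, page = 5
 *	entry = cpu_to_le32((0x1234000 << 1) | 5) = cpu_to_le32(0x02468005)
 */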

#define UNIT_PAGES		(PAGE_SIZE / EMUPAGESIZE)
#define MAX_ALIGN_PAGES		(MAXPAGES / UNIT_PAGES)
/* get aligned page from offset address */
#define get_aligned_page(offset)	((offset) >> PAGE_SHIFT)
/* get offset address from aligned page */
#define aligned_page_offset(page)	((page) << PAGE_SHIFT)

#if PAGE_SIZE == 4096
/* page size == EMUPAGESIZE */
/* fill the PTB entry (or entries) corresponding to page with addr */
#define set_ptb_entry(emu,page,addr)	__set_ptb_entry(emu,page,addr)
/* fill the PTB entry (or entries) corresponding to page with the silence pointer */
#define set_silent_ptb(emu,page)	__set_ptb_entry(emu,page,emu->silent_page.addr)
#else
/* fill PTB entries -- we need to fill UNIT_PAGES entries */
static inline void set_ptb_entry(struct snd_emu10k1 *emu, int page, dma_addr_t addr)
{
	int i;
	page *= UNIT_PAGES;
	for (i = 0; i < UNIT_PAGES; i++, page++) {
		__set_ptb_entry(emu, page, addr);
		addr += EMUPAGESIZE;
	}
}
static inline void set_silent_ptb(struct snd_emu10k1 *emu, int page)
{
	int i;
	page *= UNIT_PAGES;
	for (i = 0; i < UNIT_PAGES; i++, page++)
		/* do not increment the address; every entry points at the silent page */
		__set_ptb_entry(emu, page, emu->silent_page.addr);
}
#endif /* PAGE_SIZE */
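
/*
 * Illustration of the non-4k case above (numbers purely illustrative): with
 * 8 KiB kernel pages UNIT_PAGES is 2, so set_ptb_entry(emu, 3, addr) fills
 * Emu pages 6 and 7 with addr and addr + EMUPAGESIZE, while
 * set_silent_ptb(emu, 3) points both entries at the shared silent page.
 */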


/*
 * prototypes
 */
static int synth_alloc_pages(struct snd_emu10k1 *hw, struct snd_emu10k1_memblk *blk);
static int synth_free_pages(struct snd_emu10k1 *hw, struct snd_emu10k1_memblk *blk);

#define get_emu10k1_memblk(l,member)	list_entry(l, struct snd_emu10k1_memblk, member)


/* initialize the emu10k1-specific part of the memory block */
static void emu10k1_memblk_init(struct snd_emu10k1_memblk *blk)
{
	blk->mapped_page = -1;
	INIT_LIST_HEAD(&blk->mapped_link);
	INIT_LIST_HEAD(&blk->mapped_order_link);
	blk->map_locked = 0;

	blk->first_page = get_aligned_page(blk->mem.offset);
	blk->last_page = get_aligned_page(blk->mem.offset + blk->mem.size - 1);
	blk->pages = blk->last_page - blk->first_page + 1;
}

/*
 * search for an empty region on the PTB with the given size
 *
 * if an empty region is found, return the start page and store the next
 * mapped block in nextp
 * if not found, return a negative error code
 */
static int search_empty_map_area(struct snd_emu10k1 *emu, int npages, struct list_head **nextp)
{
	int page = 0, found_page = -ENOMEM;
	int max_size = npages;
	int size;
	struct list_head *candidate = &emu->mapped_link_head;
	struct list_head *pos;

	list_for_each (pos, &emu->mapped_link_head) {
		struct snd_emu10k1_memblk *blk = get_emu10k1_memblk(pos, mapped_link);
		if (blk->mapped_page < 0)
			continue;
		size = blk->mapped_page - page;
		if (size == npages) {
			*nextp = pos;
			return page;
		}
		else if (size > max_size) {
			/* we look for the maximum empty hole */
			max_size = size;
			candidate = pos;
			found_page = page;
		}
		page = blk->mapped_page + blk->pages;
	}
	size = MAX_ALIGN_PAGES - page;
	if (size >= max_size) {
		*nextp = pos;
		return page;
	}
	*nextp = candidate;
	return found_page;
}
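
/*
 * A brief illustration of the search above (numbers invented): with blocks
 * mapped at aligned pages 0-3 and 10-15, a request for npages == 6 returns
 * page 4 right away because the hole before the second block is an exact
 * fit; a request for npages == 2 keeps scanning and, since the tail region
 * behind page 15 is at least as large as any interior hole, returns page 16.
 */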

/*
 * map a memory block onto emu10k1's PTB
 *
 * call with memblk_lock held
 */
static int map_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int page, pg;
	struct list_head *next;

	page = search_empty_map_area(emu, blk->pages, &next);
	if (page < 0) /* not found */
		return page;
	/* insert this block in the proper position of the mapped list */
	list_add_tail(&blk->mapped_link, next);
	/* append this as the newest block to the order list */
	list_add_tail(&blk->mapped_order_link, &emu->mapped_order_link_head);
	blk->mapped_page = page;
	/* fill PTB */
	for (pg = blk->first_page; pg <= blk->last_page; pg++) {
		set_ptb_entry(emu, page, emu->page_addr_table[pg]);
		page++;
	}
	return 0;
}

/*
 * unmap the block
 * return the size of the resulting empty region (in pages)
 *
 * call with memblk_lock held
 */
static int unmap_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int start_page, end_page, mpage, pg;
	struct list_head *p;
	struct snd_emu10k1_memblk *q;

	/* calculate the expected size of empty region */
	if ((p = blk->mapped_link.prev) != &emu->mapped_link_head) {
		q = get_emu10k1_memblk(p, mapped_link);
		start_page = q->mapped_page + q->pages;
	} else
		start_page = 0;
	if ((p = blk->mapped_link.next) != &emu->mapped_link_head) {
		q = get_emu10k1_memblk(p, mapped_link);
		end_page = q->mapped_page;
	} else
		end_page = MAX_ALIGN_PAGES;

	/* remove links */
	list_del(&blk->mapped_link);
	list_del(&blk->mapped_order_link);
	/* clear PTB */
	mpage = blk->mapped_page;
	for (pg = blk->first_page; pg <= blk->last_page; pg++) {
		set_silent_ptb(emu, mpage);
		mpage++;
	}
	blk->mapped_page = -1;
	return end_page - start_page; /* return the new empty size */
}

/*
 * search for empty pages of the given size, and create a memory block
 *
 * unlike synth_alloc(), the memory block is aligned to the page start
 */
static struct snd_emu10k1_memblk *
search_empty(struct snd_emu10k1 *emu, int size)
{
	struct list_head *p;
	struct snd_emu10k1_memblk *blk;
	int page, psize;

	psize = get_aligned_page(size + PAGE_SIZE - 1);
	page = 0;
	list_for_each(p, &emu->memhdr->block) {
		blk = get_emu10k1_memblk(p, mem.list);
		if (page + psize <= blk->first_page)
			goto __found_pages;
		page = blk->last_page + 1;
	}
	if (page + psize > emu->max_cache_pages)
		return NULL;

__found_pages:
	/* create a new memory block */
	blk = (struct snd_emu10k1_memblk *)__snd_util_memblk_new(emu->memhdr, psize << PAGE_SHIFT, p->prev);
	if (blk == NULL)
		return NULL;
	blk->mem.offset = aligned_page_offset(page); /* set aligned offset */
	emu10k1_memblk_init(blk);
	return blk;
}
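
/*
 * Rounding illustration for search_empty() (sizes invented): a request for
 * 10000 bytes with 4 KiB kernel pages gives psize = get_aligned_page(10000 +
 * 4095) = 3, so the block occupies three aligned pages and its offset is
 * always a multiple of PAGE_SIZE -- unlike the blocks returned by
 * __snd_util_mem_alloc() for the synth, which need not start on a page
 * boundary.
 */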


/*
 * check that the given DMA address is valid for a PTB page
 */
static int is_valid_page(struct snd_emu10k1 *emu, dma_addr_t addr)
{
	if (addr & ~emu->dma_mask) {
		snd_printk(KERN_ERR "max memory size is 0x%lx (addr = 0x%lx)!!\n", emu->dma_mask, (unsigned long)addr);
		return 0;
	}
	if (addr & (EMUPAGESIZE-1)) {
		snd_printk(KERN_ERR "page is not aligned\n");
		return 0;
	}
	return 1;
}

/*
 * map the given memory block onto the PTB.
 * if the block is already mapped, update the link order.
 * if no empty pages are found, try to release unused memory blocks
 * and retry the mapping.
 */
int snd_emu10k1_memblk_map(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int err;
	int size;
	struct list_head *p, *nextp;
	struct snd_emu10k1_memblk *deleted;
	unsigned long flags;

	spin_lock_irqsave(&emu->memblk_lock, flags);
	if (blk->mapped_page >= 0) {
		/* update order link */
		list_del(&blk->mapped_order_link);
		list_add_tail(&blk->mapped_order_link, &emu->mapped_order_link_head);
		spin_unlock_irqrestore(&emu->memblk_lock, flags);
		return 0;
	}
	if ((err = map_memblk(emu, blk)) < 0) {
		/* not enough pages - try to unmap some blocks */
		/* starting from the oldest block */
		p = emu->mapped_order_link_head.next;
		for (; p != &emu->mapped_order_link_head; p = nextp) {
			nextp = p->next;
			deleted = get_emu10k1_memblk(p, mapped_order_link);
			if (deleted->map_locked)
				continue;
			size = unmap_memblk(emu, deleted);
			if (size >= blk->pages) {
				/* ok, the empty region is large enough */
				err = map_memblk(emu, blk);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&emu->memblk_lock, flags);
	return err;
}

EXPORT_SYMBOL(snd_emu10k1_memblk_map);

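/*
 * A hedged usage sketch (not copied verbatim from the voice code): a caller
 * that is about to point a playback voice at a sample block simply (re)maps
 * the block and checks the result, letting the LRU logic above evict idle,
 * unlocked blocks when the PTB is full:
 *
 *	err = snd_emu10k1_memblk_map(emu, blk);
 *	if (err < 0)
 *		return err;
 */
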
/*
 * page allocation for DMA
 */
struct snd_util_memblk *
snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_sg_buf *sgbuf = snd_pcm_substream_sgbuf(substream);
	struct snd_util_memhdr *hdr;
	struct snd_emu10k1_memblk *blk;
	int page, err, idx;

	if (snd_BUG_ON(!emu))
		return NULL;
	if (snd_BUG_ON(runtime->dma_bytes <= 0 ||
		       runtime->dma_bytes >= MAXPAGES * EMUPAGESIZE))
		return NULL;
	hdr = emu->memhdr;
	if (snd_BUG_ON(!hdr))
		return NULL;

	mutex_lock(&hdr->block_mutex);
	blk = search_empty(emu, runtime->dma_bytes);
	if (blk == NULL) {
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	/* fill buffer addresses but pointers are not stored so that
	 * snd_free_pci_page() is not called in synth_free()
	 */
	idx = 0;
	for (page = blk->first_page; page <= blk->last_page; page++, idx++) {
		dma_addr_t addr;
#ifdef CONFIG_SND_DEBUG
		if (idx >= sgbuf->pages) {
			printk(KERN_ERR "emu: pages overflow! (%d-%d) for %d\n",
			       blk->first_page, blk->last_page, sgbuf->pages);
			mutex_unlock(&hdr->block_mutex);
			return NULL;
		}
#endif
		addr = sgbuf->table[idx].addr;
		if (! is_valid_page(emu, addr)) {
			printk(KERN_ERR "emu: failure page = %d\n", idx);
			mutex_unlock(&hdr->block_mutex);
			return NULL;
		}
		emu->page_addr_table[page] = addr;
		emu->page_ptr_table[page] = NULL;
	}

	/* set PTB entries */
	blk->map_locked = 1; /* do not unmap this block! */
	err = snd_emu10k1_memblk_map(emu, blk);
	if (err < 0) {
		__snd_util_mem_free(hdr, (struct snd_util_memblk *)blk);
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	mutex_unlock(&hdr->block_mutex);
	return (struct snd_util_memblk *)blk;
}
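
/*
 * A hedged sketch of how the PCM side is expected to pair these calls in a
 * hw_params callback (the private struct and its fields are illustrative,
 * not verbatim driver code): allocate the SG buffer first, then hand it to
 * the page table.
 *
 *	err = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params));
 *	if (err < 0)
 *		return err;
 *	if (epcm->memblk)
 *		snd_emu10k1_free_pages(emu, epcm->memblk);
 *	epcm->memblk = snd_emu10k1_alloc_pages(emu, substream);
 *	if (!epcm->memblk)
 *		return -ENOMEM;
 */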


/*
 * release DMA buffer from page table
 */
int snd_emu10k1_free_pages(struct snd_emu10k1 *emu, struct snd_util_memblk *blk)
{
	if (snd_BUG_ON(!emu || !blk))
		return -EINVAL;
	return snd_emu10k1_synth_free(emu, blk);
}


/*
 * memory allocation using multiple pages (for synth)
 * Unlike the DMA allocation above, non-contiguous pages are assigned.
 */

/*
 * allocate a synth sample area
 */
struct snd_util_memblk *
snd_emu10k1_synth_alloc(struct snd_emu10k1 *hw, unsigned int size)
{
	struct snd_emu10k1_memblk *blk;
	struct snd_util_memhdr *hdr = hw->memhdr;

	mutex_lock(&hdr->block_mutex);
	blk = (struct snd_emu10k1_memblk *)__snd_util_mem_alloc(hdr, size);
	if (blk == NULL) {
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	if (synth_alloc_pages(hw, blk)) {
		__snd_util_mem_free(hdr, (struct snd_util_memblk *)blk);
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	snd_emu10k1_memblk_map(hw, blk);
	mutex_unlock(&hdr->block_mutex);
	return (struct snd_util_memblk *)blk;
}

EXPORT_SYMBOL(snd_emu10k1_synth_alloc);

/*
 * free a synth sample area
 */
int
snd_emu10k1_synth_free(struct snd_emu10k1 *emu, struct snd_util_memblk *memblk)
{
	struct snd_util_memhdr *hdr = emu->memhdr;
	struct snd_emu10k1_memblk *blk = (struct snd_emu10k1_memblk *)memblk;
	unsigned long flags;

	mutex_lock(&hdr->block_mutex);
	spin_lock_irqsave(&emu->memblk_lock, flags);
	if (blk->mapped_page >= 0)
		unmap_memblk(emu, blk);
	spin_unlock_irqrestore(&emu->memblk_lock, flags);
	synth_free_pages(emu, blk);
	__snd_util_mem_free(hdr, memblk);
	mutex_unlock(&hdr->block_mutex);
	return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_free);

/* check the new allocation range, skipping pages shared with neighbouring blocks */
static void get_single_page_range(struct snd_util_memhdr *hdr,
				  struct snd_emu10k1_memblk *blk,
				  int *first_page_ret, int *last_page_ret)
{
	struct list_head *p;
	struct snd_emu10k1_memblk *q;
	int first_page, last_page;
	first_page = blk->first_page;
	if ((p = blk->mem.list.prev) != &hdr->block) {
		q = get_emu10k1_memblk(p, mem.list);
		if (q->last_page == first_page)
			first_page++;  /* first page was already allocated */
	}
	last_page = blk->last_page;
	if ((p = blk->mem.list.next) != &hdr->block) {
		q = get_emu10k1_memblk(p, mem.list);
		if (q->first_page == last_page)
			last_page--; /* last page was already allocated */
	}
	*first_page_ret = first_page;
	*last_page_ret = last_page;
}
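
/*
 * Worked example for get_single_page_range() (byte offsets invented): if a
 * previous synth block covers bytes 0-4999 (aligned pages 0-1) and the new
 * block covers bytes 5000-8999 (aligned pages 1-2), the shared page 1 is
 * already backed by memory, so only page 2 is reported for allocation --
 * and page 1 is likewise skipped when either block is freed while the other
 * still exists.
 */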

/* release allocated pages */
static void __synth_free_pages(struct snd_emu10k1 *emu, int first_page,
			       int last_page)
{
	int page;

	for (page = first_page; page <= last_page; page++) {
		free_page((unsigned long)emu->page_ptr_table[page]);
		emu->page_addr_table[page] = 0;
		emu->page_ptr_table[page] = NULL;
	}
}

/*
 * allocate kernel pages
 */
static int synth_alloc_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int page, first_page, last_page;

	emu10k1_memblk_init(blk);
	get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
	/* allocate kernel pages */
	for (page = first_page; page <= last_page; page++) {
		/* first try to allocate from <4GB zone */
		struct page *p = alloc_page(GFP_KERNEL | GFP_DMA32 |
					    __GFP_NOWARN);
		if (!p || (page_to_pfn(p) & ~(emu->dma_mask >> PAGE_SHIFT))) {
			if (p)
				__free_page(p);
			/* try to allocate from <16MB zone */
			p = alloc_page(GFP_ATOMIC | GFP_DMA |
				       __GFP_NORETRY | /* no OOM-killer */
				       __GFP_NOWARN);
		}
		if (!p) {
			__synth_free_pages(emu, first_page, page - 1);
			return -ENOMEM;
		}
		emu->page_addr_table[page] = page_to_phys(p);
		emu->page_ptr_table[page] = page_address(p);
	}
	return 0;
}

/*
 * free pages
 */
static int synth_free_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int first_page, last_page;

	get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
	__synth_free_pages(emu, first_page, last_page);
	return 0;
}

/* calculate buffer pointer from offset address */
static inline void *offset_ptr(struct snd_emu10k1 *emu, int page, int offset)
{
	char *ptr;
	if (snd_BUG_ON(page < 0 || page >= emu->max_cache_pages))
		return NULL;
	ptr = emu->page_ptr_table[page];
	if (! ptr) {
		printk(KERN_ERR "emu10k1: access to NULL ptr: page = %d\n", page);
		return NULL;
	}
	ptr += offset & (PAGE_SIZE - 1);
	return (void *)ptr;
}

/*
 * bzero(blk + offset, size)
 */
int snd_emu10k1_synth_bzero(struct snd_emu10k1 *emu, struct snd_util_memblk *blk,
			    int offset, int size)
{
	int page, nextofs, end_offset, temp, temp1;
	void *ptr;
	struct snd_emu10k1_memblk *p = (struct snd_emu10k1_memblk *)blk;

	offset += blk->offset & (PAGE_SIZE - 1);
	end_offset = offset + size;
	page = get_aligned_page(offset);
	do {
		nextofs = aligned_page_offset(page + 1);
		temp = nextofs - offset;
		temp1 = end_offset - offset;
		if (temp1 < temp)
			temp = temp1;
		ptr = offset_ptr(emu, page + p->first_page, offset);
		if (ptr)
			memset(ptr, 0, temp);
		offset = nextofs;
		page++;
	} while (offset < end_offset);
	return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_bzero);

/*
 * copy_from_user(blk + offset, data, size)
 */
int snd_emu10k1_synth_copy_from_user(struct snd_emu10k1 *emu, struct snd_util_memblk *blk,
				     int offset, const char __user *data, int size)
{
	int page, nextofs, end_offset, temp, temp1;
	void *ptr;
	struct snd_emu10k1_memblk *p = (struct snd_emu10k1_memblk *)blk;

	offset += blk->offset & (PAGE_SIZE - 1);
	end_offset = offset + size;
	page = get_aligned_page(offset);
	do {
		nextofs = aligned_page_offset(page + 1);
		temp = nextofs - offset;
		temp1 = end_offset - offset;
		if (temp1 < temp)
			temp = temp1;
		ptr = offset_ptr(emu, page + p->first_page, offset);
		if (ptr && copy_from_user(ptr, data, temp))
			return -EFAULT;
		offset = nextofs;
		data += temp;
		page++;
	} while (offset < end_offset);
	return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_copy_from_user);
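
/*
 * A hedged sketch of how the synth-side API above is meant to be used by a
 * sample loader (buffer names and sizes are illustrative, not taken from the
 * emux loader verbatim): allocate a block, copy the sample data from user
 * space, zero any padding, and free the block on failure.
 *
 *	struct snd_util_memblk *blk;
 *
 *	blk = snd_emu10k1_synth_alloc(emu, truesize);
 *	if (!blk)
 *		return -ENOMEM;
 *	if (snd_emu10k1_synth_copy_from_user(emu, blk, 0, user_data, data_size) < 0) {
 *		snd_emu10k1_synth_free(emu, blk);
 *		return -EFAULT;
 *	}
 *	snd_emu10k1_synth_bzero(emu, blk, data_size, truesize - data_size);
 */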