/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This code implements the DMA subsystem. It provides a HW-neutral interface
 * for other kernel code to use asynchronous memory copy capabilities,
 * if present, and allows different HW DMA drivers to register as providing
 * this capability.
 *
 * Because we are accelerating what is already a relatively fast operation,
 * the code goes to great lengths to avoid additional overhead, such as
 * locking.
 *
 * LOCKING:
 *
 * The subsystem keeps a global list of dma_device structs; it is protected by
 * a mutex, dma_list_mutex.
 *
 * A subsystem can get access to a channel by calling dmaengine_get() followed
 * by dma_find_channel(), or if it has need for an exclusive channel it can
 * call dma_request_channel().  Once a channel is allocated a reference is
 * taken against its corresponding driver to disable removal.
 *
 * Each device has a channels list, which runs unlocked but is never modified
 * once the device is registered; it is just set up by the driver.
 *
 * See Documentation/dmaengine.txt for more details.
 */
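
/*
 * Illustrative sketch (an editorial assumption, not part of the original
 * file): a typical mem-to-mem offload client follows the public-channel model
 * described above.  The helpers used (dmaengine_get(), dma_find_channel(),
 * dma_async_memcpy_buf_to_buf(), dmaengine_put()) are the real API exported
 * below; the surrounding flow and the chan/dest/src/len/cookie variables are
 * hypothetical.
 *
 *	dmaengine_get();
 *	...
 *	chan = dma_find_channel(DMA_MEMCPY);
 *	if (chan)
 *		cookie = dma_async_memcpy_buf_to_buf(chan, dest, src, len);
 *	...
 *	dmaengine_put();
 */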

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
#include <linux/rculist.h>
#include <linux/idr.h>

static DEFINE_MUTEX(dma_list_mutex);
static LIST_HEAD(dma_device_list);
static long dmaengine_ref_count;
static struct idr dma_idr;

/* --- sysfs implementation --- */

/**
 * dev_to_dma_chan - convert a device pointer to its sysfs container object
 * @dev - device node
 *
 * Must be called under dma_list_mutex
 */
static struct dma_chan *dev_to_dma_chan(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	return chan_dev->chan;
}

static ssize_t show_memcpy_count(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int i;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->memcpy_count;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}

static ssize_t show_bytes_transferred(struct device *dev, struct device_attribute *attr,
				      char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int i;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->bytes_transferred;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}

static ssize_t show_in_use(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan)
		err = sprintf(buf, "%d\n", chan->client_count);
	else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}

static struct device_attribute dma_attrs[] = {
	__ATTR(memcpy_count, S_IRUGO, show_memcpy_count, NULL),
	__ATTR(bytes_transferred, S_IRUGO, show_bytes_transferred, NULL),
	__ATTR(in_use, S_IRUGO, show_in_use, NULL),
	__ATTR_NULL
};

static void chan_dev_release(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	if (atomic_dec_and_test(chan_dev->idr_ref)) {
		mutex_lock(&dma_list_mutex);
		idr_remove(&dma_idr, chan_dev->dev_id);
		mutex_unlock(&dma_list_mutex);
		kfree(chan_dev->idr_ref);
	}
	kfree(chan_dev);
}

static struct class dma_devclass = {
	.name		= "dma",
	.dev_attrs	= dma_attrs,
	.dev_release	= chan_dev_release,
};

/* --- client and device registration --- */

#define dma_device_satisfies_mask(device, mask) \
	__dma_device_satisfies_mask((device), &(mask))
static int
__dma_device_satisfies_mask(struct dma_device *device, dma_cap_mask_t *want)
{
	dma_cap_mask_t has;

	bitmap_and(has.bits, want->bits, device->cap_mask.bits,
		DMA_TX_TYPE_END);
	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
}

static struct module *dma_chan_to_owner(struct dma_chan *chan)
{
	return chan->device->dev->driver->owner;
}

/**
 * balance_ref_count - catch up the channel reference count
 * @chan - channel to balance ->client_count versus dmaengine_ref_count
 *
 * balance_ref_count must be called under dma_list_mutex
 */
static void balance_ref_count(struct dma_chan *chan)
{
	struct module *owner = dma_chan_to_owner(chan);

	while (chan->client_count < dmaengine_ref_count) {
		__module_get(owner);
		chan->client_count++;
	}
}

/**
 * dma_chan_get - try to grab a dma channel's parent driver module
 * @chan - channel to grab
 *
 * Must be called under dma_list_mutex
 */
static int dma_chan_get(struct dma_chan *chan)
{
	int err = -ENODEV;
	struct module *owner = dma_chan_to_owner(chan);

	if (chan->client_count) {
		__module_get(owner);
		err = 0;
	} else if (try_module_get(owner))
		err = 0;

	if (err == 0)
		chan->client_count++;

	/* allocate upon first client reference */
	if (chan->client_count == 1 && err == 0) {
		int desc_cnt = chan->device->device_alloc_chan_resources(chan);

		if (desc_cnt < 0) {
			err = desc_cnt;
			chan->client_count = 0;
			module_put(owner);
		} else if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
			balance_ref_count(chan);
	}

	return err;
}

/**
 * dma_chan_put - drop a reference to a dma channel's parent driver module
 * @chan - channel to release
 *
 * Must be called under dma_list_mutex
 */
static void dma_chan_put(struct dma_chan *chan)
{
	if (!chan->client_count)
		return; /* this channel failed alloc_chan_resources */
	chan->client_count--;
	module_put(dma_chan_to_owner(chan));
	if (chan->client_count == 0)
		chan->device->device_free_chan_resources(chan);
}

enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
	enum dma_status status;
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	dma_async_issue_pending(chan);
	do {
		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			printk(KERN_ERR "dma_sync_wait_timeout!\n");
			return DMA_ERROR;
		}
	} while (status == DMA_IN_PROGRESS);

	return status;
}
EXPORT_SYMBOL(dma_sync_wait);

/**
 * dma_cap_mask_all - enable iteration over all operation types
 */
static dma_cap_mask_t dma_cap_mask_all;

/**
 * dma_chan_tbl_ent - tracks channel allocations per core/operation
 * @chan - associated channel for this entry
 */
struct dma_chan_tbl_ent {
	struct dma_chan *chan;
};

/**
 * channel_table - percpu lookup table for memory-to-memory offload providers
 */
static struct dma_chan_tbl_ent *channel_table[DMA_TX_TYPE_END];

static int __init dma_channel_table_init(void)
{
	enum dma_transaction_type cap;
	int err = 0;

	bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);

	/* 'interrupt', 'private', and 'slave' are channel capabilities,
	 * but are not associated with an operation so they do not need
	 * an entry in the channel_table
	 */
	clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
	clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
	clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);

	for_each_dma_cap_mask(cap, dma_cap_mask_all) {
		channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
		if (!channel_table[cap]) {
			err = -ENOMEM;
			break;
		}
	}

	if (err) {
		pr_err("dmaengine: initialization failure\n");
		for_each_dma_cap_mask(cap, dma_cap_mask_all)
			if (channel_table[cap])
				free_percpu(channel_table[cap]);
	}

	return err;
}
arch_initcall(dma_channel_table_init);

/**
 * dma_find_channel - find a channel to carry out the operation
 * @tx_type: transaction type
 */
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
	struct dma_chan *chan;
	int cpu;

	WARN_ONCE(dmaengine_ref_count == 0,
		  "client called %s without a reference", __func__);

	cpu = get_cpu();
	chan = per_cpu_ptr(channel_table[tx_type], cpu)->chan;
	put_cpu();

	return chan;
}
EXPORT_SYMBOL(dma_find_channel);

/**
 * dma_issue_pending_all - flush all pending operations across all channels
 */
void dma_issue_pending_all(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	WARN_ONCE(dmaengine_ref_count == 0,
		  "client called %s without a reference", __func__);

	rcu_read_lock();
	list_for_each_entry_rcu(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			if (chan->client_count)
				device->device_issue_pending(chan);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(dma_issue_pending_all);

/**
 * nth_chan - returns the nth channel of the given capability
 * @cap: capability to match
 * @n: nth channel desired
 *
 * Defaults to returning the channel with the desired capability and the
 * lowest reference count when 'n' cannot be satisfied.  Must be called
 * under dma_list_mutex.
 */
static struct dma_chan *nth_chan(enum dma_transaction_type cap, int n)
{
	struct dma_device *device;
	struct dma_chan *chan;
	struct dma_chan *ret = NULL;
	struct dma_chan *min = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (!dma_has_cap(cap, device->cap_mask) ||
		    dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			if (!chan->client_count)
				continue;
			if (!min)
				min = chan;
			else if (chan->table_count < min->table_count)
				min = chan;

			if (n-- == 0) {
				ret = chan;
				break; /* done */
			}
		}
		if (ret)
			break; /* done */
	}

	if (!ret)
		ret = min;

	if (ret)
		ret->table_count++;

	return ret;
}

/**
 * dma_channel_rebalance - redistribute the available channels
 *
 * Optimize for cpu isolation (each cpu gets a dedicated channel for an
 * operation type) in the SMP case, and operation isolation (avoid
 * multi-tasking channels) in the non-SMP case.  Must be called under
 * dma_list_mutex.
 */
static void dma_channel_rebalance(void)
{
	struct dma_chan *chan;
	struct dma_device *device;
	int cpu;
	int cap;
	int n;

	/* undo the last distribution */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_possible_cpu(cpu)
			per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			chan->table_count = 0;
	}

	/* don't populate the channel_table if no clients are available */
	if (!dmaengine_ref_count)
		return;

	/* redistribute available channels */
	n = 0;
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_online_cpu(cpu) {
			if (num_possible_cpus() > 1)
				chan = nth_chan(cap, n++);
			else
				chan = nth_chan(cap, -1);

			per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
		}
}

static struct dma_chan *private_candidate(dma_cap_mask_t *mask, struct dma_device *dev,
					  dma_filter_fn fn, void *fn_param)
{
	struct dma_chan *chan;

	if (!__dma_device_satisfies_mask(dev, mask)) {
		pr_debug("%s: wrong capabilities\n", __func__);
		return NULL;
	}
	/* devices with multiple channels need special handling as we need to
	 * ensure that all channels are either private or public.
	 */
	if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
		list_for_each_entry(chan, &dev->channels, device_node) {
			/* some channels are already publicly allocated */
			if (chan->client_count)
				return NULL;
		}

	list_for_each_entry(chan, &dev->channels, device_node) {
		if (chan->client_count) {
			pr_debug("%s: %s busy\n",
				 __func__, dma_chan_name(chan));
			continue;
		}
		if (fn && !fn(chan, fn_param)) {
			pr_debug("%s: %s filter said false\n",
				 __func__, dma_chan_name(chan));
			continue;
		}
		return chan;
	}

	return NULL;
}

/**
 * dma_request_channel - try to allocate an exclusive channel
 * @mask: capabilities that the channel must satisfy
 * @fn: optional callback to decide whether an available channel should be used
 * @fn_param: opaque parameter to pass to dma_filter_fn
 */
struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan = NULL;
	int err;

	/* Find a channel */
	mutex_lock(&dma_list_mutex);
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		chan = private_candidate(mask, device, fn, fn_param);
		if (chan) {
			/* Found a suitable channel, try to grab, prep, and
			 * return it.  We first set DMA_PRIVATE to disable
			 * balance_ref_count as this channel will not be
			 * published in the general-purpose allocator
			 */
			dma_cap_set(DMA_PRIVATE, device->cap_mask);
			err = dma_chan_get(chan);

			if (err == -ENODEV) {
				pr_debug("%s: %s module removed\n", __func__,
					 dma_chan_name(chan));
				list_del_rcu(&device->global_node);
			} else if (err)
				pr_err("dmaengine: failed to get %s: (%d)\n",
				       dma_chan_name(chan), err);
			else
				break;
			chan = NULL;
		}
	}
	mutex_unlock(&dma_list_mutex);

	pr_debug("%s: %s (%s)\n", __func__, chan ? "success" : "fail",
		 chan ? dma_chan_name(chan) : NULL);

	return chan;
}
EXPORT_SYMBOL_GPL(__dma_request_channel);

void dma_release_channel(struct dma_chan *chan)
{
	mutex_lock(&dma_list_mutex);
	WARN_ONCE(chan->client_count != 1,
		  "chan reference count %d != 1\n", chan->client_count);
	dma_chan_put(chan);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL_GPL(dma_release_channel);
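
/*
 * Illustrative sketch (an editorial assumption, not part of the original
 * file): how a user of the exclusive-channel model might obtain and release a
 * channel via the dma_request_channel() wrapper declared in
 * <linux/dmaengine.h>.  The filter function, my_dev, and the surrounding
 * context are hypothetical.
 *
 *	static bool my_filter(struct dma_chan *chan, void *param)
 *	{
 *		return chan->device->dev == param;	// only my device
 *	}
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, my_filter, my_dev);
 *	if (chan) {
 *		// use the channel exclusively, then drop it
 *		dma_release_channel(chan);
 *	}
 */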

/**
 * dmaengine_get - register interest in dma_channels
 */
void dmaengine_get(void)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count++;

	/* try to grab channels */
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			err = dma_chan_get(chan);
			if (err == -ENODEV) {
				/* module removed before we could use it */
				list_del_rcu(&device->global_node);
				break;
			} else if (err)
				pr_err("dmaengine: failed to get %s: (%d)\n",
				       dma_chan_name(chan), err);
		}
	}

	/* if this is the first reference and there were channels
	 * waiting we need to rebalance to get those channels
	 * incorporated into the channel table
	 */
	if (dmaengine_ref_count == 1)
		dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_get);

/**
 * dmaengine_put - let dma drivers be removed when ref_count == 0
 */
void dmaengine_put(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count--;
	BUG_ON(dmaengine_ref_count < 0);
	/* drop channel references */
	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			dma_chan_put(chan);
	}
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_put);

/**
 * dma_async_device_register - registers DMA devices found
 * @device: &dma_device
 */
int dma_async_device_register(struct dma_device *device)
{
	int chancnt = 0, rc;
	struct dma_chan* chan;
	atomic_t *idr_ref;

	if (!device)
		return -ENODEV;

	/* validate device routines */
	BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
		!device->device_prep_dma_memcpy);
	BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
		!device->device_prep_dma_xor);
	BUG_ON(dma_has_cap(DMA_ZERO_SUM, device->cap_mask) &&
		!device->device_prep_dma_zero_sum);
	BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
		!device->device_prep_dma_memset);
	BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
		!device->device_prep_dma_interrupt);
	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
		!device->device_prep_slave_sg);
	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
		!device->device_terminate_all);

	BUG_ON(!device->device_alloc_chan_resources);
	BUG_ON(!device->device_free_chan_resources);
	BUG_ON(!device->device_is_tx_complete);
	BUG_ON(!device->device_issue_pending);
	BUG_ON(!device->dev);

	idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
	if (!idr_ref)
		return -ENOMEM;
	atomic_set(idr_ref, 0);
 idr_retry:
	if (!idr_pre_get(&dma_idr, GFP_KERNEL))
		return -ENOMEM;
	mutex_lock(&dma_list_mutex);
	rc = idr_get_new(&dma_idr, NULL, &device->dev_id);
	mutex_unlock(&dma_list_mutex);
	if (rc == -EAGAIN)
		goto idr_retry;
	else if (rc != 0)
		return rc;

	/* represent channels in sysfs. Probably want devs too */
	list_for_each_entry(chan, &device->channels, device_node) {
		chan->local = alloc_percpu(typeof(*chan->local));
		if (chan->local == NULL)
			continue;
		chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
		if (chan->dev == NULL) {
			free_percpu(chan->local);
			continue;
		}

		chan->chan_id = chancnt++;
		chan->dev->device.class = &dma_devclass;
		chan->dev->device.parent = device->dev;
		chan->dev->chan = chan;
		chan->dev->idr_ref = idr_ref;
		chan->dev->dev_id = device->dev_id;
		atomic_inc(idr_ref);
		dev_set_name(&chan->dev->device, "dma%dchan%d",
			     device->dev_id, chan->chan_id);

		rc = device_register(&chan->dev->device);
		if (rc) {
			free_percpu(chan->local);
			chan->local = NULL;
			goto err_out;
		}
		chan->client_count = 0;
	}
	device->chancnt = chancnt;

	mutex_lock(&dma_list_mutex);
	/* take references on public channels */
	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
		list_for_each_entry(chan, &device->channels, device_node) {
			/* if clients are already waiting for channels we need
			 * to take references on their behalf
			 */
			if (dma_chan_get(chan) == -ENODEV) {
				/* note we can only get here for the first
				 * channel as the remaining channels are
				 * guaranteed to get a reference
				 */
				rc = -ENODEV;
				mutex_unlock(&dma_list_mutex);
				goto err_out;
			}
		}
	list_add_tail_rcu(&device->global_node, &dma_device_list);
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	return 0;

err_out:
	list_for_each_entry(chan, &device->channels, device_node) {
		if (chan->local == NULL)
			continue;
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
		free_percpu(chan->local);
	}
	return rc;
}
EXPORT_SYMBOL(dma_async_device_register);
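
/*
 * Illustrative sketch (an editorial assumption, not part of the original
 * file): the skeleton a DMA driver's probe routine might follow before
 * calling dma_async_device_register().  The field and callback names are the
 * real struct dma_device members checked above; the my_* symbols, my_softc,
 * and pdev are hypothetical.
 *
 *	struct dma_device *device = &my_softc->common;
 *
 *	dma_cap_set(DMA_MEMCPY, device->cap_mask);
 *	device->device_alloc_chan_resources = my_alloc_chan_resources;
 *	device->device_free_chan_resources = my_free_chan_resources;
 *	device->device_prep_dma_memcpy = my_prep_memcpy;
 *	device->device_is_tx_complete = my_is_tx_complete;
 *	device->device_issue_pending = my_issue_pending;
 *	device->dev = &pdev->dev;
 *
 *	INIT_LIST_HEAD(&device->channels);
 *	list_add_tail(&my_chan->common.device_node, &device->channels);
 *
 *	err = dma_async_device_register(device);
 */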

/**
 * dma_async_device_unregister - unregister a DMA device
 * @device: &dma_device
 *
 * This routine is called by dma driver exit routines; dmaengine holds module
 * references to prevent it being called while channels are in use.
 */
void dma_async_device_unregister(struct dma_device *device)
{
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	list_del_rcu(&device->global_node);
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	list_for_each_entry(chan, &device->channels, device_node) {
		WARN_ONCE(chan->client_count,
			  "%s called while %d clients hold a reference\n",
			  __func__, chan->client_count);
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
	}
}
EXPORT_SYMBOL(dma_async_device_unregister);

/**
 * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
 * @chan: DMA channel to offload copy to
 * @dest: destination address (virtual)
 * @src: source address (virtual)
 * @len: length
 *
 * Both @dest and @src must be mappable to a bus address according to the
 * DMA mapping API rules for streaming mappings.
 * Both @dest and @src must stay memory resident (kernel memory or locked
 * user space pages).
 */
dma_cookie_t
dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
			    void *src, size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	int cpu;

	dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
	dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
					 DMA_CTRL_ACK);

	if (!tx) {
		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	tx->callback = NULL;
	cookie = tx->tx_submit(tx);

	cpu = get_cpu();
	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
	put_cpu();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
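
/*
 * Illustrative sketch (an editorial assumption, not part of the original
 * file): how a caller might issue an offloaded copy and synchronously wait
 * for it using only helpers defined in this file.  chan/dest/src/len are
 * hypothetical, and error handling is abbreviated.
 *
 *	dma_cookie_t cookie;
 *	enum dma_status status;
 *
 *	cookie = dma_async_memcpy_buf_to_buf(chan, dest, src, len);
 *	if (cookie < 0)
 *		return cookie;
 *	status = dma_sync_wait(chan, cookie);
 *	if (status != DMA_SUCCESS)
 *		return -EIO;
 */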

/**
 * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
 * @chan: DMA channel to offload copy to
 * @page: destination page
 * @offset: offset in page to copy to
 * @kdata: source address (virtual)
 * @len: length
 *
 * Both @page/@offset and @kdata must be mappable to a bus address according
 * to the DMA mapping API rules for streaming mappings.
 * Both @page/@offset and @kdata must stay memory resident (kernel memory or
 * locked user space pages)
 */
dma_cookie_t
dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
			   unsigned int offset, void *kdata, size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	int cpu;

	dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
	dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
					 DMA_CTRL_ACK);

	if (!tx) {
		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	tx->callback = NULL;
	cookie = tx->tx_submit(tx);

	cpu = get_cpu();
	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
	put_cpu();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);

/**
 * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
 * @chan: DMA channel to offload copy to
 * @dest_pg: destination page
 * @dest_off: offset in page to copy to
 * @src_pg: source page
 * @src_off: offset in page to copy from
 * @len: length
 *
 * Both @dest_page/@dest_off and @src_page/@src_off must be mappable to a bus
 * address according to the DMA mapping API rules for streaming mappings.
 * Both @dest_page/@dest_off and @src_page/@src_off must stay memory resident
 * (kernel memory or locked user space pages).
 */
dma_cookie_t
dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
	unsigned int dest_off, struct page *src_pg, unsigned int src_off,
	size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	int cpu;

	dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
	dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
				DMA_FROM_DEVICE);
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
					 DMA_CTRL_ACK);

	if (!tx) {
		dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	tx->callback = NULL;
	cookie = tx->tx_submit(tx);

	cpu = get_cpu();
	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
	put_cpu();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);

void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
	struct dma_chan *chan)
{
	tx->chan = chan;
	spin_lock_init(&tx->lock);
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);

/* dma_wait_for_async_tx - spin wait for a transaction to complete
 * @tx: in-flight transaction to wait on
 *
 * This routine assumes that tx was obtained from a call to async_memcpy,
 * async_xor, async_memset, etc which ensures that tx is "in-flight" (prepped
 * and submitted).  Walking the parent chain is only meant to cover for DMA
 * drivers that do not implement the DMA_INTERRUPT capability and may race with
 * the driver's descriptor cleanup routine.
 */
enum dma_status
dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	enum dma_status status;
	struct dma_async_tx_descriptor *iter;
	struct dma_async_tx_descriptor *parent;

	if (!tx)
		return DMA_SUCCESS;

	WARN_ONCE(tx->parent, "%s: speculatively walking dependency chain for"
		  " %s\n", __func__, dma_chan_name(tx->chan));

	/* poll through the dependency chain, return when tx is complete */
	do {
		iter = tx;

		/* find the root of the unsubmitted dependency chain */
		do {
			parent = iter->parent;
			if (!parent)
				break;
			else
				iter = parent;
		} while (parent);

		/* there is a small window for ->parent == NULL and
		 * ->cookie == -EBUSY
		 */
		while (iter->cookie == -EBUSY)
			cpu_relax();

		status = dma_sync_wait(iter->chan, iter->cookie);
	} while (status == DMA_IN_PROGRESS || (iter != tx));

	return status;
}
EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);

/* dma_run_dependencies - helper routine for dma drivers to process
 *	(start) dependent operations on their target channel
 * @tx: transaction with dependencies
 */
void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
{
	struct dma_async_tx_descriptor *dep = tx->next;
	struct dma_async_tx_descriptor *dep_next;
	struct dma_chan *chan;

	if (!dep)
		return;

	chan = dep->chan;

	/* keep submitting up until a channel switch is detected
	 * in that case we will be called again as a result of
	 * processing the interrupt from async_tx_channel_switch
	 */
	for (; dep; dep = dep_next) {
		spin_lock_bh(&dep->lock);
		dep->parent = NULL;
		dep_next = dep->next;
		if (dep_next && dep_next->chan == chan)
			dep->next = NULL; /* ->next will be submitted */
		else
			dep_next = NULL; /* submit current dep and terminate */
		spin_unlock_bh(&dep->lock);

		dep->tx_submit(dep);
	}

	chan->device->device_issue_pending(chan);
}
EXPORT_SYMBOL_GPL(dma_run_dependencies);
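
/*
 * Illustrative sketch (an editorial assumption, not part of the original
 * file): drivers typically call dma_run_dependencies() from their descriptor
 * cleanup path so that operations queued against a different channel are
 * started once this descriptor completes.  The struct my_desc container and
 * the surrounding cleanup function are hypothetical; ->callback and
 * ->callback_param are real dma_async_tx_descriptor members.
 *
 *	static void my_cleanup_descriptor(struct my_desc *desc)
 *	{
 *		struct dma_async_tx_descriptor *tx = &desc->txd;
 *
 *		if (tx->callback)
 *			tx->callback(tx->callback_param);
 *		dma_run_dependencies(tx);
 *	}
 */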

static int __init dma_bus_init(void)
{
	idr_init(&dma_idr);
	mutex_init(&dma_list_mutex);
	return class_register(&dma_devclass);
}
arch_initcall(dma_bus_init);