/*
 * ACPI helpers for DMA request / controller
 *
 * Based on of-dma.c
 *
 * Copyright (C) 2013, Intel Corporation
 * Authors: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/acpi.h>
#include <linux/acpi_dma.h>

static LIST_HEAD(acpi_dma_list);
static DEFINE_MUTEX(acpi_dma_lock);

/**
 * acpi_dma_parse_resource_group - match device and parse resource group
 * @grp:	CSRT resource group
 * @adev:	ACPI device to match with
 * @adma:	struct acpi_dma of the given DMA controller
 *
 * In order to match a device from the DSDT table to the corresponding CSRT
 * device we use the MMIO address and IRQ.
 *
 * Return:
 * 1 on success, 0 when no information is available, or appropriate errno value
 * on error.
 */
static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp,
		struct acpi_device *adev, struct acpi_dma *adma)
{
	const struct acpi_csrt_shared_info *si;
	struct list_head resource_list;
	struct resource_entry *rentry;
	resource_size_t mem = 0, irq = 0;
	int ret;

	if (grp->shared_info_length != sizeof(struct acpi_csrt_shared_info))
		return -ENODEV;

	INIT_LIST_HEAD(&resource_list);
	ret = acpi_dev_get_resources(adev, &resource_list, NULL, NULL);
	if (ret <= 0)
		return 0;

	list_for_each_entry(rentry, &resource_list, node) {
		if (resource_type(rentry->res) == IORESOURCE_MEM)
			mem = rentry->res->start;
		else if (resource_type(rentry->res) == IORESOURCE_IRQ)
			irq = rentry->res->start;
	}

	acpi_dev_free_resource_list(&resource_list);

	/* Consider initial zero values as resource not found */
	if (mem == 0 && irq == 0)
		return 0;

	si = (const struct acpi_csrt_shared_info *)&grp[1];

	/* Match device by MMIO and IRQ */
	if (si->mmio_base_low != mem || si->gsi_interrupt != irq)
		return 0;

	dev_dbg(&adev->dev, "matches with %.4s%04X (rev %u)\n",
		(char *)&grp->vendor_id, grp->device_id, grp->revision);

	/* Check if the request line range is available */
	if (si->base_request_line == 0 && si->num_handshake_signals == 0)
		return 0;

	adma->base_request_line = si->base_request_line;
	adma->end_request_line = si->base_request_line +
				 si->num_handshake_signals - 1;

	dev_dbg(&adev->dev, "request line base: 0x%04x end: 0x%04x\n",
		adma->base_request_line, adma->end_request_line);

	return 1;
}

/**
 * acpi_dma_parse_csrt - parse CSRT to extract additional DMA resources
 * @adev:	ACPI device to match with
 * @adma:	struct acpi_dma of the given DMA controller
 *
 * CSRT or Core System Resources Table is a proprietary ACPI table
 * introduced by Microsoft. This table can contain devices that are not in
 * the system DSDT table. In particular, DMA controllers might be described
 * here.
 *
 * We use this table to get the request line range of the specific DMA
 * controller to be used later.
 */
static void acpi_dma_parse_csrt(struct acpi_device *adev, struct acpi_dma *adma)
{
	struct acpi_csrt_group *grp, *end;
	struct acpi_table_csrt *csrt;
	acpi_status status;
	int ret;

	status = acpi_get_table(ACPI_SIG_CSRT, 0,
				(struct acpi_table_header **)&csrt);
	if (ACPI_FAILURE(status)) {
		if (status != AE_NOT_FOUND)
			dev_warn(&adev->dev, "failed to get the CSRT table\n");
		return;
	}

	grp = (struct acpi_csrt_group *)(csrt + 1);
	end = (struct acpi_csrt_group *)((void *)csrt + csrt->header.length);

	while (grp < end) {
		ret = acpi_dma_parse_resource_group(grp, adev, adma);
		if (ret < 0) {
			dev_warn(&adev->dev,
				 "error in parsing resource group\n");
			return;
		}

		grp = (struct acpi_csrt_group *)((void *)grp + grp->length);
	}
}

/**
 * acpi_dma_controller_register - Register a DMA controller to ACPI DMA helpers
 * @dev:		struct device of DMA controller
 * @acpi_dma_xlate:	translation function which converts a dma specifier
 *			into a dma_chan structure
 * @data:		pointer to controller specific data to be used by
 *			translation function
 *
 * Allocated memory should be freed with appropriate acpi_dma_controller_free()
 * call.
 *
 * Return:
 * 0 on success or appropriate errno value on error.
 */
int acpi_dma_controller_register(struct device *dev,
		struct dma_chan *(*acpi_dma_xlate)
		(struct acpi_dma_spec *, struct acpi_dma *),
		void *data)
{
	struct acpi_device *adev;
	struct acpi_dma *adma;

	if (!dev || !acpi_dma_xlate)
		return -EINVAL;

	/* Check if the device was enumerated by ACPI */
	if (!ACPI_HANDLE(dev))
		return -EINVAL;

	if (acpi_bus_get_device(ACPI_HANDLE(dev), &adev))
		return -EINVAL;

	adma = kzalloc(sizeof(*adma), GFP_KERNEL);
	if (!adma)
		return -ENOMEM;

	adma->dev = dev;
	adma->acpi_dma_xlate = acpi_dma_xlate;
	adma->data = data;

	acpi_dma_parse_csrt(adev, adma);

	/* Now queue acpi_dma controller structure in list */
	mutex_lock(&acpi_dma_lock);
	list_add_tail(&adma->dma_controllers, &acpi_dma_list);
	mutex_unlock(&acpi_dma_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(acpi_dma_controller_register);
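
/*
 * Usage sketch (illustrative only, not part of this file): a hypothetical
 * DMA controller driver "foo" might register itself in its probe path and
 * unregister in its remove path roughly as follows. The foo_* identifiers
 * are made up for the example; only the acpi_dma_* calls are provided here.
 *
 *	static struct dma_chan *foo_acpi_xlate(struct acpi_dma_spec *dma_spec,
 *					       struct acpi_dma *adma)
 *	{
 *		struct foo_dma *fdma = adma->data;
 *
 *		return foo_find_chan(fdma, dma_spec->chan_id,
 *				     dma_spec->slave_id);
 *	}
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct foo_dma *fdma = foo_setup(pdev);
 *
 *		return acpi_dma_controller_register(&pdev->dev,
 *						    foo_acpi_xlate, fdma);
 *	}
 *
 *	static int foo_remove(struct platform_device *pdev)
 *	{
 *		acpi_dma_controller_free(&pdev->dev);
 *		return 0;
 *	}
 */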

/**
 * acpi_dma_controller_free - Remove a DMA controller from ACPI DMA helpers list
 * @dev:	struct device of DMA controller
 *
 * Memory allocated by acpi_dma_controller_register() is freed here.
 *
 * Return:
 * 0 on success or appropriate errno value on error.
 */
int acpi_dma_controller_free(struct device *dev)
{
	struct acpi_dma *adma;

	if (!dev)
		return -EINVAL;

	mutex_lock(&acpi_dma_lock);

	list_for_each_entry(adma, &acpi_dma_list, dma_controllers)
		if (adma->dev == dev) {
			list_del(&adma->dma_controllers);
			mutex_unlock(&acpi_dma_lock);
			kfree(adma);
			return 0;
		}

	mutex_unlock(&acpi_dma_lock);
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(acpi_dma_controller_free);

static void devm_acpi_dma_release(struct device *dev, void *res)
{
	acpi_dma_controller_free(dev);
}

/**
 * devm_acpi_dma_controller_register - resource managed acpi_dma_controller_register()
 * @dev:	device that is registering this DMA controller
 * @acpi_dma_xlate:	translation function
 * @data:	pointer to controller specific data
 *
 * Managed acpi_dma_controller_register(). DMA controllers registered by this
 * function are automatically freed on driver detach. See
 * acpi_dma_controller_register() for more information.
 *
 * Return:
 * 0 on success or appropriate errno value on error.
 */
int devm_acpi_dma_controller_register(struct device *dev,
		struct dma_chan *(*acpi_dma_xlate)
		(struct acpi_dma_spec *, struct acpi_dma *),
		void *data)
{
	void *res;
	int ret;

	res = devres_alloc(devm_acpi_dma_release, 0, GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	ret = acpi_dma_controller_register(dev, acpi_dma_xlate, data);
	if (ret) {
		devres_free(res);
		return ret;
	}
	devres_add(dev, res);
	return 0;
}
EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_register);
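
/*
 * Usage sketch (illustrative only): with the managed variant the explicit
 * acpi_dma_controller_free() call in the remove path goes away, e.g.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct foo_dma *fdma = foo_setup(pdev);
 *
 *		return devm_acpi_dma_controller_register(&pdev->dev,
 *							 foo_acpi_xlate, fdma);
 *	}
 *
 * foo_* as above is hypothetical; the registration is undone automatically
 * on driver detach.
 */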

/**
 * devm_acpi_dma_controller_free - resource managed acpi_dma_controller_free()
 * @dev:	device that is unregistering as DMA controller
 *
 * Unregister a DMA controller registered with
 * devm_acpi_dma_controller_register(). Normally this function will not need to
 * be called and the resource management code will ensure that the resource is
 * freed.
 */
void devm_acpi_dma_controller_free(struct device *dev)
{
	WARN_ON(devres_release(dev, devm_acpi_dma_release, NULL, NULL));
}
EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_free);

/**
 * acpi_dma_update_dma_spec - prepare dma specifier to pass to translation function
 * @adma:	struct acpi_dma of DMA controller
 * @dma_spec:	dma specifier to update
 *
 * According to the ACPI 5.0 Specification, Table 6-170 "Fixed DMA Resource
 * Descriptor":
 *	DMA Request Line bits is a platform-relative number uniquely
 *	identifying the request line assigned. Request line-to-Controller
 *	mapping is done in a controller-specific OS driver.
 * That's why we can safely adjust slave_id when the appropriate controller is
 * found.
 *
 * Return:
 * 0, if no information is available, -1 on mismatch, and 1 otherwise.
 */
static int acpi_dma_update_dma_spec(struct acpi_dma *adma,
		struct acpi_dma_spec *dma_spec)
{
	/* Set link to the DMA controller device */
	dma_spec->dev = adma->dev;

	/* Check if the request line range is available */
	if (adma->base_request_line == 0 && adma->end_request_line == 0)
		return 0;

	/* Check if slave_id falls within the range */
	if (dma_spec->slave_id < adma->base_request_line ||
	    dma_spec->slave_id > adma->end_request_line)
		return -1;

	/*
	 * Here we adjust slave_id. It should be a number relative to the base
	 * request line.
	 */
	dma_spec->slave_id -= adma->base_request_line;

	return 1;
}

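/*
 * Worked example (numbers are illustrative): if the CSRT reported
 * base_request_line = 0x10 and num_handshake_signals = 16, the controller
 * owns request lines 0x10..0x1f. A FixedDMA descriptor carrying request
 * line 0x12 matches this controller and its slave_id is rewritten to
 * 0x12 - 0x10 = 2 before the translation function is called.
 */
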
struct acpi_dma_parser_data {
	struct acpi_dma_spec dma_spec;
	size_t index;
	size_t n;
};

/**
 * acpi_dma_parse_fixed_dma - Parse FixedDMA ACPI resources to a DMA specifier
 * @res:	struct acpi_resource to get FixedDMA resources from
 * @data:	pointer to a helper struct acpi_dma_parser_data
 */
static int acpi_dma_parse_fixed_dma(struct acpi_resource *res, void *data)
{
	struct acpi_dma_parser_data *pdata = data;

	if (res->type == ACPI_RESOURCE_TYPE_FIXED_DMA) {
		struct acpi_resource_fixed_dma *dma = &res->data.fixed_dma;

		if (pdata->n++ == pdata->index) {
			pdata->dma_spec.chan_id = dma->channels;
			pdata->dma_spec.slave_id = dma->request_lines;
		}
	}

	/* Tell the ACPI core to skip this resource */
	return 1;
}

/**
 * acpi_dma_request_slave_chan_by_index - Get the DMA slave channel
 * @dev:	struct device to get DMA request from
 * @index:	index of FixedDMA descriptor for @dev
 *
 * Return:
 * Pointer to appropriate dma channel on success or an error pointer.
 */
struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev,
		size_t index)
{
	struct acpi_dma_parser_data pdata;
	struct acpi_dma_spec *dma_spec = &pdata.dma_spec;
	struct list_head resource_list;
	struct acpi_device *adev;
	struct acpi_dma *adma;
	struct dma_chan *chan = NULL;
	int found;

	/* Check if the device was enumerated by ACPI */
	if (!dev || !ACPI_HANDLE(dev))
		return ERR_PTR(-ENODEV);

	if (acpi_bus_get_device(ACPI_HANDLE(dev), &adev))
		return ERR_PTR(-ENODEV);

	memset(&pdata, 0, sizeof(pdata));
	pdata.index = index;

	/* Initial values for the request line and channel */
	dma_spec->chan_id = -1;
	dma_spec->slave_id = -1;

	INIT_LIST_HEAD(&resource_list);
	acpi_dev_get_resources(adev, &resource_list,
			acpi_dma_parse_fixed_dma, &pdata);
	acpi_dev_free_resource_list(&resource_list);

	if (dma_spec->slave_id < 0 || dma_spec->chan_id < 0)
		return ERR_PTR(-ENODEV);

	mutex_lock(&acpi_dma_lock);

	list_for_each_entry(adma, &acpi_dma_list, dma_controllers) {
		/*
		 * We are not going to call the translation function if
		 * slave_id doesn't fall within the request line range.
		 */
		found = acpi_dma_update_dma_spec(adma, dma_spec);
		if (found < 0)
			continue;
		chan = adma->acpi_dma_xlate(dma_spec, adma);
		/*
		 * Try to get a channel only from the DMA controller that
		 * matches the slave_id. See acpi_dma_update_dma_spec()
		 * description for the details.
		 */
		if (found > 0 || chan)
			break;
	}

	mutex_unlock(&acpi_dma_lock);
	return chan ? chan : ERR_PTR(-EPROBE_DEFER);
}
EXPORT_SYMBOL_GPL(acpi_dma_request_slave_chan_by_index);

/**
 * acpi_dma_request_slave_chan_by_name - Get the DMA slave channel
 * @dev:	struct device to get DMA request from
 * @name:	represents corresponding FixedDMA descriptor for @dev
 *
 * In order to support both Device Tree and ACPI in a single driver we
 * translate the names "tx" and "rx" here based on the most common case where
 * the first FixedDMA descriptor is TX and the second is RX.
 *
 * Return:
 * Pointer to appropriate dma channel on success or an error pointer.
 */
struct dma_chan *acpi_dma_request_slave_chan_by_name(struct device *dev,
		const char *name)
{
	size_t index;

	if (!strcmp(name, "tx"))
		index = 0;
	else if (!strcmp(name, "rx"))
		index = 1;
	else
		return ERR_PTR(-ENODEV);

	return acpi_dma_request_slave_chan_by_index(dev, index);
}
EXPORT_SYMBOL_GPL(acpi_dma_request_slave_chan_by_name);
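
/*
 * Usage sketch (illustrative only): an ACPI enumerated slave driver could
 * pick up its channels directly with something like
 *
 *	struct dma_chan *tx, *rx;
 *
 *	tx = acpi_dma_request_slave_chan_by_name(dev, "tx");
 *	if (IS_ERR(tx))
 *		return PTR_ERR(tx);
 *
 *	rx = acpi_dma_request_slave_chan_by_name(dev, "rx");
 *	if (IS_ERR(rx)) {
 *		dma_release_channel(tx);
 *		return PTR_ERR(rx);
 *	}
 *
 * although drivers typically call dma_request_slave_channel() and let the
 * dmaengine core route the request into these helpers.
 */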

/**
 * acpi_dma_simple_xlate - Simple ACPI DMA engine translation helper
 * @dma_spec:	pointer to ACPI DMA specifier
 * @adma:	pointer to ACPI DMA controller data
 *
 * A simple translation function for ACPI based devices. Passes &struct
 * acpi_dma_spec to the DMA controller driver provided filter function.
 *
 * Return:
 * Pointer to the channel if found or %NULL otherwise.
 */
struct dma_chan *acpi_dma_simple_xlate(struct acpi_dma_spec *dma_spec,
		struct acpi_dma *adma)
{
	struct acpi_dma_filter_info *info = adma->data;

	if (!info || !info->filter_fn)
		return NULL;

	return dma_request_channel(info->dma_cap, info->filter_fn, dma_spec);
}
EXPORT_SYMBOL_GPL(acpi_dma_simple_xlate);
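
/*
 * Usage sketch (illustrative only): a controller driver that already has a
 * dmaengine filter function can avoid writing its own translation callback
 * by registering acpi_dma_simple_xlate() together with a filter description.
 * The foo_* names are hypothetical.
 *
 *	static struct acpi_dma_filter_info foo_dma_info;
 *
 *	static int foo_register_acpi(struct device *dev, struct foo_dma *fdma)
 *	{
 *		dma_cap_zero(foo_dma_info.dma_cap);
 *		dma_cap_set(DMA_SLAVE, foo_dma_info.dma_cap);
 *		foo_dma_info.filter_fn = foo_dma_filter;
 *
 *		return devm_acpi_dma_controller_register(dev,
 *							 acpi_dma_simple_xlate,
 *							 &foo_dma_info);
 *	}
 *
 * where foo_dma_filter() is an ordinary dma_filter_fn that receives the
 * struct acpi_dma_spec pointer as its filter parameter.
 */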