/*
 * fireworks_hwdep.c - a part of driver for Fireworks based devices
 *
 * Copyright (c) 2013-2014 Takashi Sakamoto
 *
 * Licensed under the terms of the GNU General Public License, version 2.
 */

/*
 * This code has five functionalities:
 *
 * 1. get information about the FireWire node
 * 2. get notification about starting/stopping streams
 * 3. lock/unlock streaming
 * 4. transmit commands of EFW transactions
 * 5. receive responses of EFW transactions
 */
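
/*
 * All of the above are exposed through a single ALSA hwdep character device:
 * the read(2) path delivers lock-status events and EFW responses, the
 * write(2) path transmits EFW commands, and ioctl(2) covers node information
 * and lock/unlock of streaming.
 *
 * A rough user-space sketch, assuming alsa-lib's hwdep API and a placeholder
 * device name (error handling omitted):
 *
 *	snd_hwdep_t *hwdep;
 *	struct snd_firewire_get_info info;
 *
 *	snd_hwdep_open(&hwdep, "hw:0,0", SND_HWDEP_OPEN_DUPLEX);
 *	snd_hwdep_ioctl(hwdep, SNDRV_FIREWIRE_IOCTL_GET_INFO, &info);
 *	snd_hwdep_close(hwdep);
 */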

#include "fireworks.h"

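/*
 * Copy queued EFW responses from the driver's ring buffer into the user
 * buffer, prefixed with SNDRV_FIREWIRE_EVENT_EFW_RESPONSE so that user space
 * can tell them apart from lock-status events.
 */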
static long
hwdep_read_resp_buf(struct snd_efw *efw, char __user *buf, long remained,
		    loff_t *offset)
{
	unsigned int length, till_end, type;
	struct snd_efw_transaction *t;
	u8 *pull_ptr;
	long count = 0;

	if (remained < sizeof(type) + sizeof(struct snd_efw_transaction))
		return -ENOSPC;

	/* data type is SNDRV_FIREWIRE_EVENT_EFW_RESPONSE */
	type = SNDRV_FIREWIRE_EVENT_EFW_RESPONSE;
	if (copy_to_user(buf, &type, sizeof(type)))
		return -EFAULT;
	remained -= sizeof(type);
	buf += sizeof(type);

	/* write into buffer as many responses as possible */
	spin_lock_irq(&efw->lock);

	/*
	 * When another task reaches here while this task accesses user space,
	 * it picks up the current position in the buffer and can read the
	 * same series of responses.
	 */
	pull_ptr = efw->pull_ptr;

	while (efw->push_ptr != pull_ptr) {
		t = (struct snd_efw_transaction *)(pull_ptr);
		length = be32_to_cpu(t->length) * sizeof(__be32);

		/* confirm enough space for this response */
		if (remained < length)
			break;

		/* copy from ring buffer to user buffer */
		while (length > 0) {
			till_end = snd_efw_resp_buf_size -
				   (unsigned int)(pull_ptr - efw->resp_buf);
			till_end = min_t(unsigned int, length, till_end);

			spin_unlock_irq(&efw->lock);

			if (copy_to_user(buf, pull_ptr, till_end))
				return -EFAULT;

			spin_lock_irq(&efw->lock);

			pull_ptr += till_end;
			if (pull_ptr >= efw->resp_buf + snd_efw_resp_buf_size)
				pull_ptr -= snd_efw_resp_buf_size;

			length -= till_end;
			buf += till_end;
			count += till_end;
			remained -= till_end;
		}
	}

	/*
	 * All tasks can read from the buffer nearly simultaneously, but the
	 * final position differs per task depending on the length of the
	 * given buffer. For simplicity, the buffer position is set by
	 * whichever task ran last. A listening application should therefore
	 * read from the buffer with a single thread; otherwise each task may
	 * see a different sequence of responses depending on the buffer
	 * lengths used.
	 */
	efw->pull_ptr = pull_ptr;

	spin_unlock_irq(&efw->lock);

	return count;
}

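/*
 * Report the current lock status to user space as a
 * SNDRV_FIREWIRE_EVENT_LOCK_STATUS event and clear the 'changed' flag.
 */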
static long
hwdep_read_locked(struct snd_efw *efw, char __user *buf, long count,
		  loff_t *offset)
{
	union snd_firewire_event event = {
		.lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS,
	};

	spin_lock_irq(&efw->lock);

	event.lock_status.status = (efw->dev_lock_count > 0);
	efw->dev_lock_changed = false;

	spin_unlock_irq(&efw->lock);

	count = min_t(long, count, sizeof(event.lock_status));

	if (copy_to_user(buf, &event, count))
		return -EFAULT;

	return count;
}

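/*
 * Block until the lock status changes or an EFW response is queued, then
 * deliver whichever event is pending. Returns -ERESTARTSYS when interrupted
 * by a signal.
 */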
static long
hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
	   loff_t *offset)
{
	struct snd_efw *efw = hwdep->private_data;
	DEFINE_WAIT(wait);
	bool dev_lock_changed;
	bool queued;

	spin_lock_irq(&efw->lock);

	dev_lock_changed = efw->dev_lock_changed;
	queued = efw->push_ptr != efw->pull_ptr;

	while (!dev_lock_changed && !queued) {
		prepare_to_wait(&efw->hwdep_wait, &wait, TASK_INTERRUPTIBLE);
		spin_unlock_irq(&efw->lock);
		schedule();
		finish_wait(&efw->hwdep_wait, &wait);
		if (signal_pending(current))
			return -ERESTARTSYS;
		spin_lock_irq(&efw->lock);
		dev_lock_changed = efw->dev_lock_changed;
		queued = efw->push_ptr != efw->pull_ptr;
	}

	spin_unlock_irq(&efw->lock);

	if (dev_lock_changed)
		count = hwdep_read_locked(efw, buf, count, offset);
	else if (queued)
		count = hwdep_read_resp_buf(efw, buf, count, offset);

	return count;
}

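/*
 * Transmit an EFW command given by user space. The sequence number is
 * checked so that user space cannot use the range reserved for kernel-land
 * transactions.
 */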
static long
hwdep_write(struct snd_hwdep *hwdep, const char __user *data, long count,
	    loff_t *offset)
{
	struct snd_efw *efw = hwdep->private_data;
	u32 seqnum;
	u8 *buf;

	if (count < sizeof(struct snd_efw_transaction) ||
	    SND_EFW_RESPONSE_MAXIMUM_BYTES < count)
		return -EINVAL;

	buf = memdup_user(data, count);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* check seqnum is not for kernel-land */
	seqnum = be32_to_cpu(((struct snd_efw_transaction *)buf)->seqnum);
	if (seqnum > SND_EFW_TRANSACTION_USER_SEQNUM_MAX) {
		count = -EINVAL;
		goto end;
	}

	if (snd_efw_transaction_cmd(efw->unit, buf, count) < 0)
		count = -EIO;
end:
	kfree(buf);
	return count;
}

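/*
 * The character device is always writable; it becomes readable when a lock
 * status change or an EFW response is pending.
 */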
static unsigned int
hwdep_poll(struct snd_hwdep *hwdep, struct file *file, poll_table *wait)
{
	struct snd_efw *efw = hwdep->private_data;
	unsigned int events;

	poll_wait(file, &efw->hwdep_wait, wait);

	spin_lock_irq(&efw->lock);
	if (efw->dev_lock_changed || efw->pull_ptr != efw->push_ptr)
		events = POLLIN | POLLRDNORM;
	else
		events = 0;
	spin_unlock_irq(&efw->lock);

	return events | POLLOUT;
}

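/*
 * Fill struct snd_firewire_get_info with the card index, GUID and device
 * name of the FireWire node for SNDRV_FIREWIRE_IOCTL_GET_INFO.
 */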
static int
hwdep_get_info(struct snd_efw *efw, void __user *arg)
{
	struct fw_device *dev = fw_parent_device(efw->unit);
	struct snd_firewire_get_info info;

	memset(&info, 0, sizeof(info));
	info.type = SNDRV_FIREWIRE_TYPE_FIREWORKS;
	info.card = dev->card->index;
	*(__be32 *)&info.guid[0] = cpu_to_be32(dev->config_rom[3]);
	*(__be32 *)&info.guid[4] = cpu_to_be32(dev->config_rom[4]);
	strlcpy(info.device_name, dev_name(&dev->device),
		sizeof(info.device_name));

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}

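/*
 * Let user space lock kernel streaming; dev_lock_count == -1 marks the
 * user-space lock. -EBUSY is returned while the device is already in use.
 */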
static int
hwdep_lock(struct snd_efw *efw)
{
	int err;

	spin_lock_irq(&efw->lock);

	if (efw->dev_lock_count == 0) {
		efw->dev_lock_count = -1;
		err = 0;
	} else {
		err = -EBUSY;
	}

	spin_unlock_irq(&efw->lock);

	return err;
}

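/*
 * Release the user-space lock. -EBADFD is returned when the lock was not
 * taken via SNDRV_FIREWIRE_IOCTL_LOCK.
 */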
static int
hwdep_unlock(struct snd_efw *efw)
{
	int err;

	spin_lock_irq(&efw->lock);

	if (efw->dev_lock_count == -1) {
		efw->dev_lock_count = 0;
		err = 0;
	} else {
		err = -EBADFD;
	}

	spin_unlock_irq(&efw->lock);

	return err;
}

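/*
 * Drop the user-space lock, if it is still held, when the hwdep device is
 * closed.
 */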
static int
hwdep_release(struct snd_hwdep *hwdep, struct file *file)
{
	struct snd_efw *efw = hwdep->private_data;

	spin_lock_irq(&efw->lock);
	if (efw->dev_lock_count == -1)
		efw->dev_lock_count = 0;
	spin_unlock_irq(&efw->lock);

	return 0;
}

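/*
 * Dispatch the ioctl commands for node information and lock/unlock of
 * streaming.
 */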
static int
hwdep_ioctl(struct snd_hwdep *hwdep, struct file *file,
	    unsigned int cmd, unsigned long arg)
{
	struct snd_efw *efw = hwdep->private_data;

	switch (cmd) {
	case SNDRV_FIREWIRE_IOCTL_GET_INFO:
		return hwdep_get_info(efw, (void __user *)arg);
	case SNDRV_FIREWIRE_IOCTL_LOCK:
		return hwdep_lock(efw);
	case SNDRV_FIREWIRE_IOCTL_UNLOCK:
		return hwdep_unlock(efw);
	default:
		return -ENOIOCTLCMD;
	}
}

#ifdef CONFIG_COMPAT
static int
hwdep_compat_ioctl(struct snd_hwdep *hwdep, struct file *file,
		   unsigned int cmd, unsigned long arg)
{
	return hwdep_ioctl(hwdep, file, cmd,
			   (unsigned long)compat_ptr(arg));
}
#else
#define hwdep_compat_ioctl NULL
#endif

static const struct snd_hwdep_ops hwdep_ops = {
	.read = hwdep_read,
	.write = hwdep_write,
	.release = hwdep_release,
	.poll = hwdep_poll,
	.ioctl = hwdep_ioctl,
	.ioctl_compat = hwdep_compat_ioctl,
};

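/*
 * Register the hwdep device on the card. It is marked exclusive so that only
 * one process can open it at a time.
 */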
int snd_efw_create_hwdep_device(struct snd_efw *efw)
{
	struct snd_hwdep *hwdep;
	int err;

	err = snd_hwdep_new(efw->card, "Fireworks", 0, &hwdep);
	if (err < 0)
		goto end;
	strcpy(hwdep->name, "Fireworks");
	hwdep->iface = SNDRV_HWDEP_IFACE_FW_FIREWORKS;
	hwdep->ops = hwdep_ops;
	hwdep->private_data = efw;
	hwdep->exclusive = true;
end:
	return err;
}