/*
 * I2O Configuration Interface Driver
 *
 * (C) Copyright 1999-2002 Red Hat
 *
 * Written by Alan Cox, Building Number Three Ltd
 *
 * Fixes/additions:
 *	Deepak Saxena (04/20/1999):
 *		Added basic ioctl() support
 *	Deepak Saxena (06/07/1999):
 *		Added software download ioctl (still testing)
 *	Auvo Häkkinen (09/10/1999):
 *		Changes to i2o_cfg_reply(), ioctl_parms()
 *		Added ioct_validate()
 *	Taneli Vähäkangas (09/30/1999):
 *		Fixed ioctl_swdl()
 *	Taneli Vähäkangas (10/04/1999):
 *		Changed ioctl_swdl(), implemented ioctl_swul() and ioctl_swdel()
 *	Deepak Saxena (11/18/1999):
 *		Added event management support
 *	Alan Cox <alan@redhat.com>:
 *		2.4 rewrite ported to 2.5
 *	Markus Lidel <Markus.Lidel@shadowconnect.com>:
 *		Added pass-thru support for Adaptec's raidutils
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/miscdevice.h>
#include <linux/smp_lock.h>
#include <linux/compat.h>

#include <asm/uaccess.h>

extern int i2o_parm_issue(struct i2o_device *, int, void *, int, void *, int);

static int i2o_cfg_ioctl(struct inode *inode, struct file *fp, unsigned int cmd,
			 unsigned long arg);

static spinlock_t i2o_config_lock;

#define MODINC(x,y) ((x) = ((x) + 1) % (y))
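
/*
 * MODINC() steps an index around the fixed-size per-file event queue,
 * wrapping back to zero at the end.  A minimal illustration of the
 * arithmetic (values chosen only for this sketch):
 *
 *	u16 idx = I2O_EVT_Q_LEN - 1;
 *	MODINC(idx, I2O_EVT_Q_LEN);	// idx becomes 0
 *	MODINC(idx, I2O_EVT_Q_LEN);	// idx becomes 1
 */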

struct sg_simple_element {
	u32 flag_count;
	u32 addr_bus;
};

struct i2o_cfg_info {
	struct file *fp;
	struct fasync_struct *fasync;
	struct i2o_evt_info event_q[I2O_EVT_Q_LEN];
	u16 q_in;		// Queue head index
	u16 q_out;		// Queue tail index
	u16 q_len;		// Queue length
	u16 q_lost;		// Number of lost events
	ulong q_id;		// Event queue ID...used as tx_context
	struct i2o_cfg_info *next;
};
static struct i2o_cfg_info *open_files = NULL;
static ulong i2o_cfg_info_id = 0;

static int i2o_cfg_getiops(unsigned long arg)
{
	struct i2o_controller *c;
	u8 __user *user_iop_table = (void __user *)arg;
	u8 tmp[MAX_I2O_CONTROLLERS];
	int ret = 0;

	memset(tmp, 0, MAX_I2O_CONTROLLERS);

	list_for_each_entry(c, &i2o_controllers, list)
		tmp[c->unit] = 1;

	if (copy_to_user(user_iop_table, tmp, MAX_I2O_CONTROLLERS))
		ret = -EFAULT;

	return ret;
};
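
/*
 * A hedged user-space sketch of I2OGETIOPS (the ioctl number and
 * MAX_I2O_CONTROLLERS come from <linux/i2o-dev.h>; the device node path
 * shown here is only an assumption and depends on how the "i2octl" misc
 * device is populated on a given system):
 *
 *	unsigned char iops[MAX_I2O_CONTROLLERS];
 *	int fd = open("/dev/i2o/ctl", O_RDWR);
 *	if (fd >= 0 && ioctl(fd, I2OGETIOPS, iops) == 0)
 *		;	// iops[unit] is non-zero for each active IOP
 */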

static int i2o_cfg_gethrt(unsigned long arg)
{
	struct i2o_controller *c;
	struct i2o_cmd_hrtlct __user *cmd = (struct i2o_cmd_hrtlct __user *)arg;
	struct i2o_cmd_hrtlct kcmd;
	i2o_hrt *hrt;
	int len;
	u32 reslen;
	int ret = 0;

	if (copy_from_user(&kcmd, cmd, sizeof(struct i2o_cmd_hrtlct)))
		return -EFAULT;

	if (get_user(reslen, kcmd.reslen) < 0)
		return -EFAULT;

	if (kcmd.resbuf == NULL)
		return -EFAULT;

	c = i2o_find_iop(kcmd.iop);
	if (!c)
		return -ENXIO;

	hrt = (i2o_hrt *) c->hrt.virt;

	len = 8 + ((hrt->entry_len * hrt->num_entries) << 2);

	/* We did a get_user()...so assuming mem is ok...is this bad? */
	put_user(len, kcmd.reslen);
	if (len > reslen)
		ret = -ENOBUFS;
	else if (copy_to_user(kcmd.resbuf, (void *)hrt, len))
		ret = -EFAULT;

	return ret;
};
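
/*
 * I2OHRTGET above and I2OLCTGET below share one convention: the real table
 * length is always written back through kcmd.reslen, and -ENOBUFS is
 * returned when the caller's resbuf is too small to hold it.  A caller can
 * therefore probe with a short buffer first and retry with the reported
 * length.
 */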

static int i2o_cfg_getlct(unsigned long arg)
{
	struct i2o_controller *c;
	struct i2o_cmd_hrtlct __user *cmd = (struct i2o_cmd_hrtlct __user *)arg;
	struct i2o_cmd_hrtlct kcmd;
	i2o_lct *lct;
	int len;
	int ret = 0;
	u32 reslen;

	if (copy_from_user(&kcmd, cmd, sizeof(struct i2o_cmd_hrtlct)))
		return -EFAULT;

	if (get_user(reslen, kcmd.reslen) < 0)
		return -EFAULT;

	if (kcmd.resbuf == NULL)
		return -EFAULT;

	c = i2o_find_iop(kcmd.iop);
	if (!c)
		return -ENXIO;

	lct = (i2o_lct *) c->lct;

	len = (unsigned int)lct->table_size << 2;
	put_user(len, kcmd.reslen);
	if (len > reslen)
		ret = -ENOBUFS;
	else if (copy_to_user(kcmd.resbuf, lct, len))
		ret = -EFAULT;

	return ret;
};

static int i2o_cfg_parms(unsigned long arg, unsigned int type)
{
	int ret = 0;
	struct i2o_controller *c;
	struct i2o_device *dev;
	struct i2o_cmd_psetget __user *cmd =
	    (struct i2o_cmd_psetget __user *)arg;
	struct i2o_cmd_psetget kcmd;
	u32 reslen;
	u8 *ops;
	u8 *res;
	int len = 0;

	u32 i2o_cmd = (type == I2OPARMGET ?
		       I2O_CMD_UTIL_PARAMS_GET : I2O_CMD_UTIL_PARAMS_SET);

	if (copy_from_user(&kcmd, cmd, sizeof(struct i2o_cmd_psetget)))
		return -EFAULT;

	if (get_user(reslen, kcmd.reslen))
		return -EFAULT;

	c = i2o_find_iop(kcmd.iop);
	if (!c)
		return -ENXIO;

	dev = i2o_iop_find_device(c, kcmd.tid);
	if (!dev)
		return -ENXIO;

	ops = (u8 *) kmalloc(kcmd.oplen, GFP_KERNEL);
	if (!ops)
		return -ENOMEM;

	if (copy_from_user(ops, kcmd.opbuf, kcmd.oplen)) {
		kfree(ops);
		return -EFAULT;
	}

	/*
	 * It's possible to have a _very_ large table
	 * and that the user asks for all of it at once...
	 */
	res = (u8 *) kmalloc(65536, GFP_KERNEL);
	if (!res) {
		kfree(ops);
		return -ENOMEM;
	}

	len = i2o_parm_issue(dev, i2o_cmd, ops, kcmd.oplen, res, 65536);
	kfree(ops);

	if (len < 0) {
		kfree(res);
		return -EAGAIN;
	}

	put_user(len, kcmd.reslen);
	if (len > reslen)
		ret = -ENOBUFS;
	else if (copy_to_user(kcmd.resbuf, res, len))
		ret = -EFAULT;

	kfree(res);

	return ret;
};
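
/*
 * I2OPARMSET/I2OPARMGET pass a caller-built ParamsSet/ParamsGet operation
 * block (kcmd.opbuf, kcmd.oplen) straight through to the target device and
 * buffer up to 64 KB of results.  As with the table ioctls above, the real
 * result length lands in kcmd.reslen and -ENOBUFS signals a short resbuf;
 * a failed transaction with the IOP is reported as -EAGAIN.
 */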

static int i2o_cfg_swdl(unsigned long arg)
{
	struct i2o_sw_xfer kxfer;
	struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg;
	unsigned char maxfrag = 0, curfrag = 1;
	struct i2o_dma buffer;
	struct i2o_message __iomem *msg;
	u32 m;
	unsigned int status = 0, swlen = 0, fragsize = 8192;
	struct i2o_controller *c;

	if (copy_from_user(&kxfer, pxfer, sizeof(struct i2o_sw_xfer)))
		return -EFAULT;

	if (get_user(swlen, kxfer.swlen) < 0)
		return -EFAULT;

	if (get_user(maxfrag, kxfer.maxfrag) < 0)
		return -EFAULT;

	if (get_user(curfrag, kxfer.curfrag) < 0)
		return -EFAULT;

	if (curfrag == maxfrag)
		fragsize = swlen - (maxfrag - 1) * 8192;

	if (!kxfer.buf || !access_ok(VERIFY_READ, kxfer.buf, fragsize))
		return -EFAULT;

	c = i2o_find_iop(kxfer.iop);
	if (!c)
		return -ENXIO;

	m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
	if (m == I2O_QUEUE_EMPTY)
		return -EBUSY;

	if (i2o_dma_alloc(&c->pdev->dev, &buffer, fragsize, GFP_KERNEL)) {
		i2o_msg_nop(c, m);
		return -ENOMEM;
	}

	__copy_from_user(buffer.virt, kxfer.buf, fragsize);

	writel(NINE_WORD_MSG_SIZE | SGL_OFFSET_7, &msg->u.head[0]);
	writel(I2O_CMD_SW_DOWNLOAD << 24 | HOST_TID << 12 | ADAPTER_TID,
	       &msg->u.head[1]);
	writel(i2o_config_driver.context, &msg->u.head[2]);
	writel(0, &msg->u.head[3]);
	writel((((u32) kxfer.flags) << 24) | (((u32) kxfer.sw_type) << 16) |
	       (((u32) maxfrag) << 8) | (((u32) curfrag)), &msg->body[0]);
	writel(swlen, &msg->body[1]);
	writel(kxfer.sw_id, &msg->body[2]);
	writel(0xD0000000 | fragsize, &msg->body[3]);
	writel(buffer.phys, &msg->body[4]);

	osm_debug("swdl frag %d/%d (size %d)\n", curfrag, maxfrag, fragsize);
	status = i2o_msg_post_wait_mem(c, m, 60, &buffer);

	if (status != -ETIMEDOUT)
		i2o_dma_free(&c->pdev->dev, &buffer);

	if (status != I2O_POST_WAIT_OK) {
		// it fails if you try and send frags out of order
		// and for some yet unknown reasons too
		osm_info("swdl failed, DetailedStatus = %d\n", status);
		return status;
	}

	return 0;
};
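
/*
 * Software download is fragment oriented: user space slices the image into
 * 8 KB pieces and issues one I2OSWDL call per fragment, advancing the value
 * behind kxfer.curfrag each time.  Only the final fragment (curfrag ==
 * maxfrag) may be shorter than 8 KB, and as the note above says, the IOP
 * rejects fragments that arrive out of order.  I2OSWUL below mirrors this
 * scheme for uploads.
 */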

static int i2o_cfg_swul(unsigned long arg)
{
	struct i2o_sw_xfer kxfer;
	struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg;
	unsigned char maxfrag = 0, curfrag = 1;
	struct i2o_dma buffer;
	struct i2o_message __iomem *msg;
	u32 m;
	unsigned int status = 0, swlen = 0, fragsize = 8192;
	struct i2o_controller *c;
	int ret = 0;

	if (copy_from_user(&kxfer, pxfer, sizeof(struct i2o_sw_xfer)))
		goto return_fault;

	if (get_user(swlen, kxfer.swlen) < 0)
		goto return_fault;

	if (get_user(maxfrag, kxfer.maxfrag) < 0)
		goto return_fault;

	if (get_user(curfrag, kxfer.curfrag) < 0)
		goto return_fault;

	if (curfrag == maxfrag)
		fragsize = swlen - (maxfrag - 1) * 8192;

	if (!kxfer.buf)
		goto return_fault;

	c = i2o_find_iop(kxfer.iop);
	if (!c)
		return -ENXIO;

	m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
	if (m == I2O_QUEUE_EMPTY)
		return -EBUSY;

	if (i2o_dma_alloc(&c->pdev->dev, &buffer, fragsize, GFP_KERNEL)) {
		i2o_msg_nop(c, m);
		return -ENOMEM;
	}

	writel(NINE_WORD_MSG_SIZE | SGL_OFFSET_7, &msg->u.head[0]);
	writel(I2O_CMD_SW_UPLOAD << 24 | HOST_TID << 12 | ADAPTER_TID,
	       &msg->u.head[1]);
	writel(i2o_config_driver.context, &msg->u.head[2]);
	writel(0, &msg->u.head[3]);
	writel((u32) kxfer.flags << 24 | (u32) kxfer.sw_type << 16 |
	       (u32) maxfrag << 8 | (u32) curfrag, &msg->body[0]);
	writel(swlen, &msg->body[1]);
	writel(kxfer.sw_id, &msg->body[2]);
	writel(0xD0000000 | fragsize, &msg->body[3]);
	writel(buffer.phys, &msg->body[4]);

	osm_debug("swul frag %d/%d (size %d)\n", curfrag, maxfrag, fragsize);
	status = i2o_msg_post_wait_mem(c, m, 60, &buffer);

	if (status != I2O_POST_WAIT_OK) {
		if (status != -ETIMEDOUT)
			i2o_dma_free(&c->pdev->dev, &buffer);

		osm_info("swul failed, DetailedStatus = %d\n", status);
		return status;
	}

	if (copy_to_user(kxfer.buf, buffer.virt, fragsize))
		ret = -EFAULT;

	i2o_dma_free(&c->pdev->dev, &buffer);

return_ret:
	return ret;
return_fault:
	ret = -EFAULT;
	goto return_ret;
};

static int i2o_cfg_swdel(unsigned long arg)
{
	struct i2o_controller *c;
	struct i2o_sw_xfer kxfer;
	struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg;
	struct i2o_message __iomem *msg;
	u32 m;
	unsigned int swlen;
	int token;

	if (copy_from_user(&kxfer, pxfer, sizeof(struct i2o_sw_xfer)))
		return -EFAULT;

	if (get_user(swlen, kxfer.swlen) < 0)
		return -EFAULT;

	c = i2o_find_iop(kxfer.iop);
	if (!c)
		return -ENXIO;

	m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
	if (m == I2O_QUEUE_EMPTY)
		return -EBUSY;

	writel(SEVEN_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
	writel(I2O_CMD_SW_REMOVE << 24 | HOST_TID << 12 | ADAPTER_TID,
	       &msg->u.head[1]);
	writel(i2o_config_driver.context, &msg->u.head[2]);
	writel(0, &msg->u.head[3]);
	writel((u32) kxfer.flags << 24 | (u32) kxfer.sw_type << 16,
	       &msg->body[0]);
	writel(swlen, &msg->body[1]);
	writel(kxfer.sw_id, &msg->body[2]);

	token = i2o_msg_post_wait(c, m, 10);

	if (token != I2O_POST_WAIT_OK) {
		osm_info("swdel failed, DetailedStatus = %d\n", token);
		return -ETIMEDOUT;
	}

	return 0;
};

static int i2o_cfg_validate(unsigned long arg)
{
	int token;
	int iop = (int)arg;
	struct i2o_message __iomem *msg;
	u32 m;
	struct i2o_controller *c;

	c = i2o_find_iop(iop);
	if (!c)
		return -ENXIO;

	m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
	if (m == I2O_QUEUE_EMPTY)
		return -EBUSY;

	writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
	writel(I2O_CMD_CONFIG_VALIDATE << 24 | HOST_TID << 12 | iop,
	       &msg->u.head[1]);
	writel(i2o_config_driver.context, &msg->u.head[2]);
	writel(0, &msg->u.head[3]);

	token = i2o_msg_post_wait(c, m, 10);

	if (token != I2O_POST_WAIT_OK) {
		osm_info("Can't validate configuration, ErrorStatus = %d\n",
			 token);
		return -ETIMEDOUT;
	}

	return 0;
};

static int i2o_cfg_evt_reg(unsigned long arg, struct file *fp)
{
	struct i2o_message __iomem *msg;
	u32 m;
	struct i2o_evt_id __user *pdesc = (struct i2o_evt_id __user *)arg;
	struct i2o_evt_id kdesc;
	struct i2o_controller *c;
	struct i2o_device *d;

	if (copy_from_user(&kdesc, pdesc, sizeof(struct i2o_evt_id)))
		return -EFAULT;

	/* IOP exists? */
	c = i2o_find_iop(kdesc.iop);
	if (!c)
		return -ENXIO;

	/* Device exists? */
	d = i2o_iop_find_device(c, kdesc.tid);
	if (!d)
		return -ENODEV;

	m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
	if (m == I2O_QUEUE_EMPTY)
		return -EBUSY;

	writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
	writel(I2O_CMD_UTIL_EVT_REGISTER << 24 | HOST_TID << 12 | kdesc.tid,
	       &msg->u.head[1]);
	writel(i2o_config_driver.context, &msg->u.head[2]);
	writel(i2o_cntxt_list_add(c, fp->private_data), &msg->u.head[3]);
	writel(kdesc.evt_mask, &msg->body[0]);

	i2o_msg_post(c, m);

	return 0;
}
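
/*
 * The event registration above stores this file's queue id (the value kept
 * in fp->private_data) in the controller's context list and sends it as the
 * transaction context of the UtilEventRegister message, so incoming event
 * replies can be routed back to the open file that asked for them.
 */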

static int i2o_cfg_evt_get(unsigned long arg, struct file *fp)
{
	struct i2o_cfg_info *p = NULL;
	struct i2o_evt_get __user *uget = (struct i2o_evt_get __user *)arg;
	struct i2o_evt_get kget;
	unsigned long flags;

	for (p = open_files; p; p = p->next)
		if (p->q_id == (ulong) fp->private_data)
			break;

	if (!p || !p->q_len)
		return -ENOENT;

	memcpy(&kget.info, &p->event_q[p->q_out], sizeof(struct i2o_evt_info));
	MODINC(p->q_out, I2O_EVT_Q_LEN);
	spin_lock_irqsave(&i2o_config_lock, flags);
	p->q_len--;
	kget.pending = p->q_len;
	kget.lost = p->q_lost;
	spin_unlock_irqrestore(&i2o_config_lock, flags);

	if (copy_to_user(uget, &kget, sizeof(struct i2o_evt_get)))
		return -EFAULT;
	return 0;
}
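
/*
 * Events are drained one per I2OEVTGET call: kget.pending reports how many
 * are still queued and kget.lost how many were dropped because the
 * I2O_EVT_Q_LEN-entry ring overflowed.  Consumers can enable FASYNC on the
 * file descriptor (see cfg_fasync() below) to be notified asynchronously
 * instead of polling.
 */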

#ifdef CONFIG_I2O_EXT_ADAPTEC
#ifdef CONFIG_COMPAT
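/*
 * The pass-through ioctls hand a raw, caller-built I2O message frame to the
 * IOP (used by Adaptec's raidutils).  The handler copies the frame in, walks
 * any simple scatter-gather elements it carries, substitutes kernel bounce
 * buffers for the user addresses, posts the message, and then copies the SG
 * data and the reply frame back out.  This 32-bit compat variant differs
 * from i2o_cfg_passthru() below mainly in translating user pointers with
 * compat_ptr() and in using i2o_dma buffers for the SG list.
 */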
static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
			      unsigned long arg)
{
	struct i2o_cmd_passthru32 __user *cmd;
	struct i2o_controller *c;
	u32 __user *user_msg;
	u32 *reply = NULL;
	u32 __user *user_reply = NULL;
	u32 size = 0;
	u32 reply_size = 0;
	u32 rcode = 0;
	struct i2o_dma sg_list[SG_TABLESIZE];
	u32 sg_offset = 0;
	u32 sg_count = 0;
	u32 i = 0;
	u32 sg_index = 0;
	i2o_status_block *sb;
	struct i2o_message *msg;
	u32 m;
	unsigned int iop;

	cmd = (struct i2o_cmd_passthru32 __user *)arg;

	if (get_user(iop, &cmd->iop) || get_user(i, &cmd->msg))
		return -EFAULT;

	user_msg = compat_ptr(i);

	c = i2o_find_iop(iop);
	if (!c) {
		osm_debug("controller %d not found\n", iop);
		return -ENXIO;
	}

	m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);

	sb = c->status_block.virt;

	if (get_user(size, &user_msg[0])) {
		osm_warn("unable to get size!\n");
		return -EFAULT;
	}
	size = size >> 16;

	if (size > sb->inbound_frame_size) {
		osm_warn("size of message > inbound_frame_size");
		return -EFAULT;
	}

	user_reply = &user_msg[size];

	size <<= 2;		// Convert to bytes

	/* Copy in the user's I2O command */
	if (copy_from_user(msg, user_msg, size)) {
		osm_warn("unable to copy user message\n");
		return -EFAULT;
	}
	i2o_dump_message(msg);

	if (get_user(reply_size, &user_reply[0]) < 0)
		return -EFAULT;

	reply_size >>= 16;
	reply_size <<= 2;

	reply = kmalloc(reply_size, GFP_KERNEL);
	if (!reply) {
		printk(KERN_WARNING "%s: Could not allocate reply buffer\n",
		       c->name);
		return -ENOMEM;
	}
	memset(reply, 0, reply_size);

	sg_offset = (msg->u.head[0] >> 4) & 0x0f;

	writel(i2o_config_driver.context, &msg->u.s.icntxt);
	writel(i2o_cntxt_list_add(c, reply), &msg->u.s.tcntxt);

	memset(sg_list, 0, sizeof(sg_list[0]) * SG_TABLESIZE);
	if (sg_offset) {
		struct sg_simple_element *sg;

		if (sg_offset * 4 >= size) {
			rcode = -EFAULT;
			goto cleanup;
		}
		// TODO 64bit fix
		sg = (struct sg_simple_element *)((&msg->u.head[0]) +
						  sg_offset);
		sg_count =
		    (size - sg_offset * 4) / sizeof(struct sg_simple_element);
		if (sg_count > SG_TABLESIZE) {
			printk(KERN_DEBUG "%s:IOCTL SG List too large (%u)\n",
			       c->name, sg_count);
			rcode = -EINVAL;
			goto cleanup;
		}

		for (i = 0; i < sg_count; i++) {
			int sg_size;
			struct i2o_dma *p;

			if (!(sg[i].flag_count & 0x10000000
			      /* I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT */)) {
				printk(KERN_DEBUG
				       "%s:Bad SG element %d - not simple (%x)\n",
				       c->name, i, sg[i].flag_count);
				rcode = -EINVAL;
				goto cleanup;
			}
			sg_size = sg[i].flag_count & 0xffffff;
			p = &(sg_list[sg_index++]);
			/* Allocate memory for the transfer */
			if (i2o_dma_alloc(&c->pdev->dev, p, sg_size,
					  PCI_DMA_BIDIRECTIONAL)) {
				printk(KERN_DEBUG
				       "%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
				       c->name, sg_size, i, sg_count);
				rcode = -ENOMEM;
				goto sg_list_cleanup;
			}
			/* Copy in the user's SG buffer if necessary */
			if (sg[i].flag_count & 0x04000000
			    /* I2O_SGL_FLAGS_DIR */) {
				// TODO 64bit fix
				if (copy_from_user(p->virt,
						   (void __user *)(unsigned long)
						   sg[i].addr_bus, sg_size)) {
					printk(KERN_DEBUG
					       "%s: Could not copy SG buf %d FROM user\n",
					       c->name, i);
					rcode = -EFAULT;
					goto sg_list_cleanup;
				}
			}
			//TODO 64bit fix
			sg[i].addr_bus = (u32) p->phys;
		}
	}

	rcode = i2o_msg_post_wait(c, m, 60);
	if (rcode)
		goto sg_list_cleanup;

	if (sg_offset) {
		u32 msg[MSG_FRAME_SIZE];
		/* Copy back the Scatter Gather buffers back to user space */
		u32 j;
		// TODO 64bit fix
		struct sg_simple_element *sg;
		int sg_size;

		// re-acquire the original message to handle correctly the sg copy operation
		memset(&msg, 0, MSG_FRAME_SIZE * 4);
		// get user msg size in u32s
		if (get_user(size, &user_msg[0])) {
			rcode = -EFAULT;
			goto sg_list_cleanup;
		}
		size = size >> 16;
		size *= 4;
		/* Copy in the user's I2O command */
		if (copy_from_user(msg, user_msg, size)) {
			rcode = -EFAULT;
			goto sg_list_cleanup;
		}
		sg_count =
		    (size - sg_offset * 4) / sizeof(struct sg_simple_element);

		// TODO 64bit fix
		sg = (struct sg_simple_element *)(msg + sg_offset);
		for (j = 0; j < sg_count; j++) {
			/* Copy out the SG list to user's buffer if necessary */
			if (!(sg[j].flag_count & 0x4000000
			      /* I2O_SGL_FLAGS_DIR */)) {
				sg_size = sg[j].flag_count & 0xffffff;
				// TODO 64bit fix
				if (copy_to_user
				    ((void __user *)(u64) sg[j].addr_bus,
				     sg_list[j].virt, sg_size)) {
					printk(KERN_WARNING
					       "%s: Could not copy %p TO user %x\n",
					       c->name, sg_list[j].virt,
					       sg[j].addr_bus);
					rcode = -EFAULT;
					goto sg_list_cleanup;
				}
			}
		}
	}

	/* Copy back the reply to user space */
	if (reply_size) {
		// we wrote our own values for context - now restore the user supplied ones
		if (copy_from_user(reply + 2, user_msg + 2, sizeof(u32) * 2)) {
			printk(KERN_WARNING
			       "%s: Could not copy message context FROM user\n",
			       c->name);
			rcode = -EFAULT;
			goto sg_list_cleanup;
		}
		if (copy_to_user(user_reply, reply, reply_size)) {
			printk(KERN_WARNING
			       "%s: Could not copy reply TO user\n", c->name);
			rcode = -EFAULT;
		}
	}

sg_list_cleanup:
	for (i = 0; i < sg_index; i++)
		i2o_dma_free(&c->pdev->dev, &sg_list[i]);

cleanup:
	kfree(reply);
	return rcode;
}

static long i2o_cfg_compat_ioctl(struct file *file, unsigned cmd,
				 unsigned long arg)
{
	int ret;
	lock_kernel();
	switch (cmd) {
	case I2OGETIOPS:
		ret = i2o_cfg_ioctl(NULL, file, cmd, arg);
		break;
	case I2OPASSTHRU32:
		ret = i2o_cfg_passthru32(file, cmd, arg);
		break;
	default:
		ret = -ENOIOCTLCMD;
		break;
	}
	unlock_kernel();
	return ret;
}

#endif
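
/*
 * For 32-bit callers on a 64-bit kernel only two commands are handled:
 * I2OGETIOPS is forwarded unchanged and I2OPASSTHRU32 gets the dedicated
 * pointer-translating handler above; everything else is rejected with
 * -ENOIOCTLCMD.
 */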

static int i2o_cfg_passthru(unsigned long arg)
{
	struct i2o_cmd_passthru __user *cmd =
	    (struct i2o_cmd_passthru __user *)arg;
	struct i2o_controller *c;
	u32 __user *user_msg;
	u32 *reply = NULL;
	u32 __user *user_reply = NULL;
	u32 size = 0;
	u32 reply_size = 0;
	u32 rcode = 0;
	void *sg_list[SG_TABLESIZE];
	u32 sg_offset = 0;
	u32 sg_count = 0;
	int sg_index = 0;
	u32 i = 0;
	void *p = NULL;
	i2o_status_block *sb;
	struct i2o_message __iomem *msg;
	u32 m;
	unsigned int iop;

	if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
		return -EFAULT;

	c = i2o_find_iop(iop);
	if (!c) {
		osm_warn("controller %d not found\n", iop);
		return -ENXIO;
	}

	m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);

	sb = c->status_block.virt;

	if (get_user(size, &user_msg[0]))
		return -EFAULT;
	size = size >> 16;

	if (size > sb->inbound_frame_size) {
		osm_warn("size of message > inbound_frame_size");
		return -EFAULT;
	}

	user_reply = &user_msg[size];

	size <<= 2;		// Convert to bytes

	/* Copy in the user's I2O command */
	if (copy_from_user(msg, user_msg, size))
		return -EFAULT;

	if (get_user(reply_size, &user_reply[0]) < 0)
		return -EFAULT;

	reply_size >>= 16;
	reply_size <<= 2;

	reply = kmalloc(reply_size, GFP_KERNEL);
	if (!reply) {
		printk(KERN_WARNING "%s: Could not allocate reply buffer\n",
		       c->name);
		return -ENOMEM;
	}
	memset(reply, 0, reply_size);

	sg_offset = (msg->u.head[0] >> 4) & 0x0f;

	writel(i2o_config_driver.context, &msg->u.s.icntxt);
	writel(i2o_cntxt_list_add(c, reply), &msg->u.s.tcntxt);

	memset(sg_list, 0, sizeof(sg_list[0]) * SG_TABLESIZE);
	if (sg_offset) {
		struct sg_simple_element *sg;

		if (sg_offset * 4 >= size) {
			rcode = -EFAULT;
			goto cleanup;
		}
		// TODO 64bit fix
		sg = (struct sg_simple_element *)((&msg->u.head[0]) +
						  sg_offset);
		sg_count =
		    (size - sg_offset * 4) / sizeof(struct sg_simple_element);
		if (sg_count > SG_TABLESIZE) {
			printk(KERN_DEBUG "%s:IOCTL SG List too large (%u)\n",
			       c->name, sg_count);
			rcode = -EINVAL;
			goto cleanup;
		}

		for (i = 0; i < sg_count; i++) {
			int sg_size;

			if (!(sg[i].flag_count & 0x10000000
			      /* I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT */)) {
				printk(KERN_DEBUG
				       "%s:Bad SG element %d - not simple (%x)\n",
				       c->name, i, sg[i].flag_count);
				rcode = -EINVAL;
				goto sg_list_cleanup;
			}
			sg_size = sg[i].flag_count & 0xffffff;
			/* Allocate memory for the transfer */
			p = kmalloc(sg_size, GFP_KERNEL);
			if (!p) {
				printk(KERN_DEBUG
				       "%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
				       c->name, sg_size, i, sg_count);
				rcode = -ENOMEM;
				goto sg_list_cleanup;
			}
			// sglist indexed with input frame, not our internal frame.
			sg_list[sg_index++] = p;
			/* Copy in the user's SG buffer if necessary */
			if (sg[i].flag_count & 0x04000000
			    /* I2O_SGL_FLAGS_DIR */) {
				// TODO 64bit fix
				if (copy_from_user
				    (p, (void __user *)sg[i].addr_bus,
				     sg_size)) {
					printk(KERN_DEBUG
					       "%s: Could not copy SG buf %d FROM user\n",
					       c->name, i);
					rcode = -EFAULT;
					goto sg_list_cleanup;
				}
			}
			//TODO 64bit fix
			sg[i].addr_bus = virt_to_bus(p);
		}
	}

	rcode = i2o_msg_post_wait(c, m, 60);
	if (rcode)
		goto sg_list_cleanup;

	if (sg_offset) {
		u32 msg[128];
		/* Copy back the Scatter Gather buffers back to user space */
		u32 j;
		// TODO 64bit fix
		struct sg_simple_element *sg;
		int sg_size;

		// re-acquire the original message to handle correctly the sg copy operation
		memset(&msg, 0, MSG_FRAME_SIZE * 4);
		// get user msg size in u32s
		if (get_user(size, &user_msg[0])) {
			rcode = -EFAULT;
			goto sg_list_cleanup;
		}
		size = size >> 16;
		size *= 4;
		/* Copy in the user's I2O command */
		if (copy_from_user(msg, user_msg, size)) {
			rcode = -EFAULT;
			goto sg_list_cleanup;
		}
		sg_count =
		    (size - sg_offset * 4) / sizeof(struct sg_simple_element);

		// TODO 64bit fix
		sg = (struct sg_simple_element *)(msg + sg_offset);
		for (j = 0; j < sg_count; j++) {
			/* Copy out the SG list to user's buffer if necessary */
			if (!(sg[j].flag_count & 0x4000000
			      /* I2O_SGL_FLAGS_DIR */)) {
				sg_size = sg[j].flag_count & 0xffffff;
				// TODO 64bit fix
				if (copy_to_user
				    ((void __user *)sg[j].addr_bus, sg_list[j],
				     sg_size)) {
					printk(KERN_WARNING
					       "%s: Could not copy %p TO user %x\n",
					       c->name, sg_list[j],
					       sg[j].addr_bus);
					rcode = -EFAULT;
					goto sg_list_cleanup;
				}
			}
		}
	}

	/* Copy back the reply to user space */
	if (reply_size) {
		// we wrote our own values for context - now restore the user supplied ones
		if (copy_from_user(reply + 2, user_msg + 2, sizeof(u32) * 2)) {
			printk(KERN_WARNING
			       "%s: Could not copy message context FROM user\n",
			       c->name);
			rcode = -EFAULT;
		}
		if (copy_to_user(user_reply, reply, reply_size)) {
			printk(KERN_WARNING
			       "%s: Could not copy reply TO user\n", c->name);
			rcode = -EFAULT;
		}
	}

sg_list_cleanup:
	for (i = 0; i < sg_index; i++)
		kfree(sg_list[i]);

cleanup:
	kfree(reply);
	return rcode;
}
#endif

/*
 * IOCTL Handler
 */
static int i2o_cfg_ioctl(struct inode *inode, struct file *fp, unsigned int cmd,
			 unsigned long arg)
{
	int ret;

	switch (cmd) {
	case I2OGETIOPS:
		ret = i2o_cfg_getiops(arg);
		break;

	case I2OHRTGET:
		ret = i2o_cfg_gethrt(arg);
		break;

	case I2OLCTGET:
		ret = i2o_cfg_getlct(arg);
		break;

	case I2OPARMSET:
		ret = i2o_cfg_parms(arg, I2OPARMSET);
		break;

	case I2OPARMGET:
		ret = i2o_cfg_parms(arg, I2OPARMGET);
		break;

	case I2OSWDL:
		ret = i2o_cfg_swdl(arg);
		break;

	case I2OSWUL:
		ret = i2o_cfg_swul(arg);
		break;

	case I2OSWDEL:
		ret = i2o_cfg_swdel(arg);
		break;

	case I2OVALIDATE:
		ret = i2o_cfg_validate(arg);
		break;

	case I2OEVTREG:
		ret = i2o_cfg_evt_reg(arg, fp);
		break;

	case I2OEVTGET:
		ret = i2o_cfg_evt_get(arg, fp);
		break;

#ifdef CONFIG_I2O_EXT_ADAPTEC
	case I2OPASSTHRU:
		ret = i2o_cfg_passthru(arg);
		break;
#endif

	default:
		osm_debug("unknown ioctl called!\n");
		ret = -EINVAL;
	}

	return ret;
}
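
/*
 * Most commands take a pointer to a request structure in arg, but
 * I2OVALIDATE passes the IOP number directly.  A hedged user-space sketch
 * (the device node path is an assumption, see the note at I2OGETIOPS):
 *
 *	int unit = 0;
 *	int fd = open("/dev/i2o/ctl", O_RDWR);
 *	if (fd >= 0 && ioctl(fd, I2OVALIDATE, unit) == 0)
 *		;	// configuration of IOP 0 validated by the IOP
 */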

static int cfg_open(struct inode *inode, struct file *file)
{
	struct i2o_cfg_info *tmp =
	    (struct i2o_cfg_info *)kmalloc(sizeof(struct i2o_cfg_info),
					   GFP_KERNEL);
	unsigned long flags;

	if (!tmp)
		return -ENOMEM;

	file->private_data = (void *)(i2o_cfg_info_id++);
	tmp->fp = file;
	tmp->fasync = NULL;
	tmp->q_id = (ulong) file->private_data;
	tmp->q_len = 0;
	tmp->q_in = 0;
	tmp->q_out = 0;
	tmp->q_lost = 0;
	tmp->next = open_files;

	spin_lock_irqsave(&i2o_config_lock, flags);
	open_files = tmp;
	spin_unlock_irqrestore(&i2o_config_lock, flags);

	return 0;
}

static int cfg_fasync(int fd, struct file *fp, int on)
{
	ulong id = (ulong) fp->private_data;
	struct i2o_cfg_info *p;

	for (p = open_files; p; p = p->next)
		if (p->q_id == id)
			break;

	if (!p)
		return -EBADF;

	return fasync_helper(fd, fp, on, &p->fasync);
}

static int cfg_release(struct inode *inode, struct file *file)
{
	ulong id = (ulong) file->private_data;
	struct i2o_cfg_info *p1, *p2;
	unsigned long flags;

	lock_kernel();
	p1 = p2 = NULL;

	spin_lock_irqsave(&i2o_config_lock, flags);
	for (p1 = open_files; p1;) {
		if (p1->q_id == id) {

			if (p1->fasync)
				cfg_fasync(-1, file, 0);
			if (p2)
				p2->next = p1->next;
			else
				open_files = p1->next;

			kfree(p1);
			break;
		}
		p2 = p1;
		p1 = p1->next;
	}
	spin_unlock_irqrestore(&i2o_config_lock, flags);
	unlock_kernel();

	return 0;
}

static struct file_operations config_fops = {
	.owner = THIS_MODULE,
	.llseek = no_llseek,
	.ioctl = i2o_cfg_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = i2o_cfg_compat_ioctl,
#endif
	.open = cfg_open,
	.release = cfg_release,
	.fasync = cfg_fasync,
};

static struct miscdevice i2o_miscdev = {
	I2O_MINOR,
	"i2octl",
	&config_fops
};

static int __init i2o_config_old_init(void)
{
	spin_lock_init(&i2o_config_lock);

	if (misc_register(&i2o_miscdev) < 0) {
		osm_err("can't register device.\n");
		return -EBUSY;
	}
	return 0;
}

static void i2o_config_old_exit(void)
{
	misc_deregister(&i2o_miscdev);
}

MODULE_AUTHOR("Red Hat Software");