blob: 22415643526167fc584ecf56905a07fe4e68002b [file] [log] [blame]
josh.h.morris@us.ibm.com8722ff82013-02-05 14:15:02 +01001/*
2* Filename: cregs.c
3*
4*
5* Authors: Joshua Morris <josh.h.morris@us.ibm.com>
6* Philip Kelleher <pjk1939@linux.vnet.ibm.com>
7*
8* (C) Copyright 2013 IBM Corporation
9*
10* This program is free software; you can redistribute it and/or
11* modify it under the terms of the GNU General Public License as
12* published by the Free Software Foundation; either version 2 of the
13* License, or (at your option) any later version.
14*
15* This program is distributed in the hope that it will be useful, but
16* WITHOUT ANY WARRANTY; without even the implied warranty of
17* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18* General Public License for more details.
19*
20* You should have received a copy of the GNU General Public License
21* along with this program; if not, write to the Free Software Foundation,
22* Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23*/
24
25#include <linux/completion.h>
26#include <linux/slab.h>
27
28#include "rsxx_priv.h"
29
/* Per-command completion timeout, in milliseconds. */
#define CREG_TIMEOUT_MSEC 10000

/* Completion callback for a creg command; st is 0 or a -errno value. */
typedef void (*creg_cmd_cb)(struct rsxx_cardinfo *card,
			    struct creg_cmd *cmd,
			    int st);

/* One queued control-register (creg) request. */
struct creg_cmd {
	struct list_head list;		/* link in creg_ctrl.queue */
	creg_cmd_cb cb;			/* completion callback, may be NULL */
	void *cb_private;		/* opaque data handed to cb */
	unsigned int op;		/* CREG_OP_READ or CREG_OP_WRITE */
	unsigned int addr;		/* creg address */
	int cnt8;			/* transfer length in bytes */
	void *buf;			/* caller-owned data buffer */
	unsigned int stream;		/* nonzero: byte-stream (swapped) access */
	unsigned int status;		/* HW status latched at completion */
};

/* Slab cache backing all struct creg_cmd allocations. */
static struct kmem_cache *creg_cmd_pool;
49
50
51/*------------ Private Functions --------------*/
52
/*
 * Resolve host endianness once; used by the copy helpers to decide
 * whether stream data must be byte swapped for the firmware.
 * NOTE(review): defining a macro literally named LITTLE_ENDIAN may
 * collide with the LITTLE_ENDIAN/BIG_ENDIAN macros some kernel
 * byteorder headers provide -- a driver-private name would be safer.
 * TODO confirm against the target architectures' headers.
 */
#if defined(__LITTLE_ENDIAN)
#define LITTLE_ENDIAN 1
#elif defined(__BIG_ENDIAN)
#define LITTLE_ENDIAN 0
#else
#error Unknown endianess!!! Aborting...
#endif
60
/*
 * Copy a cnt8-byte buffer into the HW CREG data registers, one 32-bit
 * word at a time.  Stream accesses on little-endian hosts are written
 * byte-swapped (iowrite32be) to match the firmware's byte order.
 * cnt8 need not be a multiple of 4; the final partial word is written
 * whole.
 */
static void copy_to_creg_data(struct rsxx_cardinfo *card,
			      int cnt8,
			      void *buf,
			      unsigned int stream)
{
	int i = 0;
	u32 *data = buf;

	for (i = 0; cnt8 > 0; i++, cnt8 -= 4) {
		/*
		 * Firmware implementation makes it necessary to byte swap on
		 * little endian processors.
		 */
		if (LITTLE_ENDIAN && stream)
			iowrite32be(data[i], card->regmap + CREG_DATA(i));
		else
			iowrite32(data[i], card->regmap + CREG_DATA(i));
	}
}
80
81
/*
 * Inverse of copy_to_creg_data(): read cnt8 bytes out of the HW CREG
 * data registers into buf, byte-swapping stream data on little-endian
 * hosts.  The final word is read whole even if cnt8 is not a multiple
 * of 4, so buf must be rounded up to a 32-bit boundary.
 */
static void copy_from_creg_data(struct rsxx_cardinfo *card,
				int cnt8,
				void *buf,
				unsigned int stream)
{
	int i = 0;
	u32 *data = buf;

	for (i = 0; cnt8 > 0; i++, cnt8 -= 4) {
		/*
		 * Firmware implementation makes it necessary to byte swap on
		 * little endian processors.
		 */
		if (LITTLE_ENDIAN && stream)
			data[i] = ioread32be(card->regmap + CREG_DATA(i));
		else
			data[i] = ioread32(card->regmap + CREG_DATA(i));
	}
}
101
/*
 * Atomically detach and return the in-flight command, or NULL if there
 * is none.  Both the timeout path and the completion path call this;
 * only the caller that wins the race gets to finish the command.
 */
static struct creg_cmd *pop_active_cmd(struct rsxx_cardinfo *card)
{
	struct creg_cmd *cmd;

	/*
	 * Spin lock is needed because this can be called in atomic/interrupt
	 * context.
	 */
	spin_lock_bh(&card->creg_ctrl.lock);
	cmd = card->creg_ctrl.active_cmd;
	card->creg_ctrl.active_cmd = NULL;
	spin_unlock_bh(&card->creg_ctrl.lock);

	return cmd;
}
117
/*
 * Program the address, count, and (for writes) data registers for @cmd,
 * then start the command by writing the opcode register last -- that
 * write carries the valid bit which triggers the hardware.
 */
static void creg_issue_cmd(struct rsxx_cardinfo *card, struct creg_cmd *cmd)
{
	iowrite32(cmd->addr, card->regmap + CREG_ADD);
	iowrite32(cmd->cnt8, card->regmap + CREG_CNT);

	if (cmd->op == CREG_OP_WRITE) {
		if (cmd->buf)
			copy_to_creg_data(card, cmd->cnt8,
					  cmd->buf, cmd->stream);
	}

	/* Setting the valid bit will kick off the command. */
	iowrite32(cmd->op, card->regmap + CREG_CMD);
}
132
/*
 * Dequeue and issue the next command if the interface is idle.
 * Caller must hold creg_ctrl.lock (every call site in this file takes
 * it around this function).
 */
static void creg_kick_queue(struct rsxx_cardinfo *card)
{
	if (card->creg_ctrl.active || list_empty(&card->creg_ctrl.queue))
		return;

	card->creg_ctrl.active = 1;
	card->creg_ctrl.active_cmd = list_first_entry(&card->creg_ctrl.queue,
						      struct creg_cmd, list);
	list_del(&card->creg_ctrl.active_cmd->list);
	card->creg_ctrl.q_depth--;

	/*
	 * We have to set the timer before we push the new command. Otherwise,
	 * we could create a race condition that would occur if the timer
	 * was not canceled, and expired after the new command was pushed,
	 * but before the command was issued to hardware.
	 */
	mod_timer(&card->creg_ctrl.cmd_timer,
		  jiffies + msecs_to_jiffies(CREG_TIMEOUT_MSEC));

	creg_issue_cmd(card, card->creg_ctrl.active_cmd);
}
155
/*
 * Allocate and enqueue a creg command, issuing it immediately if the
 * interface is idle.
 *
 * @op:		CREG_OP_READ or CREG_OP_WRITE
 * @addr:	creg address
 * @cnt8:	transfer size in bytes, at most MAX_CREG_DATA8
 * @buf:	data buffer; ownership stays with the caller
 * @stream:	nonzero for byte-stream (endian-swapped) access
 * @callback:	invoked on completion, timeout, or cancellation
 * @cb_private:	opaque pointer passed through to @callback
 *
 * Returns 0 on success, -EINVAL if halted or cnt8 is too large,
 * -EAGAIN while a creg reset is in progress, or -ENOMEM.
 */
static int creg_queue_cmd(struct rsxx_cardinfo *card,
			  unsigned int op,
			  unsigned int addr,
			  unsigned int cnt8,
			  void *buf,
			  int stream,
			  creg_cmd_cb callback,
			  void *cb_private)
{
	struct creg_cmd *cmd;

	/* Don't queue stuff up if we're halted. */
	if (unlikely(card->halt))
		return -EINVAL;

	if (card->creg_ctrl.reset)
		return -EAGAIN;

	if (cnt8 > MAX_CREG_DATA8)
		return -EINVAL;

	cmd = kmem_cache_alloc(creg_cmd_pool, GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	INIT_LIST_HEAD(&cmd->list);

	cmd->op = op;
	cmd->addr = addr;
	cmd->cnt8 = cnt8;
	cmd->buf = buf;
	cmd->stream = stream;
	cmd->cb = callback;
	cmd->cb_private = cb_private;
	cmd->status = 0;

	spin_lock(&card->creg_ctrl.lock);
	list_add_tail(&cmd->list, &card->creg_ctrl.queue);
	card->creg_ctrl.q_depth++;
	creg_kick_queue(card);
	spin_unlock(&card->creg_ctrl.lock);

	return 0;
}
200
/*
 * Timer callback: the active command failed to complete within
 * CREG_TIMEOUT_MSEC.  Complete it with -ETIMEDOUT and restart the
 * queue.  If the completion path already popped the command we lost
 * the race, so just count the event.
 */
static void creg_cmd_timed_out(unsigned long data)
{
	struct rsxx_cardinfo *card = (struct rsxx_cardinfo *) data;
	struct creg_cmd *cmd;

	cmd = pop_active_cmd(card);
	if (cmd == NULL) {
		/* creg_cmd_done() got here first. */
		card->creg_ctrl.creg_stats.creg_timeout++;
		dev_warn(CARD_TO_DEV(card),
			"No active command associated with timeout!\n");
		return;
	}

	if (cmd->cb)
		cmd->cb(card, cmd, -ETIMEDOUT);

	kmem_cache_free(creg_cmd_pool, cmd);

	/* Mark the interface idle and push the next queued command. */
	spin_lock(&card->creg_ctrl.lock);
	card->creg_ctrl.active = 0;
	creg_kick_queue(card);
	spin_unlock(&card->creg_ctrl.lock);
}
225
226
/*
 * Work-queue handler run after a creg completion interrupt: cancel the
 * timeout timer, validate the hardware status, copy back read data,
 * invoke the command's callback, and start the next queued command.
 */
static void creg_cmd_done(struct work_struct *work)
{
	struct rsxx_cardinfo *card;
	struct creg_cmd *cmd;
	int st = 0;

	card = container_of(work, struct rsxx_cardinfo,
			    creg_ctrl.done_work);

	/*
	 * The timer could not be cancelled for some reason,
	 * race to pop the active command.
	 */
	if (del_timer_sync(&card->creg_ctrl.cmd_timer) == 0)
		card->creg_ctrl.creg_stats.failed_cancel_timer++;

	cmd = pop_active_cmd(card);
	if (cmd == NULL) {
		/* Timeout path already completed the command. */
		dev_err(CARD_TO_DEV(card),
			"Spurious creg interrupt!\n");
		return;
	}

	/* Latch the raw HW status both in the stats and on the command. */
	card->creg_ctrl.creg_stats.stat = ioread32(card->regmap + CREG_STAT);
	cmd->status = card->creg_ctrl.creg_stats.stat;
	if ((cmd->status & CREG_STAT_STATUS_MASK) == 0) {
		dev_err(CARD_TO_DEV(card),
			"Invalid status on creg command\n");
		/*
		 * At this point we're probably reading garbage from HW. Don't
		 * do anything else that could mess up the system and let
		 * the sync function return an error.
		 */
		st = -EIO;
		goto creg_done;
	} else if (cmd->status & CREG_STAT_ERROR) {
		st = -EIO;
	}

	if ((cmd->op == CREG_OP_READ)) {
		unsigned int cnt8 = ioread32(card->regmap + CREG_CNT);

		/* Paranoid Sanity Checks */
		if (!cmd->buf) {
			dev_err(CARD_TO_DEV(card),
				"Buffer not given for read.\n");
			st = -EIO;
			goto creg_done;
		}
		if (cnt8 != cmd->cnt8) {
			/* HW reported a different length than requested. */
			dev_err(CARD_TO_DEV(card),
				"count mismatch\n");
			st = -EIO;
			goto creg_done;
		}

		copy_from_creg_data(card, cnt8, cmd->buf, cmd->stream);
	}

creg_done:
	if (cmd->cb)
		cmd->cb(card, cmd, st);

	kmem_cache_free(creg_cmd_pool, cmd);

	/* Interface is idle again; issue the next queued command. */
	spin_lock(&card->creg_ctrl.lock);
	card->creg_ctrl.active = 0;
	creg_kick_queue(card);
	spin_unlock(&card->creg_ctrl.lock);
}
297
/*
 * Recover the creg interface after a fatal error: mask creg/event
 * interrupts, cancel every queued and in-flight command with
 * -ECANCELED, then re-enable interrupts.  Only one reset runs at a
 * time; a concurrent caller returns immediately.
 */
static void creg_reset(struct rsxx_cardinfo *card)
{
	struct creg_cmd *cmd = NULL;
	struct creg_cmd *tmp;
	unsigned long flags;

	/*
	 * mutex_trylock is used here because if reset_lock is taken then a
	 * reset is already happening. So, we can just go ahead and return.
	 */
	if (!mutex_trylock(&card->creg_ctrl.reset_lock))
		return;

	/* While set, creg_queue_cmd() refuses new work with -EAGAIN. */
	card->creg_ctrl.reset = 1;
	spin_lock_irqsave(&card->irq_lock, flags);
	rsxx_disable_ier_and_isr(card, CR_INTR_CREG | CR_INTR_EVENT);
	spin_unlock_irqrestore(&card->irq_lock, flags);

	dev_warn(CARD_TO_DEV(card),
		"Resetting creg interface for recovery\n");

	/* Cancel outstanding commands */
	spin_lock(&card->creg_ctrl.lock);
	list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) {
		list_del(&cmd->list);
		card->creg_ctrl.q_depth--;
		if (cmd->cb)
			cmd->cb(card, cmd, -ECANCELED);
		kmem_cache_free(creg_cmd_pool, cmd);
	}

	/* Cancel the in-flight command (and its timeout timer) too. */
	cmd = card->creg_ctrl.active_cmd;
	card->creg_ctrl.active_cmd = NULL;
	if (cmd) {
		if (timer_pending(&card->creg_ctrl.cmd_timer))
			del_timer_sync(&card->creg_ctrl.cmd_timer);

		if (cmd->cb)
			cmd->cb(card, cmd, -ECANCELED);
		kmem_cache_free(creg_cmd_pool, cmd);

		card->creg_ctrl.active = 0;
	}
	spin_unlock(&card->creg_ctrl.lock);

	card->creg_ctrl.reset = 0;
	spin_lock_irqsave(&card->irq_lock, flags);
	rsxx_enable_ier_and_isr(card, CR_INTR_CREG | CR_INTR_EVENT);
	spin_unlock_irqrestore(&card->irq_lock, flags);

	mutex_unlock(&card->creg_ctrl.reset_lock);
}
350
/* Used for synchronous accesses: result handoff from the completion
 * callback to the waiting thread in __issue_creg_rw(). */
struct creg_completion {
	struct completion *cmd_done;	/* signalled by creg_cmd_done_cb() */
	int st;				/* completion status, 0 or -errno */
	u32 creg_status;		/* raw HW status word */
};
357
358static void creg_cmd_done_cb(struct rsxx_cardinfo *card,
359 struct creg_cmd *cmd,
360 int st)
361{
362 struct creg_completion *cmd_completion;
363
Philip J Kelleherc206c702013-02-18 21:35:59 +0100364 cmd_completion = cmd->cb_private;
josh.h.morris@us.ibm.com8722ff82013-02-05 14:15:02 +0100365 BUG_ON(!cmd_completion);
366
367 cmd_completion->st = st;
368 cmd_completion->creg_status = cmd->status;
369 complete(cmd_completion->cmd_done);
370}
371
/*
 * Issue a single creg command and block until it completes or a
 * driver-level timeout fires.  The raw hardware status is returned
 * through @hw_stat.  Returns 0 on success or a -errno.
 */
static int __issue_creg_rw(struct rsxx_cardinfo *card,
			   unsigned int op,
			   unsigned int addr,
			   unsigned int cnt8,
			   void *buf,
			   int stream,
			   unsigned int *hw_stat)
{
	DECLARE_COMPLETION_ONSTACK(cmd_done);
	struct creg_completion completion;
	unsigned long timeout;
	int st;

	completion.cmd_done = &cmd_done;
	completion.st = 0;
	completion.creg_status = 0;

	st = creg_queue_cmd(card, op, addr, cnt8, buf, stream, creg_cmd_done_cb,
			    &completion);
	if (st)
		return st;

	/*
	 * This timeout is necessary for unresponsive hardware. The additional
	 * 20 seconds to used to guarantee that each cregs requests has time to
	 * complete.
	 * NOTE(review): q_depth is read without creg_ctrl.lock here -- a
	 * stale value only stretches or shrinks the backstop timeout.
	 */
	timeout = msecs_to_jiffies(CREG_TIMEOUT_MSEC *
				   card->creg_ctrl.q_depth + 20000);

	/*
	 * The creg interface is guaranteed to complete. It has a timeout
	 * mechanism that will kick in if hardware does not respond.
	 */
	st = wait_for_completion_timeout(completion.cmd_done, timeout);
	if (st == 0) {
		/*
		 * This is really bad, because the kernel timer did not
		 * expire and notify us of a timeout!
		 */
		dev_crit(CARD_TO_DEV(card),
			"cregs timer failed\n");
		creg_reset(card);
		return -EIO;
	}

	*hw_stat = completion.creg_status;

	if (completion.st) {
		dev_warn(CARD_TO_DEV(card),
			"creg command failed(%d x%08x)\n",
			completion.st, addr);
		return completion.st;
	}

	return 0;
}
429
430static int issue_creg_rw(struct rsxx_cardinfo *card,
431 u32 addr,
432 unsigned int size8,
433 void *data,
434 int stream,
435 int read)
436{
437 unsigned int hw_stat;
438 unsigned int xfer;
439 unsigned int op;
440 int st;
441
442 op = read ? CREG_OP_READ : CREG_OP_WRITE;
443
444 do {
445 xfer = min_t(unsigned int, size8, MAX_CREG_DATA8);
446
447 st = __issue_creg_rw(card, op, addr, xfer,
448 data, stream, &hw_stat);
449 if (st)
450 return st;
451
Philip J Kelleherc206c702013-02-18 21:35:59 +0100452 data = (char *)data + xfer;
josh.h.morris@us.ibm.com8722ff82013-02-05 14:15:02 +0100453 addr += xfer;
454 size8 -= xfer;
455 } while (size8);
456
457 return 0;
458}
459
460/* ---------------------------- Public API ---------------------------------- */
/*
 * Synchronously write size8 bytes from @data to creg address @addr.
 * byte_stream selects endian-swapped stream access.  Returns 0 or -errno.
 */
int rsxx_creg_write(struct rsxx_cardinfo *card,
			u32 addr,
			unsigned int size8,
			void *data,
			int byte_stream)
{
	return issue_creg_rw(card, addr, size8, data, byte_stream, 0);
}
469
/*
 * Synchronously read size8 bytes from creg address @addr into @data.
 * byte_stream selects endian-swapped stream access.  Returns 0 or -errno.
 */
int rsxx_creg_read(struct rsxx_cardinfo *card,
			u32 addr,
			unsigned int size8,
			void *data,
			int byte_stream)
{
	return issue_creg_rw(card, addr, size8, data, byte_stream, 1);
}
478
/* Read the card's current state register into *state.  Returns 0 or -errno. */
int rsxx_get_card_state(struct rsxx_cardinfo *card, unsigned int *state)
{
	return rsxx_creg_read(card, CREG_ADD_CARD_STATE,
			      sizeof(*state), state, 0);
}
484
/*
 * Read the card capacity and return it in bytes through *size8.
 * Hardware reports the size in RSXX_HW_BLK_SIZE units.
 * Returns 0 or -errno (in which case *size8 is untouched).
 */
int rsxx_get_card_size8(struct rsxx_cardinfo *card, u64 *size8)
{
	unsigned int size;
	int st;

	st = rsxx_creg_read(card, CREG_ADD_CARD_SIZE,
			    sizeof(size), &size, 0);
	if (st)
		return st;

	/* Widen before multiplying so large cards don't overflow 32 bits. */
	*size8 = (u64)size * RSXX_HW_BLK_SIZE;
	return 0;
}
498
/* Read the number of targets into *n_targets.  Returns 0 or -errno. */
int rsxx_get_num_targets(struct rsxx_cardinfo *card,
			 unsigned int *n_targets)
{
	return rsxx_creg_read(card, CREG_ADD_NUM_TARGETS,
			      sizeof(*n_targets), n_targets, 0);
}
505
/* Read the card capability bits into *capabilities.  Returns 0 or -errno. */
int rsxx_get_card_capabilities(struct rsxx_cardinfo *card,
			       u32 *capabilities)
{
	return rsxx_creg_read(card, CREG_ADD_CAPABILITIES,
			      sizeof(*capabilities), capabilities, 0);
}
512
/* Write command word @cmd to the card command register.  Returns 0 or -errno. */
int rsxx_issue_card_cmd(struct rsxx_cardinfo *card, u32 cmd)
{
	return rsxx_creg_write(card, CREG_ADD_CARD_CMD,
			       sizeof(cmd), &cmd, 0);
}
518
519
520/*----------------- HW Log Functions -------------------*/
/*
 * Emit one chunk of firmware log text at the appropriate kernel log
 * level.  A chunk that begins with "<#>" sets the level; a chunk
 * without a prefix continues the previous message, so the last level
 * is remembered in a function-static variable across calls.
 */
static void hw_log_msg(struct rsxx_cardinfo *card, const char *str, int len)
{
	static char level;	/* persists across calls for continuations */

	/*
	 * New messages start with "<#>", where # is the log level. Messages
	 * that extend past the log buffer will use the previous level
	 */
	if ((len > 3) && (str[0] == '<') && (str[2] == '>')) {
		level = str[1];
		str += 3; /* Skip past the log level. */
		len -= 3;
	}

	switch (level) {
	case '0':
		dev_emerg(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '1':
		dev_alert(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '2':
		dev_crit(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '3':
		dev_err(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '4':
		dev_warn(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '5':
		dev_notice(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '6':
		dev_info(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '7':
		dev_dbg(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	default:
		/* Unknown or unset level: fall back to info. */
		dev_info(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	}
}
565
/*
 * Copy at most @count bytes from @src to @dest, stopping after the
 * terminating '\0' if one is found within the window.  Unlike
 * strncpy(), no zero-padding is done and @dest is left unterminated
 * when @src is longer than @count bytes.
 * Returns the number of bytes actually copied (including the '\0'
 * when one was copied).
 */
static int substrncpy(char *dest, const char *src, int count)
{
	int copied = 0;

	while (copied < count) {
		char c = *src++;

		dest[copied++] = c;
		if (c == '\0')
			break;
	}
	return copied;
}
585
586
/*
 * Completion callback for a hardware-log read: append the received
 * text to the card's accumulation buffer, flushing to the kernel log
 * whenever a message terminator ('\0') is seen or the buffer fills.
 * Re-issues the read if the hardware reports more log data pending.
 */
static void read_hw_log_done(struct rsxx_cardinfo *card,
			     struct creg_cmd *cmd,
			     int st)
{
	char *buf;
	char *log_str;
	int cnt;
	int len;
	int off;

	buf = cmd->buf;
	off = 0;

	/* Failed getting the log message */
	if (st)
		return;

	while (off < cmd->cnt8) {
		log_str = &card->log.buf[card->log.buf_len];
		cnt = min(cmd->cnt8 - off, LOG_BUF_SIZE8 - card->log.buf_len);
		/* Copies up to one '\0'-terminated message fragment. */
		len = substrncpy(log_str, &buf[off], cnt);

		off += len;
		card->log.buf_len += len;

		/*
		 * Flush the log if we've hit the end of a message or if we've
		 * run out of buffer space.
		 */
		if ((log_str[len - 1] == '\0') ||
		    (card->log.buf_len == LOG_BUF_SIZE8)) {
			if (card->log.buf_len != 1) /* Don't log blank lines. */
				hw_log_msg(card, card->log.buf,
					   card->log.buf_len);
			card->log.buf_len = 0;
		}

	}

	/* HW signals more log text is waiting; fetch the next chunk. */
	if (cmd->status & CREG_STAT_LOG_PENDING)
		rsxx_read_hw_log(card);
}
629
/*
 * Queue an asynchronous read of the next chunk of the hardware log
 * into card->log.tmp; read_hw_log_done() processes the result.
 * Returns 0 or the -errno from creg_queue_cmd().
 */
int rsxx_read_hw_log(struct rsxx_cardinfo *card)
{
	int st;

	st = creg_queue_cmd(card, CREG_OP_READ, CREG_ADD_LOG,
			    sizeof(card->log.tmp), card->log.tmp,
			    1, read_hw_log_done, NULL);
	if (st)
		dev_err(CARD_TO_DEV(card),
			"Failed getting log text\n");

	return st;
}
643
644/*-------------- IOCTL REG Access ------------------*/
/*
 * Execute one ioctl-originated register access synchronously; the raw
 * hardware status is stored back into cmd->stat.
 */
static int issue_reg_cmd(struct rsxx_cardinfo *card,
			 struct rsxx_reg_access *cmd,
			 int read)
{
	unsigned int op = read ? CREG_OP_READ : CREG_OP_WRITE;

	return __issue_creg_rw(card, op, cmd->addr, cmd->cnt, cmd->data,
			       cmd->stream, &cmd->stat);
}
654
/*
 * ioctl backend for user-space register access: copy the request in,
 * bounds-check the user-supplied count, perform the transfer, then
 * copy status (and read data) back out.  Returns 0 or -errno.
 */
int rsxx_reg_access(struct rsxx_cardinfo *card,
			struct rsxx_reg_access __user *ucmd,
			int read)
{
	struct rsxx_reg_access cmd;
	int st;

	st = copy_from_user(&cmd, ucmd, sizeof(cmd));
	if (st)
		return -EFAULT;

	/* cnt comes from user space; reject oversize requests.
	 * NOTE(review): -EINVAL would be the conventional errno for a
	 * range violation; -EFAULT is kept as-is for compatibility. */
	if (cmd.cnt > RSXX_MAX_REG_CNT)
		return -EFAULT;

	st = issue_reg_cmd(card, &cmd, read);
	if (st)
		return st;

	st = put_user(cmd.stat, &ucmd->stat);
	if (st)
		return -EFAULT;

	if (read) {
		st = copy_to_user(ucmd->data, cmd.data, cmd.cnt);
		if (st)
			return -EFAULT;
	}

	return 0;
}
685
686/*------------ Initialization & Setup --------------*/
/*
 * Per-card initialization of the creg machinery: completion work,
 * reset mutex, command queue, lock, and the timeout timer.
 * Always returns 0.
 */
int rsxx_creg_setup(struct rsxx_cardinfo *card)
{
	card->creg_ctrl.active_cmd = NULL;

	INIT_WORK(&card->creg_ctrl.done_work, creg_cmd_done);
	mutex_init(&card->creg_ctrl.reset_lock);
	INIT_LIST_HEAD(&card->creg_ctrl.queue);
	spin_lock_init(&card->creg_ctrl.lock);
	/* Timer body receives the card pointer cast through 'data'. */
	setup_timer(&card->creg_ctrl.cmd_timer, creg_cmd_timed_out,
		    (unsigned long) card);

	return 0;
}
700
/*
 * Per-card teardown: cancel every queued and in-flight creg command
 * with -ECANCELED, stop the timeout timer, and flush completion work.
 * NOTE(review): q_depth is not decremented while draining the queue
 * here (unlike creg_reset()) -- presumably harmless during teardown,
 * but worth confirming.
 */
void rsxx_creg_destroy(struct rsxx_cardinfo *card)
{
	struct creg_cmd *cmd;
	struct creg_cmd *tmp;
	int cnt = 0;

	/* Cancel outstanding commands */
	spin_lock(&card->creg_ctrl.lock);
	list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) {
		list_del(&cmd->list);
		if (cmd->cb)
			cmd->cb(card, cmd, -ECANCELED);
		kmem_cache_free(creg_cmd_pool, cmd);
		cnt++;
	}

	if (cnt)
		dev_info(CARD_TO_DEV(card),
			"Canceled %d queue creg commands\n", cnt);

	/* Cancel the in-flight command (and its timer) as well. */
	cmd = card->creg_ctrl.active_cmd;
	card->creg_ctrl.active_cmd = NULL;
	if (cmd) {
		if (timer_pending(&card->creg_ctrl.cmd_timer))
			del_timer_sync(&card->creg_ctrl.cmd_timer);

		if (cmd->cb)
			cmd->cb(card, cmd, -ECANCELED);
		dev_info(CARD_TO_DEV(card),
			"Canceled active creg command\n");
		kmem_cache_free(creg_cmd_pool, cmd);
	}
	spin_unlock(&card->creg_ctrl.lock);

	cancel_work_sync(&card->creg_ctrl.done_work);
}
737
738
/* Module-level init: create the creg_cmd slab cache.  Returns 0 or -ENOMEM. */
int rsxx_creg_init(void)
{
	creg_cmd_pool = KMEM_CACHE(creg_cmd, SLAB_HWCACHE_ALIGN);
	if (!creg_cmd_pool)
		return -ENOMEM;

	return 0;
}
747
/* Module-level teardown: destroy the creg_cmd slab cache. */
void rsxx_creg_cleanup(void)
{
	kmem_cache_destroy(creg_cmd_pool);
}