/*
 * Microsemi Switchtec(tm) PCIe Management Driver
 * Copyright (c) 2017, Microsemi Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/pci.h>
#include <linux/cdev.h>
#include <linux/wait.h>

MODULE_DESCRIPTION("Microsemi Switchtec(tm) PCIe Management Driver");
MODULE_VERSION("0.1");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Microsemi Corporation");

static int max_devices = 16;
module_param(max_devices, int, 0644);
MODULE_PARM_DESC(max_devices, "max number of switchtec device instances");

static dev_t switchtec_devt;
static struct class *switchtec_class;
static DEFINE_IDA(switchtec_minor_ida);

#define MICROSEMI_VENDOR_ID 0x11f8
#define MICROSEMI_NTB_CLASSCODE 0x068000
#define MICROSEMI_MGMT_CLASSCODE 0x058000

#define SWITCHTEC_MRPC_PAYLOAD_SIZE 1024
#define SWITCHTEC_MAX_PFF_CSR 48

#define SWITCHTEC_EVENT_OCCURRED BIT(0)
#define SWITCHTEC_EVENT_CLEAR    BIT(0)
#define SWITCHTEC_EVENT_EN_LOG   BIT(1)
#define SWITCHTEC_EVENT_EN_CLI   BIT(2)
#define SWITCHTEC_EVENT_EN_IRQ   BIT(3)
#define SWITCHTEC_EVENT_FATAL    BIT(4)

enum {
	SWITCHTEC_GAS_MRPC_OFFSET = 0x0000,
	SWITCHTEC_GAS_TOP_CFG_OFFSET = 0x1000,
	SWITCHTEC_GAS_SW_EVENT_OFFSET = 0x1800,
	SWITCHTEC_GAS_SYS_INFO_OFFSET = 0x2000,
	SWITCHTEC_GAS_FLASH_INFO_OFFSET = 0x2200,
	SWITCHTEC_GAS_PART_CFG_OFFSET = 0x4000,
	SWITCHTEC_GAS_NTB_OFFSET = 0x10000,
	SWITCHTEC_GAS_PFF_CSR_OFFSET = 0x134000,
};
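
/*
 * These offsets address regions of the switch's Global Address Space
 * (GAS), which switchtec_init_pci() below maps from BAR 0; each of the
 * mmio_* pointers in struct switchtec_dev is simply stdev->mmio plus one
 * of these constants.
 */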

struct mrpc_regs {
	u8 input_data[SWITCHTEC_MRPC_PAYLOAD_SIZE];
	u8 output_data[SWITCHTEC_MRPC_PAYLOAD_SIZE];
	u32 cmd;
	u32 status;
	u32 ret_value;
} __packed;

enum mrpc_status {
	SWITCHTEC_MRPC_STATUS_INPROGRESS = 1,
	SWITCHTEC_MRPC_STATUS_DONE = 2,
	SWITCHTEC_MRPC_STATUS_ERROR = 0xFF,
	SWITCHTEC_MRPC_STATUS_INTERRUPTED = 0x100,
};
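
/*
 * MRPC command handshake as implemented by mrpc_cmd_submit() and
 * mrpc_complete_cmd() below (summary only; the real code adds locking,
 * queueing and a polling timeout):
 *
 *   1. memcpy_toio() the payload into mrpc_regs.input_data
 *   2. iowrite32() the command number into mrpc_regs.cmd
 *   3. wait until mrpc_regs.status leaves SWITCHTEC_MRPC_STATUS_INPROGRESS
 *      (normally signalled by the MRPC completion interrupt)
 *   4. on SWITCHTEC_MRPC_STATUS_DONE, read mrpc_regs.ret_value and
 *      memcpy_fromio() the reply from mrpc_regs.output_data
 */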

struct sw_event_regs {
	u64 event_report_ctrl;
	u64 reserved1;
	u64 part_event_bitmap;
	u64 reserved2;
	u32 global_summary;
	u32 reserved3[3];
	u32 stack_error_event_hdr;
	u32 stack_error_event_data;
	u32 reserved4[4];
	u32 ppu_error_event_hdr;
	u32 ppu_error_event_data;
	u32 reserved5[4];
	u32 isp_error_event_hdr;
	u32 isp_error_event_data;
	u32 reserved6[4];
	u32 sys_reset_event_hdr;
	u32 reserved7[5];
	u32 fw_exception_hdr;
	u32 reserved8[5];
	u32 fw_nmi_hdr;
	u32 reserved9[5];
	u32 fw_non_fatal_hdr;
	u32 reserved10[5];
	u32 fw_fatal_hdr;
	u32 reserved11[5];
	u32 twi_mrpc_comp_hdr;
	u32 twi_mrpc_comp_data;
	u32 reserved12[4];
	u32 twi_mrpc_comp_async_hdr;
	u32 twi_mrpc_comp_async_data;
	u32 reserved13[4];
	u32 cli_mrpc_comp_hdr;
	u32 cli_mrpc_comp_data;
	u32 reserved14[4];
	u32 cli_mrpc_comp_async_hdr;
	u32 cli_mrpc_comp_async_data;
	u32 reserved15[4];
	u32 gpio_interrupt_hdr;
	u32 gpio_interrupt_data;
	u32 reserved16[4];
} __packed;

struct sys_info_regs {
	u32 device_id;
	u32 device_version;
	u32 firmware_version;
	u32 reserved1;
	u32 vendor_table_revision;
	u32 table_format_version;
	u32 partition_id;
	u32 cfg_file_fmt_version;
	u32 reserved2[58];
	char vendor_id[8];
	char product_id[16];
	char product_revision[4];
	char component_vendor[8];
	u16 component_id;
	u8 component_revision;
} __packed;

struct flash_info_regs {
	u32 flash_part_map_upd_idx;

	struct active_partition_info {
		u32 address;
		u32 build_version;
		u32 build_string;
	} active_img;

	struct active_partition_info active_cfg;
	struct active_partition_info inactive_img;
	struct active_partition_info inactive_cfg;

	u32 flash_length;

	struct partition_info {
		u32 address;
		u32 length;
	} cfg0;

	struct partition_info cfg1;
	struct partition_info img0;
	struct partition_info img1;
	struct partition_info nvlog;
	struct partition_info vendor[8];
};

struct ntb_info_regs {
	u8 partition_count;
	u8 partition_id;
	u16 reserved1;
	u64 ep_map;
	u16 requester_id;
} __packed;

struct part_cfg_regs {
	u32 status;
	u32 state;
	u32 port_cnt;
	u32 usp_port_mode;
	u32 usp_pff_inst_id;
	u32 vep_pff_inst_id;
	u32 dsp_pff_inst_id[47];
	u32 reserved1[11];
	u16 vep_vector_number;
	u16 usp_vector_number;
	u32 port_event_bitmap;
	u32 reserved2[3];
	u32 part_event_summary;
	u32 reserved3[3];
	u32 part_reset_hdr;
	u32 part_reset_data[5];
	u32 mrpc_comp_hdr;
	u32 mrpc_comp_data[5];
	u32 mrpc_comp_async_hdr;
	u32 mrpc_comp_async_data[5];
	u32 dyn_binding_hdr;
	u32 dyn_binding_data[5];
	u32 reserved4[159];
} __packed;

enum {
	SWITCHTEC_PART_CFG_EVENT_RESET = 1 << 0,
	SWITCHTEC_PART_CFG_EVENT_MRPC_CMP = 1 << 1,
	SWITCHTEC_PART_CFG_EVENT_MRPC_ASYNC_CMP = 1 << 2,
	SWITCHTEC_PART_CFG_EVENT_DYN_PART_CMP = 1 << 3,
};

struct pff_csr_regs {
	u16 vendor_id;
	u16 device_id;
	u32 pci_cfg_header[15];
	u32 pci_cap_region[48];
	u32 pcie_cap_region[448];
	u32 indirect_gas_window[128];
	u32 indirect_gas_window_off;
	u32 reserved[127];
	u32 pff_event_summary;
	u32 reserved2[3];
	u32 aer_in_p2p_hdr;
	u32 aer_in_p2p_data[5];
	u32 aer_in_vep_hdr;
	u32 aer_in_vep_data[5];
	u32 dpc_hdr;
	u32 dpc_data[5];
	u32 cts_hdr;
	u32 cts_data[5];
	u32 reserved3[6];
	u32 hotplug_hdr;
	u32 hotplug_data[5];
	u32 ier_hdr;
	u32 ier_data[5];
	u32 threshold_hdr;
	u32 threshold_data[5];
	u32 power_mgmt_hdr;
	u32 power_mgmt_data[5];
	u32 tlp_throttling_hdr;
	u32 tlp_throttling_data[5];
	u32 force_speed_hdr;
	u32 force_speed_data[5];
	u32 credit_timeout_hdr;
	u32 credit_timeout_data[5];
	u32 link_state_hdr;
	u32 link_state_data[5];
	u32 reserved4[174];
} __packed;

struct switchtec_dev {
	struct pci_dev *pdev;
	struct device dev;
	struct cdev cdev;

	int partition;
	int partition_count;
	int pff_csr_count;
	char pff_local[SWITCHTEC_MAX_PFF_CSR];

	void __iomem *mmio;
	struct mrpc_regs __iomem *mmio_mrpc;
	struct sw_event_regs __iomem *mmio_sw_event;
	struct sys_info_regs __iomem *mmio_sys_info;
	struct flash_info_regs __iomem *mmio_flash_info;
	struct ntb_info_regs __iomem *mmio_ntb;
	struct part_cfg_regs __iomem *mmio_part_cfg;
	struct part_cfg_regs __iomem *mmio_part_cfg_all;
	struct pff_csr_regs __iomem *mmio_pff_csr;

	/*
	 * The mrpc_mutex must be held when accessing the other
	 * mrpc_* fields, the alive flag and the stuser->state field.
	 */
	struct mutex mrpc_mutex;
	struct list_head mrpc_queue;
	int mrpc_busy;
	struct work_struct mrpc_work;
	struct delayed_work mrpc_timeout;
	bool alive;

	wait_queue_head_t event_wq;
	atomic_t event_cnt;
};

static struct switchtec_dev *to_stdev(struct device *dev)
{
	return container_of(dev, struct switchtec_dev, dev);
}

enum mrpc_state {
	MRPC_IDLE = 0,
	MRPC_QUEUED,
	MRPC_RUNNING,
	MRPC_DONE,
};
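
/*
 * Per-user MRPC state machine (driven by stuser_set_state() below):
 * IDLE -> QUEUED in mrpc_queue_cmd(), QUEUED -> RUNNING when
 * mrpc_cmd_submit() starts the command, RUNNING -> DONE in
 * mrpc_complete_cmd(), and DONE -> IDLE once switchtec_dev_read()
 * has returned the result.
 */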

struct switchtec_user {
	struct switchtec_dev *stdev;

	enum mrpc_state state;

	struct completion comp;
	struct kref kref;
	struct list_head list;

	u32 cmd;
	u32 status;
	u32 return_code;
	size_t data_len;
	size_t read_len;
	unsigned char data[SWITCHTEC_MRPC_PAYLOAD_SIZE];
	int event_cnt;
};

static struct switchtec_user *stuser_create(struct switchtec_dev *stdev)
{
	struct switchtec_user *stuser;

	stuser = kzalloc(sizeof(*stuser), GFP_KERNEL);
	if (!stuser)
		return ERR_PTR(-ENOMEM);

	get_device(&stdev->dev);
	stuser->stdev = stdev;
	kref_init(&stuser->kref);
	INIT_LIST_HEAD(&stuser->list);
	init_completion(&stuser->comp);
	stuser->event_cnt = atomic_read(&stdev->event_cnt);

	dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser);

	return stuser;
}

static void stuser_free(struct kref *kref)
{
	struct switchtec_user *stuser;

	stuser = container_of(kref, struct switchtec_user, kref);

	dev_dbg(&stuser->stdev->dev, "%s: %p\n", __func__, stuser);

	put_device(&stuser->stdev->dev);
	kfree(stuser);
}

static void stuser_put(struct switchtec_user *stuser)
{
	kref_put(&stuser->kref, stuser_free);
}

static void stuser_set_state(struct switchtec_user *stuser,
			     enum mrpc_state state)
{
	/* requires the mrpc_mutex to already be held when called */

	const char * const state_names[] = {
		[MRPC_IDLE] = "IDLE",
		[MRPC_QUEUED] = "QUEUED",
		[MRPC_RUNNING] = "RUNNING",
		[MRPC_DONE] = "DONE",
	};

	stuser->state = state;

	dev_dbg(&stuser->stdev->dev, "stuser state %p -> %s",
		stuser, state_names[state]);
}

static void mrpc_complete_cmd(struct switchtec_dev *stdev);

static void mrpc_cmd_submit(struct switchtec_dev *stdev)
{
	/* requires the mrpc_mutex to already be held when called */

	struct switchtec_user *stuser;

	if (stdev->mrpc_busy)
		return;

	if (list_empty(&stdev->mrpc_queue))
		return;

	stuser = list_entry(stdev->mrpc_queue.next, struct switchtec_user,
			    list);

	stuser_set_state(stuser, MRPC_RUNNING);
	stdev->mrpc_busy = 1;
	memcpy_toio(&stdev->mmio_mrpc->input_data,
		    stuser->data, stuser->data_len);
	iowrite32(stuser->cmd, &stdev->mmio_mrpc->cmd);

	stuser->status = ioread32(&stdev->mmio_mrpc->status);
	if (stuser->status != SWITCHTEC_MRPC_STATUS_INPROGRESS)
		mrpc_complete_cmd(stdev);

	schedule_delayed_work(&stdev->mrpc_timeout,
			      msecs_to_jiffies(500));
}
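
/*
 * Completion is normally signalled by the MRPC completion interrupt
 * (switchtec_event_isr() schedules mrpc_work), but mrpc_cmd_submit()
 * also arms mrpc_timeout so the status register is re-polled every
 * 500ms in case that interrupt never arrives.
 */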

static int mrpc_queue_cmd(struct switchtec_user *stuser)
{
	/* requires the mrpc_mutex to already be held when called */

	struct switchtec_dev *stdev = stuser->stdev;

	kref_get(&stuser->kref);
	stuser->read_len = sizeof(stuser->data);
	stuser_set_state(stuser, MRPC_QUEUED);
	init_completion(&stuser->comp);
	list_add_tail(&stuser->list, &stdev->mrpc_queue);

	mrpc_cmd_submit(stdev);

	return 0;
}

static void mrpc_complete_cmd(struct switchtec_dev *stdev)
{
	/* requires the mrpc_mutex to already be held when called */
	struct switchtec_user *stuser;

	if (list_empty(&stdev->mrpc_queue))
		return;

	stuser = list_entry(stdev->mrpc_queue.next, struct switchtec_user,
			    list);

	stuser->status = ioread32(&stdev->mmio_mrpc->status);
	if (stuser->status == SWITCHTEC_MRPC_STATUS_INPROGRESS)
		return;

	stuser_set_state(stuser, MRPC_DONE);
	stuser->return_code = 0;

	if (stuser->status != SWITCHTEC_MRPC_STATUS_DONE)
		goto out;

	stuser->return_code = ioread32(&stdev->mmio_mrpc->ret_value);
	if (stuser->return_code != 0)
		goto out;

	memcpy_fromio(stuser->data, &stdev->mmio_mrpc->output_data,
		      stuser->read_len);

out:
	complete_all(&stuser->comp);
	list_del_init(&stuser->list);
	stuser_put(stuser);
	stdev->mrpc_busy = 0;

	mrpc_cmd_submit(stdev);
}

static void mrpc_event_work(struct work_struct *work)
{
	struct switchtec_dev *stdev;

	stdev = container_of(work, struct switchtec_dev, mrpc_work);

	dev_dbg(&stdev->dev, "%s\n", __func__);

	mutex_lock(&stdev->mrpc_mutex);
	cancel_delayed_work(&stdev->mrpc_timeout);
	mrpc_complete_cmd(stdev);
	mutex_unlock(&stdev->mrpc_mutex);
}

static void mrpc_timeout_work(struct work_struct *work)
{
	struct switchtec_dev *stdev;
	u32 status;

	stdev = container_of(work, struct switchtec_dev, mrpc_timeout.work);

	dev_dbg(&stdev->dev, "%s\n", __func__);

	mutex_lock(&stdev->mrpc_mutex);

	status = ioread32(&stdev->mmio_mrpc->status);
	if (status == SWITCHTEC_MRPC_STATUS_INPROGRESS) {
		schedule_delayed_work(&stdev->mrpc_timeout,
				      msecs_to_jiffies(500));
		goto out;
	}

	mrpc_complete_cmd(stdev);

out:
	mutex_unlock(&stdev->mrpc_mutex);
}

static ssize_t device_version_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct switchtec_dev *stdev = to_stdev(dev);
	u32 ver;

	ver = ioread32(&stdev->mmio_sys_info->device_version);

	return sprintf(buf, "%x\n", ver);
}
static DEVICE_ATTR_RO(device_version);

static ssize_t fw_version_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct switchtec_dev *stdev = to_stdev(dev);
	u32 ver;

	ver = ioread32(&stdev->mmio_sys_info->firmware_version);

	return sprintf(buf, "%08x\n", ver);
}
static DEVICE_ATTR_RO(fw_version);

static ssize_t io_string_show(char *buf, void __iomem *attr, size_t len)
{
	int i;

	memcpy_fromio(buf, attr, len);
	buf[len] = '\n';
	buf[len + 1] = 0;

	for (i = len - 1; i > 0; i--) {
		if (buf[i] != ' ')
			break;
		buf[i] = '\n';
		buf[i + 1] = 0;
	}

	return strlen(buf);
}

#define DEVICE_ATTR_SYS_INFO_STR(field) \
static ssize_t field ## _show(struct device *dev, \
	struct device_attribute *attr, char *buf) \
{ \
	struct switchtec_dev *stdev = to_stdev(dev); \
	return io_string_show(buf, &stdev->mmio_sys_info->field, \
			      sizeof(stdev->mmio_sys_info->field)); \
} \
\
static DEVICE_ATTR_RO(field)

DEVICE_ATTR_SYS_INFO_STR(vendor_id);
DEVICE_ATTR_SYS_INFO_STR(product_id);
DEVICE_ATTR_SYS_INFO_STR(product_revision);
DEVICE_ATTR_SYS_INFO_STR(component_vendor);

static ssize_t component_id_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct switchtec_dev *stdev = to_stdev(dev);
	int id = ioread16(&stdev->mmio_sys_info->component_id);

	return sprintf(buf, "PM%04X\n", id);
}
static DEVICE_ATTR_RO(component_id);

static ssize_t component_revision_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct switchtec_dev *stdev = to_stdev(dev);
	int rev = ioread8(&stdev->mmio_sys_info->component_revision);

	return sprintf(buf, "%d\n", rev);
}
static DEVICE_ATTR_RO(component_revision);

static ssize_t partition_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct switchtec_dev *stdev = to_stdev(dev);

	return sprintf(buf, "%d\n", stdev->partition);
}
static DEVICE_ATTR_RO(partition);

static ssize_t partition_count_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct switchtec_dev *stdev = to_stdev(dev);

	return sprintf(buf, "%d\n", stdev->partition_count);
}
static DEVICE_ATTR_RO(partition_count);

static struct attribute *switchtec_device_attrs[] = {
	&dev_attr_device_version.attr,
	&dev_attr_fw_version.attr,
	&dev_attr_vendor_id.attr,
	&dev_attr_product_id.attr,
	&dev_attr_product_revision.attr,
	&dev_attr_component_vendor.attr,
	&dev_attr_component_id.attr,
	&dev_attr_component_revision.attr,
	&dev_attr_partition.attr,
	&dev_attr_partition_count.attr,
	NULL,
};

ATTRIBUTE_GROUPS(switchtec_device);

static int switchtec_dev_open(struct inode *inode, struct file *filp)
{
	struct switchtec_dev *stdev;
	struct switchtec_user *stuser;

	stdev = container_of(inode->i_cdev, struct switchtec_dev, cdev);

	stuser = stuser_create(stdev);
	if (IS_ERR(stuser))
		return PTR_ERR(stuser);

	filp->private_data = stuser;
	nonseekable_open(inode, filp);

	dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser);

	return 0;
}

static int switchtec_dev_release(struct inode *inode, struct file *filp)
{
	struct switchtec_user *stuser = filp->private_data;

	stuser_put(stuser);

	return 0;
}

static int lock_mutex_and_test_alive(struct switchtec_dev *stdev)
{
	if (mutex_lock_interruptible(&stdev->mrpc_mutex))
		return -EINTR;

	if (!stdev->alive) {
		mutex_unlock(&stdev->mrpc_mutex);
		return -ENODEV;
	}

	return 0;
}

static ssize_t switchtec_dev_write(struct file *filp, const char __user *data,
				   size_t size, loff_t *off)
{
	struct switchtec_user *stuser = filp->private_data;
	struct switchtec_dev *stdev = stuser->stdev;
	int rc;

	if (size < sizeof(stuser->cmd) ||
	    size > sizeof(stuser->cmd) + sizeof(stuser->data))
		return -EINVAL;

	stuser->data_len = size - sizeof(stuser->cmd);

	rc = lock_mutex_and_test_alive(stdev);
	if (rc)
		return rc;

	if (stuser->state != MRPC_IDLE) {
		rc = -EBADE;
		goto out;
	}

	rc = copy_from_user(&stuser->cmd, data, sizeof(stuser->cmd));
	if (rc) {
		rc = -EFAULT;
		goto out;
	}

	data += sizeof(stuser->cmd);
	rc = copy_from_user(&stuser->data, data, size - sizeof(stuser->cmd));
	if (rc) {
		rc = -EFAULT;
		goto out;
	}

	rc = mrpc_queue_cmd(stuser);

out:
	mutex_unlock(&stdev->mrpc_mutex);

	if (rc)
		return rc;

	return size;
}

static ssize_t switchtec_dev_read(struct file *filp, char __user *data,
				  size_t size, loff_t *off)
{
	struct switchtec_user *stuser = filp->private_data;
	struct switchtec_dev *stdev = stuser->stdev;
	int rc;

	if (size < sizeof(stuser->cmd) ||
	    size > sizeof(stuser->cmd) + sizeof(stuser->data))
		return -EINVAL;

	rc = lock_mutex_and_test_alive(stdev);
	if (rc)
		return rc;

	if (stuser->state == MRPC_IDLE) {
		mutex_unlock(&stdev->mrpc_mutex);
		return -EBADE;
	}

	stuser->read_len = size - sizeof(stuser->return_code);

	mutex_unlock(&stdev->mrpc_mutex);

	if (filp->f_flags & O_NONBLOCK) {
		if (!try_wait_for_completion(&stuser->comp))
			return -EAGAIN;
	} else {
		rc = wait_for_completion_interruptible(&stuser->comp);
		if (rc < 0)
			return rc;
	}

	rc = lock_mutex_and_test_alive(stdev);
	if (rc)
		return rc;

	if (stuser->state != MRPC_DONE) {
		mutex_unlock(&stdev->mrpc_mutex);
		return -EBADE;
	}

	rc = copy_to_user(data, &stuser->return_code,
			  sizeof(stuser->return_code));
	if (rc) {
		rc = -EFAULT;
		goto out;
	}

	data += sizeof(stuser->return_code);
	rc = copy_to_user(data, &stuser->data,
			  size - sizeof(stuser->return_code));
	if (rc) {
		rc = -EFAULT;
		goto out;
	}

	stuser_set_state(stuser, MRPC_IDLE);

out:
	mutex_unlock(&stdev->mrpc_mutex);

	if (stuser->status == SWITCHTEC_MRPC_STATUS_DONE)
		return size;
	else if (stuser->status == SWITCHTEC_MRPC_STATUS_INTERRUPTED)
		return -ENXIO;
	else
		return -EBADMSG;
}

static unsigned int switchtec_dev_poll(struct file *filp, poll_table *wait)
{
	struct switchtec_user *stuser = filp->private_data;
	struct switchtec_dev *stdev = stuser->stdev;
	int ret = 0;

	poll_wait(filp, &stuser->comp.wait, wait);
	poll_wait(filp, &stdev->event_wq, wait);

	if (lock_mutex_and_test_alive(stdev))
		return POLLIN | POLLRDHUP | POLLOUT | POLLERR | POLLHUP;

	mutex_unlock(&stdev->mrpc_mutex);

	if (try_wait_for_completion(&stuser->comp))
		ret |= POLLIN | POLLRDNORM;

	if (stuser->event_cnt != atomic_read(&stdev->event_cnt))
		ret |= POLLPRI | POLLRDBAND;

	return ret;
}

static const struct file_operations switchtec_fops = {
	.owner = THIS_MODULE,
	.open = switchtec_dev_open,
	.release = switchtec_dev_release,
	.write = switchtec_dev_write,
	.read = switchtec_dev_read,
	.poll = switchtec_dev_poll,
};
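
/*
 * Character device protocol implemented by the handlers above (sketch
 * only; the /dev/switchtecN path assumes the usual udev handling of the
 * "switchtec" class device created in stdev_create()):
 *
 *	fd = open("/dev/switchtec0", O_RDWR);
 *	write(fd, buf, 4 + input_len);	32-bit MRPC command number followed
 *					by up to 1024 bytes of input data
 *	read(fd, buf, 4 + output_len);	blocks (unless O_NONBLOCK) until the
 *					command completes; returns the 32-bit
 *					MRPC return code followed by the
 *					output data
 *
 * poll() reports POLLIN | POLLRDNORM once a queued command has completed.
 */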

static void stdev_release(struct device *dev)
{
	struct switchtec_dev *stdev = to_stdev(dev);

	kfree(stdev);
}

static void stdev_kill(struct switchtec_dev *stdev)
{
	struct switchtec_user *stuser, *tmpuser;

	pci_clear_master(stdev->pdev);

	cancel_delayed_work_sync(&stdev->mrpc_timeout);

	/* Mark the hardware as unavailable and complete all completions */
	mutex_lock(&stdev->mrpc_mutex);
	stdev->alive = false;

	/* Wake up and kill any users waiting on an MRPC request */
	list_for_each_entry_safe(stuser, tmpuser, &stdev->mrpc_queue, list) {
		complete_all(&stuser->comp);
		list_del_init(&stuser->list);
		stuser_put(stuser);
	}

	mutex_unlock(&stdev->mrpc_mutex);

	/* Wake up any users waiting on event_wq */
	wake_up_interruptible(&stdev->event_wq);
}

static struct switchtec_dev *stdev_create(struct pci_dev *pdev)
{
	struct switchtec_dev *stdev;
	int minor;
	struct device *dev;
	struct cdev *cdev;
	int rc;

	stdev = kzalloc_node(sizeof(*stdev), GFP_KERNEL,
			     dev_to_node(&pdev->dev));
	if (!stdev)
		return ERR_PTR(-ENOMEM);

	stdev->alive = true;
	stdev->pdev = pdev;
	INIT_LIST_HEAD(&stdev->mrpc_queue);
	mutex_init(&stdev->mrpc_mutex);
	stdev->mrpc_busy = 0;
	INIT_WORK(&stdev->mrpc_work, mrpc_event_work);
	INIT_DELAYED_WORK(&stdev->mrpc_timeout, mrpc_timeout_work);
	init_waitqueue_head(&stdev->event_wq);
	atomic_set(&stdev->event_cnt, 0);

	dev = &stdev->dev;
	device_initialize(dev);
	dev->class = switchtec_class;
	dev->parent = &pdev->dev;
	dev->groups = switchtec_device_groups;
	dev->release = stdev_release;

	minor = ida_simple_get(&switchtec_minor_ida, 0, 0,
			       GFP_KERNEL);
	if (minor < 0) {
		rc = minor;
		goto err_put;
	}

	dev->devt = MKDEV(MAJOR(switchtec_devt), minor);
	dev_set_name(dev, "switchtec%d", minor);

	cdev = &stdev->cdev;
	cdev_init(cdev, &switchtec_fops);
	cdev->owner = THIS_MODULE;
	cdev->kobj.parent = &dev->kobj;

	return stdev;

err_put:
	put_device(&stdev->dev);
	return ERR_PTR(rc);
}

static irqreturn_t switchtec_event_isr(int irq, void *dev)
{
	struct switchtec_dev *stdev = dev;
	u32 reg;
	irqreturn_t ret = IRQ_NONE;

	reg = ioread32(&stdev->mmio_part_cfg->mrpc_comp_hdr);
	if (reg & SWITCHTEC_EVENT_OCCURRED) {
		dev_dbg(&stdev->dev, "%s: mrpc comp\n", __func__);
		ret = IRQ_HANDLED;
		schedule_work(&stdev->mrpc_work);
		iowrite32(reg, &stdev->mmio_part_cfg->mrpc_comp_hdr);
	}

	return ret;
}

static int switchtec_init_isr(struct switchtec_dev *stdev)
{
	int nvecs;
	int event_irq;

	nvecs = pci_alloc_irq_vectors(stdev->pdev, 1, 4,
				      PCI_IRQ_MSIX | PCI_IRQ_MSI);
	if (nvecs < 0)
		return nvecs;

	event_irq = ioread32(&stdev->mmio_part_cfg->vep_vector_number);
	if (event_irq < 0 || event_irq >= nvecs)
		return -EFAULT;

	event_irq = pci_irq_vector(stdev->pdev, event_irq);
	if (event_irq < 0)
		return event_irq;

	return devm_request_irq(&stdev->pdev->dev, event_irq,
				switchtec_event_isr, 0,
				KBUILD_MODNAME, stdev);
}

static void init_pff(struct switchtec_dev *stdev)
{
	int i;
	u32 reg;
	struct part_cfg_regs *pcfg = stdev->mmio_part_cfg;

	for (i = 0; i < SWITCHTEC_MAX_PFF_CSR; i++) {
		reg = ioread16(&stdev->mmio_pff_csr[i].vendor_id);
		if (reg != MICROSEMI_VENDOR_ID)
			break;
	}

	stdev->pff_csr_count = i;

	reg = ioread32(&pcfg->usp_pff_inst_id);
	if (reg < SWITCHTEC_MAX_PFF_CSR)
		stdev->pff_local[reg] = 1;

	reg = ioread32(&pcfg->vep_pff_inst_id);
	if (reg < SWITCHTEC_MAX_PFF_CSR)
		stdev->pff_local[reg] = 1;

	for (i = 0; i < ARRAY_SIZE(pcfg->dsp_pff_inst_id); i++) {
		reg = ioread32(&pcfg->dsp_pff_inst_id[i]);
		if (reg < SWITCHTEC_MAX_PFF_CSR)
			stdev->pff_local[reg] = 1;
	}
}
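
/*
 * init_pff() sizes pff_csr_count by scanning the PFF CSR array until an
 * entry no longer reports the Microsemi vendor ID, then flags the
 * instances referenced by this partition's usp/vep/dsp IDs in
 * pff_local[].
 */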

static int switchtec_init_pci(struct switchtec_dev *stdev,
			      struct pci_dev *pdev)
{
	int rc;

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 0x1, KBUILD_MODNAME);
	if (rc)
		return rc;

	pci_set_master(pdev);

	stdev->mmio = pcim_iomap_table(pdev)[0];
	stdev->mmio_mrpc = stdev->mmio + SWITCHTEC_GAS_MRPC_OFFSET;
	stdev->mmio_sw_event = stdev->mmio + SWITCHTEC_GAS_SW_EVENT_OFFSET;
	stdev->mmio_sys_info = stdev->mmio + SWITCHTEC_GAS_SYS_INFO_OFFSET;
	stdev->mmio_flash_info = stdev->mmio + SWITCHTEC_GAS_FLASH_INFO_OFFSET;
	stdev->mmio_ntb = stdev->mmio + SWITCHTEC_GAS_NTB_OFFSET;
	stdev->partition = ioread8(&stdev->mmio_ntb->partition_id);
	stdev->partition_count = ioread8(&stdev->mmio_ntb->partition_count);
	stdev->mmio_part_cfg_all = stdev->mmio + SWITCHTEC_GAS_PART_CFG_OFFSET;
	stdev->mmio_part_cfg = &stdev->mmio_part_cfg_all[stdev->partition];
	stdev->mmio_pff_csr = stdev->mmio + SWITCHTEC_GAS_PFF_CSR_OFFSET;

	init_pff(stdev);

	pci_set_drvdata(pdev, stdev);

	return 0;
}

static int switchtec_pci_probe(struct pci_dev *pdev,
			       const struct pci_device_id *id)
{
	struct switchtec_dev *stdev;
	int rc;

	stdev = stdev_create(pdev);
	if (IS_ERR(stdev))
		return PTR_ERR(stdev);

	rc = switchtec_init_pci(stdev, pdev);
	if (rc)
		goto err_put;

	rc = switchtec_init_isr(stdev);
	if (rc) {
		dev_err(&stdev->dev, "failed to init isr.\n");
		goto err_put;
	}

	iowrite32(SWITCHTEC_EVENT_CLEAR |
		  SWITCHTEC_EVENT_EN_IRQ,
		  &stdev->mmio_part_cfg->mrpc_comp_hdr);

	rc = cdev_add(&stdev->cdev, stdev->dev.devt, 1);
	if (rc)
		goto err_put;

	rc = device_add(&stdev->dev);
	if (rc)
		goto err_devadd;

	dev_info(&stdev->dev, "Management device registered.\n");

	return 0;

err_devadd:
	cdev_del(&stdev->cdev);
	stdev_kill(stdev);
err_put:
	ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt));
	put_device(&stdev->dev);
	return rc;
}

static void switchtec_pci_remove(struct pci_dev *pdev)
{
	struct switchtec_dev *stdev = pci_get_drvdata(pdev);

	pci_set_drvdata(pdev, NULL);

	device_del(&stdev->dev);
	cdev_del(&stdev->cdev);
	ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt));
	dev_info(&stdev->dev, "unregistered.\n");

	stdev_kill(stdev);
	put_device(&stdev->dev);
}

#define SWITCHTEC_PCI_DEVICE(device_id) \
	{ \
		.vendor = MICROSEMI_VENDOR_ID, \
		.device = device_id, \
		.subvendor = PCI_ANY_ID, \
		.subdevice = PCI_ANY_ID, \
		.class = MICROSEMI_MGMT_CLASSCODE, \
		.class_mask = 0xFFFFFFFF, \
	}, \
	{ \
		.vendor = MICROSEMI_VENDOR_ID, \
		.device = device_id, \
		.subvendor = PCI_ANY_ID, \
		.subdevice = PCI_ANY_ID, \
		.class = MICROSEMI_NTB_CLASSCODE, \
		.class_mask = 0xFFFFFFFF, \
	}
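
/*
 * SWITCHTEC_PCI_DEVICE() expands to two match entries per part number:
 * one for the management-endpoint class code and one for the NTB class
 * code, so the driver binds to the device in either configuration.
 */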

static const struct pci_device_id switchtec_pci_tbl[] = {
	SWITCHTEC_PCI_DEVICE(0x8531),  //PFX 24xG3
	SWITCHTEC_PCI_DEVICE(0x8532),  //PFX 32xG3
	SWITCHTEC_PCI_DEVICE(0x8533),  //PFX 48xG3
	SWITCHTEC_PCI_DEVICE(0x8534),  //PFX 64xG3
	SWITCHTEC_PCI_DEVICE(0x8535),  //PFX 80xG3
	SWITCHTEC_PCI_DEVICE(0x8536),  //PFX 96xG3
	SWITCHTEC_PCI_DEVICE(0x8543),  //PSX 48xG3
	SWITCHTEC_PCI_DEVICE(0x8544),  //PSX 64xG3
	SWITCHTEC_PCI_DEVICE(0x8545),  //PSX 80xG3
	SWITCHTEC_PCI_DEVICE(0x8546),  //PSX 96xG3
	{0}
};
MODULE_DEVICE_TABLE(pci, switchtec_pci_tbl);

static struct pci_driver switchtec_pci_driver = {
	.name = KBUILD_MODNAME,
	.id_table = switchtec_pci_tbl,
	.probe = switchtec_pci_probe,
	.remove = switchtec_pci_remove,
};

static int __init switchtec_init(void)
{
	int rc;

	rc = alloc_chrdev_region(&switchtec_devt, 0, max_devices,
				 "switchtec");
	if (rc)
		return rc;

	switchtec_class = class_create(THIS_MODULE, "switchtec");
	if (IS_ERR(switchtec_class)) {
		rc = PTR_ERR(switchtec_class);
		goto err_create_class;
	}

	rc = pci_register_driver(&switchtec_pci_driver);
	if (rc)
		goto err_pci_register;

	pr_info(KBUILD_MODNAME ": loaded.\n");

	return 0;

err_pci_register:
	class_destroy(switchtec_class);

err_create_class:
	unregister_chrdev_region(switchtec_devt, max_devices);

	return rc;
}
module_init(switchtec_init);

static void __exit switchtec_exit(void)
{
	pci_unregister_driver(&switchtec_pci_driver);
	class_destroy(switchtec_class);
	unregister_chrdev_region(switchtec_devt, max_devices);
	ida_destroy(&switchtec_minor_ida);

	pr_info(KBUILD_MODNAME ": unloaded.\n");
}
module_exit(switchtec_exit);