blob: f6a63406c76e0b170e348c882abcf2723f2d401d [file] [log] [blame]
Logan Gunthorpe080b47d2017-03-06 18:30:54 -06001/*
2 * Microsemi Switchtec(tm) PCIe Management Driver
3 * Copyright (c) 2017, Microsemi Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 */
15
Logan Gunthorpe52eabba2017-03-02 16:24:34 -070016#include <linux/switchtec_ioctl.h>
17
Logan Gunthorpe080b47d2017-03-06 18:30:54 -060018#include <linux/interrupt.h>
19#include <linux/module.h>
20#include <linux/fs.h>
21#include <linux/uaccess.h>
22#include <linux/poll.h>
23#include <linux/pci.h>
24#include <linux/cdev.h>
25#include <linux/wait.h>
26
MODULE_DESCRIPTION("Microsemi Switchtec(tm) PCIe Management Driver");
MODULE_VERSION("0.1");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Microsemi Corporation");

/* Number of char device minors reserved for switchtec instances. */
static int max_devices = 16;
module_param(max_devices, int, 0644);
MODULE_PARM_DESC(max_devices, "max number of switchtec device instances");

static dev_t switchtec_devt;			/* base of the reserved dev_t region */
static struct class *switchtec_class;		/* device class for all instances */
static DEFINE_IDA(switchtec_minor_ida);		/* allocator for per-device minors */

#define MICROSEMI_VENDOR_ID         0x11f8
#define MICROSEMI_NTB_CLASSCODE     0x068000
#define MICROSEMI_MGMT_CLASSCODE    0x058000

#define SWITCHTEC_MRPC_PAYLOAD_SIZE 1024
#define SWITCHTEC_MAX_PFF_CSR 48

/*
 * Bits in the per-event header registers; see event_ctl() for how they
 * are read, modified and written back.  OCCURRED and CLEAR share bit 0:
 * it reads as "event occurred" and writing it back clears the event.
 */
#define SWITCHTEC_EVENT_OCCURRED BIT(0)
#define SWITCHTEC_EVENT_CLEAR    BIT(0)
#define SWITCHTEC_EVENT_EN_LOG   BIT(1)
#define SWITCHTEC_EVENT_EN_CLI   BIT(2)
#define SWITCHTEC_EVENT_EN_IRQ   BIT(3)
#define SWITCHTEC_EVENT_FATAL    BIT(4)
53
/* Offsets of each register region within the device's GAS BAR mapping. */
enum {
	SWITCHTEC_GAS_MRPC_OFFSET	= 0x0000,
	SWITCHTEC_GAS_TOP_CFG_OFFSET	= 0x1000,
	SWITCHTEC_GAS_SW_EVENT_OFFSET	= 0x1800,
	SWITCHTEC_GAS_SYS_INFO_OFFSET	= 0x2000,
	SWITCHTEC_GAS_FLASH_INFO_OFFSET	= 0x2200,
	SWITCHTEC_GAS_PART_CFG_OFFSET	= 0x4000,
	SWITCHTEC_GAS_NTB_OFFSET	= 0x10000,
	SWITCHTEC_GAS_PFF_CSR_OFFSET	= 0x134000,
};

/*
 * MRPC (management RPC) mailbox layout: input payload is written, the
 * cmd register is written to kick off execution, then status/ret_value
 * and the output payload are read back.  Layout is fixed by hardware,
 * hence __packed.
 */
struct mrpc_regs {
	u8 input_data[SWITCHTEC_MRPC_PAYLOAD_SIZE];
	u8 output_data[SWITCHTEC_MRPC_PAYLOAD_SIZE];
	u32 cmd;
	u32 status;
	u32 ret_value;
} __packed;

/* Values read from mrpc_regs.status. */
enum mrpc_status {
	SWITCHTEC_MRPC_STATUS_INPROGRESS = 1,
	SWITCHTEC_MRPC_STATUS_DONE = 2,
	SWITCHTEC_MRPC_STATUS_ERROR = 0xFF,
	SWITCHTEC_MRPC_STATUS_INTERRUPTED = 0x100,
};
79
/*
 * Global (switch-wide) event region.  Each event has a header register
 * (using the SWITCHTEC_EVENT_* bits) optionally followed by data words;
 * layout is fixed by hardware, hence __packed.
 */
struct sw_event_regs {
	u64 event_report_ctrl;
	u64 reserved1;
	u64 part_event_bitmap;
	u64 reserved2;
	u32 global_summary;
	u32 reserved3[3];
	u32 stack_error_event_hdr;
	u32 stack_error_event_data;
	u32 reserved4[4];
	u32 ppu_error_event_hdr;
	u32 ppu_error_event_data;
	u32 reserved5[4];
	u32 isp_error_event_hdr;
	u32 isp_error_event_data;
	u32 reserved6[4];
	u32 sys_reset_event_hdr;
	u32 reserved7[5];
	u32 fw_exception_hdr;
	u32 reserved8[5];
	u32 fw_nmi_hdr;
	u32 reserved9[5];
	u32 fw_non_fatal_hdr;
	u32 reserved10[5];
	u32 fw_fatal_hdr;
	u32 reserved11[5];
	u32 twi_mrpc_comp_hdr;
	u32 twi_mrpc_comp_data;
	u32 reserved12[4];
	u32 twi_mrpc_comp_async_hdr;
	u32 twi_mrpc_comp_async_data;
	u32 reserved13[4];
	u32 cli_mrpc_comp_hdr;
	u32 cli_mrpc_comp_data;
	u32 reserved14[4];
	u32 cli_mrpc_comp_async_hdr;
	u32 cli_mrpc_comp_async_data;
	u32 reserved15[4];
	u32 gpio_interrupt_hdr;
	u32 gpio_interrupt_data;
	u32 reserved16[4];
} __packed;
122
/*
 * System information region: device/firmware identity exported through
 * the sysfs attributes below.  The char fields are fixed-width,
 * space-padded and not necessarily NUL-terminated (see io_string_show()).
 */
struct sys_info_regs {
	u32 device_id;
	u32 device_version;
	u32 firmware_version;
	u32 reserved1;
	u32 vendor_table_revision;
	u32 table_format_version;
	u32 partition_id;
	u32 cfg_file_fmt_version;
	u32 reserved2[58];
	char vendor_id[8];
	char product_id[16];
	char product_revision[4];
	char component_vendor[8];
	u16 component_id;
	u8 component_revision;
} __packed;
140
/*
 * Flash partition map, consumed by the FLASH_INFO/FLASH_PART_INFO ioctls.
 *
 * NOTE(review): unlike the other register structs this one is not marked
 * __packed.  All members are naturally aligned u32s so the layout comes
 * out the same, but adding __packed would be consistent — confirm intent.
 */
struct flash_info_regs {
	u32 flash_part_map_upd_idx;

	/* address/version of the currently-active image or config */
	struct active_partition_info {
		u32 address;
		u32 build_version;
		u32 build_string;
	} active_img;

	struct active_partition_info active_cfg;
	struct active_partition_info inactive_img;
	struct active_partition_info inactive_cfg;

	u32 flash_length;

	/* extent of one flash partition */
	struct partition_info {
		u32 address;
		u32 length;
	} cfg0;

	struct partition_info cfg1;
	struct partition_info img0;
	struct partition_info img1;
	struct partition_info nvlog;
	struct partition_info vendor[8];
};

/* NTB endpoint-mapping information region. */
struct ntb_info_regs {
	u8  partition_count;
	u8  partition_id;
	u16 reserved1;
	u64 ep_map;
	u16 requester_id;
} __packed;
175
/*
 * Per-partition configuration/event region.  One instance per partition
 * in the mmio_part_cfg_all array; layout fixed by hardware (__packed).
 */
struct part_cfg_regs {
	u32 status;
	u32 state;
	u32 port_cnt;
	u32 usp_port_mode;
	u32 usp_pff_inst_id;
	u32 vep_pff_inst_id;
	u32 dsp_pff_inst_id[47];
	u32 reserved1[11];
	u16 vep_vector_number;
	u16 usp_vector_number;
	u32 port_event_bitmap;
	u32 reserved2[3];
	u32 part_event_summary;
	u32 reserved3[3];
	u32 part_reset_hdr;
	u32 part_reset_data[5];
	u32 mrpc_comp_hdr;
	u32 mrpc_comp_data[5];
	u32 mrpc_comp_async_hdr;
	u32 mrpc_comp_async_data[5];
	u32 dyn_binding_hdr;
	u32 dyn_binding_data[5];
	u32 reserved4[159];
} __packed;

/* Bits in part_cfg_regs.part_event_summary. */
enum {
	SWITCHTEC_PART_CFG_EVENT_RESET = 1 << 0,
	SWITCHTEC_PART_CFG_EVENT_MRPC_CMP = 1 << 1,
	SWITCHTEC_PART_CFG_EVENT_MRPC_ASYNC_CMP = 1 << 2,
	SWITCHTEC_PART_CFG_EVENT_DYN_PART_CMP = 1 << 3,
};
208
/*
 * Per-PFF (PCI function framework instance) CSR region.  Valid instances
 * are detected by checking vendor_id against MICROSEMI_VENDOR_ID (see
 * ioctl_event_summary()).  Layout fixed by hardware (__packed).
 */
struct pff_csr_regs {
	u16 vendor_id;
	u16 device_id;
	u32 pci_cfg_header[15];
	u32 pci_cap_region[48];
	u32 pcie_cap_region[448];
	u32 indirect_gas_window[128];
	u32 indirect_gas_window_off;
	u32 reserved[127];
	u32 pff_event_summary;
	u32 reserved2[3];
	u32 aer_in_p2p_hdr;
	u32 aer_in_p2p_data[5];
	u32 aer_in_vep_hdr;
	u32 aer_in_vep_data[5];
	u32 dpc_hdr;
	u32 dpc_data[5];
	u32 cts_hdr;
	u32 cts_data[5];
	u32 reserved3[6];
	u32 hotplug_hdr;
	u32 hotplug_data[5];
	u32 ier_hdr;
	u32 ier_data[5];
	u32 threshold_hdr;
	u32 threshold_data[5];
	u32 power_mgmt_hdr;
	u32 power_mgmt_data[5];
	u32 tlp_throttling_hdr;
	u32 tlp_throttling_data[5];
	u32 force_speed_hdr;
	u32 force_speed_data[5];
	u32 credit_timeout_hdr;
	u32 credit_timeout_data[5];
	u32 link_state_hdr;
	u32 link_state_data[5];
	u32 reserved4[174];
} __packed;
247
/* Per-device state; one instance per probed switchtec PCI function. */
struct switchtec_dev {
	struct pci_dev *pdev;
	struct device dev;
	struct cdev cdev;

	int partition;				/* partition this function lives in */
	int partition_count;
	int pff_csr_count;			/* number of valid pff_csr instances */
	char pff_local[SWITCHTEC_MAX_PFF_CSR];

	/* base mapping plus typed views into each GAS region */
	void __iomem *mmio;
	struct mrpc_regs __iomem *mmio_mrpc;
	struct sw_event_regs __iomem *mmio_sw_event;
	struct sys_info_regs __iomem *mmio_sys_info;
	struct flash_info_regs __iomem *mmio_flash_info;
	struct ntb_info_regs __iomem *mmio_ntb;
	struct part_cfg_regs __iomem *mmio_part_cfg;	/* local partition */
	struct part_cfg_regs __iomem *mmio_part_cfg_all; /* array, all partitions */
	struct pff_csr_regs __iomem *mmio_pff_csr;

	/*
	 * The mrpc mutex must be held when accessing the other
	 * mrpc_ fields, alive flag and stuser->state field
	 */
	struct mutex mrpc_mutex;
	struct list_head mrpc_queue;		/* stusers waiting to run/running */
	int mrpc_busy;				/* a command is in flight */
	struct work_struct mrpc_work;		/* completion bottom half */
	struct delayed_work mrpc_timeout;	/* poll fallback, 500ms */
	bool alive;				/* cleared on device removal */

	wait_queue_head_t event_wq;
	atomic_t event_cnt;			/* bumped on each event interrupt */
};
282
283static struct switchtec_dev *to_stdev(struct device *dev)
284{
285 return container_of(dev, struct switchtec_dev, dev);
286}
287
/* Lifecycle of a user's MRPC command; guarded by stdev->mrpc_mutex. */
enum mrpc_state {
	MRPC_IDLE = 0,		/* no command submitted via this fd */
	MRPC_QUEUED,		/* on stdev->mrpc_queue, not started */
	MRPC_RUNNING,		/* submitted to the hardware mailbox */
	MRPC_DONE,		/* completed; result ready to read */
};

/* Per-open-file state; refcounted because the queue holds a reference. */
struct switchtec_user {
	struct switchtec_dev *stdev;

	enum mrpc_state state;

	struct completion comp;	/* signalled when the command completes */
	struct kref kref;
	struct list_head list;	/* entry on stdev->mrpc_queue */

	u32 cmd;		/* command id written to mrpc cmd register */
	u32 status;		/* last value read from mrpc status register */
	u32 return_code;
	size_t data_len;	/* bytes of input payload in data[] */
	size_t read_len;	/* bytes of output copied back on completion */
	unsigned char data[SWITCHTEC_MRPC_PAYLOAD_SIZE];
	int event_cnt;		/* snapshot of stdev->event_cnt for poll() */
};
312
/*
 * Allocate and initialise the per-open-file state.  Takes a reference on
 * the device so it cannot be released while the file is open.  Returns
 * an ERR_PTR on allocation failure.
 */
static struct switchtec_user *stuser_create(struct switchtec_dev *stdev)
{
	struct switchtec_user *stuser;

	stuser = kzalloc(sizeof(*stuser), GFP_KERNEL);
	if (!stuser)
		return ERR_PTR(-ENOMEM);

	get_device(&stdev->dev);
	stuser->stdev = stdev;
	kref_init(&stuser->kref);
	INIT_LIST_HEAD(&stuser->list);
	init_completion(&stuser->comp);
	/* start the poll() event snapshot at the current count */
	stuser->event_cnt = atomic_read(&stdev->event_cnt);

	dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser);

	return stuser;
}
332
/* kref release callback: drop the device reference and free the state. */
static void stuser_free(struct kref *kref)
{
	struct switchtec_user *stuser;

	stuser = container_of(kref, struct switchtec_user, kref);

	dev_dbg(&stuser->stdev->dev, "%s: %p\n", __func__, stuser);

	put_device(&stuser->stdev->dev);
	kfree(stuser);
}

/* Drop one reference; frees the stuser when the last reference goes. */
static void stuser_put(struct switchtec_user *stuser)
{
	kref_put(&stuser->kref, stuser_free);
}
349
/* Transition a stuser's MRPC state, with a debug trace of the change. */
static void stuser_set_state(struct switchtec_user *stuser,
			     enum mrpc_state state)
{
	/* requires the mrpc_mutex to already be held when called */

	const char * const state_names[] = {
		[MRPC_IDLE] = "IDLE",
		[MRPC_QUEUED] = "QUEUED",
		[MRPC_RUNNING] = "RUNNING",
		[MRPC_DONE] = "DONE",
	};

	stuser->state = state;

	dev_dbg(&stuser->stdev->dev, "stuser state %p -> %s",
		stuser, state_names[state]);
}
367
static void mrpc_complete_cmd(struct switchtec_dev *stdev);

/*
 * If the hardware mailbox is idle, start the command at the head of the
 * queue: copy the input payload in and write the cmd register, which
 * kicks off execution in the firmware.
 */
static void mrpc_cmd_submit(struct switchtec_dev *stdev)
{
	/* requires the mrpc_mutex to already be held when called */

	struct switchtec_user *stuser;

	if (stdev->mrpc_busy)
		return;

	if (list_empty(&stdev->mrpc_queue))
		return;

	stuser = list_entry(stdev->mrpc_queue.next, struct switchtec_user,
			    list);

	stuser_set_state(stuser, MRPC_RUNNING);
	stdev->mrpc_busy = 1;
	memcpy_toio(&stdev->mmio_mrpc->input_data,
		    stuser->data, stuser->data_len);
	iowrite32(stuser->cmd, &stdev->mmio_mrpc->cmd);

	/* the command may already have finished; complete it right away */
	stuser->status = ioread32(&stdev->mmio_mrpc->status);
	if (stuser->status != SWITCHTEC_MRPC_STATUS_INPROGRESS)
		mrpc_complete_cmd(stdev);

	/* fallback poll in case no completion interrupt arrives */
	schedule_delayed_work(&stdev->mrpc_timeout,
			      msecs_to_jiffies(500));
}
398
/*
 * Queue a user's command and try to submit it.  Takes an extra reference
 * on the stuser for the queue's benefit; it is dropped by
 * mrpc_complete_cmd() when the command finishes.
 */
static int mrpc_queue_cmd(struct switchtec_user *stuser)
{
	/* requires the mrpc_mutex to already be held when called */

	struct switchtec_dev *stdev = stuser->stdev;

	kref_get(&stuser->kref);
	stuser->read_len = sizeof(stuser->data);
	stuser_set_state(stuser, MRPC_QUEUED);
	init_completion(&stuser->comp);
	list_add_tail(&stuser->list, &stdev->mrpc_queue);

	mrpc_cmd_submit(stdev);

	return 0;
}
415
/*
 * Check whether the in-flight command (head of the queue) has finished;
 * if so, collect its result, wake the waiter, dequeue it and submit the
 * next queued command.
 */
static void mrpc_complete_cmd(struct switchtec_dev *stdev)
{
	/* requires the mrpc_mutex to already be held when called */
	struct switchtec_user *stuser;

	if (list_empty(&stdev->mrpc_queue))
		return;

	stuser = list_entry(stdev->mrpc_queue.next, struct switchtec_user,
			    list);

	stuser->status = ioread32(&stdev->mmio_mrpc->status);
	if (stuser->status == SWITCHTEC_MRPC_STATUS_INPROGRESS)
		return;

	stuser_set_state(stuser, MRPC_DONE);
	stuser->return_code = 0;

	if (stuser->status != SWITCHTEC_MRPC_STATUS_DONE)
		goto out;

	stuser->return_code = ioread32(&stdev->mmio_mrpc->ret_value);
	if (stuser->return_code != 0)
		goto out;

	/* only copy output on full success; read_len was set at queue time */
	memcpy_fromio(stuser->data, &stdev->mmio_mrpc->output_data,
		      stuser->read_len);

out:
	complete_all(&stuser->comp);
	list_del_init(&stuser->list);
	stuser_put(stuser);		/* drop the queue's reference */
	stdev->mrpc_busy = 0;

	mrpc_cmd_submit(stdev);
}
452
/*
 * Bottom half for the MRPC completion event: cancel the poll fallback
 * and complete the in-flight command under the mutex.
 */
static void mrpc_event_work(struct work_struct *work)
{
	struct switchtec_dev *stdev;

	stdev = container_of(work, struct switchtec_dev, mrpc_work);

	dev_dbg(&stdev->dev, "%s\n", __func__);

	mutex_lock(&stdev->mrpc_mutex);
	cancel_delayed_work(&stdev->mrpc_timeout);
	mrpc_complete_cmd(stdev);
	mutex_unlock(&stdev->mrpc_mutex);
}
466
/*
 * Poll fallback: if the command is still in progress after 500ms,
 * re-arm the timer; otherwise complete it.  Covers the case where the
 * completion interrupt is missed or not enabled.
 */
static void mrpc_timeout_work(struct work_struct *work)
{
	struct switchtec_dev *stdev;
	u32 status;

	stdev = container_of(work, struct switchtec_dev, mrpc_timeout.work);

	dev_dbg(&stdev->dev, "%s\n", __func__);

	mutex_lock(&stdev->mrpc_mutex);

	status = ioread32(&stdev->mmio_mrpc->status);
	if (status == SWITCHTEC_MRPC_STATUS_INPROGRESS) {
		schedule_delayed_work(&stdev->mrpc_timeout,
				      msecs_to_jiffies(500));
		goto out;
	}

	mrpc_complete_cmd(stdev);

out:
	mutex_unlock(&stdev->mrpc_mutex);
}
490
Logan Gunthorpe5d8e1882017-03-02 16:24:33 -0700491static ssize_t device_version_show(struct device *dev,
492 struct device_attribute *attr, char *buf)
493{
494 struct switchtec_dev *stdev = to_stdev(dev);
495 u32 ver;
496
497 ver = ioread32(&stdev->mmio_sys_info->device_version);
498
499 return sprintf(buf, "%x\n", ver);
500}
501static DEVICE_ATTR_RO(device_version);
502
503static ssize_t fw_version_show(struct device *dev,
504 struct device_attribute *attr, char *buf)
505{
506 struct switchtec_dev *stdev = to_stdev(dev);
507 u32 ver;
508
509 ver = ioread32(&stdev->mmio_sys_info->firmware_version);
510
511 return sprintf(buf, "%08x\n", ver);
512}
513static DEVICE_ATTR_RO(fw_version);
514
515static ssize_t io_string_show(char *buf, void __iomem *attr, size_t len)
516{
517 int i;
518
519 memcpy_fromio(buf, attr, len);
520 buf[len] = '\n';
521 buf[len + 1] = 0;
522
523 for (i = len - 1; i > 0; i--) {
524 if (buf[i] != ' ')
525 break;
526 buf[i] = '\n';
527 buf[i + 1] = 0;
528 }
529
530 return strlen(buf);
531}
532
/*
 * Generate a read-only sysfs attribute for one of the fixed-width string
 * fields in struct sys_info_regs, formatted via io_string_show().
 */
#define DEVICE_ATTR_SYS_INFO_STR(field) \
static ssize_t field ## _show(struct device *dev, \
	struct device_attribute *attr, char *buf) \
{ \
	struct switchtec_dev *stdev = to_stdev(dev); \
	return io_string_show(buf, &stdev->mmio_sys_info->field, \
			      sizeof(stdev->mmio_sys_info->field)); \
} \
\
static DEVICE_ATTR_RO(field)

DEVICE_ATTR_SYS_INFO_STR(vendor_id);
DEVICE_ATTR_SYS_INFO_STR(product_id);
DEVICE_ATTR_SYS_INFO_STR(product_revision);
DEVICE_ATTR_SYS_INFO_STR(component_vendor);
548
549static ssize_t component_id_show(struct device *dev,
550 struct device_attribute *attr, char *buf)
551{
552 struct switchtec_dev *stdev = to_stdev(dev);
553 int id = ioread16(&stdev->mmio_sys_info->component_id);
554
555 return sprintf(buf, "PM%04X\n", id);
556}
557static DEVICE_ATTR_RO(component_id);
558
559static ssize_t component_revision_show(struct device *dev,
560 struct device_attribute *attr, char *buf)
561{
562 struct switchtec_dev *stdev = to_stdev(dev);
563 int rev = ioread8(&stdev->mmio_sys_info->component_revision);
564
565 return sprintf(buf, "%d\n", rev);
566}
567static DEVICE_ATTR_RO(component_revision);
568
569static ssize_t partition_show(struct device *dev,
570 struct device_attribute *attr, char *buf)
571{
572 struct switchtec_dev *stdev = to_stdev(dev);
573
574 return sprintf(buf, "%d\n", stdev->partition);
575}
576static DEVICE_ATTR_RO(partition);
577
578static ssize_t partition_count_show(struct device *dev,
579 struct device_attribute *attr, char *buf)
580{
581 struct switchtec_dev *stdev = to_stdev(dev);
582
583 return sprintf(buf, "%d\n", stdev->partition_count);
584}
585static DEVICE_ATTR_RO(partition_count);
586
/* All sysfs attributes registered with the class device. */
static struct attribute *switchtec_device_attrs[] = {
	&dev_attr_device_version.attr,
	&dev_attr_fw_version.attr,
	&dev_attr_vendor_id.attr,
	&dev_attr_product_id.attr,
	&dev_attr_product_revision.attr,
	&dev_attr_component_vendor.attr,
	&dev_attr_component_id.attr,
	&dev_attr_component_revision.attr,
	&dev_attr_partition.attr,
	&dev_attr_partition_count.attr,
	NULL,
};

ATTRIBUTE_GROUPS(switchtec_device);
602
/*
 * open(): allocate the per-file stuser state and mark the fd
 * non-seekable.  The stuser holds a reference on the device.
 */
static int switchtec_dev_open(struct inode *inode, struct file *filp)
{
	struct switchtec_dev *stdev;
	struct switchtec_user *stuser;

	stdev = container_of(inode->i_cdev, struct switchtec_dev, cdev);

	stuser = stuser_create(stdev);
	if (IS_ERR(stuser))
		return PTR_ERR(stuser);

	filp->private_data = stuser;
	nonseekable_open(inode, filp);

	dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser);

	return 0;
}
621
622static int switchtec_dev_release(struct inode *inode, struct file *filp)
623{
624 struct switchtec_user *stuser = filp->private_data;
625
626 stuser_put(stuser);
627
628 return 0;
629}
630
631static int lock_mutex_and_test_alive(struct switchtec_dev *stdev)
632{
633 if (mutex_lock_interruptible(&stdev->mrpc_mutex))
634 return -EINTR;
635
636 if (!stdev->alive) {
637 mutex_unlock(&stdev->mrpc_mutex);
638 return -ENODEV;
639 }
640
641 return 0;
642}
643
/*
 * write(): submit an MRPC command.  The user buffer is the 32-bit
 * command id followed by up to SWITCHTEC_MRPC_PAYLOAD_SIZE bytes of
 * input payload.  Only one command may be outstanding per fd (-EBADE
 * otherwise); the result is collected with a subsequent read().
 */
static ssize_t switchtec_dev_write(struct file *filp, const char __user *data,
				   size_t size, loff_t *off)
{
	struct switchtec_user *stuser = filp->private_data;
	struct switchtec_dev *stdev = stuser->stdev;
	int rc;

	if (size < sizeof(stuser->cmd) ||
	    size > sizeof(stuser->cmd) + sizeof(stuser->data))
		return -EINVAL;

	stuser->data_len = size - sizeof(stuser->cmd);

	rc = lock_mutex_and_test_alive(stdev);
	if (rc)
		return rc;

	if (stuser->state != MRPC_IDLE) {
		rc = -EBADE;
		goto out;
	}

	rc = copy_from_user(&stuser->cmd, data, sizeof(stuser->cmd));
	if (rc) {
		rc = -EFAULT;
		goto out;
	}

	/* payload follows the command id in the user buffer */
	data += sizeof(stuser->cmd);
	rc = copy_from_user(&stuser->data, data, size - sizeof(stuser->cmd));
	if (rc) {
		rc = -EFAULT;
		goto out;
	}

	rc = mrpc_queue_cmd(stuser);

out:
	mutex_unlock(&stdev->mrpc_mutex);

	if (rc)
		return rc;

	return size;
}
689
/*
 * read(): wait for the previously written command to complete and copy
 * back the 32-bit return code followed by the output payload.  With
 * O_NONBLOCK, returns -EAGAIN if the command has not completed yet.
 * Returns -EBADE if no command was submitted on this fd.
 */
static ssize_t switchtec_dev_read(struct file *filp, char __user *data,
				  size_t size, loff_t *off)
{
	struct switchtec_user *stuser = filp->private_data;
	struct switchtec_dev *stdev = stuser->stdev;
	int rc;

	if (size < sizeof(stuser->cmd) ||
	    size > sizeof(stuser->cmd) + sizeof(stuser->data))
		return -EINVAL;

	rc = lock_mutex_and_test_alive(stdev);
	if (rc)
		return rc;

	if (stuser->state == MRPC_IDLE) {
		mutex_unlock(&stdev->mrpc_mutex);
		return -EBADE;
	}

	/* limit how much output mrpc_complete_cmd() copies back for us */
	stuser->read_len = size - sizeof(stuser->return_code);

	/* drop the mutex while sleeping so completion can make progress */
	mutex_unlock(&stdev->mrpc_mutex);

	if (filp->f_flags & O_NONBLOCK) {
		if (!try_wait_for_completion(&stuser->comp))
			return -EAGAIN;
	} else {
		rc = wait_for_completion_interruptible(&stuser->comp);
		if (rc < 0)
			return rc;
	}

	/* re-acquire; the device may have gone away while we slept */
	rc = lock_mutex_and_test_alive(stdev);
	if (rc)
		return rc;

	if (stuser->state != MRPC_DONE) {
		mutex_unlock(&stdev->mrpc_mutex);
		return -EBADE;
	}

	rc = copy_to_user(data, &stuser->return_code,
			  sizeof(stuser->return_code));
	if (rc) {
		rc = -EFAULT;
		goto out;
	}

	data += sizeof(stuser->return_code);
	rc = copy_to_user(data, &stuser->data,
			  size - sizeof(stuser->return_code));
	if (rc) {
		rc = -EFAULT;
		goto out;
	}

	/* ready for the next write() on this fd */
	stuser_set_state(stuser, MRPC_IDLE);

out:
	mutex_unlock(&stdev->mrpc_mutex);

	if (stuser->status == SWITCHTEC_MRPC_STATUS_DONE)
		return size;
	else if (stuser->status == SWITCHTEC_MRPC_STATUS_INTERRUPTED)
		return -ENXIO;
	else
		return -EBADMSG;
}
759
/*
 * poll(): POLLIN when the submitted MRPC command has completed,
 * POLLPRI when new switch events have arrived since this fd last
 * acknowledged them (via the EVENT_SUMMARY ioctl).
 */
static unsigned int switchtec_dev_poll(struct file *filp, poll_table *wait)
{
	struct switchtec_user *stuser = filp->private_data;
	struct switchtec_dev *stdev = stuser->stdev;
	int ret = 0;

	poll_wait(filp, &stuser->comp.wait, wait);
	poll_wait(filp, &stdev->event_wq, wait);

	/* device gone: report everything so callers wake and see errors */
	if (lock_mutex_and_test_alive(stdev))
		return POLLIN | POLLRDHUP | POLLOUT | POLLERR | POLLHUP;

	mutex_unlock(&stdev->mrpc_mutex);

	if (try_wait_for_completion(&stuser->comp))
		ret |= POLLIN | POLLRDNORM;

	if (stuser->event_cnt != atomic_read(&stdev->event_cnt))
		ret |= POLLPRI | POLLRDBAND;

	return ret;
}
782
/* FLASH_INFO ioctl: report total flash length and partition count. */
static int ioctl_flash_info(struct switchtec_dev *stdev,
			    struct switchtec_ioctl_flash_info __user *uinfo)
{
	struct switchtec_ioctl_flash_info info = {0};
	struct flash_info_regs __iomem *fi = stdev->mmio_flash_info;

	info.flash_length = ioread32(&fi->flash_length);
	info.num_partitions = SWITCHTEC_IOCTL_NUM_PARTITIONS;

	if (copy_to_user(uinfo, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}

/* Copy one partition's address/length registers into the ioctl struct. */
static void set_fw_info_part(struct switchtec_ioctl_flash_part_info *info,
			     struct partition_info __iomem *pi)
{
	info->address = ioread32(&pi->address);
	info->length = ioread32(&pi->length);
}
804
/*
 * FLASH_PART_INFO ioctl: report address/length of the requested flash
 * partition and whether it is the currently active image/config.
 */
static int ioctl_flash_part_info(struct switchtec_dev *stdev,
	struct switchtec_ioctl_flash_part_info __user *uinfo)
{
	struct switchtec_ioctl_flash_part_info info = {0};
	struct flash_info_regs __iomem *fi = stdev->mmio_flash_info;
	/*
	 * -1 (no match possible) for partition types that have no
	 * active/inactive distinction, so info.active stays 0 for them
	 */
	u32 active_addr = -1;

	if (copy_from_user(&info, uinfo, sizeof(info)))
		return -EFAULT;

	switch (info.flash_partition) {
	case SWITCHTEC_IOCTL_PART_CFG0:
		active_addr = ioread32(&fi->active_cfg);
		set_fw_info_part(&info, &fi->cfg0);
		break;
	case SWITCHTEC_IOCTL_PART_CFG1:
		active_addr = ioread32(&fi->active_cfg);
		set_fw_info_part(&info, &fi->cfg1);
		break;
	case SWITCHTEC_IOCTL_PART_IMG0:
		active_addr = ioread32(&fi->active_img);
		set_fw_info_part(&info, &fi->img0);
		break;
	case SWITCHTEC_IOCTL_PART_IMG1:
		active_addr = ioread32(&fi->active_img);
		set_fw_info_part(&info, &fi->img1);
		break;
	case SWITCHTEC_IOCTL_PART_NVLOG:
		set_fw_info_part(&info, &fi->nvlog);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR0:
		set_fw_info_part(&info, &fi->vendor[0]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR1:
		set_fw_info_part(&info, &fi->vendor[1]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR2:
		set_fw_info_part(&info, &fi->vendor[2]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR3:
		set_fw_info_part(&info, &fi->vendor[3]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR4:
		set_fw_info_part(&info, &fi->vendor[4]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR5:
		set_fw_info_part(&info, &fi->vendor[5]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR6:
		set_fw_info_part(&info, &fi->vendor[6]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR7:
		set_fw_info_part(&info, &fi->vendor[7]);
		break;
	default:
		return -EINVAL;
	}

	if (info.address == active_addr)
		info.active = 1;

	if (copy_to_user(uinfo, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}
871
/*
 * EVENT_SUMMARY ioctl: snapshot the global, per-partition and per-PFF
 * event summary registers, and acknowledge events for this fd by
 * updating its event_cnt snapshot (see switchtec_dev_poll()).
 */
static int ioctl_event_summary(struct switchtec_dev *stdev,
	struct switchtec_user *stuser,
	struct switchtec_ioctl_event_summary __user *usum)
{
	struct switchtec_ioctl_event_summary s = {0};
	int i;
	u32 reg;

	s.global = ioread32(&stdev->mmio_sw_event->global_summary);
	s.part_bitmap = ioread32(&stdev->mmio_sw_event->part_event_bitmap);
	s.local_part = ioread32(&stdev->mmio_part_cfg->part_event_summary);

	/*
	 * NOTE(review): assumes partition_count never exceeds the size of
	 * s.part[] (declared in switchtec_ioctl.h) — confirm the bound.
	 */
	for (i = 0; i < stdev->partition_count; i++) {
		reg = ioread32(&stdev->mmio_part_cfg_all[i].part_event_summary);
		s.part[i] = reg;
	}

	for (i = 0; i < SWITCHTEC_MAX_PFF_CSR; i++) {
		/* a non-Microsemi vendor id marks the end of valid PFFs */
		reg = ioread16(&stdev->mmio_pff_csr[i].vendor_id);
		if (reg != MICROSEMI_VENDOR_ID)
			break;

		reg = ioread32(&stdev->mmio_pff_csr[i].pff_event_summary);
		s.pff[i] = reg;
	}

	if (copy_to_user(usum, &s, sizeof(s)))
		return -EFAULT;

	stuser->event_cnt = atomic_read(&stdev->event_cnt);

	return 0;
}
905
/*
 * The three map_reg callbacks below resolve an event's header register
 * address from a byte offset within the corresponding register region.
 * The index argument selects a partition or PFF instance; it is unused
 * for the (single) global region.
 */
static u32 __iomem *global_ev_reg(struct switchtec_dev *stdev,
				  size_t offset, int index)
{
	return (void __iomem *)stdev->mmio_sw_event + offset;
}

static u32 __iomem *part_ev_reg(struct switchtec_dev *stdev,
				size_t offset, int index)
{
	return (void __iomem *)&stdev->mmio_part_cfg_all[index] + offset;
}

static u32 __iomem *pff_ev_reg(struct switchtec_dev *stdev,
			       size_t offset, int index)
{
	return (void __iomem *)&stdev->mmio_pff_csr[index] + offset;
}

/* Build one event_regs[] entry: header offset + region-mapping callback. */
#define EV_GLB(i, r)[i] = {offsetof(struct sw_event_regs, r), global_ev_reg}
#define EV_PAR(i, r)[i] = {offsetof(struct part_cfg_regs, r), part_ev_reg}
#define EV_PFF(i, r)[i] = {offsetof(struct pff_csr_regs, r), pff_ev_reg}
927
928const struct event_reg {
929 size_t offset;
930 u32 __iomem *(*map_reg)(struct switchtec_dev *stdev,
931 size_t offset, int index);
932} event_regs[] = {
933 EV_GLB(SWITCHTEC_IOCTL_EVENT_STACK_ERROR, stack_error_event_hdr),
934 EV_GLB(SWITCHTEC_IOCTL_EVENT_PPU_ERROR, ppu_error_event_hdr),
935 EV_GLB(SWITCHTEC_IOCTL_EVENT_ISP_ERROR, isp_error_event_hdr),
936 EV_GLB(SWITCHTEC_IOCTL_EVENT_SYS_RESET, sys_reset_event_hdr),
937 EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_EXC, fw_exception_hdr),
938 EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_NMI, fw_nmi_hdr),
939 EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_NON_FATAL, fw_non_fatal_hdr),
940 EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_FATAL, fw_fatal_hdr),
941 EV_GLB(SWITCHTEC_IOCTL_EVENT_TWI_MRPC_COMP, twi_mrpc_comp_hdr),
942 EV_GLB(SWITCHTEC_IOCTL_EVENT_TWI_MRPC_COMP_ASYNC,
943 twi_mrpc_comp_async_hdr),
944 EV_GLB(SWITCHTEC_IOCTL_EVENT_CLI_MRPC_COMP, cli_mrpc_comp_hdr),
945 EV_GLB(SWITCHTEC_IOCTL_EVENT_CLI_MRPC_COMP_ASYNC,
946 cli_mrpc_comp_async_hdr),
947 EV_GLB(SWITCHTEC_IOCTL_EVENT_GPIO_INT, gpio_interrupt_hdr),
948 EV_PAR(SWITCHTEC_IOCTL_EVENT_PART_RESET, part_reset_hdr),
949 EV_PAR(SWITCHTEC_IOCTL_EVENT_MRPC_COMP, mrpc_comp_hdr),
950 EV_PAR(SWITCHTEC_IOCTL_EVENT_MRPC_COMP_ASYNC, mrpc_comp_async_hdr),
951 EV_PAR(SWITCHTEC_IOCTL_EVENT_DYN_PART_BIND_COMP, dyn_binding_hdr),
952 EV_PFF(SWITCHTEC_IOCTL_EVENT_AER_IN_P2P, aer_in_p2p_hdr),
953 EV_PFF(SWITCHTEC_IOCTL_EVENT_AER_IN_VEP, aer_in_vep_hdr),
954 EV_PFF(SWITCHTEC_IOCTL_EVENT_DPC, dpc_hdr),
955 EV_PFF(SWITCHTEC_IOCTL_EVENT_CTS, cts_hdr),
956 EV_PFF(SWITCHTEC_IOCTL_EVENT_HOTPLUG, hotplug_hdr),
957 EV_PFF(SWITCHTEC_IOCTL_EVENT_IER, ier_hdr),
958 EV_PFF(SWITCHTEC_IOCTL_EVENT_THRESH, threshold_hdr),
959 EV_PFF(SWITCHTEC_IOCTL_EVENT_POWER_MGMT, power_mgmt_hdr),
960 EV_PFF(SWITCHTEC_IOCTL_EVENT_TLP_THROTTLING, tlp_throttling_hdr),
961 EV_PFF(SWITCHTEC_IOCTL_EVENT_FORCE_SPEED, force_speed_hdr),
962 EV_PFF(SWITCHTEC_IOCTL_EVENT_CREDIT_TIMEOUT, credit_timeout_hdr),
963 EV_PFF(SWITCHTEC_IOCTL_EVENT_LINK_STATE, link_state_hdr),
964};
965
/*
 * Resolve the MMIO address of an event's header register, validating the
 * event id and (for partition/PFF events) the instance index.  Returns
 * an ERR_PTR on invalid input.
 */
static u32 __iomem *event_hdr_addr(struct switchtec_dev *stdev,
				   int event_id, int index)
{
	size_t off;

	if (event_id < 0 || event_id >= SWITCHTEC_IOCTL_MAX_EVENTS)
		return ERR_PTR(-EINVAL);

	off = event_regs[event_id].offset;

	if (event_regs[event_id].map_reg == part_ev_reg) {
		/* LOCAL_PART_IDX is shorthand for this device's partition */
		if (index == SWITCHTEC_IOCTL_EVENT_LOCAL_PART_IDX)
			index = stdev->partition;
		else if (index < 0 || index >= stdev->partition_count)
			return ERR_PTR(-EINVAL);
	} else if (event_regs[event_id].map_reg == pff_ev_reg) {
		if (index < 0 || index >= stdev->pff_csr_count)
			return ERR_PTR(-EINVAL);
	}

	return event_regs[event_id].map_reg(stdev, off, index);
}
988
/*
 * Read one event's header and data registers, apply the enable/disable
 * flags requested in ctl, and report back the occurred/count state plus
 * the flags that are now in effect.
 */
static int event_ctl(struct switchtec_dev *stdev,
		     struct switchtec_ioctl_event_ctl *ctl)
{
	int i;
	u32 __iomem *reg;
	u32 hdr;

	reg = event_hdr_addr(stdev, ctl->event_id, ctl->index);
	if (IS_ERR(reg))
		return PTR_ERR(reg);

	hdr = ioread32(reg);
	/* data words immediately follow the header register */
	for (i = 0; i < ARRAY_SIZE(ctl->data); i++)
		ctl->data[i] = ioread32(&reg[i + 1]);

	ctl->occurred = hdr & SWITCHTEC_EVENT_OCCURRED;
	ctl->count = (hdr >> 5) & 0xFF;

	/*
	 * OCCURRED and CLEAR share bit 0: writing it back clears the
	 * event, so mask it out unless the caller asked for a clear.
	 */
	if (!(ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_CLEAR))
		hdr &= ~SWITCHTEC_EVENT_CLEAR;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_POLL)
		hdr |= SWITCHTEC_EVENT_EN_IRQ;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_POLL)
		hdr &= ~SWITCHTEC_EVENT_EN_IRQ;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_LOG)
		hdr |= SWITCHTEC_EVENT_EN_LOG;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_LOG)
		hdr &= ~SWITCHTEC_EVENT_EN_LOG;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_CLI)
		hdr |= SWITCHTEC_EVENT_EN_CLI;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_CLI)
		hdr &= ~SWITCHTEC_EVENT_EN_CLI;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_FATAL)
		hdr |= SWITCHTEC_EVENT_FATAL;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_FATAL)
		hdr &= ~SWITCHTEC_EVENT_FATAL;

	/* only touch the hardware if the caller requested some change */
	if (ctl->flags)
		iowrite32(hdr, reg);

	/* report the enable bits now in effect */
	ctl->flags = 0;
	if (hdr & SWITCHTEC_EVENT_EN_IRQ)
		ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_POLL;
	if (hdr & SWITCHTEC_EVENT_EN_LOG)
		ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_LOG;
	if (hdr & SWITCHTEC_EVENT_EN_CLI)
		ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_CLI;
	if (hdr & SWITCHTEC_EVENT_FATAL)
		ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_FATAL;

	return 0;
}
1041
/*
 * EVENT_CTL ioctl: apply event_ctl() to one event instance, or — with
 * index == IDX_ALL — to every instance of the event's region type.
 */
static int ioctl_event_ctl(struct switchtec_dev *stdev,
	struct switchtec_ioctl_event_ctl __user *uctl)
{
	int ret;
	int nr_idxs;
	struct switchtec_ioctl_event_ctl ctl;

	if (copy_from_user(&ctl, uctl, sizeof(ctl)))
		return -EFAULT;

	if (ctl.event_id >= SWITCHTEC_IOCTL_MAX_EVENTS)
		return -EINVAL;

	if (ctl.flags & SWITCHTEC_IOCTL_EVENT_FLAG_UNUSED)
		return -EINVAL;

	if (ctl.index == SWITCHTEC_IOCTL_EVENT_IDX_ALL) {
		/* instance count depends on which region the event lives in */
		if (event_regs[ctl.event_id].map_reg == global_ev_reg)
			nr_idxs = 1;
		else if (event_regs[ctl.event_id].map_reg == part_ev_reg)
			nr_idxs = stdev->partition_count;
		else if (event_regs[ctl.event_id].map_reg == pff_ev_reg)
			nr_idxs = stdev->pff_csr_count;
		else
			return -EINVAL;

		for (ctl.index = 0; ctl.index < nr_idxs; ctl.index++) {
			ret = event_ctl(stdev, &ctl);
			if (ret < 0)
				return ret;
		}
	} else {
		ret = event_ctl(stdev, &ctl);
		if (ret < 0)
			return ret;
	}

	/* ctl now holds the state of the last instance processed */
	if (copy_to_user(uctl, &ctl, sizeof(ctl)))
		return -EFAULT;

	return 0;
}
1084
1085static int ioctl_pff_to_port(struct switchtec_dev *stdev,
1086 struct switchtec_ioctl_pff_port *up)
1087{
1088 int i, part;
1089 u32 reg;
1090 struct part_cfg_regs *pcfg;
1091 struct switchtec_ioctl_pff_port p;
1092
1093 if (copy_from_user(&p, up, sizeof(p)))
1094 return -EFAULT;
1095
1096 p.port = -1;
1097 for (part = 0; part < stdev->partition_count; part++) {
1098 pcfg = &stdev->mmio_part_cfg_all[part];
1099 p.partition = part;
1100
1101 reg = ioread32(&pcfg->usp_pff_inst_id);
1102 if (reg == p.pff) {
1103 p.port = 0;
1104 break;
1105 }
1106
1107 reg = ioread32(&pcfg->vep_pff_inst_id);
1108 if (reg == p.pff) {
1109 p.port = SWITCHTEC_IOCTL_PFF_VEP;
1110 break;
1111 }
1112
1113 for (i = 0; i < ARRAY_SIZE(pcfg->dsp_pff_inst_id); i++) {
1114 reg = ioread32(&pcfg->dsp_pff_inst_id[i]);
1115 if (reg != p.pff)
1116 continue;
1117
1118 p.port = i + 1;
1119 break;
1120 }
1121
1122 if (p.port != -1)
1123 break;
1124 }
1125
1126 if (copy_to_user(up, &p, sizeof(p)))
1127 return -EFAULT;
1128
1129 return 0;
1130}
1131
/*
 * Translate a (partition, logical port) pair from userspace into the
 * PFF instance ID used by the event registers.
 *
 * Port 0 is the partition's upstream port, SWITCHTEC_IOCTL_PFF_VEP the
 * virtual EP, and ports 1..ARRAY_SIZE(dsp_pff_inst_id) map to the
 * downstream ports.  Returns -EINVAL for an unknown partition or
 * out-of-range port and -EFAULT on bad user memory.
 */
static int ioctl_port_to_pff(struct switchtec_dev *stdev,
			     struct switchtec_ioctl_pff_port *up)
{
	struct switchtec_ioctl_pff_port p;
	struct part_cfg_regs *pcfg;

	if (copy_from_user(&p, up, sizeof(p)))
		return -EFAULT;

	/* The special index means "the partition this driver instance owns" */
	if (p.partition == SWITCHTEC_IOCTL_EVENT_LOCAL_PART_IDX)
		pcfg = stdev->mmio_part_cfg;
	else if (p.partition < stdev->partition_count)
		pcfg = &stdev->mmio_part_cfg_all[p.partition];
	else
		return -EINVAL;

	switch (p.port) {
	case 0:
		p.pff = ioread32(&pcfg->usp_pff_inst_id);
		break;
	case SWITCHTEC_IOCTL_PFF_VEP:
		p.pff = ioread32(&pcfg->vep_pff_inst_id);
		break;
	default:
		/* Downstream ports are 1-based; port N lives at index N-1 */
		if (p.port > ARRAY_SIZE(pcfg->dsp_pff_inst_id))
			return -EINVAL;
		p.pff = ioread32(&pcfg->dsp_pff_inst_id[p.port - 1]);
		break;
	}

	if (copy_to_user(up, &p, sizeof(p)))
		return -EFAULT;

	return 0;
}
1167
1168static long switchtec_dev_ioctl(struct file *filp, unsigned int cmd,
1169 unsigned long arg)
1170{
1171 struct switchtec_user *stuser = filp->private_data;
1172 struct switchtec_dev *stdev = stuser->stdev;
1173 int rc;
1174 void __user *argp = (void __user *)arg;
1175
1176 rc = lock_mutex_and_test_alive(stdev);
1177 if (rc)
1178 return rc;
1179
1180 switch (cmd) {
1181 case SWITCHTEC_IOCTL_FLASH_INFO:
1182 rc = ioctl_flash_info(stdev, argp);
1183 break;
1184 case SWITCHTEC_IOCTL_FLASH_PART_INFO:
1185 rc = ioctl_flash_part_info(stdev, argp);
1186 break;
1187 case SWITCHTEC_IOCTL_EVENT_SUMMARY:
1188 rc = ioctl_event_summary(stdev, stuser, argp);
1189 break;
1190 case SWITCHTEC_IOCTL_EVENT_CTL:
1191 rc = ioctl_event_ctl(stdev, argp);
1192 break;
1193 case SWITCHTEC_IOCTL_PFF_TO_PORT:
1194 rc = ioctl_pff_to_port(stdev, argp);
1195 break;
1196 case SWITCHTEC_IOCTL_PORT_TO_PFF:
1197 rc = ioctl_port_to_pff(stdev, argp);
1198 break;
1199 default:
1200 rc = -ENOTTY;
1201 break;
1202 }
1203
1204 mutex_unlock(&stdev->mrpc_mutex);
1205 return rc;
1206}
1207
/*
 * File operations for /dev/switchtec<N>.  The same handler serves
 * .unlocked_ioctl and .compat_ioctl — NOTE(review): this presumes the
 * ioctl structures have the same layout on 32- and 64-bit ABIs; verify
 * against linux/switchtec_ioctl.h.
 */
static const struct file_operations switchtec_fops = {
	.owner = THIS_MODULE,
	.open = switchtec_dev_open,
	.release = switchtec_dev_release,
	.write = switchtec_dev_write,
	.read = switchtec_dev_read,
	.poll = switchtec_dev_poll,
	.unlocked_ioctl = switchtec_dev_ioctl,
	.compat_ioctl = switchtec_dev_ioctl,
};
1218
/*
 * Device-object destructor: called by the driver core when the last
 * reference to stdev->dev is dropped; frees the switchtec_dev that
 * stdev_create() allocated.
 */
static void stdev_release(struct device *dev)
{
	kfree(to_stdev(dev));
}
1225
/*
 * Tear down a live device: stop bus mastering, cancel the MRPC timeout
 * worker, mark the hardware dead under the MRPC mutex and release every
 * waiter.  Used both from the remove path and from probe error
 * handling; after it returns, stdev->alive is false so blocked users
 * see an error rather than hanging.
 */
static void stdev_kill(struct switchtec_dev *stdev)
{
	struct switchtec_user *stuser, *tmpuser;

	pci_clear_master(stdev->pdev);

	/* _sync: guarantee the timeout handler is not still running */
	cancel_delayed_work_sync(&stdev->mrpc_timeout);

	/* Mark the hardware as unavailable and complete all completions */
	mutex_lock(&stdev->mrpc_mutex);
	stdev->alive = false;

	/* Wake up and kill any users waiting on an MRPC request */
	list_for_each_entry_safe(stuser, tmpuser, &stdev->mrpc_queue, list) {
		complete_all(&stuser->comp);
		list_del_init(&stuser->list);
		stuser_put(stuser);	/* drop the queue's reference */
	}

	mutex_unlock(&stdev->mrpc_mutex);

	/* Wake up any users waiting on event_wq */
	wake_up_interruptible(&stdev->event_wq);
}
1250
/*
 * Allocate and initialize a switchtec_dev together with its embedded
 * struct device and cdev.
 *
 * The device is initialized but not yet visible to userspace; the
 * caller registers it later with cdev_device_add().  Returns an
 * ERR_PTR on failure.  Once device_initialize() has run, cleanup must
 * go through put_device() (which ends in stdev_release()), never a
 * direct kfree().
 */
static struct switchtec_dev *stdev_create(struct pci_dev *pdev)
{
	struct switchtec_dev *stdev;
	int minor;
	struct device *dev;
	struct cdev *cdev;
	int rc;

	/* Allocate on the PCI device's NUMA node */
	stdev = kzalloc_node(sizeof(*stdev), GFP_KERNEL,
			     dev_to_node(&pdev->dev));
	if (!stdev)
		return ERR_PTR(-ENOMEM);

	stdev->alive = true;
	stdev->pdev = pdev;
	INIT_LIST_HEAD(&stdev->mrpc_queue);
	mutex_init(&stdev->mrpc_mutex);
	stdev->mrpc_busy = 0;
	INIT_WORK(&stdev->mrpc_work, mrpc_event_work);
	INIT_DELAYED_WORK(&stdev->mrpc_timeout, mrpc_timeout_work);
	init_waitqueue_head(&stdev->event_wq);
	atomic_set(&stdev->event_cnt, 0);

	dev = &stdev->dev;
	device_initialize(dev);
	dev->class = switchtec_class;
	dev->parent = &pdev->dev;
	dev->groups = switchtec_device_groups;
	dev->release = stdev_release;

	/* Reserve a unique minor for the /dev/switchtec%d node */
	minor = ida_simple_get(&switchtec_minor_ida, 0, 0,
			       GFP_KERNEL);
	if (minor < 0) {
		rc = minor;
		goto err_put;
	}

	dev->devt = MKDEV(MAJOR(switchtec_devt), minor);
	dev_set_name(dev, "switchtec%d", minor);

	cdev = &stdev->cdev;
	cdev_init(cdev, &switchtec_fops);
	cdev->owner = THIS_MODULE;

	return stdev;

err_put:
	put_device(&stdev->dev);
	return ERR_PTR(rc);
}
1301
Logan Gunthorpe52eabba2017-03-02 16:24:34 -07001302static int mask_event(struct switchtec_dev *stdev, int eid, int idx)
1303{
1304 size_t off = event_regs[eid].offset;
1305 u32 __iomem *hdr_reg;
1306 u32 hdr;
1307
1308 hdr_reg = event_regs[eid].map_reg(stdev, off, idx);
1309 hdr = ioread32(hdr_reg);
1310
1311 if (!(hdr & SWITCHTEC_EVENT_OCCURRED && hdr & SWITCHTEC_EVENT_EN_IRQ))
1312 return 0;
1313
1314 dev_dbg(&stdev->dev, "%s: %d %d %x\n", __func__, eid, idx, hdr);
1315 hdr &= ~(SWITCHTEC_EVENT_EN_IRQ | SWITCHTEC_EVENT_OCCURRED);
1316 iowrite32(hdr, hdr_reg);
1317
1318 return 1;
1319}
1320
1321static int mask_all_events(struct switchtec_dev *stdev, int eid)
1322{
1323 int idx;
1324 int count = 0;
1325
1326 if (event_regs[eid].map_reg == part_ev_reg) {
1327 for (idx = 0; idx < stdev->partition_count; idx++)
1328 count += mask_event(stdev, eid, idx);
1329 } else if (event_regs[eid].map_reg == pff_ev_reg) {
1330 for (idx = 0; idx < stdev->pff_csr_count; idx++) {
1331 if (!stdev->pff_local[idx])
1332 continue;
1333 count += mask_event(stdev, eid, idx);
1334 }
1335 } else {
1336 count += mask_event(stdev, eid, 0);
1337 }
1338
1339 return count;
1340}
1341
/*
 * Interrupt handler for the event vector.
 *
 * Handles an MRPC completion by deferring the real work to
 * mrpc_event_work and acknowledging the header register, then masks
 * every other pending IRQ-enabled event so the line stops asserting.
 * If any event was masked, userspace waiters on event_wq are woken to
 * inspect and re-arm the events they care about.
 */
static irqreturn_t switchtec_event_isr(int irq, void *dev)
{
	struct switchtec_dev *stdev = dev;
	u32 reg;
	irqreturn_t ret = IRQ_NONE;
	int eid, event_count = 0;

	reg = ioread32(&stdev->mmio_part_cfg->mrpc_comp_hdr);
	if (reg & SWITCHTEC_EVENT_OCCURRED) {
		dev_dbg(&stdev->dev, "%s: mrpc comp\n", __func__);
		ret = IRQ_HANDLED;
		schedule_work(&stdev->mrpc_work);
		/* Write back the header as read to acknowledge the event */
		iowrite32(reg, &stdev->mmio_part_cfg->mrpc_comp_hdr);
	}

	for (eid = 0; eid < SWITCHTEC_IOCTL_MAX_EVENTS; eid++)
		event_count += mask_all_events(stdev, eid);

	if (event_count) {
		atomic_inc(&stdev->event_cnt);
		wake_up_interruptible(&stdev->event_wq);
		dev_dbg(&stdev->dev, "%s: %d events\n", __func__,
			event_count);
		return IRQ_HANDLED;
	}

	return ret;
}
1370
1371static int switchtec_init_isr(struct switchtec_dev *stdev)
1372{
1373 int nvecs;
1374 int event_irq;
1375
1376 nvecs = pci_alloc_irq_vectors(stdev->pdev, 1, 4,
1377 PCI_IRQ_MSIX | PCI_IRQ_MSI);
1378 if (nvecs < 0)
1379 return nvecs;
1380
1381 event_irq = ioread32(&stdev->mmio_part_cfg->vep_vector_number);
1382 if (event_irq < 0 || event_irq >= nvecs)
1383 return -EFAULT;
1384
1385 event_irq = pci_irq_vector(stdev->pdev, event_irq);
1386 if (event_irq < 0)
1387 return event_irq;
1388
1389 return devm_request_irq(&stdev->pdev->dev, event_irq,
1390 switchtec_event_isr, 0,
1391 KBUILD_MODNAME, stdev);
1392}
1393
/*
 * Discover the PFF CSR instances present on this device and record
 * which of them belong to the local partition.
 */
static void init_pff(struct switchtec_dev *stdev)
{
	int i;
	u32 reg;
	struct part_cfg_regs *pcfg = stdev->mmio_part_cfg;

	/* Count contiguous valid PFF CSRs by probing the vendor ID; the
	 * first non-Microsemi value marks the end of the populated range. */
	for (i = 0; i < SWITCHTEC_MAX_PFF_CSR; i++) {
		reg = ioread16(&stdev->mmio_pff_csr[i].vendor_id);
		if (reg != MICROSEMI_VENDOR_ID)
			break;
	}

	stdev->pff_csr_count = i;

	/* Flag the PFF instances referenced by our partition config
	 * (upstream port, virtual EP, downstream ports) as local.
	 * Out-of-range IDs — presumably an "unused" sentinel, TODO
	 * confirm against the register spec — are simply skipped. */
	reg = ioread32(&pcfg->usp_pff_inst_id);
	if (reg < SWITCHTEC_MAX_PFF_CSR)
		stdev->pff_local[reg] = 1;

	reg = ioread32(&pcfg->vep_pff_inst_id);
	if (reg < SWITCHTEC_MAX_PFF_CSR)
		stdev->pff_local[reg] = 1;

	for (i = 0; i < ARRAY_SIZE(pcfg->dsp_pff_inst_id); i++) {
		reg = ioread32(&pcfg->dsp_pff_inst_id[i]);
		if (reg < SWITCHTEC_MAX_PFF_CSR)
			stdev->pff_local[reg] = 1;
	}
}
1422
/*
 * Enable the PCI device, map BAR 0 (the GAS register space) and derive
 * pointers to the individual register regions within it.
 *
 * All resources are claimed through pcim_* managed helpers, so neither
 * this function nor its callers need an explicit unwind path for them.
 */
static int switchtec_init_pci(struct switchtec_dev *stdev,
			      struct pci_dev *pdev)
{
	int rc;

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/* Map only BAR 0 (bit 0 of the mask) */
	rc = pcim_iomap_regions(pdev, 0x1, KBUILD_MODNAME);
	if (rc)
		return rc;

	pci_set_master(pdev);

	stdev->mmio = pcim_iomap_table(pdev)[0];
	stdev->mmio_mrpc = stdev->mmio + SWITCHTEC_GAS_MRPC_OFFSET;
	stdev->mmio_sw_event = stdev->mmio + SWITCHTEC_GAS_SW_EVENT_OFFSET;
	stdev->mmio_sys_info = stdev->mmio + SWITCHTEC_GAS_SYS_INFO_OFFSET;
	stdev->mmio_flash_info = stdev->mmio + SWITCHTEC_GAS_FLASH_INFO_OFFSET;
	stdev->mmio_ntb = stdev->mmio + SWITCHTEC_GAS_NTB_OFFSET;
	stdev->partition = ioread8(&stdev->mmio_sys_info->partition_id);
	stdev->partition_count = ioread8(&stdev->mmio_ntb->partition_count);
	stdev->mmio_part_cfg_all = stdev->mmio + SWITCHTEC_GAS_PART_CFG_OFFSET;
	stdev->mmio_part_cfg = &stdev->mmio_part_cfg_all[stdev->partition];
	stdev->mmio_pff_csr = stdev->mmio + SWITCHTEC_GAS_PFF_CSR_OFFSET;

	/* A reported count of 0 (presumably a firmware quirk) would make
	 * the per-partition loops skip our own partition; clamp to 1. */
	if (stdev->partition_count < 1)
		stdev->partition_count = 1;

	init_pff(stdev);

	pci_set_drvdata(pdev, stdev);

	return 0;
}
1459
/*
 * Probe: create the device object, bring up the hardware and event
 * interrupt, then expose the char device to userspace.
 *
 * Error handling: err_devadd additionally runs stdev_kill() because at
 * that point the ISR and MRPC machinery are live; both paths release
 * the reserved minor and drop the device reference, which frees stdev
 * via stdev_release().
 */
static int switchtec_pci_probe(struct pci_dev *pdev,
			       const struct pci_device_id *id)
{
	struct switchtec_dev *stdev;
	int rc;

	stdev = stdev_create(pdev);
	if (IS_ERR(stdev))
		return PTR_ERR(stdev);

	rc = switchtec_init_pci(stdev, pdev);
	if (rc)
		goto err_put;

	rc = switchtec_init_isr(stdev);
	if (rc) {
		dev_err(&stdev->dev, "failed to init isr.\n");
		goto err_put;
	}

	/* Clear any stale MRPC completion and enable its interrupt */
	iowrite32(SWITCHTEC_EVENT_CLEAR |
		  SWITCHTEC_EVENT_EN_IRQ,
		  &stdev->mmio_part_cfg->mrpc_comp_hdr);

	rc = cdev_device_add(&stdev->cdev, &stdev->dev);
	if (rc)
		goto err_devadd;

	dev_info(&stdev->dev, "Management device registered.\n");

	return 0;

err_devadd:
	stdev_kill(stdev);
err_put:
	ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt));
	put_device(&stdev->dev);
	return rc;
}
1499
/*
 * Remove: tear down in reverse order of probe — hide the char device
 * from userspace first, release the minor, then kill in-flight work
 * and drop the last reference (freeing stdev via stdev_release()).
 */
static void switchtec_pci_remove(struct pci_dev *pdev)
{
	struct switchtec_dev *stdev = pci_get_drvdata(pdev);

	pci_set_drvdata(pdev, NULL);

	cdev_device_del(&stdev->cdev, &stdev->dev);
	ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt));
	dev_info(&stdev->dev, "unregistered.\n");

	stdev_kill(stdev);
	put_device(&stdev->dev);
}
1513
/*
 * Each device ID is matched under both the management and the NTB
 * class code, since the management endpoint can enumerate with either
 * depending on switch configuration.
 */
#define SWITCHTEC_PCI_DEVICE(device_id) \
	{ \
		.vendor = MICROSEMI_VENDOR_ID, \
		.device = device_id, \
		.subvendor = PCI_ANY_ID, \
		.subdevice = PCI_ANY_ID, \
		.class = MICROSEMI_MGMT_CLASSCODE, \
		.class_mask = 0xFFFFFFFF, \
	}, \
	{ \
		.vendor = MICROSEMI_VENDOR_ID, \
		.device = device_id, \
		.subvendor = PCI_ANY_ID, \
		.subdevice = PCI_ANY_ID, \
		.class = MICROSEMI_NTB_CLASSCODE, \
		.class_mask = 0xFFFFFFFF, \
	}

static const struct pci_device_id switchtec_pci_tbl[] = {
	SWITCHTEC_PCI_DEVICE(0x8531),  //PFX 24xG3
	SWITCHTEC_PCI_DEVICE(0x8532),  //PFX 32xG3
	SWITCHTEC_PCI_DEVICE(0x8533),  //PFX 48xG3
	SWITCHTEC_PCI_DEVICE(0x8534),  //PFX 64xG3
	SWITCHTEC_PCI_DEVICE(0x8535),  //PFX 80xG3
	SWITCHTEC_PCI_DEVICE(0x8536),  //PFX 96xG3
	SWITCHTEC_PCI_DEVICE(0x8543),  //PSX 48xG3
	SWITCHTEC_PCI_DEVICE(0x8544),  //PSX 64xG3
	SWITCHTEC_PCI_DEVICE(0x8545),  //PSX 80xG3
	SWITCHTEC_PCI_DEVICE(0x8546),  //PSX 96xG3
	{0}
};
MODULE_DEVICE_TABLE(pci, switchtec_pci_tbl);
1546
/* PCI driver glue binding switchtec_pci_tbl entries to probe/remove. */
static struct pci_driver switchtec_pci_driver = {
	.name = KBUILD_MODNAME,
	.id_table = switchtec_pci_tbl,
	.probe = switchtec_pci_probe,
	.remove = switchtec_pci_remove,
};
1553
/*
 * Module load: reserve a char-device region of max_devices minors and
 * create the "switchtec" class before registering the PCI driver; each
 * step is unwound in reverse on failure.
 */
static int __init switchtec_init(void)
{
	int rc;

	rc = alloc_chrdev_region(&switchtec_devt, 0, max_devices,
				 "switchtec");
	if (rc)
		return rc;

	switchtec_class = class_create(THIS_MODULE, "switchtec");
	if (IS_ERR(switchtec_class)) {
		rc = PTR_ERR(switchtec_class);
		goto err_create_class;
	}

	rc = pci_register_driver(&switchtec_pci_driver);
	if (rc)
		goto err_pci_register;

	pr_info(KBUILD_MODNAME ": loaded.\n");

	return 0;

err_pci_register:
	class_destroy(switchtec_class);

err_create_class:
	unregister_chrdev_region(switchtec_devt, max_devices);

	return rc;
}
module_init(switchtec_init);
1586
/*
 * Module unload: unregister in reverse order of switchtec_init() and
 * release the minor-number IDA.
 */
static void __exit switchtec_exit(void)
{
	pci_unregister_driver(&switchtec_pci_driver);
	class_destroy(switchtec_class);
	unregister_chrdev_region(switchtec_devt, max_devices);
	ida_destroy(&switchtec_minor_ida);

	pr_info(KBUILD_MODNAME ": unloaded.\n");
}
module_exit(switchtec_exit);