/*
 * drivers/misc/spear13xx_pcie_gadget.c
 *
 * Copyright (C) 2010 ST Microelectronics
 * Pratyush Anand <pratyush.anand@st.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pci_regs.h>
#include <linux/configfs.h>
#include <mach/pcie.h>
#include <mach/misc_regs.h>

#define IN0_MEM_SIZE	(200 * 1024 * 1024 - 1)
/*
 * In the current implementation address translation is done using IN0 only,
 * so the IN1 start address and the IN0 end address have been kept the same.
 */
#define IN1_MEM_SIZE	(0 * 1024 * 1024 - 1)
#define IN_IO_SIZE	(20 * 1024 * 1024 - 1)
#define IN_CFG0_SIZE	(12 * 1024 * 1024 - 1)
#define IN_CFG1_SIZE	(12 * 1024 * 1024 - 1)
#define IN_MSG_SIZE	(12 * 1024 * 1024 - 1)
/* Keep the default BAR size as 4K */
/* AORAM would be mapped by default */
#define INBOUND_ADDR_MASK	(SPEAR13XX_SYSRAM1_SIZE - 1)

#define INT_TYPE_NO_INT	0
#define INT_TYPE_INTX	1
#define INT_TYPE_MSI	2
struct spear_pcie_gadget_config {
        void __iomem *base;
        void __iomem *va_app_base;
        void __iomem *va_dbi_base;
        char int_type[10];
        ulong requested_msi;
        ulong configured_msi;
        ulong bar0_size;
        ulong bar0_rw_offset;
        void __iomem *va_bar0_address;
};

struct pcie_gadget_target {
        struct configfs_subsystem subsys;
        struct spear_pcie_gadget_config config;
};

struct pcie_gadget_target_attr {
        struct configfs_attribute attr;
        ssize_t (*show)(struct spear_pcie_gadget_config *config,
                        char *buf);
        ssize_t (*store)(struct spear_pcie_gadget_config *config,
                        const char *buf,
                        size_t count);
};

static void enable_dbi_access(struct pcie_app_reg __iomem *app_reg)
{
        /* Enable DBI access */
        writel(readl(&app_reg->slv_armisc) | (1 << AXI_OP_DBI_ACCESS_ID),
                        &app_reg->slv_armisc);
        writel(readl(&app_reg->slv_awmisc) | (1 << AXI_OP_DBI_ACCESS_ID),
                        &app_reg->slv_awmisc);
}

static void disable_dbi_access(struct pcie_app_reg __iomem *app_reg)
{
        /* Disable DBI access */
        writel(readl(&app_reg->slv_armisc) & ~(1 << AXI_OP_DBI_ACCESS_ID),
                        &app_reg->slv_armisc);
        writel(readl(&app_reg->slv_awmisc) & ~(1 << AXI_OP_DBI_ACCESS_ID),
                        &app_reg->slv_awmisc);
}

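/*
 * spear_dbi_read_reg()/spear_dbi_write_reg() access the controller's own
 * configuration space through the DBI slave port: DBI access is enabled
 * in the AXI read/write misc registers, the aligned 32-bit word at
 * @where is accessed (reads extract the requested byte/word, writes use
 * writew()/writeb() at the proper offset), and DBI access is disabled
 * again.
 */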
static void spear_dbi_read_reg(struct spear_pcie_gadget_config *config,
                int where, int size, u32 *val)
{
        struct pcie_app_reg __iomem *app_reg = config->va_app_base;
        ulong va_address;

        /* Enable DBI access */
        enable_dbi_access(app_reg);

        va_address = (ulong)config->va_dbi_base + (where & ~0x3);

        *val = readl(va_address);

        if (size == 1)
                *val = (*val >> (8 * (where & 3))) & 0xff;
        else if (size == 2)
                *val = (*val >> (8 * (where & 3))) & 0xffff;

        /* Disable DBI access */
        disable_dbi_access(app_reg);
}

static void spear_dbi_write_reg(struct spear_pcie_gadget_config *config,
                int where, int size, u32 val)
{
        struct pcie_app_reg __iomem *app_reg = config->va_app_base;
        ulong va_address;

        /* Enable DBI access */
        enable_dbi_access(app_reg);

        va_address = (ulong)config->va_dbi_base + (where & ~0x3);

        if (size == 4)
                writel(val, va_address);
        else if (size == 2)
                writew(val, va_address + (where & 2));
        else if (size == 1)
                writeb(val, va_address + (where & 3));

        /* Disable DBI access */
        disable_dbi_access(app_reg);
}

#define PCI_FIND_CAP_TTL	48

static int pci_find_own_next_cap_ttl(struct spear_pcie_gadget_config *config,
                u32 pos, int cap, int *ttl)
{
        u32 id;

        while ((*ttl)--) {
                spear_dbi_read_reg(config, pos, 1, &pos);
                if (pos < 0x40)
                        break;
                pos &= ~3;
                spear_dbi_read_reg(config, pos + PCI_CAP_LIST_ID, 1, &id);
                if (id == 0xff)
                        break;
                if (id == cap)
                        return pos;
                pos += PCI_CAP_LIST_NEXT;
        }
        return 0;
}

static int pci_find_own_next_cap(struct spear_pcie_gadget_config *config,
                u32 pos, int cap)
{
        int ttl = PCI_FIND_CAP_TTL;

        return pci_find_own_next_cap_ttl(config, pos, cap, &ttl);
}

static int pci_find_own_cap_start(struct spear_pcie_gadget_config *config,
                u8 hdr_type)
{
        u32 status;

        spear_dbi_read_reg(config, PCI_STATUS, 2, &status);
        if (!(status & PCI_STATUS_CAP_LIST))
                return 0;

        switch (hdr_type) {
        case PCI_HEADER_TYPE_NORMAL:
        case PCI_HEADER_TYPE_BRIDGE:
                return PCI_CAPABILITY_LIST;
        case PCI_HEADER_TYPE_CARDBUS:
                return PCI_CB_CAPABILITY_LIST;
        default:
                return 0;
        }

        return 0;
}

/*
 * Tell if a device supports a given PCI capability.
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it. Possible values for @cap:
 *
 *  %PCI_CAP_ID_PM	Power Management
 *  %PCI_CAP_ID_AGP	Accelerated Graphics Port
 *  %PCI_CAP_ID_VPD	Vital Product Data
 *  %PCI_CAP_ID_SLOTID	Slot Identification
 *  %PCI_CAP_ID_MSI	Message Signalled Interrupts
 *  %PCI_CAP_ID_CHSWP	CompactPCI HotSwap
 *  %PCI_CAP_ID_PCIX	PCI-X
 *  %PCI_CAP_ID_EXP	PCI Express
 */
static int pci_find_own_capability(struct spear_pcie_gadget_config *config,
                int cap)
{
        u32 pos;
        u32 hdr_type;

        spear_dbi_read_reg(config, PCI_HEADER_TYPE, 1, &hdr_type);

        pos = pci_find_own_cap_start(config, hdr_type);
        if (pos)
                pos = pci_find_own_next_cap(config, pos, cap);

        return pos;
}

static irqreturn_t spear_pcie_gadget_irq(int irq, void *dev_id)
{
        return 0;
}

/*
 * configfs interfaces show/store functions
 */
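/*
 * Rough usage sketch of the configfs interface below.  The subsystem is
 * registered as "pcie_gadget.<pdev->id>" (see the probe routine), so,
 * assuming configfs is mounted at /sys/kernel/config and the device id
 * is 0, the gadget could be driven with e.g.:
 *
 *	cd /sys/kernel/config/pcie_gadget.0
 *	echo 0x104a > vendor_id
 *	echo UP > link
 *	cat link
 *
 * The mount point and the example vendor id are assumptions, not
 * something this driver enforces.
 */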
static ssize_t pcie_gadget_show_link(
                struct spear_pcie_gadget_config *config,
                char *buf)
{
        struct pcie_app_reg __iomem *app_reg = config->va_app_base;

        if (readl(&app_reg->app_status_1) & ((u32)1 << XMLH_LINK_UP_ID))
                return sprintf(buf, "UP");
        else
                return sprintf(buf, "DOWN");
}

static ssize_t pcie_gadget_store_link(
                struct spear_pcie_gadget_config *config,
                const char *buf, size_t count)
{
        struct pcie_app_reg __iomem *app_reg = config->va_app_base;

        if (sysfs_streq(buf, "UP"))
                writel(readl(&app_reg->app_ctrl_0) | (1 << APP_LTSSM_ENABLE_ID),
                        &app_reg->app_ctrl_0);
        else if (sysfs_streq(buf, "DOWN"))
                writel(readl(&app_reg->app_ctrl_0)
                                & ~(1 << APP_LTSSM_ENABLE_ID),
                                &app_reg->app_ctrl_0);
        else
                return -EINVAL;
        return count;
}

static ssize_t pcie_gadget_show_int_type(
                struct spear_pcie_gadget_config *config,
                char *buf)
{
        return sprintf(buf, "%s", config->int_type);
}

static ssize_t pcie_gadget_store_int_type(
                struct spear_pcie_gadget_config *config,
                const char *buf, size_t count)
{
        u32 cap, vec, flags;
        ulong vector;

        if (sysfs_streq(buf, "INTA"))
                spear_dbi_write_reg(config, PCI_INTERRUPT_LINE, 1, 1);
        else if (sysfs_streq(buf, "MSI")) {
                /* vec = log2(requested_msi), the encoding used by the
                 * MSI Multiple Message Capable field */
                vector = config->requested_msi;
                vec = 0;
                while (vector > 1) {
                        vector /= 2;
                        vec++;
                }
                spear_dbi_write_reg(config, PCI_INTERRUPT_LINE, 1, 0);
                cap = pci_find_own_capability(config, PCI_CAP_ID_MSI);
                spear_dbi_read_reg(config, cap + PCI_MSI_FLAGS, 1, &flags);
                flags &= ~PCI_MSI_FLAGS_QMASK;
                flags |= vec << 1;
                spear_dbi_write_reg(config, cap + PCI_MSI_FLAGS, 1, flags);
        } else
                return -EINVAL;

        strcpy(config->int_type, buf);

        return count;
}

static ssize_t pcie_gadget_show_no_of_msi(
                struct spear_pcie_gadget_config *config,
                char *buf)
{
        struct pcie_app_reg __iomem *app_reg = config->va_app_base;
        u32 cap, vec, flags;
        ulong vector;

        if ((readl(&app_reg->msg_status) & (1 << CFG_MSI_EN_ID))
                        != (1 << CFG_MSI_EN_ID))
                vector = 0;
        else {
                cap = pci_find_own_capability(config, PCI_CAP_ID_MSI);
                spear_dbi_read_reg(config, cap + PCI_MSI_FLAGS, 1, &flags);
                /* number of vectors = 2 ^ Multiple Message Enable (QSIZE) */
                flags &= PCI_MSI_FLAGS_QSIZE;
                vec = flags >> 4;
                vector = 1;
                while (vec--)
                        vector *= 2;
        }
        config->configured_msi = vector;

        return sprintf(buf, "%lu", vector);
}

static ssize_t pcie_gadget_store_no_of_msi(
                struct spear_pcie_gadget_config *config,
                const char *buf, size_t count)
{
        int ret;

        ret = kstrtoul(buf, 0, &config->requested_msi);
        if (ret)
                return ret;

        if (config->requested_msi > 32)
                config->requested_msi = 32;

        return count;
}

static ssize_t pcie_gadget_store_inta(
                struct spear_pcie_gadget_config *config,
                const char *buf, size_t count)
{
        struct pcie_app_reg __iomem *app_reg = config->va_app_base;
        ulong en;
        int ret;

        ret = kstrtoul(buf, 0, &en);
        if (ret)
                return ret;

        if (en)
                writel(readl(&app_reg->app_ctrl_0) | (1 << SYS_INT_ID),
                                &app_reg->app_ctrl_0);
        else
                writel(readl(&app_reg->app_ctrl_0) & ~(1 << SYS_INT_ID),
                                &app_reg->app_ctrl_0);

        return count;
}

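/*
 * Writing a vector number to "send_msi" fires that MSI towards the host:
 * the vector is programmed into ven_msi_1 and VEN_MSI_REQ_EN is pulsed.
 * The vector must be smaller than the number of MSIs the host has
 * currently enabled (see pcie_gadget_show_no_of_msi()).
 */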
static ssize_t pcie_gadget_store_send_msi(
                struct spear_pcie_gadget_config *config,
                const char *buf, size_t count)
{
        struct pcie_app_reg __iomem *app_reg = config->va_app_base;
        ulong vector;
        u32 ven_msi;
        int ret;

        ret = kstrtoul(buf, 0, &vector);
        if (ret)
                return ret;

        if (!config->configured_msi)
                return -EINVAL;

        if (vector >= config->configured_msi)
                return -EINVAL;

        ven_msi = readl(&app_reg->ven_msi_1);
        ven_msi &= ~VEN_MSI_FUN_NUM_MASK;
        ven_msi |= 0 << VEN_MSI_FUN_NUM_ID;
        ven_msi &= ~VEN_MSI_TC_MASK;
        ven_msi |= 0 << VEN_MSI_TC_ID;
        ven_msi &= ~VEN_MSI_VECTOR_MASK;
        ven_msi |= vector << VEN_MSI_VECTOR_ID;

        /* generating interrupt for msi vector */
        ven_msi |= VEN_MSI_REQ_EN;
        writel(ven_msi, &app_reg->ven_msi_1);
        udelay(1);
        ven_msi &= ~VEN_MSI_REQ_EN;
        writel(ven_msi, &app_reg->ven_msi_1);

        return count;
}

static ssize_t pcie_gadget_show_vendor_id(
                struct spear_pcie_gadget_config *config,
                char *buf)
{
        u32 id;

        spear_dbi_read_reg(config, PCI_VENDOR_ID, 2, &id);

        return sprintf(buf, "%x", id);
}

static ssize_t pcie_gadget_store_vendor_id(
                struct spear_pcie_gadget_config *config,
                const char *buf, size_t count)
{
        ulong id;
        int ret;

        ret = kstrtoul(buf, 0, &id);
        if (ret)
                return ret;

        spear_dbi_write_reg(config, PCI_VENDOR_ID, 2, id);

        return count;
}

static ssize_t pcie_gadget_show_device_id(
                struct spear_pcie_gadget_config *config,
                char *buf)
{
        u32 id;

        spear_dbi_read_reg(config, PCI_DEVICE_ID, 2, &id);

        return sprintf(buf, "%x", id);
}

static ssize_t pcie_gadget_store_device_id(
                struct spear_pcie_gadget_config *config,
                const char *buf, size_t count)
{
        ulong id;
        int ret;

        ret = kstrtoul(buf, 0, &id);
        if (ret)
                return ret;

        spear_dbi_write_reg(config, PCI_DEVICE_ID, 2, id);

        return count;
}

static ssize_t pcie_gadget_show_bar0_size(
                struct spear_pcie_gadget_config *config,
                char *buf)
{
        return sprintf(buf, "%lx", config->bar0_size);
}

static ssize_t pcie_gadget_store_bar0_size(
                struct spear_pcie_gadget_config *config,
                const char *buf, size_t count)
{
        ulong size;
        u32 pos, pos1;
        u32 no_of_bit = 0;
        int ret;

        ret = kstrtoul(buf, 0, &size);
        if (ret)
                return ret;

        /* min bar size is 256 */
        if (size <= 0x100)
                size = 0x100;
        /* max bar size is 1MB */
        else if (size >= 0x100000)
                size = 0x100000;
        else {
                /* round up to the next power of two (exact powers of two
                 * are kept as-is) */
                pos = 0;
                pos1 = 0;
                while (pos < 21) {
                        pos = find_next_bit((ulong *)&size, 21, pos);
                        if (pos != 21)
                                pos1 = pos + 1;
                        pos++;
                        no_of_bit++;
                }
                if (no_of_bit == 2)
                        pos1--;

                size = 1 << pos1;
        }
        config->bar0_size = size;
        spear_dbi_write_reg(config, PCIE_BAR0_MASK_REG, 4, size - 1);

        return count;
}

static ssize_t pcie_gadget_show_bar0_address(
                struct spear_pcie_gadget_config *config,
                char *buf)
{
        struct pcie_app_reg __iomem *app_reg = config->va_app_base;

        u32 address = readl(&app_reg->pim0_mem_addr_start);

        return sprintf(buf, "%x", address);
}

static ssize_t pcie_gadget_store_bar0_address(
                struct spear_pcie_gadget_config *config,
                const char *buf, size_t count)
{
        struct pcie_app_reg __iomem *app_reg = config->va_app_base;
        ulong address;
        int ret;

        ret = kstrtoul(buf, 0, &address);
        if (ret)
                return ret;

        address &= ~(config->bar0_size - 1);
        if (config->va_bar0_address)
                iounmap(config->va_bar0_address);
        config->va_bar0_address = ioremap(address, config->bar0_size);
        if (!config->va_bar0_address)
                return -ENOMEM;

        writel(address, &app_reg->pim0_mem_addr_start);

        return count;
}

static ssize_t pcie_gadget_show_bar0_rw_offset(
                struct spear_pcie_gadget_config *config,
                char *buf)
{
        return sprintf(buf, "%lx", config->bar0_rw_offset);
}

static ssize_t pcie_gadget_store_bar0_rw_offset(
                struct spear_pcie_gadget_config *config,
                const char *buf, size_t count)
{
        ulong offset;
        int ret;

        ret = kstrtoul(buf, 0, &offset);
        if (ret)
                return ret;

        if (offset % 4)
                return -EINVAL;

        config->bar0_rw_offset = offset;

        return count;
}

static ssize_t pcie_gadget_show_bar0_data(
                struct spear_pcie_gadget_config *config,
                char *buf)
{
        ulong data;

        if (!config->va_bar0_address)
                return -ENOMEM;

        data = readl((ulong)config->va_bar0_address + config->bar0_rw_offset);

        return sprintf(buf, "%lx", data);
}

static ssize_t pcie_gadget_store_bar0_data(
                struct spear_pcie_gadget_config *config,
                const char *buf, size_t count)
{
        ulong data;
        int ret;

        ret = kstrtoul(buf, 0, &data);
        if (ret)
                return ret;

        if (!config->va_bar0_address)
                return -ENOMEM;

        writel(data, (ulong)config->va_bar0_address + config->bar0_rw_offset);

        return count;
}

/*
 * Attribute definitions.
 */

#define PCIE_GADGET_TARGET_ATTR_RO(_name)				\
static struct pcie_gadget_target_attr pcie_gadget_target_##_name =	\
	__CONFIGFS_ATTR(_name, S_IRUGO, pcie_gadget_show_##_name, NULL)

#define PCIE_GADGET_TARGET_ATTR_WO(_name)				\
static struct pcie_gadget_target_attr pcie_gadget_target_##_name =	\
	__CONFIGFS_ATTR(_name, S_IWUSR, NULL, pcie_gadget_store_##_name)

#define PCIE_GADGET_TARGET_ATTR_RW(_name)				\
static struct pcie_gadget_target_attr pcie_gadget_target_##_name =	\
	__CONFIGFS_ATTR(_name, S_IRUGO | S_IWUSR, pcie_gadget_show_##_name, \
			pcie_gadget_store_##_name)

PCIE_GADGET_TARGET_ATTR_RW(link);
PCIE_GADGET_TARGET_ATTR_RW(int_type);
PCIE_GADGET_TARGET_ATTR_RW(no_of_msi);
PCIE_GADGET_TARGET_ATTR_WO(inta);
PCIE_GADGET_TARGET_ATTR_WO(send_msi);
PCIE_GADGET_TARGET_ATTR_RW(vendor_id);
PCIE_GADGET_TARGET_ATTR_RW(device_id);
PCIE_GADGET_TARGET_ATTR_RW(bar0_size);
PCIE_GADGET_TARGET_ATTR_RW(bar0_address);
PCIE_GADGET_TARGET_ATTR_RW(bar0_rw_offset);
PCIE_GADGET_TARGET_ATTR_RW(bar0_data);

static struct configfs_attribute *pcie_gadget_target_attrs[] = {
        &pcie_gadget_target_link.attr,
        &pcie_gadget_target_int_type.attr,
        &pcie_gadget_target_no_of_msi.attr,
        &pcie_gadget_target_inta.attr,
        &pcie_gadget_target_send_msi.attr,
        &pcie_gadget_target_vendor_id.attr,
        &pcie_gadget_target_device_id.attr,
        &pcie_gadget_target_bar0_size.attr,
        &pcie_gadget_target_bar0_address.attr,
        &pcie_gadget_target_bar0_rw_offset.attr,
        &pcie_gadget_target_bar0_data.attr,
        NULL,
};

static struct pcie_gadget_target *to_target(struct config_item *item)
{
        return item ?
                container_of(to_configfs_subsystem(to_config_group(item)),
                        struct pcie_gadget_target, subsys) : NULL;
}

/*
 * Item operations and type for pcie_gadget_target.
 */

static ssize_t pcie_gadget_target_attr_show(struct config_item *item,
                                            struct configfs_attribute *attr,
                                            char *buf)
{
        ssize_t ret = -EINVAL;
        struct pcie_gadget_target *target = to_target(item);
        struct pcie_gadget_target_attr *t_attr =
                container_of(attr, struct pcie_gadget_target_attr, attr);

        if (t_attr->show)
                ret = t_attr->show(&target->config, buf);
        return ret;
}

static ssize_t pcie_gadget_target_attr_store(struct config_item *item,
                                             struct configfs_attribute *attr,
                                             const char *buf,
                                             size_t count)
{
        ssize_t ret = -EINVAL;
        struct pcie_gadget_target *target = to_target(item);
        struct pcie_gadget_target_attr *t_attr =
                container_of(attr, struct pcie_gadget_target_attr, attr);

        if (t_attr->store)
                ret = t_attr->store(&target->config, buf, count);
        return ret;
}

static struct configfs_item_operations pcie_gadget_target_item_ops = {
        .show_attribute		= pcie_gadget_target_attr_show,
        .store_attribute	= pcie_gadget_target_attr_store,
};

static struct config_item_type pcie_gadget_target_type = {
        .ct_attrs		= pcie_gadget_target_attrs,
        .ct_item_ops		= &pcie_gadget_target_item_ops,
        .ct_owner		= THIS_MODULE,
};

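/*
 * One-time setup of the controller in endpoint mode: program the chained
 * outbound (memory/IO/config/message) address windows, map SYSRAM1 behind
 * BAR0 as the default inbound target, enable register translation, mask
 * all rx interrupts and select INTA as the default interrupt type.
 */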
static void spear13xx_pcie_device_init(struct spear_pcie_gadget_config *config)
{
        struct pcie_app_reg __iomem *app_reg = config->va_app_base;

        /* setup registers for outbound translation */

        writel(config->base, &app_reg->in0_mem_addr_start);
        writel(app_reg->in0_mem_addr_start + IN0_MEM_SIZE,
                        &app_reg->in0_mem_addr_limit);
        writel(app_reg->in0_mem_addr_limit + 1, &app_reg->in1_mem_addr_start);
        writel(app_reg->in1_mem_addr_start + IN1_MEM_SIZE,
                        &app_reg->in1_mem_addr_limit);
        writel(app_reg->in1_mem_addr_limit + 1, &app_reg->in_io_addr_start);
        writel(app_reg->in_io_addr_start + IN_IO_SIZE,
                        &app_reg->in_io_addr_limit);
        writel(app_reg->in_io_addr_limit + 1, &app_reg->in_cfg0_addr_start);
        writel(app_reg->in_cfg0_addr_start + IN_CFG0_SIZE,
                        &app_reg->in_cfg0_addr_limit);
        writel(app_reg->in_cfg0_addr_limit + 1, &app_reg->in_cfg1_addr_start);
        writel(app_reg->in_cfg1_addr_start + IN_CFG1_SIZE,
                        &app_reg->in_cfg1_addr_limit);
        writel(app_reg->in_cfg1_addr_limit + 1, &app_reg->in_msg_addr_start);
        writel(app_reg->in_msg_addr_start + IN_MSG_SIZE,
                        &app_reg->in_msg_addr_limit);

        writel(app_reg->in0_mem_addr_start, &app_reg->pom0_mem_addr_start);
        writel(app_reg->in1_mem_addr_start, &app_reg->pom1_mem_addr_start);
        writel(app_reg->in_io_addr_start, &app_reg->pom_io_addr_start);

        /* setup registers for inbound translation */

        /* Keep AORAM mapped at BAR0 as default */
        config->bar0_size = INBOUND_ADDR_MASK + 1;
        spear_dbi_write_reg(config, PCIE_BAR0_MASK_REG, 4, INBOUND_ADDR_MASK);
        spear_dbi_write_reg(config, PCI_BASE_ADDRESS_0, 4, 0xC);
        config->va_bar0_address = ioremap(SPEAR13XX_SYSRAM1_BASE,
                        config->bar0_size);

        writel(SPEAR13XX_SYSRAM1_BASE, &app_reg->pim0_mem_addr_start);
        writel(0, &app_reg->pim1_mem_addr_start);
        writel(INBOUND_ADDR_MASK + 1, &app_reg->mem0_addr_offset_limit);

        writel(0x0, &app_reg->pim_io_addr_start);
        writel(0x0, &app_reg->pim_io_addr_start);
        writel(0x0, &app_reg->pim_rom_addr_start);

        writel(DEVICE_TYPE_EP | (1 << MISCTRL_EN_ID)
                        | ((u32)1 << REG_TRANSLATION_ENABLE),
                        &app_reg->app_ctrl_0);
        /* disable all rx interrupts */
        writel(0, &app_reg->int_mask);

        /* Select INTA as default */
        spear_dbi_write_reg(config, PCI_INTERRUPT_LINE, 1, 1);
}

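/*
 * Probe: claim and ioremap the APP and DBI register regions, register the
 * "pcie_gadget.<id>" configfs subsystem, request the IRQ, enable the
 * controller clock for PCIE1/PCIE2 (the PCIE0 clock is handled during
 * board init) and program the default endpoint setup.
 */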
static int spear_pcie_gadget_probe(struct platform_device *pdev)
{
        struct resource *res0, *res1;
        unsigned int status = 0;
        int irq;
        struct clk *clk;
        static struct pcie_gadget_target *target;
        struct spear_pcie_gadget_config *config;
        struct config_item *cg_item;
        struct configfs_subsystem *subsys;

        /* get resource for application registers */
        res0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res0) {
                dev_err(&pdev->dev, "no resource defined\n");
                return -EBUSY;
        }
        if (!request_mem_region(res0->start, resource_size(res0),
                                pdev->name)) {
                dev_err(&pdev->dev, "pcie gadget region already claimed\n");
                return -EBUSY;
        }

        /* get resource for dbi registers */
        res1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        if (!res1) {
                dev_err(&pdev->dev, "no resource defined\n");
                goto err_rel_res0;
        }
        if (!request_mem_region(res1->start, resource_size(res1),
                                pdev->name)) {
                dev_err(&pdev->dev, "pcie gadget region already claimed\n");
                goto err_rel_res0;
        }

        target = kzalloc(sizeof(*target), GFP_KERNEL);
        if (!target) {
                dev_err(&pdev->dev, "out of memory\n");
                status = -ENOMEM;
                goto err_rel_res;
        }

        cg_item = &target->subsys.su_group.cg_item;
        sprintf(cg_item->ci_namebuf, "pcie_gadget.%d", pdev->id);
        cg_item->ci_type = &pcie_gadget_target_type;
        config = &target->config;
        config->va_app_base = (void __iomem *)ioremap(res0->start,
                        resource_size(res0));
        if (!config->va_app_base) {
                dev_err(&pdev->dev, "ioremap fail\n");
                status = -ENOMEM;
                goto err_kzalloc;
        }

        config->base = (void __iomem *)res1->start;

        config->va_dbi_base = (void __iomem *)ioremap(res1->start,
                        resource_size(res1));
        if (!config->va_dbi_base) {
                dev_err(&pdev->dev, "ioremap fail\n");
                status = -ENOMEM;
                goto err_iounmap_app;
        }

        platform_set_drvdata(pdev, target);

        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                dev_err(&pdev->dev, "no update irq?\n");
                status = irq;
                goto err_iounmap;
        }

        status = request_irq(irq, spear_pcie_gadget_irq, 0, pdev->name, NULL);
        if (status) {
                dev_err(&pdev->dev,
                        "pcie gadget interrupt IRQ%d already claimed\n", irq);
                goto err_iounmap;
        }

        /* Register configfs hooks */
        subsys = &target->subsys;
        config_group_init(&subsys->su_group);
        mutex_init(&subsys->su_mutex);
        status = configfs_register_subsystem(subsys);
        if (status)
                goto err_irq;

        /*
         * init basic pcie application registers
         * Do not enable the clock if this is PCIE0. Ideally, every
         * controller should be independent of the others with respect to
         * clocks, but PCIE1 and PCIE2 depend on PCIE0, so the PCIE0 clock
         * is provided during board init.
         */
        if (pdev->id == 1) {
                /*
                 * Ideally the CFG clock should also have been enabled
                 * here, but it is currently done during the board init
                 * routine.
                 */
                clk = clk_get_sys("pcie1", NULL);
                if (IS_ERR(clk)) {
                        pr_err("%s:couldn't get clk for pcie1\n", __func__);
                        status = PTR_ERR(clk);
                        goto err_irq;
                }
                status = clk_enable(clk);
                if (status) {
                        pr_err("%s:couldn't enable clk for pcie1\n", __func__);
                        goto err_irq;
                }
        } else if (pdev->id == 2) {
                /*
                 * Ideally the CFG clock should also have been enabled
                 * here, but it is currently done during the board init
                 * routine.
                 */
                clk = clk_get_sys("pcie2", NULL);
                if (IS_ERR(clk)) {
                        pr_err("%s:couldn't get clk for pcie2\n", __func__);
                        status = PTR_ERR(clk);
                        goto err_irq;
                }
                status = clk_enable(clk);
                if (status) {
                        pr_err("%s:couldn't enable clk for pcie2\n", __func__);
                        goto err_irq;
                }
        }
        spear13xx_pcie_device_init(config);

        return 0;
err_irq:
        free_irq(irq, NULL);
err_iounmap:
        iounmap(config->va_dbi_base);
err_iounmap_app:
        iounmap(config->va_app_base);
err_kzalloc:
        kfree(target);
err_rel_res:
        release_mem_region(res1->start, resource_size(res1));
err_rel_res0:
        release_mem_region(res0->start, resource_size(res0));
        return status;
}

static int spear_pcie_gadget_remove(struct platform_device *pdev)
{
        struct resource *res0, *res1;
        static struct pcie_gadget_target *target;
        struct spear_pcie_gadget_config *config;
        int irq;

        res0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        res1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        irq = platform_get_irq(pdev, 0);
        target = platform_get_drvdata(pdev);
        config = &target->config;

        free_irq(irq, NULL);
        iounmap(config->va_dbi_base);
        iounmap(config->va_app_base);
        release_mem_region(res1->start, resource_size(res1));
        release_mem_region(res0->start, resource_size(res0));
        configfs_unregister_subsystem(&target->subsys);
        kfree(target);

        return 0;
}

static void spear_pcie_gadget_shutdown(struct platform_device *pdev)
{
}

static struct platform_driver spear_pcie_gadget_driver = {
        .probe = spear_pcie_gadget_probe,
        .remove = spear_pcie_gadget_remove,
        .shutdown = spear_pcie_gadget_shutdown,
        .driver = {
                .name = "pcie-gadget-spear",
                .bus = &platform_bus_type
        },
};

module_platform_driver(spear_pcie_gadget_driver);

MODULE_ALIAS("platform:pcie-gadget-spear");
MODULE_AUTHOR("Pratyush Anand");
MODULE_LICENSE("GPL");