/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/cpu_pm.h>
#include <linux/platform_device.h>
#include <soc/qcom/scm.h>
#include <linux/of.h>
#include <linux/clk.h>

#define MODULE_NAME "gladiator_error_reporting"

#define INVALID_NUM 0xDEADBEEF

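/*
 * Register offsets for the Gladiator main error logger and for observer 0,
 * relative to the mapped "gladiator_base" region. The layouts differ between
 * the v2 and v3 interconnects, so the offsets are filled in at probe time;
 * registers that do not exist on a given version are set to INVALID_NUM.
 */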
struct reg_off {
	unsigned int gladiator_id_coreid;
	unsigned int gladiator_id_revisionid;
	unsigned int gladiator_faulten;
	unsigned int gladiator_errvld;
	unsigned int gladiator_errclr;
	unsigned int gladiator_errlog0;
	unsigned int gladiator_errlog1;
	unsigned int gladiator_errlog2;
	unsigned int gladiator_errlog3;
	unsigned int gladiator_errlog4;
	unsigned int gladiator_errlog5;
	unsigned int gladiator_errlog6;
	unsigned int gladiator_errlog7;
	unsigned int gladiator_errlog8;
	unsigned int observer_0_id_coreid;
	unsigned int observer_0_id_revisionid;
	unsigned int observer_0_faulten;
	unsigned int observer_0_errvld;
	unsigned int observer_0_errclr;
	unsigned int observer_0_errlog0;
	unsigned int observer_0_errlog1;
	unsigned int observer_0_errlog2;
	unsigned int observer_0_errlog3;
	unsigned int observer_0_errlog4;
	unsigned int observer_0_errlog5;
	unsigned int observer_0_errlog6;
	unsigned int observer_0_errlog7;
	unsigned int observer_0_errlog8;
	unsigned int observer_0_stallen;
};

struct reg_masks_shift {
	unsigned int gld_trans_opcode_mask;
	unsigned int gld_trans_opcode_shift;
	unsigned int gld_error_type_mask;
	unsigned int gld_error_type_shift;
	unsigned int gld_len1_mask;
	unsigned int gld_len1_shift;
	unsigned int gld_trans_sourceid_mask;
	unsigned int gld_trans_sourceid_shift;
	unsigned int gld_trans_targetid_mask;
	unsigned int gld_trans_targetid_shift;
	unsigned int gld_errlog_error;
	unsigned int gld_errlog5_error_type_mask;
	unsigned int gld_errlog5_error_type_shift;
	unsigned int gld_ace_port_parity_mask;
	unsigned int gld_ace_port_parity_shift;
	unsigned int gld_ace_port_disconnect_mask;
	unsigned int gld_ace_port_disconnect_shift;
	unsigned int gld_ace_port_directory_mask;
	unsigned int gld_ace_port_directory_shift;
	unsigned int gld_index_parity_mask;
	unsigned int gld_index_parity_shift;
	unsigned int obs_trans_opcode_mask;
	unsigned int obs_trans_opcode_shift;
	unsigned int obs_error_type_mask;
	unsigned int obs_error_type_shift;
	unsigned int obs_len1_mask;
	unsigned int obs_len1_shift;
};

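/*
 * Per-device driver state, allocated in gladiator_erp_probe() and stored
 * as platform drvdata.
 */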
struct msm_gladiator_data {
	void __iomem *gladiator_virt_base;
	int erp_irq;
	struct notifier_block pm_notifier_block;
	struct clk *qdss_clk;
	struct reg_off *reg_offs;
	struct reg_masks_shift *reg_masks_shifts;
	bool glad_v2;
	bool glad_v3;
};

static int enable_panic_on_error;
module_param(enable_panic_on_error, int, 0000);

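/*
 * Transaction opcodes and error codes decoded from the ERRLOG0 registers of
 * the main logger (gld_trans_opcode) and of observer 0 (obs_trans_opcode,
 * obs_err_code).
 */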
enum gld_trans_opcode {
	GLD_RD,
	GLD_RDX,
	GLD_RDL,
	GLD_RESERVED,
	GLD_WR,
	GLD_WRC,
	GLD_PRE,
};

enum obs_trans_opcode {
	OBS_RD,
	OBS_RDW,
	OBS_RDL,
	OBS_RDX,
	OBS_WR,
	OBS_WRW,
	OBS_WRC,
	OBS_RESERVED,
	OBS_PRE,
	OBS_URG,
};

enum obs_err_code {
	OBS_SLV,
	OBS_DEC,
	OBS_UNS,
	OBS_DISC,
	OBS_SEC,
	OBS_HIDE,
	OBS_TMO,
	OBS_RSV,
};

enum err_log {
	ID_COREID,
	ID_REVISIONID,
	FAULTEN,
	ERRVLD,
	ERRCLR,
	ERR_LOG0,
	ERR_LOG1,
	ERR_LOG2,
	ERR_LOG3,
	ERR_LOG4,
	ERR_LOG5,
	ERR_LOG6,
	ERR_LOG7,
	ERR_LOG8,
	STALLEN,
	MAX_NUM,
};

enum type_logger_error {
	DATA_TRANSFER_ERROR,
	DVM_ERROR,
	TX_ERROR,
	TXR_ERROR,
	DISCONNECT_ERROR,
	DIRECTORY_ERROR,
	PARITY_ERROR,
	PHYSICAL_ADDRESS_ERROR,
};

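/* Acknowledge the logged error on both loggers so the interrupt de-asserts. */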
static void clear_gladiator_error(void __iomem *gladiator_virt_base,
		struct reg_off *offs)
{
	writel_relaxed(1, gladiator_virt_base + offs->gladiator_errclr);
	writel_relaxed(1, gladiator_virt_base + offs->observer_0_errclr);
}

static inline void print_gld_transaction(unsigned int opc)
{
	switch (opc) {
	case GLD_RD:
		pr_alert("Transaction type: READ\n");
		break;
	case GLD_RDX:
		pr_alert("Transaction type: EXCLUSIVE READ\n");
		break;
	case GLD_RDL:
		pr_alert("Transaction type: LINKED READ\n");
		break;
	case GLD_WR:
		pr_alert("Transaction type: WRITE\n");
		break;
	case GLD_WRC:
		pr_alert("Transaction type: CONDITIONAL WRITE\n");
		break;
	case GLD_PRE:
		pr_alert("Transaction: Preamble packet of linked sequence\n");
		break;
	default:
		pr_alert("Transaction type: Unknown; value:%u\n", opc);
	}
}

static inline void print_gld_errtype(unsigned int errtype)
{
	char *errors = "Disconnect, Directory, Parity, Physical address";

	if (errtype == 0)
		pr_alert("Error type: Snoop data transfer\n");
	else if (errtype == 1)
		pr_alert("Error type: DVM error\n");
	else if (errtype == 3)
		pr_alert("Error type: %s\n", errors);
	else
		pr_alert("Error type: Unknown; value:%u\n", errtype);
}

static void decode_gld_errlog0(u32 err_reg,
		struct reg_masks_shift *mask_shifts)
{
	unsigned int opc, errtype, len1;

	opc = (err_reg & mask_shifts->gld_trans_opcode_mask) >>
		mask_shifts->gld_trans_opcode_shift;
	errtype = (err_reg & mask_shifts->gld_error_type_mask) >>
		mask_shifts->gld_error_type_shift;
	len1 = (err_reg & mask_shifts->gld_len1_mask) >>
		mask_shifts->gld_len1_shift;

	print_gld_transaction(opc);
	print_gld_errtype(errtype);
	pr_alert("number of payload bytes: %d\n", len1 + 1);
}

static void decode_gld_errlog1(u32 err_reg,
		struct reg_masks_shift *mask_shifts)
{
	if ((err_reg & mask_shifts->gld_errlog_error) ==
			mask_shifts->gld_errlog_error)
		pr_alert("Transaction issued on IO target generic interface\n");
	else
		pr_alert("Transaction source ID: %d\n",
			(err_reg & mask_shifts->gld_trans_sourceid_mask)
			>> mask_shifts->gld_trans_sourceid_shift);
}

static void decode_gld_errlog2(u32 err_reg,
		struct reg_masks_shift *mask_shifts)
{
	if ((err_reg & mask_shifts->gld_errlog_error) ==
			mask_shifts->gld_errlog_error)
		pr_alert("Error response coming from: external DVM network\n");
	else
		pr_alert("Error response coming from: Target ID: %d\n",
			(err_reg & mask_shifts->gld_trans_targetid_mask)
			>> mask_shifts->gld_trans_targetid_shift);
}

static void decode_ace_port_index(u32 type, u32 error,
		struct reg_masks_shift *mask_shifts)
{
	unsigned int port;

	switch (type) {
	case DISCONNECT_ERROR:
		port = (error & mask_shifts->gld_ace_port_disconnect_mask)
			>> mask_shifts->gld_ace_port_disconnect_shift;
		pr_alert("ACE port index: %d\n", port);
		break;
	case DIRECTORY_ERROR:
		port = (error & mask_shifts->gld_ace_port_directory_mask)
			>> mask_shifts->gld_ace_port_directory_shift;
		pr_alert("ACE port index: %d\n", port);
		break;
	case PARITY_ERROR:
		port = (error & mask_shifts->gld_ace_port_parity_mask)
			>> mask_shifts->gld_ace_port_parity_shift;
		pr_alert("ACE port index: %d\n", port);
	}
}

static void decode_index_parity(u32 error, struct reg_masks_shift *mask_shifts)
{
	pr_alert("Index: %d\n",
		(error & mask_shifts->gld_index_parity_mask)
		>> mask_shifts->gld_index_parity_shift);
}

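/*
 * ERRLOG5 carries one bit per logged error type in its top byte. Walk all
 * eight bits, print every error type that is set, and for disconnect,
 * directory and parity errors also decode the ACE port (and parity index)
 * recorded in the same register.
 */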
static void decode_gld_logged_error(u32 err_reg5,
		struct reg_masks_shift *mask_shifts)
{
	unsigned int log_err_type, i, value;

	log_err_type = (err_reg5 & mask_shifts->gld_errlog5_error_type_mask)
		>> mask_shifts->gld_errlog5_error_type_shift;
	for (i = 0; i <= 7; i++) {
		value = log_err_type & 0x1;
		switch (i) {
		case DATA_TRANSFER_ERROR:
			if (value == 0)
				break;
			pr_alert("Error type: Data transfer error\n");
			break;
		case DVM_ERROR:
			if (value == 0)
				break;
			pr_alert("Error type: DVM error\n");
			break;
		case TX_ERROR:
			if (value == 0)
				break;
			pr_alert("Error type: Tx error\n");
			break;
		case TXR_ERROR:
			if (value == 0)
				break;
			pr_alert("Error type: TxR error\n");
			break;
		case DISCONNECT_ERROR:
			if (value == 0)
				break;
			pr_alert("Error type: Disconnect error\n");
			decode_ace_port_index(DISCONNECT_ERROR, err_reg5,
					mask_shifts);
			break;
		case DIRECTORY_ERROR:
			if (value == 0)
				break;
			pr_alert("Error type: Directory error\n");
			decode_ace_port_index(DIRECTORY_ERROR, err_reg5,
					mask_shifts);
			break;
		case PARITY_ERROR:
			if (value == 0)
				break;
			pr_alert("Error type: Parity error\n");
			decode_ace_port_index(PARITY_ERROR, err_reg5,
					mask_shifts);
			decode_index_parity(err_reg5, mask_shifts);
			break;
		case PHYSICAL_ADDRESS_ERROR:
			if (value == 0)
				break;
			pr_alert("Error type: Physical address error\n");
			pr_alert("Address is greater than SoC address range\n");
			break;
		}

		log_err_type = log_err_type >> 1;
	}
}

static void decode_gld_errlog(u32 err_reg, unsigned int err_log,
		struct msm_gladiator_data *msm_gld_data)
{
	switch (err_log) {
	case ERR_LOG0:
		decode_gld_errlog0(err_reg, msm_gld_data->reg_masks_shifts);
		break;
	case ERR_LOG1:
		decode_gld_errlog1(err_reg, msm_gld_data->reg_masks_shifts);
		break;
	case ERR_LOG2:
		decode_gld_errlog2(err_reg, msm_gld_data->reg_masks_shifts);
		break;
	case ERR_LOG3:
		pr_alert("Lower 32-bits of error address: %08x\n", err_reg);
		break;
	case ERR_LOG4:
		pr_alert("Upper 32-bits of error address: %08x\n", err_reg);
		break;
	case ERR_LOG5:
		pr_alert("Lower 32-bits of user: %08x\n", err_reg);
		break;
	case ERR_LOG6:
		pr_alert("Mid 32-bits(63-32) of user: %08x\n", err_reg);
		break;
	case ERR_LOG7:
		break;
	case ERR_LOG8:
		pr_alert("Upper 32-bits(95-64) of user: %08x\n", err_reg);
		break;
	default:
		pr_alert("Invalid error register; reg num:%u\n", err_log);
	}
}

static inline void print_obs_transaction(unsigned int opc)
{
	switch (opc) {
	case OBS_RD:
		pr_alert("Transaction type: READ\n");
		break;
	case OBS_RDW:
		pr_alert("Transaction type: WRAPPED READ\n");
		break;
	case OBS_RDL:
		pr_alert("Transaction type: LINKED READ\n");
		break;
	case OBS_RDX:
		pr_alert("Transaction type: EXCLUSIVE READ\n");
		break;
	case OBS_WR:
		pr_alert("Transaction type: WRITE\n");
		break;
	case OBS_WRW:
		pr_alert("Transaction type: WRAPPED WRITE\n");
		break;
	case OBS_WRC:
		pr_alert("Transaction type: CONDITIONAL WRITE\n");
		break;
	case OBS_PRE:
		pr_alert("Transaction: Preamble packet of linked sequence\n");
		break;
	case OBS_URG:
		pr_alert("Transaction type: Urgency Packet\n");
		break;
	default:
		pr_alert("Transaction type: Unknown; value:%u\n", opc);
	}
}

static inline void print_obs_errcode(unsigned int errcode)
{
	switch (errcode) {
	case OBS_SLV:
		pr_alert("Error code: Target error detected by slave\n");
		pr_alert("Source: Target\n");
		break;
	case OBS_DEC:
		pr_alert("Error code: Address decode error\n");
		pr_alert("Source: Initiator NIU\n");
		break;
	case OBS_UNS:
		pr_alert("Error code: Unsupported request\n");
		pr_alert("Source: Target NIU\n");
		break;
	case OBS_DISC:
		pr_alert("Error code: Disconnected target or domain\n");
		pr_alert("Source: Power Disconnect\n");
		break;
	case OBS_SEC:
		pr_alert("Error code: Security violation\n");
		pr_alert("Source: Initiator NIU or Firewall\n");
		break;
	case OBS_HIDE:
		pr_alert("Error code: Hidden security violation, reported as OK\n");
		pr_alert("Source: Firewall\n");
		break;
	case OBS_TMO:
		pr_alert("Error code: Time-out\n");
		pr_alert("Source: Target NIU\n");
		break;
	default:
		pr_alert("Error code: Unknown; code:%u\n", errcode);
	}
}

static void decode_obs_errlog0(u32 err_reg,
		struct reg_masks_shift *mask_shifts)
{
	unsigned int opc, errcode;

	opc = (err_reg & mask_shifts->obs_trans_opcode_mask) >>
		mask_shifts->obs_trans_opcode_shift;
	errcode = (err_reg & mask_shifts->obs_error_type_mask) >>
		mask_shifts->obs_error_type_shift;

	print_obs_transaction(opc);
	print_obs_errcode(errcode);
}

static void decode_obs_errlog0_len(u32 err_reg,
		struct reg_masks_shift *mask_shifts)
{
	unsigned int len1;

	len1 = (err_reg & mask_shifts->obs_len1_mask) >>
		mask_shifts->obs_len1_shift;
	pr_alert("number of payload bytes: %d\n", len1 + 1);
}

static void decode_obs_errlog(u32 err_reg, unsigned int err_log,
		struct msm_gladiator_data *msm_gld_data)
{
	switch (err_log) {
	case ERR_LOG0:
		decode_obs_errlog0(err_reg, msm_gld_data->reg_masks_shifts);
		decode_obs_errlog0_len(err_reg, msm_gld_data->reg_masks_shifts);
		break;
	case ERR_LOG1:
		pr_alert("RouteId of the error: %08x\n", err_reg);
		break;
	case ERR_LOG2:
		/* reserved error log register */
		break;
	case ERR_LOG3:
		pr_alert("Lower 32-bits of error address: %08x\n", err_reg);
		break;
	case ERR_LOG4:
		pr_alert("Upper 12-bits of error address: %08x\n", err_reg);
		break;
	case ERR_LOG5:
		pr_alert("Lower 13-bits of user: %08x\n", err_reg);
		break;
	case ERR_LOG6:
		/* reserved error log register */
		break;
	case ERR_LOG7:
		pr_alert("Security field of the logged error: %08x\n", err_reg);
		break;
	case ERR_LOG8:
		/* reserved error log register */
		break;
	case STALLEN:
		pr_alert("stall mode of the error logger: %08x\n",
			err_reg & 0x1);
		break;
	default:
		pr_alert("Invalid error register; reg num:%u\n", err_log);
	}
}

static void decode_obs_errlog_v3(u32 err_reg, unsigned int err_log,
		struct msm_gladiator_data *msm_gld_data)
{
	switch (err_log) {
	case ERR_LOG0:
		decode_obs_errlog0(err_reg, msm_gld_data->reg_masks_shifts);
		break;
	case ERR_LOG1:
		decode_obs_errlog0_len(err_reg, msm_gld_data->reg_masks_shifts);
		break;
	case ERR_LOG2:
		pr_alert("Path of the error: %08x\n", err_reg);
		break;
	case ERR_LOG3:
		pr_alert("ExtID of the error: %08x\n", err_reg);
		break;
	case ERR_LOG4:
		pr_alert("ERRLOG2_LSB: %08x\n", err_reg);
		break;
	case ERR_LOG5:
		pr_alert("ERRLOG2_MSB: %08x\n", err_reg);
		break;
	case ERR_LOG6:
		pr_alert("ERRLOG3_LSB: %08x\n", err_reg);
		break;
	case ERR_LOG7:
		pr_alert("ERRLOG3_MSB: %08x\n", err_reg);
		break;
	case FAULTEN:
		pr_alert("stall mode of the error logger: %08x\n",
			err_reg & 0x3);
		break;
	default:
		pr_alert("Invalid error register; reg num:%u\n", err_log);
	}
}

static u32 get_gld_offset(unsigned int err_log, struct reg_off *offs)
{
	u32 offset = 0;

	switch (err_log) {
	case FAULTEN:
		offset = offs->gladiator_faulten;
		break;
	case ERRVLD:
		offset = offs->gladiator_errvld;
		break;
	case ERRCLR:
		offset = offs->gladiator_errclr;
		break;
	case ERR_LOG0:
		offset = offs->gladiator_errlog0;
		break;
	case ERR_LOG1:
		offset = offs->gladiator_errlog1;
		break;
	case ERR_LOG2:
		offset = offs->gladiator_errlog2;
		break;
	case ERR_LOG3:
		offset = offs->gladiator_errlog3;
		break;
	case ERR_LOG4:
		offset = offs->gladiator_errlog4;
		break;
	case ERR_LOG5:
		offset = offs->gladiator_errlog5;
		break;
	case ERR_LOG6:
		offset = offs->gladiator_errlog6;
		break;
	case ERR_LOG7:
		offset = offs->gladiator_errlog7;
		break;
	case ERR_LOG8:
		offset = offs->gladiator_errlog8;
		break;
	default:
		pr_alert("Invalid gladiator error register; reg num:%u\n",
			err_log);
	}
	return offset;
}

static u32 get_obs_offset(unsigned int err_log, struct reg_off *offs)
{
	u32 offset = 0;

	switch (err_log) {
	case FAULTEN:
		offset = offs->observer_0_faulten;
		break;
	case ERRVLD:
		offset = offs->observer_0_errvld;
		break;
	case ERRCLR:
		offset = offs->observer_0_errclr;
		break;
	case ERR_LOG0:
		offset = offs->observer_0_errlog0;
		break;
	case ERR_LOG1:
		offset = offs->observer_0_errlog1;
		break;
	case ERR_LOG2:
		offset = offs->observer_0_errlog2;
		break;
	case ERR_LOG3:
		offset = offs->observer_0_errlog3;
		break;
	case ERR_LOG4:
		offset = offs->observer_0_errlog4;
		break;
	case ERR_LOG5:
		offset = offs->observer_0_errlog5;
		break;
	case ERR_LOG6:
		offset = offs->observer_0_errlog6;
		break;
	case ERR_LOG7:
		offset = offs->observer_0_errlog7;
		break;
	case ERR_LOG8:
		offset = offs->observer_0_errlog8;
		break;
	case STALLEN:
		offset = offs->observer_0_stallen;
		break;
	default:
		pr_alert("Invalid observer error register; reg num:%u\n",
			err_log);
	}
	return offset;
}

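/*
 * The main logger's ERRLOG5 depends on the error type logged in ERRLOG0:
 * for type 3 it holds the per-type error bits decoded above, for types 0
 * and 1 it holds the lower 32 bits of the user signal.
 */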
static void decode_gld_errlog5(struct msm_gladiator_data *msm_gld_data)
{
	unsigned int errtype;
	u32 err_reg0, err_reg5;
	struct reg_masks_shift *mask_shifts = msm_gld_data->reg_masks_shifts;

	err_reg0 = readl_relaxed(msm_gld_data->gladiator_virt_base +
			get_gld_offset(ERR_LOG0, msm_gld_data->reg_offs));
	err_reg5 = readl_relaxed(msm_gld_data->gladiator_virt_base +
			get_gld_offset(ERR_LOG5, msm_gld_data->reg_offs));

	errtype = (err_reg0 & mask_shifts->gld_error_type_mask) >>
		mask_shifts->gld_error_type_shift;
	if (errtype == 3)
		decode_gld_logged_error(err_reg5, mask_shifts);
	else if (errtype == 0 || errtype == 1)
		pr_alert("Lower 32-bits of user: %08x\n", err_reg5);
	else
		pr_alert("Error type: Unknown; value:%u\n", errtype);
}

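/*
 * Dump the raw register values first; the captured values are then decoded
 * by the parse_* helpers below.
 */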
static void dump_gld_err_regs(struct msm_gladiator_data *msm_gld_data,
		unsigned int err_buf[MAX_NUM])
{
	unsigned int err_log;
	unsigned int start = FAULTEN;
	unsigned int end = ERR_LOG8;

	if (msm_gld_data->glad_v2 || msm_gld_data->glad_v3) {
		start = FAULTEN;
		end = ERR_LOG8;
	}

	pr_alert("Main log register data:\n");
	for (err_log = start; err_log <= end; err_log++) {
		err_buf[err_log] = readl_relaxed(
				msm_gld_data->gladiator_virt_base +
				get_gld_offset(err_log,
					msm_gld_data->reg_offs));
		pr_alert("%08x ", err_buf[err_log]);
	}
}

static void dump_obsrv_err_regs(struct msm_gladiator_data *msm_gld_data,
		unsigned int err_buf[MAX_NUM])
{
	unsigned int err_log;
	unsigned int start = ID_COREID;
	unsigned int end = STALLEN;

	if (msm_gld_data->glad_v2) {
		start = ID_COREID;
		end = STALLEN;
	} else if (msm_gld_data->glad_v3) {
		start = FAULTEN;
		end = ERR_LOG7;
	}

	pr_alert("Observer log register data:\n");
	for (err_log = start; err_log <= end; err_log++) {
		err_buf[err_log] = readl_relaxed(
				msm_gld_data->gladiator_virt_base +
				get_obs_offset(err_log,
					msm_gld_data->reg_offs));
		pr_alert("%08x ", err_buf[err_log]);
	}
}

static void parse_gld_err_regs(struct msm_gladiator_data *msm_gld_data,
		unsigned int err_buf[MAX_NUM])
{
	unsigned int err_log;

	pr_alert("Main error log register data:\n");
	for (err_log = ERR_LOG0; err_log <= ERR_LOG8; err_log++) {
		/* skip log register 7 as it is reserved */
		if (err_log == ERR_LOG7)
			continue;
		if (err_log == ERR_LOG5) {
			decode_gld_errlog5(msm_gld_data);
			continue;
		}
		decode_gld_errlog(err_buf[err_log], err_log, msm_gld_data);
	}
}

static void parse_obsrv_err_regs(struct msm_gladiator_data *msm_gld_data,
		unsigned int err_buf[MAX_NUM])
{
	unsigned int err_log;

	pr_alert("Observer error log register data:\n");
	if (msm_gld_data->glad_v2) {
		for (err_log = ERR_LOG0; err_log <= STALLEN; err_log++) {
			/* skip log registers 2, 6 and 8 as they are reserved */
			if ((err_log == ERR_LOG2) || (err_log == ERR_LOG6)
					|| (err_log == ERR_LOG8))
				continue;
			decode_obs_errlog(err_buf[err_log], err_log,
					msm_gld_data);
		}
	} else if (msm_gld_data->glad_v3) {
		decode_obs_errlog_v3(err_buf[STALLEN], STALLEN, msm_gld_data);
		for (err_log = ERR_LOG0; err_log <= ERR_LOG7; err_log++) {
			decode_obs_errlog_v3(err_buf[err_log], err_log,
					msm_gld_data);
		}
	}
}

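/*
 * Error interrupt handler: check which logger (main and/or observer 0)
 * reported a valid error, dump and decode its registers, clear the error,
 * and then panic or WARN depending on enable_panic_on_error.
 */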
static irqreturn_t msm_gladiator_isr(int irq, void *dev_id)
{
	unsigned int gld_err_buf[MAX_NUM], obs_err_buf[MAX_NUM];

	struct msm_gladiator_data *msm_gld_data = dev_id;

	/* Check validity */
	bool gld_err_valid = readl_relaxed(msm_gld_data->gladiator_virt_base +
			msm_gld_data->reg_offs->gladiator_errvld);

	bool obsrv_err_valid = readl_relaxed(
			msm_gld_data->gladiator_virt_base +
			msm_gld_data->reg_offs->observer_0_errvld);

	if (!gld_err_valid && !obsrv_err_valid) {
		pr_err("%s Invalid Gladiator error reported, clear it\n",
			__func__);
		/* Clear IRQ */
		clear_gladiator_error(msm_gld_data->gladiator_virt_base,
				msm_gld_data->reg_offs);
		return IRQ_HANDLED;
	}
	pr_alert("Gladiator Error Detected:\n");
	if (gld_err_valid)
		dump_gld_err_regs(msm_gld_data, gld_err_buf);

	if (obsrv_err_valid)
		dump_obsrv_err_regs(msm_gld_data, obs_err_buf);

	if (gld_err_valid)
		parse_gld_err_regs(msm_gld_data, gld_err_buf);

	if (obsrv_err_valid)
		parse_obsrv_err_regs(msm_gld_data, obs_err_buf);

	/* Clear IRQ */
	clear_gladiator_error(msm_gld_data->gladiator_virt_base,
			msm_gld_data->reg_offs);
	if (enable_panic_on_error)
		panic("Gladiator Cache Interconnect Error Detected!\n");
	else
		WARN(1, "Gladiator Cache Interconnect Error Detected\n");

	return IRQ_HANDLED;
}

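/*
 * Illustrative devicetree node (the unit address, reg values, interrupt
 * cells and clock phandle below are placeholders, not taken from this
 * driver; only the compatible string, the "gladiator_base" reg-name, the
 * single interrupt and the optional "atb_clk" clock-name are what the
 * driver actually looks for):
 *
 *	gladiator-erp@9600000 {
 *		compatible = "qcom,msm-gladiator-v2";
 *		reg = <0x9600000 0xe000>;
 *		reg-names = "gladiator_base";
 *		interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
 *		clocks = <&rpmcc QDSS_CLK>;
 *		clock-names = "atb_clk";
 *	};
 */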
static const struct of_device_id gladiator_erp_match_table[] = {
	{ .compatible = "qcom,msm-gladiator-v2" },
	{ .compatible = "qcom,msm-gladiator-v3" },
	{},
};

static int parse_dt_node(struct platform_device *pdev,
		struct msm_gladiator_data *msm_gld_data)
{
	int ret = 0;
	struct resource *res;

	res = platform_get_resource_byname(pdev,
			IORESOURCE_MEM, "gladiator_base");
	if (!res)
		return -ENODEV;
	if (!devm_request_mem_region(&pdev->dev, res->start,
				resource_size(res),
				"msm-gladiator-erp")) {
		dev_err(&pdev->dev, "%s cannot reserve gladiator erp region\n",
			__func__);
		return -ENXIO;
	}
	msm_gld_data->gladiator_virt_base = devm_ioremap(&pdev->dev,
			res->start, resource_size(res));
	if (!msm_gld_data->gladiator_virt_base) {
		dev_err(&pdev->dev, "%s cannot map gladiator register space\n",
			__func__);
		return -ENXIO;
	}
	msm_gld_data->erp_irq = platform_get_irq(pdev, 0);
	if (!msm_gld_data->erp_irq)
		return -ENODEV;

	/* clear existing errors before enabling the interrupt */
	clear_gladiator_error(msm_gld_data->gladiator_virt_base,
			msm_gld_data->reg_offs);
	ret = devm_request_irq(&pdev->dev, msm_gld_data->erp_irq,
			msm_gladiator_isr, IRQF_TRIGGER_HIGH,
			"gladiator-error", msm_gld_data);
	if (ret)
		dev_err(&pdev->dev, "Failed to register irq handler\n");

	return ret;
}

static inline void gladiator_irq_init(void __iomem *gladiator_virt_base,
		struct reg_off *offs)
{
	writel_relaxed(1, gladiator_virt_base + offs->gladiator_faulten);
	writel_relaxed(1, gladiator_virt_base + offs->observer_0_faulten);
}

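/*
 * Re-arm both error loggers when the CCI-level cluster power domain comes
 * back from power collapse (CPU_CLUSTER_PM_EXIT at CCI_LEVEL), since the
 * fault-enable bits may not be retained across the collapse.
 */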
#define CCI_LEVEL 2
static int gladiator_erp_pm_callback(struct notifier_block *nb,
		unsigned long val, void *data)
{
	unsigned int level = (unsigned long) data;
	struct msm_gladiator_data *msm_gld_data = container_of(nb,
			struct msm_gladiator_data, pm_notifier_block);

	if (level != CCI_LEVEL)
		return NOTIFY_DONE;

	switch (val) {
	case CPU_CLUSTER_PM_EXIT:
		gladiator_irq_init(msm_gld_data->gladiator_virt_base,
				msm_gld_data->reg_offs);
		break;
	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

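/*
 * Register offsets and bit-field masks/shifts for the v2 interconnect
 * ("qcom,msm-gladiator-v2").
 */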
static void init_offsets_and_masks_v2(struct msm_gladiator_data *msm_gld_data)
{
	msm_gld_data->reg_offs->gladiator_id_coreid = 0x0;
	msm_gld_data->reg_offs->gladiator_id_revisionid = 0x4;
	msm_gld_data->reg_offs->gladiator_faulten = 0x1010;
	msm_gld_data->reg_offs->gladiator_errvld = 0x1014;
	msm_gld_data->reg_offs->gladiator_errclr = 0x1018;
	msm_gld_data->reg_offs->gladiator_errlog0 = 0x101C;
	msm_gld_data->reg_offs->gladiator_errlog1 = 0x1020;
	msm_gld_data->reg_offs->gladiator_errlog2 = 0x1024;
	msm_gld_data->reg_offs->gladiator_errlog3 = 0x1028;
	msm_gld_data->reg_offs->gladiator_errlog4 = 0x102C;
	msm_gld_data->reg_offs->gladiator_errlog5 = 0x1030;
	msm_gld_data->reg_offs->gladiator_errlog6 = 0x1034;
	msm_gld_data->reg_offs->gladiator_errlog7 = 0x1038;
	msm_gld_data->reg_offs->gladiator_errlog8 = 0x103C;
	msm_gld_data->reg_offs->observer_0_id_coreid = 0x8000;
	msm_gld_data->reg_offs->observer_0_id_revisionid = 0x8004;
	msm_gld_data->reg_offs->observer_0_faulten = 0x8008;
	msm_gld_data->reg_offs->observer_0_errvld = 0x800C;
	msm_gld_data->reg_offs->observer_0_errclr = 0x8010;
	msm_gld_data->reg_offs->observer_0_errlog0 = 0x8014;
	msm_gld_data->reg_offs->observer_0_errlog1 = 0x8018;
	msm_gld_data->reg_offs->observer_0_errlog2 = 0x801C;
	msm_gld_data->reg_offs->observer_0_errlog3 = 0x8020;
	msm_gld_data->reg_offs->observer_0_errlog4 = 0x8024;
	msm_gld_data->reg_offs->observer_0_errlog5 = 0x8028;
	msm_gld_data->reg_offs->observer_0_errlog6 = 0x802C;
	msm_gld_data->reg_offs->observer_0_errlog7 = 0x8030;
	msm_gld_data->reg_offs->observer_0_errlog8 = 0x8034;
	msm_gld_data->reg_offs->observer_0_stallen = 0x8038;

	msm_gld_data->reg_masks_shifts->gld_trans_opcode_mask = 0xE;
	msm_gld_data->reg_masks_shifts->gld_trans_opcode_shift = 1;
	msm_gld_data->reg_masks_shifts->gld_error_type_mask = 0x700;
	msm_gld_data->reg_masks_shifts->gld_error_type_shift = 8;
	msm_gld_data->reg_masks_shifts->gld_len1_mask = 0xFFF;
	msm_gld_data->reg_masks_shifts->gld_len1_shift = 16;
	msm_gld_data->reg_masks_shifts->gld_trans_sourceid_mask = 0x7;
	msm_gld_data->reg_masks_shifts->gld_trans_sourceid_shift = 0;
	msm_gld_data->reg_masks_shifts->gld_trans_targetid_mask = 0x7;
	msm_gld_data->reg_masks_shifts->gld_trans_targetid_shift = 0;
	msm_gld_data->reg_masks_shifts->gld_errlog_error = 0x7;
	msm_gld_data->reg_masks_shifts->gld_errlog5_error_type_mask =
		0xFF000000;
	msm_gld_data->reg_masks_shifts->gld_errlog5_error_type_shift = 24;
	msm_gld_data->reg_masks_shifts->gld_ace_port_parity_mask = 0xc000;
	msm_gld_data->reg_masks_shifts->gld_ace_port_parity_shift = 14;
	msm_gld_data->reg_masks_shifts->gld_ace_port_disconnect_mask = 0xf0000;
	msm_gld_data->reg_masks_shifts->gld_ace_port_disconnect_shift = 16;
	msm_gld_data->reg_masks_shifts->gld_ace_port_directory_mask = 0xf00000;
	msm_gld_data->reg_masks_shifts->gld_ace_port_directory_shift = 20;
	msm_gld_data->reg_masks_shifts->gld_index_parity_mask = 0x1FFF;
	msm_gld_data->reg_masks_shifts->gld_index_parity_shift = 0;
	msm_gld_data->reg_masks_shifts->obs_trans_opcode_mask = 0x1E;
	msm_gld_data->reg_masks_shifts->obs_trans_opcode_shift = 1;
	msm_gld_data->reg_masks_shifts->obs_error_type_mask = 0x700;
	msm_gld_data->reg_masks_shifts->obs_error_type_shift = 8;
	msm_gld_data->reg_masks_shifts->obs_len1_mask = 0x7F0;
	msm_gld_data->reg_masks_shifts->obs_len1_shift = 16;
}

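/*
 * Register offsets and bit-field masks/shifts for the v3 interconnect
 * ("qcom,msm-gladiator-v3"); observer registers that v3 does not provide
 * are marked INVALID_NUM.
 */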
static void init_offsets_and_masks_v3(struct msm_gladiator_data *msm_gld_data)
{
	msm_gld_data->reg_offs->gladiator_id_coreid = 0x0;
	msm_gld_data->reg_offs->gladiator_id_revisionid = 0x4;
	msm_gld_data->reg_offs->gladiator_faulten = 0x1010;
	msm_gld_data->reg_offs->gladiator_errvld = 0x1014;
	msm_gld_data->reg_offs->gladiator_errclr = 0x1018;
	msm_gld_data->reg_offs->gladiator_errlog0 = 0x101C;
	msm_gld_data->reg_offs->gladiator_errlog1 = 0x1020;
	msm_gld_data->reg_offs->gladiator_errlog2 = 0x1024;
	msm_gld_data->reg_offs->gladiator_errlog3 = 0x1028;
	msm_gld_data->reg_offs->gladiator_errlog4 = 0x102C;
	msm_gld_data->reg_offs->gladiator_errlog5 = 0x1030;
	msm_gld_data->reg_offs->gladiator_errlog6 = 0x1034;
	msm_gld_data->reg_offs->gladiator_errlog7 = 0x1038;
	msm_gld_data->reg_offs->gladiator_errlog8 = 0x103C;
	msm_gld_data->reg_offs->observer_0_id_coreid = INVALID_NUM;
	msm_gld_data->reg_offs->observer_0_id_revisionid = INVALID_NUM;
	msm_gld_data->reg_offs->observer_0_faulten = 0x2008;
	msm_gld_data->reg_offs->observer_0_errvld = 0x2010;
	msm_gld_data->reg_offs->observer_0_errclr = 0x2018;
	msm_gld_data->reg_offs->observer_0_errlog0 = 0x2020;
	msm_gld_data->reg_offs->observer_0_errlog1 = 0x2024;
	msm_gld_data->reg_offs->observer_0_errlog2 = 0x2028;
	msm_gld_data->reg_offs->observer_0_errlog3 = 0x202C;
	msm_gld_data->reg_offs->observer_0_errlog4 = 0x2030;
	msm_gld_data->reg_offs->observer_0_errlog5 = 0x2034;
	msm_gld_data->reg_offs->observer_0_errlog6 = 0x2038;
	msm_gld_data->reg_offs->observer_0_errlog7 = 0x203C;
	msm_gld_data->reg_offs->observer_0_errlog8 = INVALID_NUM;
	msm_gld_data->reg_offs->observer_0_stallen = INVALID_NUM;

	msm_gld_data->reg_masks_shifts->gld_trans_opcode_mask = 0xE;
	msm_gld_data->reg_masks_shifts->gld_trans_opcode_shift = 1;
	msm_gld_data->reg_masks_shifts->gld_error_type_mask = 0x700;
	msm_gld_data->reg_masks_shifts->gld_error_type_shift = 8;
	msm_gld_data->reg_masks_shifts->gld_len1_mask = 0xFFF0000;
	msm_gld_data->reg_masks_shifts->gld_len1_shift = 16;
	msm_gld_data->reg_masks_shifts->gld_trans_sourceid_mask = 0x7;
	msm_gld_data->reg_masks_shifts->gld_trans_sourceid_shift = 0;
	msm_gld_data->reg_masks_shifts->gld_trans_targetid_mask = 0x7;
	msm_gld_data->reg_masks_shifts->gld_trans_targetid_shift = 0;
	msm_gld_data->reg_masks_shifts->gld_errlog_error = 0x7;
	msm_gld_data->reg_masks_shifts->gld_errlog5_error_type_mask =
		0xFF000000;
	msm_gld_data->reg_masks_shifts->gld_errlog5_error_type_shift = 24;
	msm_gld_data->reg_masks_shifts->gld_ace_port_parity_mask = 0xc000;
	msm_gld_data->reg_masks_shifts->gld_ace_port_parity_shift = 14;
	msm_gld_data->reg_masks_shifts->gld_ace_port_disconnect_mask = 0xf0000;
	msm_gld_data->reg_masks_shifts->gld_ace_port_disconnect_shift = 16;
	msm_gld_data->reg_masks_shifts->gld_ace_port_directory_mask = 0xf00000;
	msm_gld_data->reg_masks_shifts->gld_ace_port_directory_shift = 20;
	msm_gld_data->reg_masks_shifts->gld_index_parity_mask = 0x1FFF;
	msm_gld_data->reg_masks_shifts->gld_index_parity_shift = 0;
	msm_gld_data->reg_masks_shifts->obs_trans_opcode_mask = 0x70;
	msm_gld_data->reg_masks_shifts->obs_trans_opcode_shift = 4;
	msm_gld_data->reg_masks_shifts->obs_error_type_mask = 0x700;
	msm_gld_data->reg_masks_shifts->obs_error_type_shift = 8;
	msm_gld_data->reg_masks_shifts->obs_len1_mask = 0x1FF;
	msm_gld_data->reg_masks_shifts->obs_len1_shift = 0;
}

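/*
 * Probe: select the v2 or v3 register layout from the compatible string,
 * enable the QDSS ATB clock on v2, map the "gladiator_base" registers,
 * clear any stale error, request the error IRQ and register the CPU PM
 * notifier that re-arms the loggers after cluster power collapse.
 */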
static int gladiator_erp_probe(struct platform_device *pdev)
{
	int ret = -1;
	struct msm_gladiator_data *msm_gld_data;

	msm_gld_data = devm_kzalloc(&pdev->dev,
			sizeof(struct msm_gladiator_data), GFP_KERNEL);
	if (!msm_gld_data) {
		ret = -ENOMEM;
		goto bail;
	}

	msm_gld_data->reg_offs = devm_kzalloc(&pdev->dev,
			sizeof(struct reg_off), GFP_KERNEL);
	msm_gld_data->reg_masks_shifts = devm_kzalloc(&pdev->dev,
			sizeof(struct reg_masks_shift), GFP_KERNEL);

	if (!msm_gld_data->reg_offs || !msm_gld_data->reg_masks_shifts) {
		ret = -ENOMEM;
		goto bail;
	}

	msm_gld_data->glad_v2 = of_device_is_compatible(pdev->dev.of_node,
			"qcom,msm-gladiator-v2");
	msm_gld_data->glad_v3 = of_device_is_compatible(pdev->dev.of_node,
			"qcom,msm-gladiator-v3");

	if (msm_gld_data->glad_v2)
		init_offsets_and_masks_v2(msm_gld_data);
	else if (msm_gld_data->glad_v3)
		init_offsets_and_masks_v3(msm_gld_data);

	if (msm_gld_data->glad_v2) {
		if (of_property_match_string(pdev->dev.of_node,
					"clock-names", "atb_clk") >= 0) {
			msm_gld_data->qdss_clk = devm_clk_get(&pdev->dev,
					"atb_clk");
			if (IS_ERR(msm_gld_data->qdss_clk)) {
				dev_err(&pdev->dev, "Failed to get QDSS ATB clock\n");
				goto bail;
			}
		} else {
			dev_err(&pdev->dev, "No matching string of QDSS ATB clock\n");
			goto bail;
		}

		ret = clk_prepare_enable(msm_gld_data->qdss_clk);
		if (ret)
			goto err_atb_clk;
	}

	ret = parse_dt_node(pdev, msm_gld_data);
	if (ret)
		goto bail;
	msm_gld_data->pm_notifier_block.notifier_call =
		gladiator_erp_pm_callback;

	gladiator_irq_init(msm_gld_data->gladiator_virt_base,
			msm_gld_data->reg_offs);
	platform_set_drvdata(pdev, msm_gld_data);
	cpu_pm_register_notifier(&msm_gld_data->pm_notifier_block);
#ifdef CONFIG_PANIC_ON_GLADIATOR_ERROR
	enable_panic_on_error = 1;
#endif
	dev_info(&pdev->dev, "MSM Gladiator Error Reporting Initialized\n");
	return ret;

err_atb_clk:
	clk_disable_unprepare(msm_gld_data->qdss_clk);

bail:
	dev_err(&pdev->dev, "Probe failed bailing out\n");
	return ret;
}

static int gladiator_erp_remove(struct platform_device *pdev)
{
	struct msm_gladiator_data *msm_gld_data = platform_get_drvdata(pdev);

	platform_set_drvdata(pdev, NULL);
	cpu_pm_unregister_notifier(&msm_gld_data->pm_notifier_block);
	clk_disable_unprepare(msm_gld_data->qdss_clk);
	return 0;
}

static struct platform_driver gladiator_erp_driver = {
	.probe = gladiator_erp_probe,
	.remove = gladiator_erp_remove,
	.driver = {
		.name = MODULE_NAME,
		.owner = THIS_MODULE,
		.of_match_table = gladiator_erp_match_table,
	},
};

static int __init init_gladiator_erp(void)
{
	int ret;

	ret = scm_is_secure_device();
	if (ret == 0) {
		pr_info("Gladiator Error Reporting not available\n");
		return -ENODEV;
	}

	return platform_driver_register(&gladiator_erp_driver);
}
module_init(init_gladiator_erp);

static void __exit exit_gladiator_erp(void)
{
	platform_driver_unregister(&gladiator_erp_driver);
}
module_exit(exit_gladiator_erp);

MODULE_DESCRIPTION("Gladiator Error Reporting");
MODULE_LICENSE("GPL v2");