/*
 * Single-step support.
 *
 * Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/prefetch.h>
#include <asm/sstep.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/cputable.h>

extern char system_call_common[];

#ifdef CONFIG_PPC64
/* Bits in SRR1 that are copied from MSR */
#define MSR_MASK	0xffffffff87c0ffffUL
#else
#define MSR_MASK	0x87c0ffff
#endif

/* Bits in XER */
#define XER_SO		0x80000000U
#define XER_OV		0x40000000U
#define XER_CA		0x20000000U

#ifdef CONFIG_PPC_FPU
/*
 * Functions in ldstfp.S
 */
extern int do_lfs(int rn, unsigned long ea);
extern int do_lfd(int rn, unsigned long ea);
extern int do_stfs(int rn, unsigned long ea);
extern int do_stfd(int rn, unsigned long ea);
extern int do_lvx(int rn, unsigned long ea);
extern int do_stvx(int rn, unsigned long ea);
extern int do_lxvd2x(int rn, unsigned long ea);
extern int do_stxvd2x(int rn, unsigned long ea);
#endif

/*
 * Emulate the truncation of 64 bit values in 32-bit mode.
 */
static unsigned long truncate_if_32bit(unsigned long msr, unsigned long val)
{
#ifdef __powerpc64__
	if ((msr & MSR_64BIT) == 0)
		val &= 0xffffffffUL;
#endif
	return val;
}

/*
 * Determine whether a conditional branch instruction would branch.
 */
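/*
 * BO bit 0x04 clear means "decrement CTR and test it"; BO bit 0x10
 * clear means "test CR bit BI against BO bit 0x08".  For example,
 * bc 12,2,target (BO=12, BI=2) ignores the CTR and branches iff
 * CR0[EQ] is set.
 */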
static int __kprobes branch_taken(unsigned int instr, struct pt_regs *regs)
{
	unsigned int bo = (instr >> 21) & 0x1f;
	unsigned int bi;

	if ((bo & 4) == 0) {
		/* decrement counter */
		--regs->ctr;
		if (((bo >> 1) & 1) ^ (regs->ctr == 0))
			return 0;
	}
	if ((bo & 0x10) == 0) {
		/* check bit from CR */
		bi = (instr >> 16) & 0x1f;
		if (((regs->ccr >> (31 - bi)) & 1) != ((bo >> 3) & 1))
			return 0;
	}
	return 1;
}

static long __kprobes address_ok(struct pt_regs *regs, unsigned long ea, int nb)
{
	if (!user_mode(regs))
		return 1;
	return __access_ok(ea, nb, USER_DS);
}

/*
 * Calculate effective address for a D-form instruction
 */
static unsigned long __kprobes dform_ea(unsigned int instr, struct pt_regs *regs)
{
	int ra;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	ea = (signed short) instr;		/* sign-extend */
	if (ra) {
		ea += regs->gpr[ra];
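		/*
		 * Update forms have the low bit of the primary opcode
		 * set (e.g. lwzu is 33 where lwz is 32), which is
		 * instruction bit 0x04000000.
		 */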
		if (instr & 0x04000000)		/* update forms */
			regs->gpr[ra] = ea;
	}

	return truncate_if_32bit(regs->msr, ea);
}

#ifdef __powerpc64__
/*
 * Calculate effective address for a DS-form instruction
 */
static unsigned long __kprobes dsform_ea(unsigned int instr, struct pt_regs *regs)
{
	int ra;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	ea = (signed short) (instr & ~3);	/* sign-extend */
	if (ra) {
		ea += regs->gpr[ra];
		if ((instr & 3) == 1)		/* update forms */
			regs->gpr[ra] = ea;
	}

	return truncate_if_32bit(regs->msr, ea);
}
#endif /* __powerpc64__ */

/*
 * Calculate effective address for an X-form instruction
 */
static unsigned long __kprobes xform_ea(unsigned int instr, struct pt_regs *regs,
					int do_update)
{
	int ra, rb;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	rb = (instr >> 11) & 0x1f;
	ea = regs->gpr[rb];
	if (ra) {
		ea += regs->gpr[ra];
		if (do_update)		/* update forms */
			regs->gpr[ra] = ea;
	}

	return truncate_if_32bit(regs->msr, ea);
}

/*
 * Return the largest power of 2, not greater than sizeof(unsigned long),
 * such that x is a multiple of it.
 */
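/*
 * Example: max_align(6) == 2 and max_align(24) == 8 on 64-bit.
 * OR-ing in sizeof(unsigned long) bounds the result at the word size
 * (and handles x == 0); x & -x then isolates the lowest set bit.
 */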
static inline unsigned long max_align(unsigned long x)
{
	x |= sizeof(unsigned long);
	return x & -x;		/* isolates rightmost bit */
}

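/*
 * Byte-swap helpers used when emulating the byte-reversed loads and
 * stores (lhbrx, lwbrx, ldbrx, sthbrx, stwbrx, stdbrx).
 */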
static inline unsigned long byterev_2(unsigned long x)
{
	return ((x >> 8) & 0xff) | ((x & 0xff) << 8);
}

static inline unsigned long byterev_4(unsigned long x)
{
	return ((x >> 24) & 0xff) | ((x >> 8) & 0xff00) |
		((x & 0xff00) << 8) | ((x & 0xff) << 24);
}

#ifdef __powerpc64__
static inline unsigned long byterev_8(unsigned long x)
{
	return (byterev_4(x) << 32) | byterev_4(x >> 32);
}
#endif

static int __kprobes read_mem_aligned(unsigned long *dest, unsigned long ea,
				      int nb)
{
	int err = 0;
	unsigned long x = 0;

	switch (nb) {
	case 1:
		err = __get_user(x, (unsigned char __user *) ea);
		break;
	case 2:
		err = __get_user(x, (unsigned short __user *) ea);
		break;
	case 4:
		err = __get_user(x, (unsigned int __user *) ea);
		break;
#ifdef __powerpc64__
	case 8:
		err = __get_user(x, (unsigned long __user *) ea);
		break;
#endif
	}
	if (!err)
		*dest = x;
	return err;
}

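/*
 * Example: a 4-byte read at an address ending in 0b10 becomes two
 * aligned 2-byte reads, accumulated big-endian into the result.
 */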
static int __kprobes read_mem_unaligned(unsigned long *dest, unsigned long ea,
					int nb, struct pt_regs *regs)
{
	int err;
	unsigned long x, b, c;

	/* unaligned, do this in pieces */
	x = 0;
	for (; nb > 0; nb -= c) {
		c = max_align(ea);
		if (c > nb)
			c = max_align(nb);
		err = read_mem_aligned(&b, ea, c);
		if (err)
			return err;
		x = (x << (8 * c)) + b;
		ea += c;
	}
	*dest = x;
	return 0;
}

/*
 * Read memory at address ea for nb bytes, return 0 for success
 * or -EFAULT if an error occurred.
 */
static int __kprobes read_mem(unsigned long *dest, unsigned long ea, int nb,
			      struct pt_regs *regs)
{
	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & (nb - 1)) == 0)
		return read_mem_aligned(dest, ea, nb);
	return read_mem_unaligned(dest, ea, nb, regs);
}

static int __kprobes write_mem_aligned(unsigned long val, unsigned long ea,
				       int nb)
{
	int err = 0;

	switch (nb) {
	case 1:
		err = __put_user(val, (unsigned char __user *) ea);
		break;
	case 2:
		err = __put_user(val, (unsigned short __user *) ea);
		break;
	case 4:
		err = __put_user(val, (unsigned int __user *) ea);
		break;
#ifdef __powerpc64__
	case 8:
		err = __put_user(val, (unsigned long __user *) ea);
		break;
#endif
	}
	return err;
}

static int __kprobes write_mem_unaligned(unsigned long val, unsigned long ea,
					 int nb, struct pt_regs *regs)
{
	int err;
	unsigned long c;

	/* unaligned or little-endian, do this in pieces */
	for (; nb > 0; nb -= c) {
		c = max_align(ea);
		if (c > nb)
			c = max_align(nb);
		err = write_mem_aligned(val >> (nb - c) * 8, ea, c);
		if (err)
			return err;
		ea += c;
	}
	return 0;
}

/*
 * Write memory at address ea for nb bytes, return 0 for success
 * or -EFAULT if an error occurred.
 */
static int __kprobes write_mem(unsigned long val, unsigned long ea, int nb,
			       struct pt_regs *regs)
{
	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & (nb - 1)) == 0)
		return write_mem_aligned(val, ea, nb);
	return write_mem_unaligned(val, ea, nb, regs);
}

#ifdef CONFIG_PPC_FPU
/*
 * Check the address and alignment, and call func to do the actual
 * load or store.
 */
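/*
 * For unaligned accesses the value is staged in val[] and the asm
 * helper in ldstfp.S is handed a kernel pointer instead of the user
 * address; ptr is offset so that the bytes sit at the correct end of
 * the big-endian buffer when nb is smaller than a long.
 */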
static int __kprobes do_fp_load(int rn, int (*func)(int, unsigned long),
				unsigned long ea, int nb,
				struct pt_regs *regs)
{
	int err;
	unsigned long val[sizeof(double) / sizeof(long)];
	unsigned long ptr;

	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & 3) == 0)
		return (*func)(rn, ea);
	ptr = (unsigned long) &val[0];
	if (sizeof(unsigned long) == 8 || nb == 4) {
		err = read_mem_unaligned(&val[0], ea, nb, regs);
		ptr += sizeof(unsigned long) - nb;
	} else {
		/* reading a double on 32-bit */
		err = read_mem_unaligned(&val[0], ea, 4, regs);
		if (!err)
			err = read_mem_unaligned(&val[1], ea + 4, 4, regs);
	}
	if (err)
		return err;
	return (*func)(rn, ptr);
}

static int __kprobes do_fp_store(int rn, int (*func)(int, unsigned long),
				 unsigned long ea, int nb,
				 struct pt_regs *regs)
{
	int err;
	unsigned long val[sizeof(double) / sizeof(long)];
	unsigned long ptr;

	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & 3) == 0)
		return (*func)(rn, ea);
	ptr = (unsigned long) &val[0];
	if (sizeof(unsigned long) == 8 || nb == 4) {
		ptr += sizeof(unsigned long) - nb;
		err = (*func)(rn, ptr);
		if (err)
			return err;
		err = write_mem_unaligned(val[0], ea, nb, regs);
	} else {
		/* writing a double on 32-bit */
		err = (*func)(rn, ptr);
		if (err)
			return err;
		err = write_mem_unaligned(val[0], ea, 4, regs);
		if (!err)
			err = write_mem_unaligned(val[1], ea + 4, 4, regs);
	}
	return err;
}
#endif

#ifdef CONFIG_ALTIVEC
/* For Altivec/VMX, no need to worry about alignment */
static int __kprobes do_vec_load(int rn, int (*func)(int, unsigned long),
				 unsigned long ea, struct pt_regs *regs)
{
	if (!address_ok(regs, ea & ~0xfUL, 16))
		return -EFAULT;
	return (*func)(rn, ea);
}

static int __kprobes do_vec_store(int rn, int (*func)(int, unsigned long),
				  unsigned long ea, struct pt_regs *regs)
{
	if (!address_ok(regs, ea & ~0xfUL, 16))
		return -EFAULT;
	return (*func)(rn, ea);
}
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_VSX
static int __kprobes do_vsx_load(int rn, int (*func)(int, unsigned long),
				 unsigned long ea, struct pt_regs *regs)
{
	int err;
	unsigned long val[2];

	if (!address_ok(regs, ea, 16))
		return -EFAULT;
	if ((ea & 3) == 0)
		return (*func)(rn, ea);
	err = read_mem_unaligned(&val[0], ea, 8, regs);
	if (!err)
		err = read_mem_unaligned(&val[1], ea + 8, 8, regs);
	if (!err)
		err = (*func)(rn, (unsigned long) &val[0]);
	return err;
}

static int __kprobes do_vsx_store(int rn, int (*func)(int, unsigned long),
				  unsigned long ea, struct pt_regs *regs)
{
	int err;
	unsigned long val[2];

	if (!address_ok(regs, ea, 16))
		return -EFAULT;
	if ((ea & 3) == 0)
		return (*func)(rn, ea);
	err = (*func)(rn, (unsigned long) &val[0]);
	if (err)
		return err;
	err = write_mem_unaligned(val[0], ea, 8, regs);
	if (!err)
		err = write_mem_unaligned(val[1], ea + 8, 8, regs);
	return err;
}
#endif /* CONFIG_VSX */

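/*
 * These macros mirror __get_user/__put_user: label 1 is the real
 * access, the .fixup entry at label 3 loads -EFAULT into err, and
 * the __ex_table pair makes the fault handler branch from 1b to 3b.
 */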
#define __put_user_asmx(x, addr, err, op, cr)		\
	__asm__ __volatile__(				\
		"1:	" op " %2,0,%3\n"		\
		"	mfcr	%1\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li	%0,%4\n"		\
		"	b	2b\n"			\
		".previous\n"				\
		".section __ex_table,\"a\"\n"		\
			PPC_LONG_ALIGN "\n"		\
			PPC_LONG "1b,3b\n"		\
		".previous"				\
		: "=r" (err), "=r" (cr)			\
		: "r" (x), "r" (addr), "i" (-EFAULT), "0" (err))

#define __get_user_asmx(x, addr, err, op)		\
	__asm__ __volatile__(				\
		"1:	"op" %1,0,%2\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li	%0,%3\n"		\
		"	b	2b\n"			\
		".previous\n"				\
		".section __ex_table,\"a\"\n"		\
			PPC_LONG_ALIGN "\n"		\
			PPC_LONG "1b,3b\n"		\
		".previous"				\
		: "=r" (err), "=r" (x)			\
		: "r" (addr), "i" (-EFAULT), "0" (err))

#define __cacheop_user_asmx(addr, err, op)		\
	__asm__ __volatile__(				\
		"1:	"op" 0,%1\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li	%0,%3\n"		\
		"	b	2b\n"			\
		".previous\n"				\
		".section __ex_table,\"a\"\n"		\
			PPC_LONG_ALIGN "\n"		\
			PPC_LONG "1b,3b\n"		\
		".previous"				\
		: "=r" (err)				\
		: "r" (addr), "i" (-EFAULT), "0" (err))

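/*
 * Set CR field 0 (LT/GT/EQ) from the signed value just written to
 * rd, and copy XER[SO] into CR0[SO], as Rc=1 instructions do.
 */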
static void __kprobes set_cr0(struct pt_regs *regs, int rd)
{
	long val = regs->gpr[rd];

	regs->ccr = (regs->ccr & 0x0fffffff) | ((regs->xer >> 3) & 0x10000000);
#ifdef __powerpc64__
	if (!(regs->msr & MSR_64BIT))
		val = (int) val;
#endif
	if (val < 0)
		regs->ccr |= 0x80000000;
	else if (val > 0)
		regs->ccr |= 0x40000000;
	else
		regs->ccr |= 0x20000000;
}

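/*
 * Unsigned carry out of val1 + val2 + carry_in exists iff the sum
 * wrapped: val < val1, or val == val1 when carry_in is set (val2
 * was all ones).
 */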
static void __kprobes add_with_carry(struct pt_regs *regs, int rd,
				     unsigned long val1, unsigned long val2,
				     unsigned long carry_in)
{
	unsigned long val = val1 + val2;

	if (carry_in)
		++val;
	regs->gpr[rd] = val;
#ifdef __powerpc64__
	if (!(regs->msr & MSR_64BIT)) {
		val = (unsigned int) val;
		val1 = (unsigned int) val1;
	}
#endif
	if (val < val1 || (carry_in && val == val1))
		regs->xer |= XER_CA;
	else
		regs->xer &= ~XER_CA;
}

static void __kprobes do_cmp_signed(struct pt_regs *regs, long v1, long v2,
				    int crfld)
{
	unsigned int crval, shift;

	crval = (regs->xer >> 31) & 1;		/* get SO bit */
	if (v1 < v2)
		crval |= 8;
	else if (v1 > v2)
		crval |= 4;
	else
		crval |= 2;
	shift = (7 - crfld) * 4;
	regs->ccr = (regs->ccr & ~(0xf << shift)) | (crval << shift);
}

static void __kprobes do_cmp_unsigned(struct pt_regs *regs, unsigned long v1,
				      unsigned long v2, int crfld)
{
	unsigned int crval, shift;

	crval = (regs->xer >> 31) & 1;		/* get SO bit */
	if (v1 < v2)
		crval |= 8;
	else if (v1 > v2)
		crval |= 4;
	else
		crval |= 2;
	shift = (7 - crfld) * 4;
	regs->ccr = (regs->ccr & ~(0xf << shift)) | (crval << shift);
}

/*
 * Elements of 32-bit rotate and mask instructions.
 */
#define MASK32(mb, me)		((0xffffffffUL >> (mb)) + \
				 ((signed long)-0x80000000L >> (me)) + ((me) >= (mb)))
#ifdef __powerpc64__
#define MASK64_L(mb)		(~0UL >> (mb))
#define MASK64_R(me)		((signed long)-0x8000000000000000L >> (me))
#define MASK64(mb, me)		(MASK64_L(mb) + MASK64_R(me) + ((me) >= (mb)))
#define DATA32(x)		(((x) & 0xffffffffUL) | (((x) & 0xffffffffUL) << 32))
#else
#define DATA32(x)		(x)
#endif
#define ROTATE(x, n)	((n) ? (((x) << (n)) | ((x) >> (8 * sizeof(long) - (n)))) : (x))
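/*
 * MASK32(mb, me) is the rotate mask with ones from bit mb through
 * bit me in IBM bit order: MASK32(0, 31) == 0xffffffff and
 * MASK32(24, 31) == 0xff; me < mb - 1 yields a wrapped mask.
 */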

/*
 * Emulate instructions that cause a transfer of control,
 * loads and stores, and a few other instructions.
 * Returns 1 if the step was emulated, 0 if not,
 * or -1 if the instruction is one that should not be stepped,
 * such as an rfid, or a mtmsrd that would clear MSR_RI.
 */
int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
{
	unsigned int opcode, ra, rb, rd, spr, u;
	unsigned long int imm;
	unsigned long int val, val2;
	unsigned long int ea;
	unsigned int cr, mb, me, sh;
	int err;
	unsigned long old_ra;
	long ival;

	opcode = instr >> 26;
	switch (opcode) {
	case 16:	/* bc */
		imm = (signed short)(instr & 0xfffc);
		if ((instr & 2) == 0)
			imm += regs->nip;
		regs->nip += 4;
		regs->nip = truncate_if_32bit(regs->msr, regs->nip);
		if (instr & 1)
			regs->link = regs->nip;
		if (branch_taken(instr, regs))
			regs->nip = imm;
		return 1;
#ifdef CONFIG_PPC64
	case 17:	/* sc */
		/*
		 * N.B. this uses knowledge about how the syscall
		 * entry code works.  If that is changed, this will
		 * need to be changed also.
		 */
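		/*
		 * r0 == 0x1ebe requests the special "switch endian"
		 * service: just flip MSR_LE instead of entering the
		 * kernel.
		 */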
		if (regs->gpr[0] == 0x1ebe &&
		    cpu_has_feature(CPU_FTR_REAL_LE)) {
			regs->msr ^= MSR_LE;
			goto instr_done;
		}
		regs->gpr[9] = regs->gpr[13];
		regs->gpr[10] = MSR_KERNEL;
		regs->gpr[11] = regs->nip + 4;
		regs->gpr[12] = regs->msr & MSR_MASK;
		regs->gpr[13] = (unsigned long) get_paca();
		regs->nip = (unsigned long) &system_call_common;
		regs->msr = MSR_KERNEL;
		return 1;
#endif
	case 18:	/* b */
		imm = instr & 0x03fffffc;
		if (imm & 0x02000000)
			imm -= 0x04000000;
		if ((instr & 2) == 0)
			imm += regs->nip;
		if (instr & 1)
			regs->link = truncate_if_32bit(regs->msr, regs->nip + 4);
		imm = truncate_if_32bit(regs->msr, imm);
		regs->nip = imm;
		return 1;
	case 19:
		switch ((instr >> 1) & 0x3ff) {
		case 16:	/* bclr */
		case 528:	/* bcctr */
			imm = (instr & 0x400)? regs->ctr: regs->link;
			regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4);
			imm = truncate_if_32bit(regs->msr, imm);
			if (instr & 1)
				regs->link = regs->nip;
			if (branch_taken(instr, regs))
				regs->nip = imm;
			return 1;

		case 18:	/* rfid, scary */
			return -1;

		case 150:	/* isync */
			isync();
			goto instr_done;

		case 33:	/* crnor */
		case 129:	/* crandc */
		case 193:	/* crxor */
		case 225:	/* crnand */
		case 257:	/* crand */
		case 289:	/* creqv */
		case 417:	/* crorc */
		case 449:	/* cror */
			ra = (instr >> 16) & 0x1f;
			rb = (instr >> 11) & 0x1f;
			rd = (instr >> 21) & 0x1f;
			ra = (regs->ccr >> (31 - ra)) & 1;
			rb = (regs->ccr >> (31 - rb)) & 1;
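			/*
			 * Bits 6-9 of the instruction hold the op's
			 * four truth-table outputs; index them with
			 * the two source CR bits.
			 */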
			val = (instr >> (6 + ra * 2 + rb)) & 1;
			regs->ccr = (regs->ccr & ~(1UL << (31 - rd))) |
				(val << (31 - rd));
			goto instr_done;
		}
		break;
	case 31:
		switch ((instr >> 1) & 0x3ff) {
		case 598:	/* sync */
#ifdef __powerpc64__
			switch ((instr >> 21) & 3) {
			case 1:		/* lwsync */
				asm volatile("lwsync" : : : "memory");
				goto instr_done;
			case 2:		/* ptesync */
				asm volatile("ptesync" : : : "memory");
				goto instr_done;
			}
#endif
			mb();
			goto instr_done;

		case 854:	/* eieio */
			eieio();
			goto instr_done;
		}
		break;
	}

	/* Following cases refer to regs->gpr[], so we need all regs */
	if (!FULL_REGS(regs))
		return 0;

	rd = (instr >> 21) & 0x1f;
	ra = (instr >> 16) & 0x1f;
	rb = (instr >> 11) & 0x1f;

	switch (opcode) {
	case 7:		/* mulli */
		regs->gpr[rd] = regs->gpr[ra] * (short) instr;
		goto instr_done;

	case 8:		/* subfic */
		imm = (short) instr;
		add_with_carry(regs, rd, ~regs->gpr[ra], imm, 1);
		goto instr_done;

	case 10:	/* cmpli */
		imm = (unsigned short) instr;
		val = regs->gpr[ra];
#ifdef __powerpc64__
		if ((rd & 1) == 0)
			val = (unsigned int) val;
#endif
		do_cmp_unsigned(regs, val, imm, rd >> 2);
		goto instr_done;

	case 11:	/* cmpi */
		imm = (short) instr;
		val = regs->gpr[ra];
#ifdef __powerpc64__
		if ((rd & 1) == 0)
			val = (int) val;
#endif
		do_cmp_signed(regs, val, imm, rd >> 2);
		goto instr_done;

	case 12:	/* addic */
		imm = (short) instr;
		add_with_carry(regs, rd, regs->gpr[ra], imm, 0);
		goto instr_done;

	case 13:	/* addic. */
		imm = (short) instr;
		add_with_carry(regs, rd, regs->gpr[ra], imm, 0);
		set_cr0(regs, rd);
		goto instr_done;

	case 14:	/* addi */
		imm = (short) instr;
		if (ra)
			imm += regs->gpr[ra];
		regs->gpr[rd] = imm;
		goto instr_done;

	case 15:	/* addis */
		imm = ((short) instr) << 16;
		if (ra)
			imm += regs->gpr[ra];
		regs->gpr[rd] = imm;
		goto instr_done;

	case 20:	/* rlwimi */
		mb = (instr >> 6) & 0x1f;
		me = (instr >> 1) & 0x1f;
		val = DATA32(regs->gpr[rd]);
		imm = MASK32(mb, me);
		regs->gpr[ra] = (regs->gpr[ra] & ~imm) | (ROTATE(val, rb) & imm);
		goto logical_done;

	case 21:	/* rlwinm */
		mb = (instr >> 6) & 0x1f;
		me = (instr >> 1) & 0x1f;
		val = DATA32(regs->gpr[rd]);
		regs->gpr[ra] = ROTATE(val, rb) & MASK32(mb, me);
		goto logical_done;
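		/*
		 * e.g. rlwinm ra,rd,8,24,31 above rotates left by 8
		 * and keeps only the low byte: the classic
		 * extract-byte idiom.
		 */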

	case 23:	/* rlwnm */
		mb = (instr >> 6) & 0x1f;
		me = (instr >> 1) & 0x1f;
		rb = regs->gpr[rb] & 0x1f;
		val = DATA32(regs->gpr[rd]);
		regs->gpr[ra] = ROTATE(val, rb) & MASK32(mb, me);
		goto logical_done;

	case 24:	/* ori */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] | imm;
		goto instr_done;

	case 25:	/* oris */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] | (imm << 16);
		goto instr_done;

	case 26:	/* xori */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] ^ imm;
		goto instr_done;

	case 27:	/* xoris */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] ^ (imm << 16);
		goto instr_done;

	case 28:	/* andi. */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] & imm;
		set_cr0(regs, ra);
		goto instr_done;

	case 29:	/* andis. */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] & (imm << 16);
		set_cr0(regs, ra);
		goto instr_done;

#ifdef __powerpc64__
	case 30:	/* rld* */
		mb = ((instr >> 6) & 0x1f) | (instr & 0x20);
		val = regs->gpr[rd];
		if ((instr & 0x10) == 0) {
			sh = rb | ((instr & 2) << 4);
			val = ROTATE(val, sh);
			switch ((instr >> 2) & 3) {
			case 0:		/* rldicl */
				regs->gpr[ra] = val & MASK64_L(mb);
				goto logical_done;
			case 1:		/* rldicr */
				regs->gpr[ra] = val & MASK64_R(mb);
				goto logical_done;
			case 2:		/* rldic */
				regs->gpr[ra] = val & MASK64(mb, 63 - sh);
				goto logical_done;
			case 3:		/* rldimi */
				imm = MASK64(mb, 63 - sh);
				regs->gpr[ra] = (regs->gpr[ra] & ~imm) |
					(val & imm);
				goto logical_done;
			}
		} else {
			sh = regs->gpr[rb] & 0x3f;
			val = ROTATE(val, sh);
			switch ((instr >> 1) & 7) {
			case 0:		/* rldcl */
				regs->gpr[ra] = val & MASK64_L(mb);
				goto logical_done;
			case 1:		/* rldcr */
				regs->gpr[ra] = val & MASK64_R(mb);
				goto logical_done;
			}
		}
		break;
#endif

	case 31:
		switch ((instr >> 1) & 0x3ff) {
		case 83:	/* mfmsr */
			if (regs->msr & MSR_PR)
				break;
			regs->gpr[rd] = regs->msr & MSR_MASK;
			goto instr_done;
		case 146:	/* mtmsr */
			if (regs->msr & MSR_PR)
				break;
			imm = regs->gpr[rd];
			if ((imm & MSR_RI) == 0)
				/* can't step mtmsr that would clear MSR_RI */
				return -1;
			regs->msr = imm;
			goto instr_done;
#ifdef CONFIG_PPC64
		case 178:	/* mtmsrd */
			/* only MSR_EE and MSR_RI get changed if bit 15 set */
			/* mtmsrd doesn't change MSR_HV and MSR_ME */
			if (regs->msr & MSR_PR)
				break;
			imm = (instr & 0x10000)? 0x8002: 0xefffffffffffefffUL;
			imm = (regs->msr & MSR_MASK & ~imm)
				| (regs->gpr[rd] & imm);
			if ((imm & MSR_RI) == 0)
				/* can't step mtmsrd that would clear MSR_RI */
				return -1;
			regs->msr = imm;
			goto instr_done;
#endif
		case 19:	/* mfcr */
			regs->gpr[rd] = regs->ccr;
			regs->gpr[rd] &= 0xffffffffUL;
			goto instr_done;

		case 144:	/* mtcrf */
			imm = 0xf0000000UL;
			val = regs->gpr[rd];
			for (sh = 0; sh < 8; ++sh) {
				if (instr & (0x80000 >> sh))
					regs->ccr = (regs->ccr & ~imm) |
						(val & imm);
				imm >>= 4;
			}
			goto instr_done;

		case 339:	/* mfspr */
			spr = (instr >> 11) & 0x3ff;
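			/*
			 * The SPR number's two 5-bit halves are
			 * swapped in the encoding, so XER (SPR 1)
			 * shows up as 0x20, LR (SPR 8) as 0x100 and
			 * CTR (SPR 9) as 0x120.
			 */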
			switch (spr) {
			case 0x20:	/* mfxer */
				regs->gpr[rd] = regs->xer;
				regs->gpr[rd] &= 0xffffffffUL;
				goto instr_done;
			case 0x100:	/* mflr */
				regs->gpr[rd] = regs->link;
				goto instr_done;
			case 0x120:	/* mfctr */
				regs->gpr[rd] = regs->ctr;
				goto instr_done;
			}
			break;

		case 467:	/* mtspr */
			spr = (instr >> 11) & 0x3ff;
			switch (spr) {
			case 0x20:	/* mtxer */
				regs->xer = (regs->gpr[rd] & 0xffffffffUL);
				goto instr_done;
			case 0x100:	/* mtlr */
				regs->link = regs->gpr[rd];
				goto instr_done;
			case 0x120:	/* mtctr */
				regs->ctr = regs->gpr[rd];
				goto instr_done;
			}
			break;

/*
 * Compare instructions
 */
		case 0:	/* cmp */
			val = regs->gpr[ra];
			val2 = regs->gpr[rb];
#ifdef __powerpc64__
			if ((rd & 1) == 0) {
				/* word (32-bit) compare */
				val = (int) val;
				val2 = (int) val2;
			}
#endif
			do_cmp_signed(regs, val, val2, rd >> 2);
			goto instr_done;

		case 32:	/* cmpl */
			val = regs->gpr[ra];
			val2 = regs->gpr[rb];
#ifdef __powerpc64__
			if ((rd & 1) == 0) {
				/* word (32-bit) compare */
				val = (unsigned int) val;
				val2 = (unsigned int) val2;
			}
#endif
			do_cmp_unsigned(regs, val, val2, rd >> 2);
			goto instr_done;

/*
 * Arithmetic instructions
 */
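/*
 * The subtractions below are computed as ~RA + RB + 1 so that
 * XER[CA] comes out exactly as the hardware sets it.
 */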
		case 8:	/* subfc */
			add_with_carry(regs, rd, ~regs->gpr[ra],
				       regs->gpr[rb], 1);
			goto arith_done;
#ifdef __powerpc64__
		case 9:	/* mulhdu */
			asm("mulhdu %0,%1,%2" : "=r" (regs->gpr[rd]) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			goto arith_done;
#endif
		case 10:	/* addc */
			add_with_carry(regs, rd, regs->gpr[ra],
				       regs->gpr[rb], 0);
			goto arith_done;

		case 11:	/* mulhwu */
			asm("mulhwu %0,%1,%2" : "=r" (regs->gpr[rd]) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			goto arith_done;

		case 40:	/* subf */
			regs->gpr[rd] = regs->gpr[rb] - regs->gpr[ra];
			goto arith_done;
#ifdef __powerpc64__
		case 73:	/* mulhd */
			asm("mulhd %0,%1,%2" : "=r" (regs->gpr[rd]) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			goto arith_done;
#endif
		case 75:	/* mulhw */
			asm("mulhw %0,%1,%2" : "=r" (regs->gpr[rd]) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			goto arith_done;

		case 104:	/* neg */
			regs->gpr[rd] = -regs->gpr[ra];
			goto arith_done;

		case 136:	/* subfe */
			add_with_carry(regs, rd, ~regs->gpr[ra], regs->gpr[rb],
				       regs->xer & XER_CA);
			goto arith_done;

		case 138:	/* adde */
			add_with_carry(regs, rd, regs->gpr[ra], regs->gpr[rb],
				       regs->xer & XER_CA);
			goto arith_done;

		case 200:	/* subfze */
			add_with_carry(regs, rd, ~regs->gpr[ra], 0L,
				       regs->xer & XER_CA);
			goto arith_done;

		case 202:	/* addze */
			add_with_carry(regs, rd, regs->gpr[ra], 0L,
				       regs->xer & XER_CA);
			goto arith_done;

		case 232:	/* subfme */
			add_with_carry(regs, rd, ~regs->gpr[ra], -1L,
				       regs->xer & XER_CA);
			goto arith_done;
#ifdef __powerpc64__
		case 233:	/* mulld */
			regs->gpr[rd] = regs->gpr[ra] * regs->gpr[rb];
			goto arith_done;
#endif
		case 234:	/* addme */
			add_with_carry(regs, rd, regs->gpr[ra], -1L,
				       regs->xer & XER_CA);
			goto arith_done;

		case 235:	/* mullw */
			regs->gpr[rd] = (unsigned int) regs->gpr[ra] *
				(unsigned int) regs->gpr[rb];
			goto arith_done;

		case 266:	/* add */
			regs->gpr[rd] = regs->gpr[ra] + regs->gpr[rb];
			goto arith_done;
#ifdef __powerpc64__
		case 457:	/* divdu */
			regs->gpr[rd] = regs->gpr[ra] / regs->gpr[rb];
			goto arith_done;
#endif
		case 459:	/* divwu */
			regs->gpr[rd] = (unsigned int) regs->gpr[ra] /
				(unsigned int) regs->gpr[rb];
			goto arith_done;
#ifdef __powerpc64__
		case 489:	/* divd */
			regs->gpr[rd] = (long int) regs->gpr[ra] /
				(long int) regs->gpr[rb];
			goto arith_done;
#endif
		case 491:	/* divw */
			regs->gpr[rd] = (int) regs->gpr[ra] /
				(int) regs->gpr[rb];
			goto arith_done;

/*
 * Logical instructions
 */
		case 26:	/* cntlzw */
			asm("cntlzw %0,%1" : "=r" (regs->gpr[ra]) :
			    "r" (regs->gpr[rd]));
			goto logical_done;
#ifdef __powerpc64__
		case 58:	/* cntlzd */
			asm("cntlzd %0,%1" : "=r" (regs->gpr[ra]) :
			    "r" (regs->gpr[rd]));
			goto logical_done;
#endif
		case 28:	/* and */
			regs->gpr[ra] = regs->gpr[rd] & regs->gpr[rb];
			goto logical_done;

		case 60:	/* andc */
			regs->gpr[ra] = regs->gpr[rd] & ~regs->gpr[rb];
			goto logical_done;

		case 124:	/* nor */
			regs->gpr[ra] = ~(regs->gpr[rd] | regs->gpr[rb]);
			goto logical_done;

		case 284:	/* eqv */
			regs->gpr[ra] = ~(regs->gpr[rd] ^ regs->gpr[rb]);
			goto logical_done;

		case 316:	/* xor */
			regs->gpr[ra] = regs->gpr[rd] ^ regs->gpr[rb];
			goto logical_done;

		case 412:	/* orc */
			regs->gpr[ra] = regs->gpr[rd] | ~regs->gpr[rb];
			goto logical_done;

		case 444:	/* or */
			regs->gpr[ra] = regs->gpr[rd] | regs->gpr[rb];
			goto logical_done;

		case 476:	/* nand */
			regs->gpr[ra] = ~(regs->gpr[rd] & regs->gpr[rb]);
			goto logical_done;

		case 922:	/* extsh */
			regs->gpr[ra] = (signed short) regs->gpr[rd];
			goto logical_done;

		case 954:	/* extsb */
			regs->gpr[ra] = (signed char) regs->gpr[rd];
			goto logical_done;
#ifdef __powerpc64__
		case 986:	/* extsw */
			regs->gpr[ra] = (signed int) regs->gpr[rd];
			goto logical_done;
#endif

/*
 * Shift instructions
 */
		case 24:	/* slw */
			sh = regs->gpr[rb] & 0x3f;
			if (sh < 32)
				regs->gpr[ra] = (regs->gpr[rd] << sh) & 0xffffffffUL;
			else
				regs->gpr[ra] = 0;
			goto logical_done;

		case 536:	/* srw */
			sh = regs->gpr[rb] & 0x3f;
			if (sh < 32)
				regs->gpr[ra] = (regs->gpr[rd] & 0xffffffffUL) >> sh;
			else
				regs->gpr[ra] = 0;
			goto logical_done;

		case 792:	/* sraw */
			sh = regs->gpr[rb] & 0x3f;
			ival = (signed int) regs->gpr[rd];
			regs->gpr[ra] = ival >> (sh < 32 ? sh : 31);
			if (ival < 0 && (sh >= 32 || (ival & ((1 << sh) - 1)) != 0))
				regs->xer |= XER_CA;
			else
				regs->xer &= ~XER_CA;
			goto logical_done;
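			/*
			 * For these algebraic shifts, XER[CA] is set
			 * when one bits are shifted out of a negative
			 * value, i.e. when the shift rounded toward
			 * minus infinity.
			 */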

		case 824:	/* srawi */
			sh = rb;
			ival = (signed int) regs->gpr[rd];
			regs->gpr[ra] = ival >> sh;
			if (ival < 0 && (ival & ((1 << sh) - 1)) != 0)
				regs->xer |= XER_CA;
			else
				regs->xer &= ~XER_CA;
			goto logical_done;

#ifdef __powerpc64__
		case 27:	/* sld */
			sh = regs->gpr[rb] & 0x7f;
			if (sh < 64)
				regs->gpr[ra] = regs->gpr[rd] << sh;
			else
				regs->gpr[ra] = 0;
			goto logical_done;

		case 539:	/* srd */
			sh = regs->gpr[rb] & 0x7f;
			if (sh < 64)
				regs->gpr[ra] = regs->gpr[rd] >> sh;
			else
				regs->gpr[ra] = 0;
			goto logical_done;

		case 794:	/* srad */
			sh = regs->gpr[rb] & 0x7f;
			ival = (signed long int) regs->gpr[rd];
			regs->gpr[ra] = ival >> (sh < 64 ? sh : 63);
			if (ival < 0 && (sh >= 64 || (ival & ((1UL << sh) - 1)) != 0))
				regs->xer |= XER_CA;
			else
				regs->xer &= ~XER_CA;
			goto logical_done;

		case 826:	/* sradi with sh_5 = 0 */
		case 827:	/* sradi with sh_5 = 1 */
			sh = rb | ((instr & 2) << 4);
			ival = (signed long int) regs->gpr[rd];
			regs->gpr[ra] = ival >> sh;
			if (ival < 0 && (ival & ((1UL << sh) - 1)) != 0)
				regs->xer |= XER_CA;
			else
				regs->xer &= ~XER_CA;
			goto logical_done;
#endif /* __powerpc64__ */

/*
 * Cache instructions
 */
		case 54:	/* dcbst */
			ea = xform_ea(instr, regs, 0);
			if (!address_ok(regs, ea, 8))
				return 0;
			err = 0;
			__cacheop_user_asmx(ea, err, "dcbst");
			if (err)
				return 0;
			goto instr_done;

		case 86:	/* dcbf */
			ea = xform_ea(instr, regs, 0);
			if (!address_ok(regs, ea, 8))
				return 0;
			err = 0;
			__cacheop_user_asmx(ea, err, "dcbf");
			if (err)
				return 0;
			goto instr_done;

		case 246:	/* dcbtst */
			if (rd == 0) {
				ea = xform_ea(instr, regs, 0);
				prefetchw((void *) ea);
			}
			goto instr_done;

		case 278:	/* dcbt */
			if (rd == 0) {
				ea = xform_ea(instr, regs, 0);
				prefetch((void *) ea);
			}
			goto instr_done;

		}
		break;
	}

	/*
	 * Following cases are for loads and stores, so bail out
	 * if we're in little-endian mode.
	 */
	if (regs->msr & MSR_LE)
		return 0;

	/*
	 * Save register RA in case it's an update form load or store
	 * and the access faults.
	 */
	old_ra = regs->gpr[ra];

	switch (opcode) {
	case 31:
		u = instr & 0x40;
		switch ((instr >> 1) & 0x3ff) {
		case 20:	/* lwarx */
			ea = xform_ea(instr, regs, 0);
			if (ea & 3)
				break;		/* can't handle misaligned */
			err = -EFAULT;
			if (!address_ok(regs, ea, 4))
				goto ldst_done;
			err = 0;
			__get_user_asmx(val, ea, err, "lwarx");
			if (!err)
				regs->gpr[rd] = val;
			goto ldst_done;

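		/*
		 * The conditional store is executed for real, so the
		 * CR0 captured by mfcr in __put_user_asmx reflects
		 * whether the reservation still held; merge it with
		 * XER[SO] as stwcx. itself would.
		 */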
		case 150:	/* stwcx. */
			ea = xform_ea(instr, regs, 0);
			if (ea & 3)
				break;		/* can't handle misaligned */
			err = -EFAULT;
			if (!address_ok(regs, ea, 4))
				goto ldst_done;
			err = 0;
			__put_user_asmx(regs->gpr[rd], ea, err, "stwcx.", cr);
			if (!err)
				regs->ccr = (regs->ccr & 0x0fffffff) |
					(cr & 0xe0000000) |
					((regs->xer >> 3) & 0x10000000);
			goto ldst_done;

#ifdef __powerpc64__
		case 84:	/* ldarx */
			ea = xform_ea(instr, regs, 0);
			if (ea & 7)
				break;		/* can't handle misaligned */
			err = -EFAULT;
			if (!address_ok(regs, ea, 8))
				goto ldst_done;
			err = 0;
			__get_user_asmx(val, ea, err, "ldarx");
			if (!err)
				regs->gpr[rd] = val;
			goto ldst_done;

		case 214:	/* stdcx. */
			ea = xform_ea(instr, regs, 0);
			if (ea & 7)
				break;		/* can't handle misaligned */
			err = -EFAULT;
			if (!address_ok(regs, ea, 8))
				goto ldst_done;
			err = 0;
			__put_user_asmx(regs->gpr[rd], ea, err, "stdcx.", cr);
			if (!err)
				regs->ccr = (regs->ccr & 0x0fffffff) |
					(cr & 0xe0000000) |
					((regs->xer >> 3) & 0x10000000);
			goto ldst_done;

		case 21:	/* ldx */
		case 53:	/* ldux */
			err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
				       8, regs);
			goto ldst_done;
#endif

		case 23:	/* lwzx */
		case 55:	/* lwzux */
			err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
				       4, regs);
			goto ldst_done;

		case 87:	/* lbzx */
		case 119:	/* lbzux */
			err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
				       1, regs);
			goto ldst_done;

#ifdef CONFIG_ALTIVEC
		case 103:	/* lvx */
		case 359:	/* lvxl */
			if (!(regs->msr & MSR_VEC))
				break;
			ea = xform_ea(instr, regs, 0);
			err = do_vec_load(rd, do_lvx, ea, regs);
			goto ldst_done;

		case 231:	/* stvx */
		case 487:	/* stvxl */
			if (!(regs->msr & MSR_VEC))
				break;
			ea = xform_ea(instr, regs, 0);
			err = do_vec_store(rd, do_stvx, ea, regs);
			goto ldst_done;
#endif /* CONFIG_ALTIVEC */

#ifdef __powerpc64__
		case 149:	/* stdx */
		case 181:	/* stdux */
			val = regs->gpr[rd];
			err = write_mem(val, xform_ea(instr, regs, u), 8, regs);
			goto ldst_done;
#endif

		case 151:	/* stwx */
		case 183:	/* stwux */
			val = regs->gpr[rd];
			err = write_mem(val, xform_ea(instr, regs, u), 4, regs);
			goto ldst_done;

		case 215:	/* stbx */
		case 247:	/* stbux */
			val = regs->gpr[rd];
			err = write_mem(val, xform_ea(instr, regs, u), 1, regs);
			goto ldst_done;

		case 279:	/* lhzx */
		case 311:	/* lhzux */
			err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
				       2, regs);
			goto ldst_done;

#ifdef __powerpc64__
		case 341:	/* lwax */
		case 373:	/* lwaux */
			err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
				       4, regs);
			if (!err)
				regs->gpr[rd] = (signed int) regs->gpr[rd];
			goto ldst_done;
#endif

		case 343:	/* lhax */
		case 375:	/* lhaux */
			err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
				       2, regs);
			if (!err)
				regs->gpr[rd] = (signed short) regs->gpr[rd];
			goto ldst_done;

		case 407:	/* sthx */
		case 439:	/* sthux */
			val = regs->gpr[rd];
			err = write_mem(val, xform_ea(instr, regs, u), 2, regs);
			goto ldst_done;

#ifdef __powerpc64__
		case 532:	/* ldbrx */
			err = read_mem(&val, xform_ea(instr, regs, 0), 8, regs);
			if (!err)
				regs->gpr[rd] = byterev_8(val);
			goto ldst_done;

#endif

		case 534:	/* lwbrx */
			err = read_mem(&val, xform_ea(instr, regs, 0), 4, regs);
			if (!err)
				regs->gpr[rd] = byterev_4(val);
			goto ldst_done;

#ifdef CONFIG_PPC_FPU
		case 535:	/* lfsx */
		case 567:	/* lfsux */
			if (!(regs->msr & MSR_FP))
				break;
			ea = xform_ea(instr, regs, u);
			err = do_fp_load(rd, do_lfs, ea, 4, regs);
			goto ldst_done;

		case 599:	/* lfdx */
		case 631:	/* lfdux */
			if (!(regs->msr & MSR_FP))
				break;
			ea = xform_ea(instr, regs, u);
			err = do_fp_load(rd, do_lfd, ea, 8, regs);
			goto ldst_done;

		case 663:	/* stfsx */
		case 695:	/* stfsux */
			if (!(regs->msr & MSR_FP))
				break;
			ea = xform_ea(instr, regs, u);
			err = do_fp_store(rd, do_stfs, ea, 4, regs);
			goto ldst_done;

		case 727:	/* stfdx */
		case 759:	/* stfdux */
			if (!(regs->msr & MSR_FP))
				break;
			ea = xform_ea(instr, regs, u);
			err = do_fp_store(rd, do_stfd, ea, 8, regs);
			goto ldst_done;
#endif

#ifdef __powerpc64__
		case 660:	/* stdbrx */
			val = byterev_8(regs->gpr[rd]);
			err = write_mem(val, xform_ea(instr, regs, 0), 8, regs);
			goto ldst_done;

#endif
		case 662:	/* stwbrx */
			val = byterev_4(regs->gpr[rd]);
			err = write_mem(val, xform_ea(instr, regs, 0), 4, regs);
			goto ldst_done;

		case 790:	/* lhbrx */
			err = read_mem(&val, xform_ea(instr, regs, 0), 2, regs);
			if (!err)
				regs->gpr[rd] = byterev_2(val);
			goto ldst_done;

		case 918:	/* sthbrx */
			val = byterev_2(regs->gpr[rd]);
			err = write_mem(val, xform_ea(instr, regs, 0), 2, regs);
			goto ldst_done;

#ifdef CONFIG_VSX
		case 844:	/* lxvd2x */
		case 876:	/* lxvd2ux */
			if (!(regs->msr & MSR_VSX))
				break;
			rd |= (instr & 1) << 5;
			ea = xform_ea(instr, regs, u);
			err = do_vsx_load(rd, do_lxvd2x, ea, regs);
			goto ldst_done;

		case 972:	/* stxvd2x */
		case 1004:	/* stxvd2ux */
			if (!(regs->msr & MSR_VSX))
				break;
			rd |= (instr & 1) << 5;
			ea = xform_ea(instr, regs, u);
			err = do_vsx_store(rd, do_stxvd2x, ea, regs);
			goto ldst_done;

#endif /* CONFIG_VSX */
		}
		break;

	case 32:	/* lwz */
	case 33:	/* lwzu */
		err = read_mem(&regs->gpr[rd], dform_ea(instr, regs), 4, regs);
		goto ldst_done;

	case 34:	/* lbz */
	case 35:	/* lbzu */
		err = read_mem(&regs->gpr[rd], dform_ea(instr, regs), 1, regs);
		goto ldst_done;

	case 36:	/* stw */
	case 37:	/* stwu */
		val = regs->gpr[rd];
		err = write_mem(val, dform_ea(instr, regs), 4, regs);
		goto ldst_done;

	case 38:	/* stb */
	case 39:	/* stbu */
		val = regs->gpr[rd];
		err = write_mem(val, dform_ea(instr, regs), 1, regs);
		goto ldst_done;

	case 40:	/* lhz */
	case 41:	/* lhzu */
		err = read_mem(&regs->gpr[rd], dform_ea(instr, regs), 2, regs);
		goto ldst_done;

	case 42:	/* lha */
	case 43:	/* lhau */
		err = read_mem(&regs->gpr[rd], dform_ea(instr, regs), 2, regs);
		if (!err)
			regs->gpr[rd] = (signed short) regs->gpr[rd];
		goto ldst_done;

	case 44:	/* sth */
	case 45:	/* sthu */
		val = regs->gpr[rd];
		err = write_mem(val, dform_ea(instr, regs), 2, regs);
		goto ldst_done;

	case 46:	/* lmw */
		ra = (instr >> 16) & 0x1f;
		if (ra >= rd)
			break;		/* invalid form, ra in range to load */
		ea = dform_ea(instr, regs);
		do {
			err = read_mem(&regs->gpr[rd], ea, 4, regs);
			if (err)
				return 0;
			ea += 4;
		} while (++rd < 32);
		goto instr_done;

	case 47:	/* stmw */
		ea = dform_ea(instr, regs);
		do {
			err = write_mem(regs->gpr[rd], ea, 4, regs);
			if (err)
				return 0;
			ea += 4;
		} while (++rd < 32);
		goto instr_done;

#ifdef CONFIG_PPC_FPU
	case 48:	/* lfs */
	case 49:	/* lfsu */
		if (!(regs->msr & MSR_FP))
			break;
		ea = dform_ea(instr, regs);
		err = do_fp_load(rd, do_lfs, ea, 4, regs);
		goto ldst_done;

	case 50:	/* lfd */
	case 51:	/* lfdu */
		if (!(regs->msr & MSR_FP))
			break;
		ea = dform_ea(instr, regs);
		err = do_fp_load(rd, do_lfd, ea, 8, regs);
		goto ldst_done;

	case 52:	/* stfs */
	case 53:	/* stfsu */
		if (!(regs->msr & MSR_FP))
			break;
		ea = dform_ea(instr, regs);
		err = do_fp_store(rd, do_stfs, ea, 4, regs);
		goto ldst_done;

	case 54:	/* stfd */
	case 55:	/* stfdu */
		if (!(regs->msr & MSR_FP))
			break;
		ea = dform_ea(instr, regs);
		err = do_fp_store(rd, do_stfd, ea, 8, regs);
		goto ldst_done;
#endif

#ifdef __powerpc64__
	case 58:	/* ld[u], lwa */
		switch (instr & 3) {
		case 0:		/* ld */
			err = read_mem(&regs->gpr[rd], dsform_ea(instr, regs),
				       8, regs);
			goto ldst_done;
		case 1:		/* ldu */
			err = read_mem(&regs->gpr[rd], dsform_ea(instr, regs),
				       8, regs);
			goto ldst_done;
		case 2:		/* lwa */
			err = read_mem(&regs->gpr[rd], dsform_ea(instr, regs),
				       4, regs);
			if (!err)
				regs->gpr[rd] = (signed int) regs->gpr[rd];
			goto ldst_done;
		}
		break;

	case 62:	/* std[u] */
		val = regs->gpr[rd];
		switch (instr & 3) {
		case 0:		/* std */
			err = write_mem(val, dsform_ea(instr, regs), 8, regs);
			goto ldst_done;
		case 1:		/* stdu */
			err = write_mem(val, dsform_ea(instr, regs), 8, regs);
			goto ldst_done;
		}
		break;
#endif /* __powerpc64__ */

	}
	err = -EINVAL;

 ldst_done:
	if (err) {
		regs->gpr[ra] = old_ra;
		return 0;	/* invoke DSI if -EFAULT? */
	}
 instr_done:
	regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4);
	return 1;

 logical_done:
	if (instr & 1)
		set_cr0(regs, ra);
	goto instr_done;

 arith_done:
	if (instr & 1)
		set_cr0(regs, rd);
	goto instr_done;
}